Merge branch 'pm-cpufreq'
author Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Mon, 20 Feb 2017 13:23:00 +0000 (14:23 +0100)
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Mon, 20 Feb 2017 13:23:00 +0000 (14:23 +0100)
* pm-cpufreq: (28 commits)
  MAINTAINERS: cpufreq: add bmips-cpufreq.c
  cpufreq: CPPC: add ACPI_PROCESSOR dependency
  cpufreq: make ti-cpufreq explicitly non-modular
  cpufreq: Do not clear real_cpus mask on policy init
  cpufreq: dt: Don't use generic platdev driver for ti-cpufreq platforms
  cpufreq: ti: Add cpufreq driver to determine available OPPs at runtime
  Documentation: dt: add bindings for ti-cpufreq
  cpufreq: qoriq: Don't look at clock implementation details
  cpufreq: qoriq: add ARM64 SoCs support
  cpufreq: brcmstb-avs-cpufreq: remove unnecessary platform_set_drvdata()
  cpufreq: s3c2416: double free on driver init error path
  MIPS: BMIPS: enable CPUfreq
  cpufreq: bmips-cpufreq: CPUfreq driver for Broadcom's BMIPS SoCs
  BMIPS: Enable prerequisites for CPUfreq in MIPS Kconfig.
  MIPS: BMIPS: Update defconfig
  cpufreq: Fix typos in comments
  cpufreq: intel_pstate: Calculate guaranteed performance for HWP
  cpufreq: intel_pstate: Make HWP limits compatible with legacy
  cpufreq: intel_pstate: Lower frequency than expected under no_turbo
  cpufreq: intel_pstate: Operation mode control from sysfs
  ...

363 files changed:
Documentation/media/uapi/cec/cec-func-close.rst
Documentation/media/uapi/cec/cec-func-ioctl.rst
Documentation/media/uapi/cec/cec-func-open.rst
Documentation/media/uapi/cec/cec-func-poll.rst
Documentation/media/uapi/cec/cec-intro.rst
Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst
Documentation/media/uapi/cec/cec-ioc-dqevent.rst
Documentation/media/uapi/cec/cec-ioc-g-mode.rst
Documentation/media/uapi/cec/cec-ioc-receive.rst
MAINTAINERS
Makefile
arch/arc/kernel/unaligned.c
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/imx1.dtsi
arch/arm/boot/dts/imx23.dtsi
arch/arm/boot/dts/imx25.dtsi
arch/arm/boot/dts/imx27.dtsi
arch/arm/boot/dts/imx28.dtsi
arch/arm/boot/dts/imx31.dtsi
arch/arm/boot/dts/imx35.dtsi
arch/arm/boot/dts/imx50.dtsi
arch/arm/boot/dts/imx51.dtsi
arch/arm/boot/dts/imx53.dtsi
arch/arm/boot/dts/imx6dl.dtsi
arch/arm/boot/dts/imx6qdl.dtsi
arch/arm/boot/dts/imx6sl.dtsi
arch/arm/boot/dts/imx6sx.dtsi
arch/arm/boot/dts/imx6ul.dtsi
arch/arm/boot/dts/imx7s.dtsi
arch/arm/boot/dts/orion5x-linkstation-lschl.dts [moved from arch/arm/boot/dts/orion5x-lschl.dts with 98% similarity]
arch/arm/boot/dts/stih407-family.dtsi
arch/arm/configs/ezx_defconfig
arch/arm/configs/imote2_defconfig
arch/arm/kernel/ptrace.c
arch/arm/mach-imx/mmdc.c
arch/arm/mm/fault.c
arch/arm/mm/fault.h
arch/arm64/boot/dts/amlogic/meson-gx.dtsi
arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
arch/arm64/crypto/aes-modes.S
arch/powerpc/Kconfig
arch/powerpc/include/asm/cpu_has_feature.h
arch/powerpc/include/asm/mmu.h
arch/powerpc/include/asm/module.h
arch/powerpc/include/asm/reg.h
arch/powerpc/include/asm/stackprotector.h [deleted file]
arch/powerpc/include/asm/xics.h
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/eeh_driver.c
arch/powerpc/kernel/entry_32.S
arch/powerpc/kernel/module_64.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/mm/fault.c
arch/powerpc/mm/pgtable-radix.c
arch/powerpc/mm/tlb-radix.c
arch/powerpc/platforms/powernv/smp.c
arch/powerpc/sysdev/xics/icp-opal.c
arch/sparc/include/asm/mmu_context_64.h
arch/sparc/kernel/irq_64.c
arch/sparc/kernel/sstate.c
arch/sparc/kernel/traps_64.c
arch/x86/crypto/aesni-intel_glue.c
arch/x86/events/intel/rapl.c
arch/x86/events/intel/uncore.c
arch/x86/include/asm/microcode.h
arch/x86/include/asm/processor.h
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/microcode/amd.c
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/fpu/core.c
arch/x86/kernel/hpet.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/tsc.c
arch/x86/kernel/tsc_sync.c
arch/x86/kvm/x86.c
arch/x86/mm/dump_pagetables.c
arch/x86/platform/efi/efi_64.c
arch/xtensa/kernel/setup.c
block/blk-lib.c
crypto/algapi.c
crypto/algif_aead.c
drivers/acpi/nfit/core.c
drivers/ata/libata-core.c
drivers/ata/sata_mv.c
drivers/base/firmware_class.c
drivers/base/memory.c
drivers/base/power/runtime.c
drivers/bcma/bcma_private.h
drivers/bcma/driver_chipcommon.c
drivers/bcma/driver_mips.c
drivers/char/hw_random/core.c
drivers/crypto/ccp/ccp-dev-v5.c
drivers/crypto/ccp/ccp-dev.h
drivers/crypto/ccp/ccp-dmaengine.c
drivers/crypto/chelsio/chcr_algo.c
drivers/crypto/chelsio/chcr_core.c
drivers/crypto/chelsio/chcr_crypto.h
drivers/crypto/qat/qat_c62x/adf_drv.c
drivers/crypto/qat/qat_common/adf_accel_devices.h
drivers/crypto/qat/qat_common/qat_hal.c
drivers/dma/cppi41.c
drivers/dma/pl330.c
drivers/firmware/efi/libstub/fdt.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_connector.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_internal.c
drivers/gpu/drm/i915/intel_atomic_plane.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dpll_mgr.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fbc.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/nouveau/dispnv04/hw.c
drivers/gpu/drm/nouveau/nouveau_fence.h
drivers/gpu/drm/nouveau/nouveau_led.h
drivers/gpu/drm/nouveau/nouveau_usif.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nv84_fence.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/vc4/vc4_plane.c
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/hid/hid-cp2112.c
drivers/hid/hid-ids.h
drivers/hid/hid-lg.c
drivers/hid/usbhid/hid-quirks.c
drivers/hid/wacom_wac.c
drivers/hv/ring_buffer.c
drivers/i2c/busses/i2c-piix4.c
drivers/iio/adc/palmas_gpadc.c
drivers/iio/health/afe4403.c
drivers/iio/health/afe4404.c
drivers/iio/health/max30100.c
drivers/iio/humidity/dht11.c
drivers/infiniband/sw/rxe/rxe_mr.c
drivers/infiniband/sw/rxe/rxe_resp.c
drivers/input/misc/uinput.c
drivers/input/rmi4/Kconfig
drivers/input/rmi4/rmi_driver.c
drivers/input/touchscreen/wm97xx-core.c
drivers/irqchip/irq-keystone.c
drivers/irqchip/irq-mxs.c
drivers/md/dm-crypt.c
drivers/md/dm-mpath.c
drivers/md/dm-rq.c
drivers/media/cec/cec-adap.c
drivers/mmc/host/mmci.c
drivers/mmc/host/sdhci.c
drivers/net/ethernet/adaptec/starfire.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cadence/macb.h
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
drivers/net/ethernet/cavium/thunder/thunder_xcv.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/mellanox/mlx4/catas.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/intf.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c
drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
drivers/net/hamradio/mkiss.c
drivers/net/hyperv/netvsc.c
drivers/net/loopback.c
drivers/net/macvtap.c
drivers/net/phy/mdio-bcm-iproc.c
drivers/net/phy/micrel.c
drivers/net/phy/phy_device.c
drivers/net/tun.c
drivers/net/usb/catc.c
drivers/net/usb/pegasus.c
drivers/net/usb/rtl8150.c
drivers/net/usb/sierra_net.c
drivers/net/wireless/intel/iwlwifi/iwl-8000.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/tt.c
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
drivers/net/xen-netfront.c
drivers/nvdimm/namespace_devs.c
drivers/nvdimm/pfn_devs.c
drivers/pci/hotplug/pciehp_ctrl.c
drivers/pci/msi.c
drivers/pci/pci.c
drivers/pci/pcie/aspm.c
drivers/pinctrl/berlin/berlin-bg4ct.c
drivers/pinctrl/intel/pinctrl-baytrail.c
drivers/pinctrl/intel/pinctrl-merrifield.c
drivers/pinctrl/sunxi/pinctrl-sunxi.c
drivers/regulator/axp20x-regulator.c
drivers/regulator/fixed.c
drivers/regulator/twl6030-regulator.c
drivers/rtc/Kconfig
drivers/rtc/rtc-jz4740.c
drivers/s390/scsi/zfcp_fsf.c
drivers/scsi/aacraid/comminit.c
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/virtio_scsi.c
drivers/staging/greybus/timesync_platform.c
drivers/staging/lustre/lustre/llite/llite_mmap.c
drivers/target/target_core_device.c
drivers/target/target_core_sbc.c
drivers/target/target_core_transport.c
drivers/target/target_core_xcopy.c
drivers/usb/core/quirks.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_core.h
drivers/usb/serial/option.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/pl2303.h
drivers/usb/serial/qcserial.c
drivers/vfio/vfio_iommu_spapr_tce.c
drivers/vhost/vhost.c
drivers/virtio/virtio_ring.c
fs/btrfs/compression.c
fs/btrfs/ioctl.c
fs/cifs/readdir.c
fs/dax.c
fs/fscache/cookie.c
fs/fscache/netfs.c
fs/fscache/object.c
fs/iomap.c
fs/nfsd/nfs4layouts.c
fs/nfsd/nfs4state.c
fs/nfsd/state.h
fs/proc/page.c
fs/pstore/ram.c
include/asm-generic/export.h
include/drm/drmP.h
include/drm/drm_connector.h
include/linux/buffer_head.h
include/linux/can/core.h
include/linux/cpuhotplug.h
include/linux/cpumask.h
include/linux/export.h
include/linux/fscache-cache.h
include/linux/hyperv.h
include/linux/irq.h
include/linux/log2.h
include/linux/memory_hotplug.h
include/linux/module.h
include/linux/netdevice.h
include/linux/percpu-refcount.h
include/net/cipso_ipv4.h
include/net/ipv6.h
include/net/lwtunnel.h
include/net/sock.h
include/target/target_core_base.h
include/uapi/linux/ethtool.h
include/uapi/linux/seg6.h
include/uapi/rdma/ib_user_verbs.h
init/Kconfig
kernel/cgroup.c
kernel/events/core.c
kernel/irq/irqdomain.c
kernel/module.c
kernel/stacktrace.c
kernel/time/tick-sched.c
kernel/trace/trace_hwlat.c
kernel/trace/trace_kprobe.c
kernel/ucount.c
mm/filemap.c
mm/kasan/report.c
mm/memory_hotplug.c
mm/shmem.c
mm/slub.c
mm/zswap.c
net/can/af_can.c
net/can/af_can.h
net/can/bcm.c
net/can/gw.c
net/can/raw.c
net/core/datagram.c
net/core/dev.c
net/core/ethtool.c
net/dsa/dsa2.c
net/ethernet/eth.c
net/ipv4/cipso_ipv4.c
net/ipv4/igmp.c
net/ipv4/ip_sockglue.c
net/ipv4/ping.c
net/ipv4/tcp.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/exthdrs.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/mcast.c
net/ipv6/seg6_hmac.c
net/ipv6/sit.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/kcm/kcmsock.c
net/l2tp/l2tp_core.h
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/mac80211/fils_aead.c
net/mac80211/mesh.c
net/packet/af_packet.c
net/sched/cls_flower.c
net/sched/cls_matchall.c
net/sctp/socket.c
net/sunrpc/auth_gss/gss_rpc_xdr.c
net/wireless/nl80211.c
scripts/Makefile.build
scripts/genksyms/genksyms.c
scripts/kallsyms.c
scripts/mod/modpost.c
security/selinux/hooks.c
sound/core/seq/seq_memory.c
sound/core/seq/seq_queue.c
sound/pci/hda/patch_hdmi.c
sound/usb/line6/driver.c
tools/objtool/arch/x86/decode.c
tools/perf/builtin-diff.c
tools/perf/ui/hist.c
tools/perf/util/callchain.c
tools/perf/util/callchain.h
tools/perf/util/hist.c
tools/perf/util/hist.h

index 8267c31b317dc9ac8e86007c56de62c0f79ebcba..895d9c2d1c04302f9452350b7fffe3678af1ee61 100644 (file)
@@ -33,11 +33,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 Closes the cec device. Resources associated with the file descriptor are
 freed. The device configuration remain unchanged.
 
index 9e8dbb118d6a3d518391caa1a086f05a545b57c2..7dcfd178fb243c33f6ba23bb331236496018809c 100644 (file)
@@ -39,11 +39,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 The :c:func:`ioctl()` function manipulates cec device parameters. The
 argument ``fd`` must be an open file descriptor.
 
index af3f5b5c24c646dfe61dea9b8afed05a9cb84228..0304388cd15976a7ff00eb46ca9bc9ef5f0a3c94 100644 (file)
@@ -46,11 +46,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To open a cec device applications call :c:func:`open()` with the
 desired device name. The function has no side effects; the device
 configuration remain unchanged.
index cfb73e6027a55734747255df676619e2e55441f7..6a863cfda6e05172be7e60e930fbd0ff885b3ce4 100644 (file)
@@ -39,11 +39,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 With the :c:func:`poll()` function applications can wait for CEC
 events.
 
index 4a19ea5323a97d6ac577b62caf015e6cf2653f63..07ee2b8f89d6a320d66f1a9ed3d20977aa60c34b 100644 (file)
@@ -3,11 +3,6 @@
 Introduction
 ============
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 HDMI connectors provide a single pin for use by the Consumer Electronics
 Control protocol. This protocol allows different devices connected by an
 HDMI cable to communicate. The protocol for CEC version 1.4 is defined
@@ -31,3 +26,15 @@ control just the CEC pin.
 Drivers that support CEC will create a CEC device node (/dev/cecX) to
 give userspace access to the CEC adapter. The
 :ref:`CEC_ADAP_G_CAPS` ioctl will tell userspace what it is allowed to do.
+
+In order to check the support and test it, it is suggested to download
+the `v4l-utils <https://git.linuxtv.org/v4l-utils.git/>`_ package. It
+provides three tools to handle CEC:
+
+- cec-ctl: the Swiss army knife of CEC. Allows you to configure, transmit
+  and monitor CEC messages.
+
+- cec-compliance: does a CEC compliance test of a remote CEC device to
+  determine how compliant the CEC implementation is.
+
+- cec-follower: emulates a CEC follower.
index 2b0ddb14b280e6fddcd8aae1bc67f60560e8a9e4..a0e961f11017c74d9d8ca36fa57124910cd6dbdd 100644 (file)
@@ -29,11 +29,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 All cec devices must support :ref:`ioctl CEC_ADAP_G_CAPS <CEC_ADAP_G_CAPS>`. To query
 device information, applications call the ioctl with a pointer to a
 struct :c:type:`cec_caps`. The driver fills the structure and
index b878637e91b3d8dc78aebf0ccb94823ef6b96ac5..09f09bbe28d4ffb1d5b3291221c4b678e4e38c6e 100644 (file)
@@ -35,11 +35,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To query the current CEC logical addresses, applications call
 :ref:`ioctl CEC_ADAP_G_LOG_ADDRS <CEC_ADAP_G_LOG_ADDRS>` with a pointer to a
 struct :c:type:`cec_log_addrs` where the driver stores the logical addresses.
index 3357deb43c85afb7c577a2fe34127f210903ae73..a3cdc75cec3e3c0754027869cee43ad8ca019260 100644 (file)
@@ -35,11 +35,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To query the current physical address applications call
 :ref:`ioctl CEC_ADAP_G_PHYS_ADDR <CEC_ADAP_G_PHYS_ADDR>` with a pointer to a __u16 where the
 driver stores the physical address.
index e256c6605de7f7b7242a1ad4fbd303d5b5309369..6e589a1fae1704c0eea188c662537d37ea6142b5 100644 (file)
@@ -30,11 +30,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 CEC devices can send asynchronous events. These can be retrieved by
 calling :c:func:`CEC_DQEVENT`. If the file descriptor is in
 non-blocking mode and no event is pending, then it will return -1 and
index 4f5818b9d27724163781713a14241dca38dca0cb..e4ded9df0a84a3be52a4f2a5fb09359e1d4fe285 100644 (file)
@@ -31,11 +31,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 By default any filehandle can use :ref:`CEC_TRANSMIT`, but in order to prevent
 applications from stepping on each others toes it must be possible to
 obtain exclusive access to the CEC adapter. This ioctl sets the
index bdf015b1d1dc0a677b89cdc05b7637687370a496..dc2adb391c0a2d2183e3e00bc90af520e727c6dd 100644 (file)
@@ -34,11 +34,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To receive a CEC message the application has to fill in the
 ``timeout`` field of struct :c:type:`cec_msg` and pass it to
 :ref:`ioctl CEC_RECEIVE <CEC_RECEIVE>`.
index 4d879654798df83a179b4ad1bff1277b85bd63e9..d4ac248ec31b5adbf6b8a301357d453218128592 100644 (file)
@@ -1091,7 +1091,7 @@ F:        arch/arm/boot/dts/aspeed-*
 F:     drivers/*/*aspeed*
 
 ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT
-M:     Nicolas Ferre <nicolas.ferre@atmel.com>
+M:     Nicolas Ferre <nicolas.ferre@microchip.com>
 M:     Alexandre Belloni <alexandre.belloni@free-electrons.com>
 M:     Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1773,7 +1773,7 @@ F:        drivers/soc/renesas/
 F:     include/linux/soc/renesas/
 
 ARM/SOCFPGA ARCHITECTURE
-M:     Dinh Nguyen <dinguyen@opensource.altera.com>
+M:     Dinh Nguyen <dinguyen@kernel.org>
 S:     Maintained
 F:     arch/arm/mach-socfpga/
 F:     arch/arm/boot/dts/socfpga*
@@ -1783,7 +1783,7 @@ W:        http://www.rocketboards.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git
 
 ARM/SOCFPGA CLOCK FRAMEWORK SUPPORT
-M:     Dinh Nguyen <dinguyen@opensource.altera.com>
+M:     Dinh Nguyen <dinguyen@kernel.org>
 S:     Maintained
 F:     drivers/clk/socfpga/
 
@@ -2175,56 +2175,56 @@ F:      include/linux/atm*
 F:     include/uapi/linux/atm*
 
 ATMEL AT91 / AT32 MCI DRIVER
-M:     Ludovic Desroches <ludovic.desroches@atmel.com>
+M:     Ludovic Desroches <ludovic.desroches@microchip.com>
 S:     Maintained
 F:     drivers/mmc/host/atmel-mci.c
 
 ATMEL AT91 SAMA5D2-Compatible Shutdown Controller
-M:     Nicolas Ferre <nicolas.ferre@atmel.com>
+M:     Nicolas Ferre <nicolas.ferre@microchip.com>
 S:     Supported
 F:     drivers/power/reset/at91-sama5d2_shdwc.c
 
 ATMEL SAMA5D2 ADC DRIVER
-M:     Ludovic Desroches <ludovic.desroches@atmel.com>
+M:     Ludovic Desroches <ludovic.desroches@microchip.com>
 L:     linux-iio@vger.kernel.org
 S:     Supported
 F:     drivers/iio/adc/at91-sama5d2_adc.c
 
 ATMEL Audio ALSA driver
-M:     Nicolas Ferre <nicolas.ferre@atmel.com>
+M:     Nicolas Ferre <nicolas.ferre@microchip.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Supported
 F:     sound/soc/atmel
 
 ATMEL XDMA DRIVER
-M:     Ludovic Desroches <ludovic.desroches@atmel.com>
+M:     Ludovic Desroches <ludovic.desroches@microchip.com>
 L:     linux-arm-kernel@lists.infradead.org
 L:     dmaengine@vger.kernel.org
 S:     Supported
 F:     drivers/dma/at_xdmac.c
 
 ATMEL I2C DRIVER
-M:     Ludovic Desroches <ludovic.desroches@atmel.com>
+M:     Ludovic Desroches <ludovic.desroches@microchip.com>
 L:     linux-i2c@vger.kernel.org
 S:     Supported
 F:     drivers/i2c/busses/i2c-at91.c
 
 ATMEL ISI DRIVER
-M:     Ludovic Desroches <ludovic.desroches@atmel.com>
+M:     Ludovic Desroches <ludovic.desroches@microchip.com>
 L:     linux-media@vger.kernel.org
 S:     Supported
 F:     drivers/media/platform/soc_camera/atmel-isi.c
 F:     include/media/atmel-isi.h
 
 ATMEL LCDFB DRIVER
-M:     Nicolas Ferre <nicolas.ferre@atmel.com>
+M:     Nicolas Ferre <nicolas.ferre@microchip.com>
 L:     linux-fbdev@vger.kernel.org
 S:     Maintained
 F:     drivers/video/fbdev/atmel_lcdfb.c
 F:     include/video/atmel_lcdc.h
 
 ATMEL MACB ETHERNET DRIVER
-M:     Nicolas Ferre <nicolas.ferre@atmel.com>
+M:     Nicolas Ferre <nicolas.ferre@microchip.com>
 S:     Supported
 F:     drivers/net/ethernet/cadence/
 
@@ -2236,32 +2236,32 @@ S:      Supported
 F:     drivers/mtd/nand/atmel_nand*
 
 ATMEL SDMMC DRIVER
-M:     Ludovic Desroches <ludovic.desroches@atmel.com>
+M:     Ludovic Desroches <ludovic.desroches@microchip.com>
 L:     linux-mmc@vger.kernel.org
 S:     Supported
 F:     drivers/mmc/host/sdhci-of-at91.c
 
 ATMEL SPI DRIVER
-M:     Nicolas Ferre <nicolas.ferre@atmel.com>
+M:     Nicolas Ferre <nicolas.ferre@microchip.com>
 S:     Supported
 F:     drivers/spi/spi-atmel.*
 
 ATMEL SSC DRIVER
-M:     Nicolas Ferre <nicolas.ferre@atmel.com>
+M:     Nicolas Ferre <nicolas.ferre@microchip.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Supported
 F:     drivers/misc/atmel-ssc.c
 F:     include/linux/atmel-ssc.h
 
 ATMEL Timer Counter (TC) AND CLOCKSOURCE DRIVERS
-M:     Nicolas Ferre <nicolas.ferre@atmel.com>
+M:     Nicolas Ferre <nicolas.ferre@microchip.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Supported
 F:     drivers/misc/atmel_tclib.c
 F:     drivers/clocksource/tcb_clksrc.c
 
 ATMEL USBA UDC DRIVER
-M:     Nicolas Ferre <nicolas.ferre@atmel.com>
+M:     Nicolas Ferre <nicolas.ferre@microchip.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Supported
 F:     drivers/usb/gadget/udc/atmel_usba_udc.*
@@ -9743,7 +9743,7 @@ S:        Maintained
 F:     drivers/pinctrl/pinctrl-at91.*
 
 PIN CONTROLLER - ATMEL AT91 PIO4
-M:     Ludovic Desroches <ludovic.desroches@atmel.com>
+M:     Ludovic Desroches <ludovic.desroches@microchip.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-gpio@vger.kernel.org
 S:     Supported
@@ -10202,7 +10202,6 @@ F:      drivers/media/tuners/qt1010*
 QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
 M:     QCA ath9k Development <ath9k-devel@qca.qualcomm.com>
 L:     linux-wireless@vger.kernel.org
-L:     ath9k-devel@lists.ath9k.org
 W:     http://wireless.kernel.org/en/users/Drivers/ath9k
 S:     Supported
 F:     drivers/net/wireless/ath/ath9k/
@@ -13073,7 +13072,7 @@ F:      drivers/input/serio/userio.c
 F:     include/uapi/linux/userio.h
 
 VIRTIO CONSOLE DRIVER
-M:     Amit Shah <amit.shah@redhat.com>
+M:     Amit Shah <amit@kernel.org>
 L:     virtualization@lists.linux-foundation.org
 S:     Maintained
 F:     drivers/char/virtio_console.c
index 96b27a888285c5258dc57f666b2876fcece1a1aa..503dae1de8ef4d6175e9831bbc0256f7477f87d8 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc8
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -797,7 +797,7 @@ KBUILD_CFLAGS   += $(call cc-option,-Werror=incompatible-pointer-types)
 KBUILD_ARFLAGS := $(call ar-option,D)
 
 # check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
        KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
        KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
 endif
index 91ebe382147f6d915a58f94027fa748ca8319f30..5f69c3bd59bba47babf92c97a9c7e5a0174c5648 100644 (file)
@@ -243,7 +243,7 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
 
        /* clear any remanants of delay slot */
        if (delay_mode(regs)) {
-               regs->ret = regs->bta ~1U;
+               regs->ret = regs->bta & ~1U;
                regs->status32 &= ~STATUS_DE_MASK;
        } else {
                regs->ret += state.instr_len;
index f10fe8526239552a676df4a4bfb1ae6a21b1aa41..01d178a2009f598f133e890a46c3e3e99efcc63c 100644 (file)
@@ -617,7 +617,7 @@ dtb-$(CONFIG_ARCH_ORION5X) += \
        orion5x-lacie-ethernet-disk-mini-v2.dtb \
        orion5x-linkstation-lsgl.dtb \
        orion5x-linkstation-lswtgl.dtb \
-       orion5x-lschl.dtb \
+       orion5x-linkstation-lschl.dtb \
        orion5x-lswsgl.dtb \
        orion5x-maxtor-shared-storage-2.dtb \
        orion5x-netgear-wnr854t.dtb \
index b792eee3899b25e7a7a206d16f5737f5fd631a9d..2ee40bc9ec21aafb9833bef482e344c2213273a9 100644 (file)
 / {
        #address-cells = <1>;
        #size-cells = <1>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                gpio0 = &gpio1;
index ac2a9da62b6ce81e4130c44185300ac95c31b2ca..43ccbbf754a340ab552a10ae0578badc59287aa4 100644 (file)
        #size-cells = <1>;
 
        interrupt-parent = <&icoll>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                gpio0 = &gpio0;
index 831d09a28155c5caa0b26ee6be42c20986067536..acd475659156bd05a0cfa9c94eeb778578a480d6 100644 (file)
 / {
        #address-cells = <1>;
        #size-cells = <1>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                ethernet0 = &fec;
index 9d8b5969ee3b0d9455afee96c666e77cae1cc713..b397384248f4b90876f8983ed95881e6caa1cb7d 100644 (file)
 / {
        #address-cells = <1>;
        #size-cells = <1>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                ethernet0 = &fec;
index 3aabf65a6a5224f5681d6ba189087a550824e964..d6a2190b60ef4d52a19d87e140d878c826a1b7ad 100644 (file)
        #size-cells = <1>;
 
        interrupt-parent = <&icoll>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                ethernet0 = &mac0;
index 85cd8be22f7155edae2d56ac5a99984427aa6131..23b0d2cf9acdcde0180b50b0b7635a52ddd92ecc 100644 (file)
 / {
        #address-cells = <1>;
        #size-cells = <1>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                serial0 = &uart1;
index 9f40e6229189f3c6cb72c99bc11c351598e7f0e3..d0496c65cea2695772f0477b4d153c3bd3e6fa4d 100644 (file)
 / {
        #address-cells = <1>;
        #size-cells = <1>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                ethernet0 = &fec;
index fe0221e4cbf7b108f81c2695507a9450653b53f8..ceae909e2201f729e8ed64a2f9baaa3103d3d968 100644 (file)
 / {
        #address-cells = <1>;
        #size-cells = <1>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                ethernet0 = &fec;
index 33526cade73582766f68972301d349c72fccb9a6..1ee1d542d9ad088c0bb94a55d66829d6ac1d92b9 100644 (file)
 / {
        #address-cells = <1>;
        #size-cells = <1>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                ethernet0 = &fec;
index ca51dc03e327b3f89b3836ea8ba41d55359af58e..2e516f4985e4cd2e470f8ddb0195a34821a1d09d 100644 (file)
 / {
        #address-cells = <1>;
        #size-cells = <1>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                ethernet0 = &fec;
index 1ade1951e620da007041882b467ebfbe3e530a7c..7aa120fbdc71ea0e4a2776312bc8aedfe5617f16 100644 (file)
 &gpio4 {
        gpio-ranges = <&iomuxc  5 136 1>, <&iomuxc  6 145 1>, <&iomuxc  7 150 1>,
                      <&iomuxc  8 146 1>, <&iomuxc  9 151 1>, <&iomuxc 10 147 1>,
-                     <&iomuxc 11 151 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>,
+                     <&iomuxc 11 152 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>,
                      <&iomuxc 14 149 1>, <&iomuxc 15 154 1>, <&iomuxc 16  39 7>,
                      <&iomuxc 23  56 1>, <&iomuxc 24  61 7>, <&iomuxc 31  46 1>;
 };
index 89b834f3fa17f6b576e31fd61e8bfad205f73923..e7d30f45b161ebb09f9d904f690638ec46c20ce7 100644 (file)
 / {
        #address-cells = <1>;
        #size-cells = <1>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                ethernet0 = &fec;
index 19cbd879c448984717a83e1d819efdec822c4957..cc9572ea2860a5b619ce27bce26ed78de0c4c13c 100644 (file)
 / {
        #address-cells = <1>;
        #size-cells = <1>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                ethernet0 = &fec;
index 10f33301619777a9d676eb49bae893540d920973..dd4ec85ecbaaff534c2128997da4215e5b0f93bc 100644 (file)
 / {
        #address-cells = <1>;
        #size-cells = <1>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                can0 = &flexcan1;
index 39845a7e046303e6448e506d7199a49b7530de74..53d3f8e41e9b8e99a15888cb63a50fc19aa591d4 100644 (file)
 / {
        #address-cells = <1>;
        #size-cells = <1>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                ethernet0 = &fec1;
index 8ff2cbdd8f0df26e59805a12dcb0ee871cc3ca73..be33dfc86838ea16dd09201c60cfcdfe00d3c054 100644 (file)
 / {
        #address-cells = <1>;
        #size-cells = <1>;
+       /*
+        * The decompressor and also some bootloaders rely on a
+        * pre-existing /chosen node to be available to insert the
+        * command line and merge other ATAGS info.
+        * Also for U-Boot there must be a pre-existing /memory node.
+        */
+       chosen {};
+       memory { device_type = "memory"; reg = <0 0>; };
 
        aliases {
                gpio0 = &gpio1;
similarity index 98%
rename from arch/arm/boot/dts/orion5x-lschl.dts
rename to arch/arm/boot/dts/orion5x-linkstation-lschl.dts
index 94740925284587f8f40219e920ced179e67cac3a..ea6c881634b989cad90258e6eb1094dcf36ce8b6 100644 (file)
@@ -2,7 +2,7 @@
  * Device Tree file for Buffalo Linkstation LS-CHLv3
  *
  * Copyright (C) 2016 Ash Hughes <ashley.hughes@blueyonder.co.uk>
- * Copyright (C) 2015, 2016
+ * Copyright (C) 2015-2017
  * Roger Shimizu <rogershimizu@gmail.com>
  *
  * This file is dual-licensed: you can use it either under the terms
@@ -52,7 +52,7 @@
 #include <dt-bindings/gpio/gpio.h>
 
 / {
-       model = "Buffalo Linkstation Live v3 (LS-CHL)";
+       model = "Buffalo Linkstation LiveV3 (LS-CHL)";
        compatible = "buffalo,lschl", "marvell,orion5x-88f5182", "marvell,orion5x";
 
        memory { /* 128 MB */
index c8b2944e304ac6dfdfdacbc3d1b63ab12e7ad73f..ace97e8576dbd9ae34d112b4aa8ec991647134e5 100644 (file)
                                phy-names       = "usb2-phy", "usb3-phy";
                                phys            = <&usb2_picophy0>,
                                                  <&phy_port2 PHY_TYPE_USB3>;
+                               snps,dis_u3_susphy_quirk;
                        };
                };
 
index ea316c4b890efadb31df9f90df5ae382c94d4228..d3f1768840e28aa6a4e1ccba8cb3313bfbe78324 100644 (file)
@@ -64,8 +64,8 @@ CONFIG_NETFILTER=y
 CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_SCTP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
index 18e59feaa3071593936ca4f0ff3d3b021eea9b08..7f479cdb34797c36219fe37bf8cdac79e94c3b8b 100644 (file)
@@ -56,8 +56,8 @@ CONFIG_NETFILTER=y
 CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_SCTP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
index ce131ed5939d5ff86054afc3a14de7cb51225a79..ae738a6319f6a341c05a3c6a2dee2da30aa02a40 100644 (file)
@@ -600,7 +600,7 @@ static int gpr_set(struct task_struct *target,
                   const void *kbuf, const void __user *ubuf)
 {
        int ret;
-       struct pt_regs newregs;
+       struct pt_regs newregs = *task_pt_regs(target);
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 &newregs,
index 699157759120f13ed4047b05609e53cb419fffe5..c03bf28d8bbc9a1a3f2be4187465c3cb674e25f1 100644 (file)
@@ -60,7 +60,6 @@
 
 #define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu)
 
-static enum cpuhp_state cpuhp_mmdc_state;
 static int ddr_type;
 
 struct fsl_mmdc_devtype_data {
@@ -82,6 +81,7 @@ static const struct of_device_id imx_mmdc_dt_ids[] = {
 
 #ifdef CONFIG_PERF_EVENTS
 
+static enum cpuhp_state cpuhp_mmdc_state;
 static DEFINE_IDA(mmdc_ida);
 
 PMU_EVENT_ATTR_STRING(total-cycles, mmdc_pmu_total_cycles, "event=0x00")
index 3a2e678b8d30cabfb058fd82bb1d3336e3dab02d..0122ad1a60270cda8c53faf69296b8a93a902851 100644 (file)
@@ -610,9 +610,9 @@ static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
 
 void __init early_abt_enable(void)
 {
-       fsr_info[22].fn = early_abort_handler;
+       fsr_info[FSR_FS_AEA].fn = early_abort_handler;
        local_abt_enable();
-       fsr_info[22].fn = do_bad;
+       fsr_info[FSR_FS_AEA].fn = do_bad;
 }
 
 #ifndef CONFIG_ARM_LPAE
index 67532f24227105c02f8d5a5be13ee46879aac237..afc1f84e763b248b2193715e757d432cc055eac8 100644 (file)
 #define FSR_FS5_0              (0x3f)
 
 #ifdef CONFIG_ARM_LPAE
+#define FSR_FS_AEA             17
+
 static inline int fsr_fs(unsigned int fsr)
 {
        return fsr & FSR_FS5_0;
 }
 #else
+#define FSR_FS_AEA             22
+
 static inline int fsr_fs(unsigned int fsr)
 {
        return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
index eada0b58ba1c7637d46fffaf7eadaf37380d41e8..0cbe24b49710fd4057aec2da23ccbf3830ca4b92 100644 (file)
        #address-cells = <2>;
        #size-cells = <2>;
 
+       reserved-memory {
+               #address-cells = <2>;
+               #size-cells = <2>;
+               ranges;
+
+               /* 16 MiB reserved for Hardware ROM Firmware */
+               hwrom_reserved: hwrom@0 {
+                       reg = <0x0 0x0 0x0 0x1000000>;
+                       no-map;
+               };
+
+               /* 2 MiB reserved for ARM Trusted Firmware (BL31) */
+               secmon_reserved: secmon@10000000 {
+                       reg = <0x0 0x10000000 0x0 0x200000>;
+                       no-map;
+               };
+       };
+
        cpus {
                #address-cells = <0x2>;
                #size-cells = <0x0>;
index 5d28e1cdc9986a18de73efc9c3a988c4556f1d88..c59403adb387dbcd33d375561ee3eaa07ebdfa3f 100644 (file)
        status = "okay";
        pinctrl-0 = <&eth_rgmii_pins>;
        pinctrl-names = "default";
+       phy-handle = <&eth_phy0>;
+
+       mdio {
+               compatible = "snps,dwmac-mdio";
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               eth_phy0: ethernet-phy@0 {
+                       reg = <0>;
+                       eee-broken-1000t;
+               };
+       };
 };
 
 &ir {
index c53dbeae79f2f5fce8353b169e96ff6c79294aa5..838dad5c209fae0f3a660e79d1f5fef8eb1f0c68 100644 (file)
@@ -193,15 +193,16 @@ AES_ENTRY(aes_cbc_encrypt)
        cbz             w6, .Lcbcencloop
 
        ld1             {v0.16b}, [x5]                  /* get iv */
-       enc_prepare     w3, x2, x5
+       enc_prepare     w3, x2, x6
 
 .Lcbcencloop:
        ld1             {v1.16b}, [x1], #16             /* get next pt block */
        eor             v0.16b, v0.16b, v1.16b          /* ..and xor with iv */
-       encrypt_block   v0, w3, x2, x5, w6
+       encrypt_block   v0, w3, x2, x6, w7
        st1             {v0.16b}, [x0], #16
        subs            w4, w4, #1
        bne             .Lcbcencloop
+       st1             {v0.16b}, [x5]                  /* return iv */
        ret
 AES_ENDPROC(aes_cbc_encrypt)
 
@@ -211,7 +212,7 @@ AES_ENTRY(aes_cbc_decrypt)
        cbz             w6, .LcbcdecloopNx
 
        ld1             {v7.16b}, [x5]                  /* get iv */
-       dec_prepare     w3, x2, x5
+       dec_prepare     w3, x2, x6
 
 .LcbcdecloopNx:
 #if INTERLEAVE >= 2
@@ -248,7 +249,7 @@ AES_ENTRY(aes_cbc_decrypt)
 .Lcbcdecloop:
        ld1             {v1.16b}, [x1], #16             /* get next ct block */
        mov             v0.16b, v1.16b                  /* ...and copy to v0 */
-       decrypt_block   v0, w3, x2, x5, w6
+       decrypt_block   v0, w3, x2, x6, w7
        eor             v0.16b, v0.16b, v7.16b          /* xor with iv => pt */
        mov             v7.16b, v1.16b                  /* ct is next iv */
        st1             {v0.16b}, [x0], #16
@@ -256,6 +257,7 @@ AES_ENTRY(aes_cbc_decrypt)
        bne             .Lcbcdecloop
 .Lcbcdecout:
        FRAME_POP
+       st1             {v7.16b}, [x5]                  /* return iv */
        ret
 AES_ENDPROC(aes_cbc_decrypt)
 
@@ -267,24 +269,15 @@ AES_ENDPROC(aes_cbc_decrypt)
 
 AES_ENTRY(aes_ctr_encrypt)
        FRAME_PUSH
-       cbnz            w6, .Lctrfirst          /* 1st time around? */
-       umov            x5, v4.d[1]             /* keep swabbed ctr in reg */
-       rev             x5, x5
-#if INTERLEAVE >= 2
-       cmn             w5, w4                  /* 32 bit overflow? */
-       bcs             .Lctrinc
-       add             x5, x5, #1              /* increment BE ctr */
-       b               .LctrincNx
-#else
-       b               .Lctrinc
-#endif
-.Lctrfirst:
+       cbz             w6, .Lctrnotfirst       /* 1st time around? */
        enc_prepare     w3, x2, x6
        ld1             {v4.16b}, [x5]
-       umov            x5, v4.d[1]             /* keep swabbed ctr in reg */
-       rev             x5, x5
+
+.Lctrnotfirst:
+       umov            x8, v4.d[1]             /* keep swabbed ctr in reg */
+       rev             x8, x8
 #if INTERLEAVE >= 2
-       cmn             w5, w4                  /* 32 bit overflow? */
+       cmn             w8, w4                  /* 32 bit overflow? */
        bcs             .Lctrloop
 .LctrloopNx:
        subs            w4, w4, #INTERLEAVE
@@ -292,11 +285,11 @@ AES_ENTRY(aes_ctr_encrypt)
 #if INTERLEAVE == 2
        mov             v0.8b, v4.8b
        mov             v1.8b, v4.8b
-       rev             x7, x5
-       add             x5, x5, #1
+       rev             x7, x8
+       add             x8, x8, #1
        ins             v0.d[1], x7
-       rev             x7, x5
-       add             x5, x5, #1
+       rev             x7, x8
+       add             x8, x8, #1
        ins             v1.d[1], x7
        ld1             {v2.16b-v3.16b}, [x1], #32      /* get 2 input blocks */
        do_encrypt_block2x
@@ -305,7 +298,7 @@ AES_ENTRY(aes_ctr_encrypt)
        st1             {v0.16b-v1.16b}, [x0], #32
 #else
        ldr             q8, =0x30000000200000001        /* addends 1,2,3[,0] */
-       dup             v7.4s, w5
+       dup             v7.4s, w8
        mov             v0.16b, v4.16b
        add             v7.4s, v7.4s, v8.4s
        mov             v1.16b, v4.16b
@@ -323,18 +316,12 @@ AES_ENTRY(aes_ctr_encrypt)
        eor             v2.16b, v7.16b, v2.16b
        eor             v3.16b, v5.16b, v3.16b
        st1             {v0.16b-v3.16b}, [x0], #64
-       add             x5, x5, #INTERLEAVE
+       add             x8, x8, #INTERLEAVE
 #endif
-       cbz             w4, .LctroutNx
-.LctrincNx:
-       rev             x7, x5
+       rev             x7, x8
        ins             v4.d[1], x7
+       cbz             w4, .Lctrout
        b               .LctrloopNx
-.LctroutNx:
-       sub             x5, x5, #1
-       rev             x7, x5
-       ins             v4.d[1], x7
-       b               .Lctrout
 .Lctr1x:
        adds            w4, w4, #INTERLEAVE
        beq             .Lctrout
@@ -342,30 +329,39 @@ AES_ENTRY(aes_ctr_encrypt)
 .Lctrloop:
        mov             v0.16b, v4.16b
        encrypt_block   v0, w3, x2, x6, w7
+
+       adds            x8, x8, #1              /* increment BE ctr */
+       rev             x7, x8
+       ins             v4.d[1], x7
+       bcs             .Lctrcarry              /* overflow? */
+
+.Lctrcarrydone:
        subs            w4, w4, #1
        bmi             .Lctrhalfblock          /* blocks < 0 means 1/2 block */
        ld1             {v3.16b}, [x1], #16
        eor             v3.16b, v0.16b, v3.16b
        st1             {v3.16b}, [x0], #16
-       beq             .Lctrout
-.Lctrinc:
-       adds            x5, x5, #1              /* increment BE ctr */
-       rev             x7, x5
-       ins             v4.d[1], x7
-       bcc             .Lctrloop               /* no overflow? */
-       umov            x7, v4.d[0]             /* load upper word of ctr  */
-       rev             x7, x7                  /* ... to handle the carry */
-       add             x7, x7, #1
-       rev             x7, x7
-       ins             v4.d[0], x7
-       b               .Lctrloop
+       bne             .Lctrloop
+
+.Lctrout:
+       st1             {v4.16b}, [x5]          /* return next CTR value */
+       FRAME_POP
+       ret
+
 .Lctrhalfblock:
        ld1             {v3.8b}, [x1]
        eor             v3.8b, v0.8b, v3.8b
        st1             {v3.8b}, [x0]
-.Lctrout:
        FRAME_POP
        ret
+
+.Lctrcarry:
+       umov            x7, v4.d[0]             /* load upper word of ctr  */
+       rev             x7, x7                  /* ... to handle the carry */
+       add             x7, x7, #1
+       rev             x7, x7
+       ins             v4.d[0], x7
+       b               .Lctrcarrydone
 AES_ENDPROC(aes_ctr_encrypt)
        .ltorg
 
index a8ee573fe610bd5e2d8191b4dffb05e134a6d3c2..281f4f1fcd1f68ab2fbc613afa3f2597bd090550 100644 (file)
@@ -164,7 +164,6 @@ config PPC
        select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
        select HAVE_ARCH_HARDENED_USERCOPY
        select HAVE_KERNEL_GZIP
-       select HAVE_CC_STACKPROTECTOR
 
 config GENERIC_CSUM
        def_bool CPU_LITTLE_ENDIAN
@@ -484,6 +483,7 @@ config RELOCATABLE
        bool "Build a relocatable kernel"
        depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE))
        select NONSTATIC_KERNEL
+       select MODULE_REL_CRCS if MODVERSIONS
        help
          This builds a kernel image that is capable of running at the
          location the kernel is loaded at. For ppc32, there is no any
index b312b152461b0539a22c5939ba7728a38f5d8d32..6e834caa37206a476792823463e81ac4c9617f9c 100644 (file)
@@ -23,7 +23,9 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
 {
        int i;
 
+#ifndef __clang__ /* clang can't cope with this */
        BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif
 
 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
        if (!static_key_initialized) {
index a34c764ca8dd83435faf75307e30e5149e55de4b..233a7e8cc8e32d6cf0ac904b3f02b2f340e3883b 100644 (file)
@@ -160,7 +160,9 @@ static __always_inline bool mmu_has_feature(unsigned long feature)
 {
        int i;
 
+#ifndef __clang__ /* clang can't cope with this */
        BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif
 
 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
        if (!static_key_initialized) {
index cc12c61ef315fc6ca5d43233bd54889cd19a01a0..53885512b8d31b12acec28dc3ba6688e57fb9615 100644 (file)
@@ -90,9 +90,5 @@ static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sec
 }
 #endif
 
-#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
-#define ARCH_RELOCATES_KCRCTAB
-#define reloc_start PHYSICAL_START
-#endif
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_MODULE_H */
index 0d4531aa2052d77fb8036b05f3550c33cfdde49e..dff79798903da7edc9dbc6e440fa97bbe9afe53f 100644 (file)
 #define   SRR1_ISI_N_OR_G      0x10000000 /* ISI: Access is no-exec or G */
 #define   SRR1_ISI_PROT                0x08000000 /* ISI: Other protection fault */
 #define   SRR1_WAKEMASK                0x00380000 /* reason for wakeup */
-#define   SRR1_WAKEMASK_P8     0x003c0000 /* reason for wakeup on POWER8 */
+#define   SRR1_WAKEMASK_P8     0x003c0000 /* reason for wakeup on POWER8 and 9 */
 #define   SRR1_WAKESYSERR      0x00300000 /* System error */
 #define   SRR1_WAKEEE          0x00200000 /* External interrupt */
+#define   SRR1_WAKEHVI         0x00240000 /* Hypervisor Virtualization Interrupt (P9) */
 #define   SRR1_WAKEMT          0x00280000 /* mtctrl */
 #define          SRR1_WAKEHMI          0x00280000 /* Hypervisor maintenance */
 #define   SRR1_WAKEDEC         0x00180000 /* Decrementer interrupt */
diff --git a/arch/powerpc/include/asm/stackprotector.h b/arch/powerpc/include/asm/stackprotector.h
deleted file mode 100644 (file)
index 6720190..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * GCC stack protector support.
- *
- * Stack protector works by putting predefined pattern at the start of
- * the stack frame and verifying that it hasn't been overwritten when
- * returning from the function.  The pattern is called stack canary
- * and gcc expects it to be defined by a global variable called
- * "__stack_chk_guard" on PPC.  This unfortunately means that on SMP
- * we cannot have a different canary value per task.
- */
-
-#ifndef _ASM_STACKPROTECTOR_H
-#define _ASM_STACKPROTECTOR_H
-
-#include <linux/random.h>
-#include <linux/version.h>
-#include <asm/reg.h>
-
-extern unsigned long __stack_chk_guard;
-
-/*
- * Initialize the stackprotector canary value.
- *
- * NOTE: this must only be called from functions that never return,
- * and it must always be inlined.
- */
-static __always_inline void boot_init_stack_canary(void)
-{
-       unsigned long canary;
-
-       /* Try to get a semi random initial value. */
-       get_random_bytes(&canary, sizeof(canary));
-       canary ^= mftb();
-       canary ^= LINUX_VERSION_CODE;
-
-       current->stack_canary = canary;
-       __stack_chk_guard = current->stack_canary;
-}
-
-#endif /* _ASM_STACKPROTECTOR_H */
index f0b238516e9b44b5afabc52a8460daaa6e81ca97..e0b9e576905aa2e1c24060db56de70f8b02ea920 100644 (file)
@@ -44,6 +44,7 @@ static inline int icp_hv_init(void) { return -ENODEV; }
 
 #ifdef CONFIG_PPC_POWERNV
 extern int icp_opal_init(void);
+extern void icp_opal_flush_interrupt(void);
 #else
 static inline int icp_opal_init(void) { return -ENODEV; }
 #endif
index 23f8082d7bfad95f4c9fbb8201e3e58c3104928f..f4c2b52e58b36eb44bcb6be2a2428bc0458f7660 100644 (file)
@@ -19,10 +19,6 @@ CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 
-# -fstack-protector triggers protection checks in this code,
-# but it is being used too early to link to meaningful stack_chk logic.
-CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)
-
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
 CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
index 0601e6a7297c64ea4b2129011d32ae42a662ac07..195a9fc8f81c8fc41fca8b05239948d4f28d54af 100644 (file)
@@ -91,9 +91,6 @@ int main(void)
        DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
 #endif
 
-#ifdef CONFIG_CC_STACKPROTECTOR
-       DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
-#endif
        DEFINE(KSP, offsetof(struct thread_struct, ksp));
        DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
 #ifdef CONFIG_BOOKE
index d88573bdd0907c6682cf03395bc296155cea9124..b94887165a101557c97fd6a53139ae7df67dbbd9 100644 (file)
@@ -545,7 +545,7 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
 static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
 {
        struct eeh_pe *pe = (struct eeh_pe *)data;
-       bool *clear_sw_state = flag;
+       bool clear_sw_state = *(bool *)flag;
        int i, rc = 1;
 
        for (i = 0; rc && i < 3; i++)
index 5742dbdbee4677924ebf0019b891e43879410131..3841d749a430069f4d4f2705c4199c08609b3757 100644 (file)
@@ -674,11 +674,7 @@ BEGIN_FTR_SECTION
        mtspr   SPRN_SPEFSCR,r0         /* restore SPEFSCR reg */
 END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 #endif /* CONFIG_SPE */
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
-       lwz     r0,TSK_STACK_CANARY(r2)
-       lis     r4,__stack_chk_guard@ha
-       stw     r0,__stack_chk_guard@l(r4)
-#endif
+
        lwz     r0,_CCR(r1)
        mtcrf   0xFF,r0
        /* r3-r12 are destroyed -- Cort */
index bb1807184bad5da5f9b65ce69087184e67534c03..0b0f89685b679745895251b011d0db522d4558e1 100644 (file)
@@ -286,14 +286,6 @@ static void dedotify_versions(struct modversion_info *vers,
        for (end = (void *)vers + size; vers < end; vers++)
                if (vers->name[0] == '.') {
                        memmove(vers->name, vers->name+1, strlen(vers->name));
-#ifdef ARCH_RELOCATES_KCRCTAB
-                       /* The TOC symbol has no CRC computed. To avoid CRC
-                        * check failing, we must force it to the expected
-                        * value (see CRC check in module.c).
-                        */
-                       if (!strcmp(vers->name, "TOC."))
-                               vers->crc = -(unsigned long)reloc_start;
-#endif
                }
 }
 
index 04885cec24df1413f90121115cf6569e4aa444e6..5dd056df0baaec576431adb4cbe8ab370d8f44c1 100644 (file)
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
 
-#ifdef CONFIG_CC_STACKPROTECTOR
-#include <linux/stackprotector.h>
-unsigned long __stack_chk_guard __read_mostly;
-EXPORT_SYMBOL(__stack_chk_guard);
-#endif
-
 /* Transactional Memory debug */
 #ifdef TM_DEBUG_SW
 #define TM_DEBUG(x...) printk(KERN_INFO x)
index ec47a939cbdd6dd81c6c05ed6707f28e12d9f0ea..ac83eb04a8b871293c53e7bd6ff4d439b89704a9 100644 (file)
@@ -2834,6 +2834,9 @@ static void __init prom_find_boot_cpu(void)
 
        cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
 
+       if (!PHANDLE_VALID(cpu_pkg))
+               return;
+
        prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
        prom.cpu = be32_to_cpu(rval);
 
index 6fd30ac7d14a0d2761e82d479fe01724bd53a38d..62a50d6d1053c9f1e3854ecb9de877e0f02cbb3b 100644 (file)
@@ -253,8 +253,11 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
        if (unlikely(debugger_fault_handler(regs)))
                goto bail;
 
-       /* On a kernel SLB miss we can only check for a valid exception entry */
-       if (!user_mode(regs) && (address >= TASK_SIZE)) {
+       /*
+        * The kernel should never take an execute fault nor should it
+        * take a page fault to a kernel address.
+        */
+       if (!user_mode(regs) && (is_exec || (address >= TASK_SIZE))) {
                rc = SIGSEGV;
                goto bail;
        }
@@ -390,20 +393,6 @@ good_area:
 #endif /* CONFIG_8xx */
 
        if (is_exec) {
-               /*
-                * An execution fault + no execute ?
-                *
-                * On CPUs that don't have CPU_FTR_COHERENT_ICACHE we
-                * deliberately create NX mappings, and use the fault to do the
-                * cache flush. This is usually handled in hash_page_do_lazy_icache()
-                * but we could end up here if that races with a concurrent PTE
-                * update. In that case we need to fall through here to the VMA
-                * check below.
-                */
-               if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
-                       (regs->msr & SRR1_ISI_N_OR_G))
-                       goto bad_area;
-
                /*
                 * Allow execution from readable areas if the MMU does not
                 * provide separate controls over reading and executing.
index cfa53ccc8bafc908e80532a4a64e44ad358dffc7..34f1a0dbc898ee4a28a6adcd41c0ce5d3fe61198 100644 (file)
@@ -65,7 +65,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
                if (!pmdp)
                        return -ENOMEM;
                if (map_page_size == PMD_SIZE) {
-                       ptep = (pte_t *)pudp;
+                       ptep = pmdp_ptep(pmdp);
                        goto set_the_pte;
                }
                ptep = pte_alloc_kernel(pmdp, ea);
@@ -90,7 +90,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
                }
                pmdp = pmd_offset(pudp, ea);
                if (map_page_size == PMD_SIZE) {
-                       ptep = (pte_t *)pudp;
+                       ptep = pmdp_ptep(pmdp);
                        goto set_the_pte;
                }
                if (!pmd_present(*pmdp)) {
index 61b79119065ff3030162df12446a67c712cd6649..952713d6cf04d0e826a685f181561d587df41257 100644 (file)
@@ -50,9 +50,7 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
        for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
                __tlbiel_pid(pid, set, ric);
        }
-       if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-               asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
-       return;
+       asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
 }
 
 static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
@@ -85,8 +83,6 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
        asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        asm volatile("ptesync": : :"memory");
-       if (cpu_has_feature(CPU_FTR_POWER9_DD1))
-               asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
 }
 
 static inline void _tlbie_va(unsigned long va, unsigned long pid,
index c789258ae1e1ce656be3e4c1bd1a9b28523f307e..eec0e8d0454d11859e83aeef105a07010b1b9aac 100644 (file)
@@ -155,8 +155,10 @@ static void pnv_smp_cpu_kill_self(void)
                wmask = SRR1_WAKEMASK_P8;
 
        idle_states = pnv_get_supported_cpuidle_states();
+
        /* We don't want to take decrementer interrupts while we are offline,
-        * so clear LPCR:PECE1. We keep PECE2 enabled.
+        * so clear LPCR:PECE1. We keep PECE2 (and LPCR_PECE_HVEE on P9)
+        * enabled as to let IPIs in.
         */
        mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
 
@@ -206,8 +208,12 @@ static void pnv_smp_cpu_kill_self(void)
                 * contains 0.
                 */
                if (((srr1 & wmask) == SRR1_WAKEEE) ||
+                   ((srr1 & wmask) == SRR1_WAKEHVI) ||
                    (local_paca->irq_happened & PACA_IRQ_EE)) {
-                       icp_native_flush_interrupt();
+                       if (cpu_has_feature(CPU_FTR_ARCH_300))
+                               icp_opal_flush_interrupt();
+                       else
+                               icp_native_flush_interrupt();
                } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
                        unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
                        asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
@@ -221,6 +227,8 @@ static void pnv_smp_cpu_kill_self(void)
                if (srr1 && !generic_check_cpu_restart(cpu))
                        DBG("CPU%d Unexpected exit while offline !\n", cpu);
        }
+
+       /* Re-enable decrementer interrupts */
        mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
        DBG("CPU%d coming online...\n", cpu);
 }
index 60c57657c772fef576e5c4703dfb2a17203978a7..f9670eabfcfa70ca338aa0c5f2e10217803c7162 100644 (file)
@@ -120,18 +120,49 @@ static void icp_opal_cause_ipi(int cpu, unsigned long data)
 {
        int hw_cpu = get_hard_smp_processor_id(cpu);
 
+       kvmppc_set_host_ipi(cpu, 1);
        opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
 }
 
 static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
 {
-       int hw_cpu = hard_smp_processor_id();
+       int cpu = smp_processor_id();
 
-       opal_int_set_mfrr(hw_cpu, 0xff);
+       kvmppc_set_host_ipi(cpu, 0);
+       opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
 
        return smp_ipi_demux();
 }
 
+/*
+ * Called when an interrupt is received on an off-line CPU to
+ * clear the interrupt, so that the CPU can go back to nap mode.
+ */
+void icp_opal_flush_interrupt(void)
+{
+       unsigned int xirr;
+       unsigned int vec;
+
+       do {
+               xirr = icp_opal_get_xirr();
+               vec = xirr & 0x00ffffff;
+               if (vec == XICS_IRQ_SPURIOUS)
+                       break;
+               if (vec == XICS_IPI) {
+                       /* Clear pending IPI */
+                       int cpu = smp_processor_id();
+                       kvmppc_set_host_ipi(cpu, 0);
+                       opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
+               } else {
+                       pr_err("XICS: hw interrupt 0x%x to offline cpu, "
+                              "disabling\n", vec);
+                       xics_mask_unknown_vec(vec);
+               }
+
+               /* EOI the interrupt */
+       } while (opal_int_eoi(xirr) > 0);
+}
+
 #endif /* CONFIG_SMP */
 
 static const struct icp_ops icp_opal_ops = {
index b84be675e507857e27766b6339e438270aea0ebe..d0317993e9476fd1178a693638d9ede23860171b 100644 (file)
@@ -35,15 +35,15 @@ void __tsb_context_switch(unsigned long pgd_pa,
 static inline void tsb_context_switch(struct mm_struct *mm)
 {
        __tsb_context_switch(__pa(mm->pgd),
-                            &mm->context.tsb_block[0],
+                            &mm->context.tsb_block[MM_TSB_BASE],
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-                            (mm->context.tsb_block[1].tsb ?
-                             &mm->context.tsb_block[1] :
+                            (mm->context.tsb_block[MM_TSB_HUGE].tsb ?
+                             &mm->context.tsb_block[MM_TSB_HUGE] :
                              NULL)
 #else
                             NULL
 #endif
-                            , __pa(&mm->context.tsb_descr[0]));
+                            , __pa(&mm->context.tsb_descr[MM_TSB_BASE]));
 }
 
 void tsb_grow(struct mm_struct *mm,
index 3bebf395252cc63ee3b39996f8a0d0431e7faf37..4d0248aa0928695597161d93f325a49311b43e2c 100644 (file)
@@ -1021,7 +1021,7 @@ static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
        unsigned long order = get_order(size);
        unsigned long p;
 
-       p = __get_free_pages(GFP_KERNEL, order);
+       p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!p) {
                prom_printf("SUN4V: Error, cannot allocate queue.\n");
                prom_halt();
index c59af546f522999342361a5babcb64a1dfab3ccd..3caed40235898698751ff0cf3d6ca7d98dc83da6 100644 (file)
@@ -43,8 +43,8 @@ static const char poweroff_msg[32] __attribute__((aligned(32))) =
        "Linux powering off";
 static const char rebooting_msg[32] __attribute__((aligned(32))) =
        "Linux rebooting";
-static const char panicing_msg[32] __attribute__((aligned(32))) =
-       "Linux panicing";
+static const char panicking_msg[32] __attribute__((aligned(32))) =
+       "Linux panicking";
 
 static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused)
 {
@@ -76,7 +76,7 @@ static struct notifier_block sstate_reboot_notifier = {
 
 static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr)
 {
-       do_set_sstate(HV_SOFT_STATE_TRANSITION, panicing_msg);
+       do_set_sstate(HV_SOFT_STATE_TRANSITION, panicking_msg);
 
        return NOTIFY_DONE;
 }
index 4bc10e44d1ca32a0acdf69b8492e128e7ef0e600..dfc97a47c9a08a330f31040fe120030ebe8cc098 100644 (file)
@@ -2051,6 +2051,73 @@ void sun4v_resum_overflow(struct pt_regs *regs)
        atomic_inc(&sun4v_resum_oflow_cnt);
 }
 
+/* Given a set of registers, get the virtual address that was being accessed
+ * by the faulting instruction at tpc.
+ */
+static unsigned long sun4v_get_vaddr(struct pt_regs *regs)
+{
+       unsigned int insn;
+
+       if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) {
+               return compute_effective_address(regs, insn,
+                                                (insn >> 25) & 0x1f);
+       }
+       return 0;
+}
+
+/* Attempt to handle non-resumable errors generated from userspace.
+ * Returns true if the signal was handled, false otherwise.
+ */
+bool sun4v_nonresum_error_user_handled(struct pt_regs *regs,
+                                 struct sun4v_error_entry *ent) {
+
+       unsigned int attrs = ent->err_attrs;
+
+       if (attrs & SUN4V_ERR_ATTRS_MEMORY) {
+               unsigned long addr = ent->err_raddr;
+               siginfo_t info;
+
+               if (addr == ~(u64)0) {
+                       /* This seems highly unlikely to ever occur */
+                       pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n");
+               } else {
+                       unsigned long page_cnt = DIV_ROUND_UP(ent->err_size,
+                                                             PAGE_SIZE);
+
+                       /* Break the unfortunate news. */
+                       pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n",
+                                addr);
+                       pr_emerg("SUN4V NON-RECOVERABLE ERROR:   Claiming %lu pages.\n",
+                                page_cnt);
+
+                       while (page_cnt-- > 0) {
+                               if (pfn_valid(addr >> PAGE_SHIFT))
+                                       get_page(pfn_to_page(addr >> PAGE_SHIFT));
+                               addr += PAGE_SIZE;
+                       }
+               }
+               info.si_signo = SIGKILL;
+               info.si_errno = 0;
+               info.si_trapno = 0;
+               force_sig_info(info.si_signo, &info, current);
+
+               return true;
+       }
+       if (attrs & SUN4V_ERR_ATTRS_PIO) {
+               siginfo_t info;
+
+               info.si_signo = SIGBUS;
+               info.si_code = BUS_ADRERR;
+               info.si_addr = (void __user *)sun4v_get_vaddr(regs);
+               force_sig_info(info.si_signo, &info, current);
+
+               return true;
+       }
+
+       /* Default to doing nothing */
+       return false;
+}
+
 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
  * Log the event, clear the first word of the entry, and die.
  */
@@ -2075,6 +2142,12 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
 
        put_cpu();
 
+       if (!(regs->tstate & TSTATE_PRIV) &&
+           sun4v_nonresum_error_user_handled(regs, &local_copy)) {
+               /* DON'T PANIC: This userspace error was handled. */
+               return;
+       }
+
 #ifdef CONFIG_PCI
        /* Check for the special PCI poke sequence. */
        if (pci_poke_in_progress && pci_poke_cpu == cpu) {
index 6ef688a1ef3e0f022032e5317662b9011c8f74c4..7ff1b0c86a8e5a630844c3ade200b592cbad413d 100644 (file)
@@ -1085,9 +1085,9 @@ static void aesni_free_simds(void)
                    aesni_simd_skciphers[i]; i++)
                simd_skcipher_free(aesni_simd_skciphers[i]);
 
-       for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2) &&
-                   aesni_simd_skciphers2[i].simd; i++)
-               simd_skcipher_free(aesni_simd_skciphers2[i].simd);
+       for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++)
+               if (aesni_simd_skciphers2[i].simd)
+                       simd_skcipher_free(aesni_simd_skciphers2[i].simd);
 }
 
 static int __init aesni_init(void)
@@ -1168,7 +1168,7 @@ static int __init aesni_init(void)
                simd = simd_skcipher_create_compat(algname, drvname, basename);
                err = PTR_ERR(simd);
                if (IS_ERR(simd))
-                       goto unregister_simds;
+                       continue;
 
                aesni_simd_skciphers2[i].simd = simd;
        }
index 17c3564d087a48bc24e41417fe6f128b5d7b9f0d..22ef4f72cf320adf510545ce08fd342bc89839d2 100644 (file)
@@ -161,7 +161,13 @@ static u64 rapl_timer_ms;
 
 static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
 {
-       return rapl_pmus->pmus[topology_logical_package_id(cpu)];
+       unsigned int pkgid = topology_logical_package_id(cpu);
+
+       /*
+        * The unsigned check also catches the '-1' return value for non
+        * existent mappings in the topology map.
+        */
+       return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL;
 }
 
 static inline u64 rapl_read_counter(struct perf_event *event)
@@ -402,6 +408,8 @@ static int rapl_pmu_event_init(struct perf_event *event)
 
        /* must be done before validate_group */
        pmu = cpu_to_rapl_pmu(event->cpu);
+       if (!pmu)
+               return -EINVAL;
        event->cpu = pmu->cpu;
        event->pmu_private = pmu;
        event->hw.event_base = msr;
@@ -585,6 +593,20 @@ static int rapl_cpu_online(unsigned int cpu)
        struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
        int target;
 
+       if (!pmu) {
+               pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
+               if (!pmu)
+                       return -ENOMEM;
+
+               raw_spin_lock_init(&pmu->lock);
+               INIT_LIST_HEAD(&pmu->active_list);
+               pmu->pmu = &rapl_pmus->pmu;
+               pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
+               rapl_hrtimer_init(pmu);
+
+               rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
+       }
+
        /*
         * Check if there is an online cpu in the package which collects rapl
         * events already.
@@ -598,27 +620,6 @@ static int rapl_cpu_online(unsigned int cpu)
        return 0;
 }
 
-static int rapl_cpu_prepare(unsigned int cpu)
-{
-       struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
-
-       if (pmu)
-               return 0;
-
-       pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
-       if (!pmu)
-               return -ENOMEM;
-
-       raw_spin_lock_init(&pmu->lock);
-       INIT_LIST_HEAD(&pmu->active_list);
-       pmu->pmu = &rapl_pmus->pmu;
-       pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
-       pmu->cpu = -1;
-       rapl_hrtimer_init(pmu);
-       rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
-       return 0;
-}
-
 static int rapl_check_hw_unit(bool apply_quirk)
 {
        u64 msr_rapl_power_unit_bits;
@@ -803,29 +804,21 @@ static int __init rapl_pmu_init(void)
        /*
         * Install callbacks. Core will call them for each online cpu.
         */
-
-       ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "perf/x86/rapl:prepare",
-                               rapl_cpu_prepare, NULL);
-       if (ret)
-               goto out;
-
        ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
                                "perf/x86/rapl:online",
                                rapl_cpu_online, rapl_cpu_offline);
        if (ret)
-               goto out1;
+               goto out;
 
        ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
        if (ret)
-               goto out2;
+               goto out1;
 
        rapl_advertise();
        return 0;
 
-out2:
-       cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
 out1:
-       cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP);
+       cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
 out:
        pr_warn("Initialization failed (%d), disabled\n", ret);
        cleanup_rapl_pmus();
@@ -836,7 +829,6 @@ module_init(rapl_pmu_init);
 static void __exit intel_rapl_exit(void)
 {
        cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
-       cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP);
        perf_pmu_unregister(&rapl_pmus->pmu);
        cleanup_rapl_pmus();
 }
index 8c4ccdc3a3f3607ee0af4f4006029df3000e0839..1ab45976474d52597700b291e24b0b0a235dff03 100644 (file)
@@ -100,7 +100,13 @@ ssize_t uncore_event_show(struct kobject *kobj,
 
 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
 {
-       return pmu->boxes[topology_logical_package_id(cpu)];
+       unsigned int pkgid = topology_logical_package_id(cpu);
+
+       /*
+        * The unsigned check also catches the '-1' return value for non
+        * existent mappings in the topology map.
+        */
+       return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
 }
 
 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
@@ -764,30 +770,6 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
        pmu->registered = false;
 }
 
-static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
-{
-       struct intel_uncore_pmu *pmu = type->pmus;
-       struct intel_uncore_box *box;
-       int i, pkg;
-
-       if (pmu) {
-               pkg = topology_physical_package_id(cpu);
-               for (i = 0; i < type->num_boxes; i++, pmu++) {
-                       box = pmu->boxes[pkg];
-                       if (box)
-                               uncore_box_exit(box);
-               }
-       }
-}
-
-static void uncore_exit_boxes(void *dummy)
-{
-       struct intel_uncore_type **types;
-
-       for (types = uncore_msr_uncores; *types; types++)
-               __uncore_exit_boxes(*types++, smp_processor_id());
-}
-
 static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
 {
        int pkg;
@@ -1058,86 +1040,6 @@ static void uncore_pci_exit(void)
        }
 }
 
-static int uncore_cpu_dying(unsigned int cpu)
-{
-       struct intel_uncore_type *type, **types = uncore_msr_uncores;
-       struct intel_uncore_pmu *pmu;
-       struct intel_uncore_box *box;
-       int i, pkg;
-
-       pkg = topology_logical_package_id(cpu);
-       for (; *types; types++) {
-               type = *types;
-               pmu = type->pmus;
-               for (i = 0; i < type->num_boxes; i++, pmu++) {
-                       box = pmu->boxes[pkg];
-                       if (box && atomic_dec_return(&box->refcnt) == 0)
-                               uncore_box_exit(box);
-               }
-       }
-       return 0;
-}
-
-static int first_init;
-
-static int uncore_cpu_starting(unsigned int cpu)
-{
-       struct intel_uncore_type *type, **types = uncore_msr_uncores;
-       struct intel_uncore_pmu *pmu;
-       struct intel_uncore_box *box;
-       int i, pkg, ncpus = 1;
-
-       if (first_init) {
-               /*
-                * On init we get the number of online cpus in the package
-                * and set refcount for all of them.
-                */
-               ncpus = cpumask_weight(topology_core_cpumask(cpu));
-       }
-
-       pkg = topology_logical_package_id(cpu);
-       for (; *types; types++) {
-               type = *types;
-               pmu = type->pmus;
-               for (i = 0; i < type->num_boxes; i++, pmu++) {
-                       box = pmu->boxes[pkg];
-                       if (!box)
-                               continue;
-                       /* The first cpu on a package activates the box */
-                       if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
-                               uncore_box_init(box);
-               }
-       }
-
-       return 0;
-}
-
-static int uncore_cpu_prepare(unsigned int cpu)
-{
-       struct intel_uncore_type *type, **types = uncore_msr_uncores;
-       struct intel_uncore_pmu *pmu;
-       struct intel_uncore_box *box;
-       int i, pkg;
-
-       pkg = topology_logical_package_id(cpu);
-       for (; *types; types++) {
-               type = *types;
-               pmu = type->pmus;
-               for (i = 0; i < type->num_boxes; i++, pmu++) {
-                       if (pmu->boxes[pkg])
-                               continue;
-                       /* First cpu of a package allocates the box */
-                       box = uncore_alloc_box(type, cpu_to_node(cpu));
-                       if (!box)
-                               return -ENOMEM;
-                       box->pmu = pmu;
-                       box->pkgid = pkg;
-                       pmu->boxes[pkg] = box;
-               }
-       }
-       return 0;
-}
-
 static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
                                   int new_cpu)
 {
@@ -1177,12 +1079,14 @@ static void uncore_change_context(struct intel_uncore_type **uncores,
 
 static int uncore_event_cpu_offline(unsigned int cpu)
 {
-       int target;
+       struct intel_uncore_type *type, **types = uncore_msr_uncores;
+       struct intel_uncore_pmu *pmu;
+       struct intel_uncore_box *box;
+       int i, pkg, target;
 
        /* Check if exiting cpu is used for collecting uncore events */
        if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
-               return 0;
-
+               goto unref;
        /* Find a new cpu to collect uncore events */
        target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
 
@@ -1194,12 +1098,82 @@ static int uncore_event_cpu_offline(unsigned int cpu)
 
        uncore_change_context(uncore_msr_uncores, cpu, target);
        uncore_change_context(uncore_pci_uncores, cpu, target);
+
+unref:
+       /* Clear the references */
+       pkg = topology_logical_package_id(cpu);
+       for (; *types; types++) {
+               type = *types;
+               pmu = type->pmus;
+               for (i = 0; i < type->num_boxes; i++, pmu++) {
+                       box = pmu->boxes[pkg];
+                       if (box && atomic_dec_return(&box->refcnt) == 0)
+                               uncore_box_exit(box);
+               }
+       }
        return 0;
 }
 
+static int allocate_boxes(struct intel_uncore_type **types,
+                        unsigned int pkg, unsigned int cpu)
+{
+       struct intel_uncore_box *box, *tmp;
+       struct intel_uncore_type *type;
+       struct intel_uncore_pmu *pmu;
+       LIST_HEAD(allocated);
+       int i;
+
+       /* Try to allocate all required boxes */
+       for (; *types; types++) {
+               type = *types;
+               pmu = type->pmus;
+               for (i = 0; i < type->num_boxes; i++, pmu++) {
+                       if (pmu->boxes[pkg])
+                               continue;
+                       box = uncore_alloc_box(type, cpu_to_node(cpu));
+                       if (!box)
+                               goto cleanup;
+                       box->pmu = pmu;
+                       box->pkgid = pkg;
+                       list_add(&box->active_list, &allocated);
+               }
+       }
+       /* Install them in the pmus */
+       list_for_each_entry_safe(box, tmp, &allocated, active_list) {
+               list_del_init(&box->active_list);
+               box->pmu->boxes[pkg] = box;
+       }
+       return 0;
+
+cleanup:
+       list_for_each_entry_safe(box, tmp, &allocated, active_list) {
+               list_del_init(&box->active_list);
+               kfree(box);
+       }
+       return -ENOMEM;
+}
+
 static int uncore_event_cpu_online(unsigned int cpu)
 {
-       int target;
+       struct intel_uncore_type *type, **types = uncore_msr_uncores;
+       struct intel_uncore_pmu *pmu;
+       struct intel_uncore_box *box;
+       int i, ret, pkg, target;
+
+       pkg = topology_logical_package_id(cpu);
+       ret = allocate_boxes(types, pkg, cpu);
+       if (ret)
+               return ret;
+
+       for (; *types; types++) {
+               type = *types;
+               pmu = type->pmus;
+               for (i = 0; i < type->num_boxes; i++, pmu++) {
+                       box = pmu->boxes[pkg];
+                       if (box && atomic_inc_return(&box->refcnt) == 1)
+                               uncore_box_init(box);
+               }
+       }
 
        /*
         * Check if there is an online cpu in the package
@@ -1389,38 +1363,16 @@ static int __init intel_uncore_init(void)
        if (cret && pret)
                return -ENODEV;
 
-       /*
-        * Install callbacks. Core will call them for each online cpu.
-        *
-        * The first online cpu of each package allocates and takes
-        * the refcounts for all other online cpus in that package.
-        * If msrs are not enabled no allocation is required and
-        * uncore_cpu_prepare() is not called for each online cpu.
-        */
-       if (!cret) {
-              ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
-                                      "perf/x86/intel/uncore:prepare",
-                                      uncore_cpu_prepare, NULL);
-               if (ret)
-                       goto err;
-       } else {
-               cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
-                                         "perf/x86/intel/uncore:prepare",
-                                         uncore_cpu_prepare, NULL);
-       }
-       first_init = 1;
-       cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
-                         "perf/x86/uncore:starting",
-                         uncore_cpu_starting, uncore_cpu_dying);
-       first_init = 0;
-       cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
-                         "perf/x86/uncore:online",
-                         uncore_event_cpu_online, uncore_event_cpu_offline);
+       /* Install hotplug callbacks to setup the targets for each package */
+       ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+                               "perf/x86/intel/uncore:online",
+                               uncore_event_cpu_online,
+                               uncore_event_cpu_offline);
+       if (ret)
+               goto err;
        return 0;
 
 err:
-       /* Undo box->init_box() */
-       on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
        uncore_types_exit(uncore_msr_uncores);
        uncore_pci_exit();
        return ret;
@@ -1429,9 +1381,7 @@ module_init(intel_uncore_init);
 
 static void __exit intel_uncore_exit(void)
 {
-       cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
-       cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
-       cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
+       cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
        uncore_types_exit(uncore_msr_uncores);
        uncore_pci_exit();
 }
index 38711df3bcb56b6939f2f84af16b920f0409049b..2266f864b7477a3ff88fee26f07df62eb5073ab4 100644 (file)
@@ -140,6 +140,7 @@ extern void __init load_ucode_bsp(void);
 extern void load_ucode_ap(void);
 void reload_early_microcode(void);
 extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
+extern bool initrd_gone;
 #else
 static inline int __init microcode_init(void)                  { return 0; };
 static inline void __init load_ucode_bsp(void)                 { }
index 1be64da0384ed8d5dea85563633c2740f5587888..e6cfe7ba2d65968611d049a136fc1b09506065e1 100644 (file)
@@ -104,6 +104,7 @@ struct cpuinfo_x86 {
        __u8                    x86_phys_bits;
        /* CPUID returned core id bits: */
        __u8                    x86_coreid_bits;
+       __u8                    cu_id;
        /* Max extended CPUID function supported: */
        __u32                   extended_cpuid_level;
        /* Maximum supported CPUID level, -1=no CPUID: */
index 1e35dd06b090ee91189cb5a52fdf026f2ca5e74b..bd6b8c270c24af4a2869034758e1f3e5e04c01a8 100644 (file)
@@ -1875,7 +1875,6 @@ static struct irq_chip ioapic_chip __read_mostly = {
        .irq_ack                = irq_chip_ack_parent,
        .irq_eoi                = ioapic_ack_level,
        .irq_set_affinity       = ioapic_set_affinity,
-       .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .flags                  = IRQCHIP_SKIP_SET_WAKE,
 };
 
@@ -1887,7 +1886,6 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
        .irq_ack                = irq_chip_ack_parent,
        .irq_eoi                = ioapic_ir_ack_level,
        .irq_set_affinity       = ioapic_set_affinity,
-       .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .flags                  = IRQCHIP_SKIP_SET_WAKE,
 };
 
@@ -2117,6 +2115,7 @@ static inline void __init check_timer(void)
                        if (idx != -1 && irq_trigger(idx))
                                unmask_ioapic_irq(irq_get_chip_data(0));
                }
+               irq_domain_deactivate_irq(irq_data);
                irq_domain_activate_irq(irq_data);
                if (timer_irq_works()) {
                        if (disable_timer_pin_1 > 0)
@@ -2138,6 +2137,7 @@ static inline void __init check_timer(void)
                 * legacy devices should be connected to IO APIC #0
                 */
                replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
+               irq_domain_deactivate_irq(irq_data);
                irq_domain_activate_irq(irq_data);
                legacy_pic->unmask(0);
                if (timer_irq_works()) {
index 1d3167269a6717902149171fe755123a5c654eb6..2b4cf04239b6c011a9d9a5ef84bdca5ef073a69e 100644 (file)
@@ -309,8 +309,22 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
 
        /* get information required for multi-node processors */
        if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+               u32 eax, ebx, ecx, edx;
 
-               node_id = cpuid_ecx(0x8000001e) & 7;
+               cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
+
+               node_id  = ecx & 0xff;
+               smp_num_siblings = ((ebx >> 8) & 0xff) + 1;
+
+               if (c->x86 == 0x15)
+                       c->cu_id = ebx & 0xff;
+
+               if (c->x86 >= 0x17) {
+                       c->cpu_core_id = ebx & 0xff;
+
+                       if (smp_num_siblings > 1)
+                               c->x86_max_cores /= smp_num_siblings;
+               }
 
                /*
                 * We may have multiple LLCs if L3 caches exist, so check if we
index 9bab7a8a42936e32270e3573a17a1cd87fb580e0..ede03e849a8bd62808d33e1002c95a20568d5f8c 100644 (file)
@@ -1015,6 +1015,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_max_cores = 1;
        c->x86_coreid_bits = 0;
+       c->cu_id = 0xff;
 #ifdef CONFIG_X86_64
        c->x86_clflush_size = 64;
        c->x86_phys_bits = 36;
index 00ef43233e034b0cde9b2adc88b8003ddc42d00b..537c6647d84ca3e7cca771feb6c98bda94ef8c70 100644 (file)
@@ -1373,20 +1373,15 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
 
 static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
 
-static void __restart_timer(struct timer_list *t, unsigned long interval)
+static void __start_timer(struct timer_list *t, unsigned long interval)
 {
        unsigned long when = jiffies + interval;
        unsigned long flags;
 
        local_irq_save(flags);
 
-       if (timer_pending(t)) {
-               if (time_before(when, t->expires))
-                       mod_timer(t, when);
-       } else {
-               t->expires = round_jiffies(when);
-               add_timer_on(t, smp_processor_id());
-       }
+       if (!timer_pending(t) || time_before(when, t->expires))
+               mod_timer(t, round_jiffies(when));
 
        local_irq_restore(flags);
 }
@@ -1421,7 +1416,7 @@ static void mce_timer_fn(unsigned long data)
 
 done:
        __this_cpu_write(mce_next_interval, iv);
-       __restart_timer(t, iv);
+       __start_timer(t, iv);
 }
 
 /*
@@ -1432,7 +1427,7 @@ void mce_timer_kick(unsigned long interval)
        struct timer_list *t = this_cpu_ptr(&mce_timer);
        unsigned long iv = __this_cpu_read(mce_next_interval);
 
-       __restart_timer(t, interval);
+       __start_timer(t, interval);
 
        if (interval < iv)
                __this_cpu_write(mce_next_interval, interval);
@@ -1779,17 +1774,15 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
        }
 }
 
-static void mce_start_timer(unsigned int cpu, struct timer_list *t)
+static void mce_start_timer(struct timer_list *t)
 {
        unsigned long iv = check_interval * HZ;
 
        if (mca_cfg.ignore_ce || !iv)
                return;
 
-       per_cpu(mce_next_interval, cpu) = iv;
-
-       t->expires = round_jiffies(jiffies + iv);
-       add_timer_on(t, cpu);
+       this_cpu_write(mce_next_interval, iv);
+       __start_timer(t, iv);
 }
 
 static void __mcheck_cpu_setup_timer(void)
@@ -1806,7 +1799,7 @@ static void __mcheck_cpu_init_timer(void)
        unsigned int cpu = smp_processor_id();
 
        setup_pinned_timer(t, mce_timer_fn, cpu);
-       mce_start_timer(cpu, t);
+       mce_start_timer(t);
 }
 
 /* Handle unconfigured int18 (should never happen) */
@@ -2566,7 +2559,7 @@ static int mce_cpu_dead(unsigned int cpu)
 
 static int mce_cpu_online(unsigned int cpu)
 {
-       struct timer_list *t = &per_cpu(mce_timer, cpu);
+       struct timer_list *t = this_cpu_ptr(&mce_timer);
        int ret;
 
        mce_device_create(cpu);
@@ -2577,13 +2570,13 @@ static int mce_cpu_online(unsigned int cpu)
                return ret;
        }
        mce_reenable_cpu();
-       mce_start_timer(cpu, t);
+       mce_start_timer(t);
        return 0;
 }
 
 static int mce_cpu_pre_down(unsigned int cpu)
 {
-       struct timer_list *t = &per_cpu(mce_timer, cpu);
+       struct timer_list *t = this_cpu_ptr(&mce_timer);
 
        mce_disable_cpu();
        del_timer_sync(t);
index 6a31e2691f3aa0ac68620459c371ca42912c4475..079e81733a58950486a7012b70e06f45e71af688 100644 (file)
@@ -384,8 +384,9 @@ void load_ucode_amd_ap(unsigned int family)
 reget:
                if (!get_builtin_microcode(&cp, family)) {
 #ifdef CONFIG_BLK_DEV_INITRD
-                       cp = find_cpio_data(ucode_path, (void *)initrd_start,
-                                           initrd_end - initrd_start, NULL);
+                       if (!initrd_gone)
+                               cp = find_cpio_data(ucode_path, (void *)initrd_start,
+                                                   initrd_end - initrd_start, NULL);
 #endif
                        if (!(cp.data && cp.size)) {
                                /*
index 2af69d27da629a5c802498e692300f9980862a2a..73102d932760b871896a956a88ff178362cc5a94 100644 (file)
@@ -46,6 +46,8 @@
 static struct microcode_ops    *microcode_ops;
 static bool dis_ucode_ldr = true;
 
+bool initrd_gone;
+
 LIST_HEAD(microcode_cache);
 
 /*
@@ -190,21 +192,24 @@ void load_ucode_ap(void)
 static int __init save_microcode_in_initrd(void)
 {
        struct cpuinfo_x86 *c = &boot_cpu_data;
+       int ret = -EINVAL;
 
        switch (c->x86_vendor) {
        case X86_VENDOR_INTEL:
                if (c->x86 >= 6)
-                       return save_microcode_in_initrd_intel();
+                       ret = save_microcode_in_initrd_intel();
                break;
        case X86_VENDOR_AMD:
                if (c->x86 >= 0x10)
-                       return save_microcode_in_initrd_amd(c->x86);
+                       ret = save_microcode_in_initrd_amd(c->x86);
                break;
        default:
                break;
        }
 
-       return -EINVAL;
+       initrd_gone = true;
+
+       return ret;
 }
 
 struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
@@ -247,9 +252,16 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
         * has the virtual address of the beginning of the initrd. It also
         * possibly relocates the ramdisk. In either case, initrd_start contains
         * the updated address so use that instead.
+        *
+        * initrd_gone is for the hotplug case where we've thrown out initrd
+        * already.
         */
-       if (!use_pa && initrd_start)
-               start = initrd_start;
+       if (!use_pa) {
+               if (initrd_gone)
+                       return (struct cpio_data){ NULL, 0, "" };
+               if (initrd_start)
+                       start = initrd_start;
+       }
 
        return find_cpio_data(path, (void *)start, size, NULL);
 #else /* !CONFIG_BLK_DEV_INITRD */
index 3f329b74e040c23b6b85dfd12a85f80d630c63ac..8325d8a09ab0768dd08156b8e4c5b755b78c10f9 100644 (file)
@@ -41,7 +41,7 @@
 
 static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
 
-/* Current microcode patch used in early patching */
+/* Current microcode patch used in early patching on the APs. */
 struct microcode_intel *intel_ucode_patch;
 
 static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
@@ -607,12 +607,6 @@ int __init save_microcode_in_initrd_intel(void)
        struct ucode_cpu_info uci;
        struct cpio_data cp;
 
-       /*
-        * AP loading didn't find any microcode patch, no need to save anything.
-        */
-       if (!intel_ucode_patch || IS_ERR(intel_ucode_patch))
-               return 0;
-
        if (!load_builtin_intel_microcode(&cp))
                cp = find_microcode_in_initrd(ucode_path, false);
 
@@ -628,7 +622,6 @@ int __init save_microcode_in_initrd_intel(void)
        return 0;
 }
 
-
 /*
  * @res_patch, output: a pointer to the patch we found.
  */
index e4e97a5355ce852ac49937fd180f62b614c1286c..de7234401275b56760f27573eea5669e2bda4f68 100644 (file)
@@ -9,6 +9,7 @@
 #include <asm/fpu/regset.h>
 #include <asm/fpu/signal.h>
 #include <asm/fpu/types.h>
+#include <asm/fpu/xstate.h>
 #include <asm/traps.h>
 
 #include <linux/hardirq.h>
@@ -183,7 +184,8 @@ void fpstate_init(union fpregs_state *state)
         * it will #GP. Make sure it is replaced after the memset().
         */
        if (static_cpu_has(X86_FEATURE_XSAVES))
-               state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT;
+               state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT |
+                                              xfeatures_mask;
 
        if (static_cpu_has(X86_FEATURE_FXSR))
                fpstate_init_fxstate(&state->fxsave);
index 85e87b46c318026ed28d87056c516aec3e5fb9ed..dc6ba5bda9fc83630c773a80c4adea6871db0a59 100644 (file)
@@ -352,6 +352,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
        } else {
                struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
 
+               irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
                irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
                disable_irq(hdev->irq);
                irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
index 46732dc3b73cd67e874dfe99b0eeb807f816e06a..99b920d0e5163d3162fe8607a28c5e83d0ca7e55 100644 (file)
@@ -433,9 +433,15 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
                int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 
                if (c->phys_proc_id == o->phys_proc_id &&
-                   per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) &&
-                   c->cpu_core_id == o->cpu_core_id)
-                       return topology_sane(c, o, "smt");
+                   per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
+                       if (c->cpu_core_id == o->cpu_core_id)
+                               return topology_sane(c, o, "smt");
+
+                       if ((c->cu_id != 0xff) &&
+                           (o->cu_id != 0xff) &&
+                           (c->cu_id == o->cu_id))
+                               return topology_sane(c, o, "smt");
+               }
 
        } else if (c->phys_proc_id == o->phys_proc_id &&
                   c->cpu_core_id == o->cpu_core_id) {
index e41af597aed8e2e454965f8459f26b372c091b4f..37e7cf544e51580e8633651bed25858cbcb840bf 100644 (file)
@@ -1356,6 +1356,9 @@ void __init tsc_init(void)
                (unsigned long)cpu_khz / 1000,
                (unsigned long)cpu_khz % 1000);
 
+       /* Sanitize TSC ADJUST before cyc2ns gets initialized */
+       tsc_store_and_check_tsc_adjust(true);
+
        /*
         * Secondary CPUs do not run through tsc_init(), so set up
         * all the scale factors for all CPUs, assuming the same
@@ -1386,8 +1389,6 @@ void __init tsc_init(void)
 
        if (unsynchronized_tsc())
                mark_tsc_unstable("TSCs unsynchronized");
-       else
-               tsc_store_and_check_tsc_adjust(true);
 
        check_system_tsc_reliable();
 
index d0db011051a54212742680d3f25c2c52fb5c8ea6..728f7537847583108075d91ad4aa75edfd86f077 100644 (file)
@@ -286,13 +286,6 @@ void check_tsc_sync_source(int cpu)
        if (unsynchronized_tsc())
                return;
 
-       if (tsc_clocksource_reliable) {
-               if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING)
-                       pr_info(
-                       "Skipped synchronization checks as TSC is reliable.\n");
-               return;
-       }
-
        /*
         * Set the maximum number of test runs to
         *  1 if the CPU does not provide the TSC_ADJUST MSR
@@ -380,14 +373,19 @@ void check_tsc_sync_target(void)
        int cpus = 2;
 
        /* Also aborts if there is no TSC. */
-       if (unsynchronized_tsc() || tsc_clocksource_reliable)
+       if (unsynchronized_tsc())
                return;
 
        /*
         * Store, verify and sanitize the TSC adjust register. If
         * successful skip the test.
+        *
+        * The test is also skipped when the TSC is marked reliable. This
+        * is true for SoCs which have no fallback clocksource. On these
+        * SoCs the TSC is frequency synchronized, but still the TSC ADJUST
+        * register might have been wrecked by the BIOS..
         */
-       if (tsc_store_and_check_tsc_adjust(false)) {
+       if (tsc_store_and_check_tsc_adjust(false) || tsc_clocksource_reliable) {
                atomic_inc(&skip_test);
                return;
        }
index d153be8929a68440ae5e5894497cbb7fa1ab9913..e52c9088660fac47d6da377b39412378ff0157b0 100644 (file)
@@ -3182,6 +3182,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
        memcpy(dest, xsave, XSAVE_HDR_OFFSET);
 
        /* Set XSTATE_BV */
+       xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
        *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
 
        /*
index ea9c49adaa1fce1f6cbf3962800e0b24bdf72c91..8aa6bea1cd6cc467cd5d39dbcde7d625c009495c 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/debugfs.h>
 #include <linux/mm.h>
 #include <linux/init.h>
+#include <linux/sched.h>
 #include <linux/seq_file.h>
 
 #include <asm/pgtable.h>
@@ -406,6 +407,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
                } else
                        note_page(m, &st, __pgprot(0), 1);
 
+               cond_resched();
                start++;
        }
 
index 319148bd4b05091d24576a7535b10aad7bec0c2d..2f25a363068cf9723e8b418e8c1942a6d3ca4029 100644 (file)
@@ -268,6 +268,22 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 
        efi_scratch.use_pgd = true;
 
+       /*
+        * Certain firmware versions are way too sentimental and still believe
+        * they are exclusive and unquestionable owners of the first physical page,
+        * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
+        * (but then write-access it later during SetVirtualAddressMap()).
+        *
+        * Create a 1:1 mapping for this page, to avoid triple faults during early
+        * boot with such firmware. We are free to hand this page to the BIOS,
+        * as trim_bios_range() will reserve the first page and isolate it away
+        * from memory allocators anyway.
+        */
+       if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, _PAGE_RW)) {
+               pr_err("Failed to create 1:1 mapping for the first page!\n");
+               return 1;
+       }
+
        /*
         * When making calls to the firmware everything needs to be 1:1
         * mapped and addressable with 32-bit pointers. Map the kernel
index 848e8568fb3c4a90c2eb89783c0420f8b5526cd6..8fd4be610607c2683b16a3e0da4249f4aea732e4 100644 (file)
@@ -419,7 +419,7 @@ subsys_initcall(topology_init);
 
 void cpu_reset(void)
 {
-#if XCHAL_HAVE_PTP_MMU
+#if XCHAL_HAVE_PTP_MMU && IS_ENABLED(CONFIG_MMU)
        local_irq_disable();
        /*
         * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must
index f8c82a9b401222c84d5d014ac339439a1708caba..ed1e78e24db0053e993555e14bf730c1cfb868c2 100644 (file)
@@ -306,11 +306,6 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
        if (ret == 0 || (ret && ret != -EOPNOTSUPP))
                goto out;
 
-       ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
-                       ZERO_PAGE(0), biop);
-       if (ret == 0 || (ret && ret != -EOPNOTSUPP))
-               goto out;
-
        ret = 0;
        while (nr_sects != 0) {
                bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES),
@@ -369,6 +364,10 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                        return 0;
        }
 
+       if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
+                       ZERO_PAGE(0)))
+               return 0;
+
        blk_start_plug(&plug);
        ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
                        &bio, discard);
index df939b54b09f731eac02657957f3f573c51a1ec5..1fad2a6b3bbbf0d1d4ee07f585bdc4d501467b5d 100644 (file)
@@ -356,6 +356,7 @@ int crypto_register_alg(struct crypto_alg *alg)
        struct crypto_larval *larval;
        int err;
 
+       alg->cra_flags &= ~CRYPTO_ALG_DEAD;
        err = crypto_check_alg(alg);
        if (err)
                return err;
index f849311e9fd4c94e57d81ba97279ec5fb0cb0ded..533265f110e0297b9fc1e14a7a215a54d634b8d8 100644 (file)
@@ -661,9 +661,9 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
 unlock:
        list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
                af_alg_free_sg(&rsgl->sgl);
+               list_del(&rsgl->list);
                if (rsgl != &ctx->first_rsgl)
                        sock_kfree_s(sk, rsgl, sizeof(*rsgl));
-               list_del(&rsgl->list);
        }
        INIT_LIST_HEAD(&ctx->list);
        aead_wmem_wakeup(sk);
index 2f82b8eba360e7f369338b7d7a340060d6519f4f..7361d00818e2bb61f5d280c6817db2a2e8d01fc4 100644 (file)
@@ -2704,6 +2704,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
        struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
        struct device *dev = acpi_desc->dev;
        struct acpi_nfit_flush_work flush;
+       int rc;
 
        /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
        device_lock(dev);
@@ -2716,7 +2717,10 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
        INIT_WORK_ONSTACK(&flush.work, flush_probe);
        COMPLETION_INITIALIZER_ONSTACK(flush.cmp);
        queue_work(nfit_wq, &flush.work);
-       return wait_for_completion_interruptible(&flush.cmp);
+
+       rc = wait_for_completion_interruptible(&flush.cmp);
+       cancel_work_sync(&flush.work);
+       return rc;
 }
 
 static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
index 9cd0a2d4181699d94f73f2af82490e59f23373e2..c2d3785ec2279f42013cdc4816beb60785279d95 100644 (file)
@@ -1702,6 +1702,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
 
                if (qc->err_mask & ~AC_ERR_OTHER)
                        qc->err_mask &= ~AC_ERR_OTHER;
+       } else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
+               qc->result_tf.command |= ATA_SENSE;
        }
 
        /* finish up */
@@ -4356,10 +4358,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "ST380013AS",         "3.20",         ATA_HORKAGE_MAX_SEC_1024 },
 
        /*
-        * Device times out with higher max sects.
+        * These devices time out with higher max sects.
         * https://bugzilla.kernel.org/show_bug.cgi?id=121671
         */
-       { "LITEON CX1-JB256-HP", NULL,          ATA_HORKAGE_MAX_SEC_1024 },
+       { "LITEON CX1-JB*-HP",  NULL,           ATA_HORKAGE_MAX_SEC_1024 },
 
        /* Devices we expect to fail diagnostics */
 
index 823e938c9a7877a1cadefde9127d447832630061..2f32782cea6d9c584797d1f7d9dc8e99eac0b796 100644 (file)
@@ -4132,6 +4132,9 @@ static int mv_platform_probe(struct platform_device *pdev)
        host->iomap = NULL;
        hpriv->base = devm_ioremap(&pdev->dev, res->start,
                                   resource_size(res));
+       if (!hpriv->base)
+               return -ENOMEM;
+
        hpriv->base -= SATAHC0_REG_BASE;
 
        hpriv->clk = clk_get(&pdev->dev, NULL);
index 4497d263209fb861e08e1650ddf654a98f2102e3..ac350c518e0c9479c05c4d9ff9f6ae918f26b96c 100644 (file)
@@ -558,9 +558,6 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
        struct firmware_buf *buf = fw_priv->buf;
 
        __fw_load_abort(buf);
-
-       /* avoid user action after loading abort */
-       fw_priv->buf = NULL;
 }
 
 static LIST_HEAD(pending_fw_head);
@@ -713,7 +710,7 @@ static ssize_t firmware_loading_store(struct device *dev,
 
        mutex_lock(&fw_lock);
        fw_buf = fw_priv->buf;
-       if (!fw_buf)
+       if (fw_state_is_aborted(&fw_buf->fw_st))
                goto out;
 
        switch (loading) {
index dacb6a8418aa927e8d75a86470b35b414bf48598..fa26ffd25fa61bae95bd441699a54ee8e16818d2 100644 (file)
@@ -389,33 +389,33 @@ static ssize_t show_valid_zones(struct device *dev,
 {
        struct memory_block *mem = to_memory_block(dev);
        unsigned long start_pfn, end_pfn;
+       unsigned long valid_start, valid_end, valid_pages;
        unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
-       struct page *first_page;
        struct zone *zone;
        int zone_shift = 0;
 
        start_pfn = section_nr_to_pfn(mem->start_section_nr);
        end_pfn = start_pfn + nr_pages;
-       first_page = pfn_to_page(start_pfn);
 
        /* The block contains more than one zone can not be offlined. */
-       if (!test_pages_in_a_zone(start_pfn, end_pfn))
+       if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
                return sprintf(buf, "none\n");
 
-       zone = page_zone(first_page);
+       zone = page_zone(pfn_to_page(valid_start));
+       valid_pages = valid_end - valid_start;
 
        /* MMOP_ONLINE_KEEP */
        sprintf(buf, "%s", zone->name);
 
        /* MMOP_ONLINE_KERNEL */
-       zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift);
+       zone_can_shift(valid_start, valid_pages, ZONE_NORMAL, &zone_shift);
        if (zone_shift) {
                strcat(buf, " ");
                strcat(buf, (zone + zone_shift)->name);
        }
 
        /* MMOP_ONLINE_MOVABLE */
-       zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift);
+       zone_can_shift(valid_start, valid_pages, ZONE_MOVABLE, &zone_shift);
        if (zone_shift) {
                strcat(buf, " ");
                strcat(buf, (zone + zone_shift)->name);
index 872eac4cb1dfdcd069136856e681f741332708e2..a14fac6a01d316a0249fdd45de3e676e0dffed7c 100644 (file)
@@ -966,13 +966,13 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
        unsigned long flags;
        int retval;
 
-       might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count))
                        return 0;
        }
 
+       might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_idle(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -998,13 +998,13 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
        unsigned long flags;
        int retval;
 
-       might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count))
                        return 0;
        }
 
+       might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_suspend(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -1029,7 +1029,8 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
        unsigned long flags;
        int retval;
 
-       might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+       might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
+                       dev->power.runtime_status != RPM_ACTIVE);
 
        if (rpmflags & RPM_GET_PUT)
                atomic_inc(&dev->power.usage_count);
index f642c4264c277bc05d98dc99eb15ac8091886ba5..168fa175d65a08319f5e92ad10f8f5ff4baea54a 100644 (file)
@@ -45,6 +45,9 @@ int bcma_sprom_get(struct bcma_bus *bus);
 void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
 void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
 void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
 
 /* driver_chipcommon_b.c */
 int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb);
index b4f6520e74f05b8b7a0c1ec710042c0f38a02d7d..62f5bfa5065d919ee3acd9e596373923070e574a 100644 (file)
@@ -15,8 +15,6 @@
 #include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
 
-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
-
 static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
                                         u32 mask, u32 value)
 {
@@ -186,9 +184,6 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
        if (cc->capabilities & BCMA_CC_CAP_PMU)
                bcma_pmu_early_init(cc);
 
-       if (IS_BUILTIN(CONFIG_BCM47XX) && bus->hosttype == BCMA_HOSTTYPE_SOC)
-               bcma_chipco_serial_init(cc);
-
        if (bus->hosttype == BCMA_HOSTTYPE_SOC)
                bcma_core_chipcommon_flash_detect(cc);
 
@@ -378,9 +373,9 @@ u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
        return res;
 }
 
-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
 {
-#if IS_BUILTIN(CONFIG_BCM47XX)
        unsigned int irq;
        u32 baud_base;
        u32 i;
@@ -422,5 +417,5 @@ static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
                ports[i].baud_base = baud_base;
                ports[i].reg_shift = 0;
        }
-#endif /* CONFIG_BCM47XX */
 }
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
index 96f17132820080843e9216523bc1328b2f8f7939..89af807cf29ce49e38f60e9e1c3e177ceb0e261a 100644 (file)
@@ -278,9 +278,12 @@ static void bcma_core_mips_nvram_init(struct bcma_drv_mips *mcore)
 
 void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
 {
+       struct bcma_bus *bus = mcore->core->bus;
+
        if (mcore->early_setup_done)
                return;
 
+       bcma_chipco_serial_init(&bus->drv_cc);
        bcma_core_mips_nvram_init(mcore);
 
        mcore->early_setup_done = true;
index 6ce5ce8be2f2dda80a52790ee611106097fabdbe..87fba424817e509e6d87bc931079b5a34ffcb706 100644 (file)
@@ -92,7 +92,6 @@ static void add_early_randomness(struct hwrng *rng)
        mutex_unlock(&reading_mutex);
        if (bytes_read > 0)
                add_device_randomness(rng_buffer, bytes_read);
-       memset(rng_buffer, 0, size);
 }
 
 static inline void cleanup_rng(struct kref *kref)
@@ -288,7 +287,6 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
                }
        }
 out:
-       memset(rng_buffer, 0, rng_buffer_size());
        return ret ? : err;
 
 out_unlock_reading:
@@ -427,7 +425,6 @@ static int hwrng_fillfn(void *unused)
                /* Outside lock, sure, but y'know: randomness. */
                add_hwgenerator_randomness((void *)rng_fillbuf, rc,
                                           rc * current_quality * 8 >> 10);
-               memset(rng_fillbuf, 0, rng_buffer_size());
        }
        hwrng_fill = NULL;
        return 0;
index e2ce8190ecc9a5c0baf3f6e642a4cff17f89862c..612898b4aaad045f7e96429ac325ecbec91a4295 100644 (file)
@@ -959,7 +959,7 @@ static irqreturn_t ccp5_irq_handler(int irq, void *data)
 static void ccp5_config(struct ccp_device *ccp)
 {
        /* Public side */
-       iowrite32(0x00001249, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
+       iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
 }
 
 static void ccp5other_config(struct ccp_device *ccp)
index 830f35e6005f0e002457d57fab70a8557fb85e24..649e5610a5cea7d08a95767af1c8f8e0153e0b7a 100644 (file)
@@ -238,6 +238,7 @@ struct ccp_dma_chan {
        struct ccp_device *ccp;
 
        spinlock_t lock;
+       struct list_head created;
        struct list_head pending;
        struct list_head active;
        struct list_head complete;
index 6553912804f73f1c061aec9bdd3afd0f0d7426bc..e5d9278f40197427e913993fe9249d405585fe87 100644 (file)
@@ -63,6 +63,7 @@ static void ccp_free_chan_resources(struct dma_chan *dma_chan)
        ccp_free_desc_resources(chan->ccp, &chan->complete);
        ccp_free_desc_resources(chan->ccp, &chan->active);
        ccp_free_desc_resources(chan->ccp, &chan->pending);
+       ccp_free_desc_resources(chan->ccp, &chan->created);
 
        spin_unlock_irqrestore(&chan->lock, flags);
 }
@@ -273,6 +274,7 @@ static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
        spin_lock_irqsave(&chan->lock, flags);
 
        cookie = dma_cookie_assign(tx_desc);
+       list_del(&desc->entry);
        list_add_tail(&desc->entry, &chan->pending);
 
        spin_unlock_irqrestore(&chan->lock, flags);
@@ -426,7 +428,7 @@ static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
 
        spin_lock_irqsave(&chan->lock, sflags);
 
-       list_add_tail(&desc->entry, &chan->pending);
+       list_add_tail(&desc->entry, &chan->created);
 
        spin_unlock_irqrestore(&chan->lock, sflags);
 
@@ -610,6 +612,7 @@ static int ccp_terminate_all(struct dma_chan *dma_chan)
        /*TODO: Purge the complete list? */
        ccp_free_desc_resources(chan->ccp, &chan->active);
        ccp_free_desc_resources(chan->ccp, &chan->pending);
+       ccp_free_desc_resources(chan->ccp, &chan->created);
 
        spin_unlock_irqrestore(&chan->lock, flags);
 
@@ -679,6 +682,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
                chan->ccp = ccp;
 
                spin_lock_init(&chan->lock);
+               INIT_LIST_HEAD(&chan->created);
                INIT_LIST_HEAD(&chan->pending);
                INIT_LIST_HEAD(&chan->active);
                INIT_LIST_HEAD(&chan->complete);
index 2ed1e24b44a8ba07b24a1ef4a9d7cca4e57522d4..b4b78b37f8a684698d79bc053be0b2f9db7e654f 100644 (file)
@@ -158,7 +158,7 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
        case CRYPTO_ALG_TYPE_AEAD:
                ctx_req.req.aead_req = (struct aead_request *)req;
                ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
-               dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.aead_req->dst,
+               dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst,
                             ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
                if (ctx_req.ctx.reqctx->skb) {
                        kfree_skb(ctx_req.ctx.reqctx->skb);
@@ -1362,8 +1362,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
        struct chcr_wr *chcr_req;
        struct cpl_rx_phys_dsgl *phys_cpl;
        struct phys_sge_parm sg_param;
-       struct scatterlist *src, *dst;
-       struct scatterlist src_sg[2], dst_sg[2];
+       struct scatterlist *src;
        unsigned int frags = 0, transhdr_len;
        unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
        unsigned int   kctx_len = 0;
@@ -1383,19 +1382,21 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 
        if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
                goto err;
-       src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
-       dst = src;
+       src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+       reqctx->dst = src;
+
        if (req->src != req->dst) {
                err = chcr_copy_assoc(req, aeadctx);
                if (err)
                        return ERR_PTR(err);
-               dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+               reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+                                              req->assoclen);
        }
        if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
                null = 1;
                assoclen = 0;
        }
-       reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+       reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
                                             (op_type ? -authsize : authsize));
        if (reqctx->dst_nents <= 0) {
                pr_err("AUTHENC:Invalid Destination sg entries\n");
@@ -1460,7 +1461,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
        sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
        sg_param.qid = qid;
        sg_param.align = 0;
-       if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+       if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
                                  &sg_param))
                goto dstmap_fail;
 
@@ -1711,8 +1712,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
        struct chcr_wr *chcr_req;
        struct cpl_rx_phys_dsgl *phys_cpl;
        struct phys_sge_parm sg_param;
-       struct scatterlist *src, *dst;
-       struct scatterlist src_sg[2], dst_sg[2];
+       struct scatterlist *src;
        unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
        unsigned int dst_size = 0, kctx_len;
        unsigned int sub_type;
@@ -1728,17 +1728,19 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
        if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
                goto err;
        sub_type = get_aead_subtype(tfm);
-       src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
-       dst = src;
+       src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+       reqctx->dst = src;
+
        if (req->src != req->dst) {
                err = chcr_copy_assoc(req, aeadctx);
                if (err) {
                        pr_err("AAD copy to destination buffer fails\n");
                        return ERR_PTR(err);
                }
-               dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+               reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+                                              req->assoclen);
        }
-       reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+       reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
                                             (op_type ? -authsize : authsize));
        if (reqctx->dst_nents <= 0) {
                pr_err("CCM:Invalid Destination sg entries\n");
@@ -1777,7 +1779,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
        sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
        sg_param.qid = qid;
        sg_param.align = 0;
-       if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+       if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
                                  &sg_param))
                goto dstmap_fail;
 
@@ -1809,8 +1811,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
        struct chcr_wr *chcr_req;
        struct cpl_rx_phys_dsgl *phys_cpl;
        struct phys_sge_parm sg_param;
-       struct scatterlist *src, *dst;
-       struct scatterlist src_sg[2], dst_sg[2];
+       struct scatterlist *src;
        unsigned int frags = 0, transhdr_len;
        unsigned int ivsize = AES_BLOCK_SIZE;
        unsigned int dst_size = 0, kctx_len;
@@ -1832,13 +1833,14 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
        if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
                goto err;
 
-       src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
-       dst = src;
+       src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+       reqctx->dst = src;
        if (req->src != req->dst) {
                err = chcr_copy_assoc(req, aeadctx);
                if (err)
                        return  ERR_PTR(err);
-               dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+               reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+                                              req->assoclen);
        }
 
        if (!req->cryptlen)
@@ -1848,7 +1850,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
                crypt_len = AES_BLOCK_SIZE;
        else
                crypt_len = req->cryptlen;
-       reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+       reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
                                             (op_type ? -authsize : authsize));
        if (reqctx->dst_nents <= 0) {
                pr_err("GCM:Invalid Destination sg entries\n");
@@ -1923,7 +1925,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
        sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
        sg_param.qid = qid;
        sg_param.align = 0;
-       if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+       if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
                                  &sg_param))
                goto dstmap_fail;
 
@@ -1937,7 +1939,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
                write_sg_to_skb(skb, &frags, src, req->cryptlen);
        } else {
                aes_gcm_empty_pld_pad(req->dst, authsize - 1);
-               write_sg_to_skb(skb, &frags, dst, crypt_len);
+               write_sg_to_skb(skb, &frags, reqctx->dst, crypt_len);
+
        }
 
        create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
@@ -2189,8 +2192,8 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
        unsigned int ck_size;
        int ret = 0, key_ctx_size = 0;
 
-       if (get_aead_subtype(aead) ==
-           CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+       if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
+           keylen > 3) {
                keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
                memcpy(aeadctx->salt, key + keylen, 4);
        }
index 918da8e6e2d8a587ef7e397e149a9779a8c4a21f..1c65f07e1cc9a56f9dc88b30c10ba57e13b875ed 100644 (file)
@@ -52,6 +52,7 @@ static struct cxgb4_uld_info chcr_uld_info = {
 int assign_chcr_device(struct chcr_dev **dev)
 {
        struct uld_ctx *u_ctx;
+       int ret = -ENXIO;
 
        /*
         * Which device to use if multiple devices are available TODO
@@ -59,15 +60,14 @@ int assign_chcr_device(struct chcr_dev **dev)
         * must go to the same device to maintain the ordering.
         */
        mutex_lock(&dev_mutex); /* TODO ? */
-       u_ctx = list_first_entry(&uld_ctx_list, struct uld_ctx, entry);
-       if (!u_ctx) {
-               mutex_unlock(&dev_mutex);
-               return -ENXIO;
+       list_for_each_entry(u_ctx, &uld_ctx_list, entry)
+               if (u_ctx && u_ctx->dev) {
+                       *dev = u_ctx->dev;
+                       ret = 0;
+                       break;
        }
-
-       *dev = u_ctx->dev;
        mutex_unlock(&dev_mutex);
-       return 0;
+       return ret;
 }
 
 static int chcr_dev_add(struct uld_ctx *u_ctx)
@@ -202,10 +202,8 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
 
 static int __init chcr_crypto_init(void)
 {
-       if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info)) {
+       if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
                pr_err("ULD register fail: No chcr crypto support in cxgb4");
-               return -1;
-       }
 
        return 0;
 }
index d5af7d64a763bc8ad4ce45d60bada6226059bff1..7ec0a8f124753d1fbcdf7b248d14177330bab977 100644 (file)
@@ -158,6 +158,9 @@ struct ablk_ctx {
 };
 struct chcr_aead_reqctx {
        struct  sk_buff *skb;
+       struct scatterlist *dst;
+       struct scatterlist srcffwd[2];
+       struct scatterlist dstffwd[2];
        short int dst_nents;
        u16 verify;
        u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
index bc5cbc193aae5c6167559ea3d2b9975acc7e6a5a..5b2d78a5b5aaaffd9ba3a858269c8664cd4f89b7 100644 (file)
@@ -233,7 +233,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                              &hw_data->accel_capabilities_mask);
 
        /* Find and map all the device's BARS */
-       i = 0;
+       i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
        bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
        for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
                         ADF_PCI_MAX_BARS * 2) {
index e8822536530b99666f2f5137c25b36f7b451fe06..33f0a6251e385c8f5f72fc0f384106c368329c86 100644 (file)
@@ -69,6 +69,7 @@
 #define ADF_ERRSOU5 (0x3A000 + 0xD8)
 #define ADF_DEVICE_FUSECTL_OFFSET 0x40
 #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
+#define ADF_DEVICE_FUSECTL_MASK 0x80000000
 #define ADF_PCI_MAX_BARS 3
 #define ADF_DEVICE_NAME_LENGTH 32
 #define ADF_ETR_MAX_RINGS_PER_BANK 16
index 1e480f140663530a699d8bf51b402bce394a935e..8c4fd255a601b2d6c60e2300f5c878f8a512f57d 100644 (file)
@@ -456,7 +456,7 @@ static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
        unsigned int csr_val;
        int times = 30;
 
-       if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID)
+       if (handle->pci_dev->device != ADF_DH895XCC_PCI_DEVICE_ID)
                return 0;
 
        csr_val = ADF_CSR_RD(csr_addr, 0);
@@ -716,7 +716,7 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
                (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v +
                                 LOCAL_TO_XFER_REG_OFFSET);
        handle->pci_dev = pci_info->pci_dev;
-       if (handle->pci_dev->device != ADF_C3XXX_PCI_DEVICE_ID) {
+       if (handle->pci_dev->device == ADF_DH895XCC_PCI_DEVICE_ID) {
                sram_bar =
                        &pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
                handle->hal_sram_addr_v = sram_bar->virt_addr;
index d5ba43a87a682b6e718d5e2ad7c804498bad61de..200828c60db9ffce12b968956271954a88ac54d0 100644 (file)
@@ -153,6 +153,8 @@ struct cppi41_dd {
 
        /* context for suspend/resume */
        unsigned int dma_tdfdq;
+
+       bool is_suspended;
 };
 
 #define FIST_COMPLETION_QUEUE  93
@@ -257,6 +259,10 @@ static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
        BUG_ON(desc_num >= ALLOC_DECS_NUM);
        c = cdd->chan_busy[desc_num];
        cdd->chan_busy[desc_num] = NULL;
+
+       /* Usecount for chan_busy[], paired with push_desc_queue() */
+       pm_runtime_put(cdd->ddev.dev);
+
        return c;
 }
 
@@ -317,12 +323,12 @@ static irqreturn_t cppi41_irq(int irq, void *data)
 
                while (val) {
                        u32 desc, len;
-                       int error;
 
-                       error = pm_runtime_get(cdd->ddev.dev);
-                       if (error < 0)
-                               dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
-                                       __func__, error);
+                       /*
+                        * This should never trigger, see the comments in
+                        * push_desc_queue()
+                        */
+                       WARN_ON(cdd->is_suspended);
 
                        q_num = __fls(val);
                        val &= ~(1 << q_num);
@@ -343,9 +349,6 @@ static irqreturn_t cppi41_irq(int irq, void *data)
                        c->residue = pd_trans_len(c->desc->pd6) - len;
                        dma_cookie_complete(&c->txd);
                        dmaengine_desc_get_callback_invoke(&c->txd, NULL);
-
-                       pm_runtime_mark_last_busy(cdd->ddev.dev);
-                       pm_runtime_put_autosuspend(cdd->ddev.dev);
                }
        }
        return IRQ_HANDLED;
@@ -447,6 +450,15 @@ static void push_desc_queue(struct cppi41_channel *c)
         */
        __iowmb();
 
+       /*
+        * DMA transfers can take at least 200ms to complete with USB mass
+        * storage connected. To prevent autosuspend timeouts, we must use
+        * pm_runtime_get/put() when chan_busy[] is modified. This will get
+        * cleared in desc_to_chan() or cppi41_stop_chan() depending on the
+        * outcome of the transfer.
+        */
+       pm_runtime_get(cdd->ddev.dev);
+
        desc_phys = lower_32_bits(c->desc_phys);
        desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
        WARN_ON(cdd->chan_busy[desc_num]);
@@ -457,20 +469,26 @@ static void push_desc_queue(struct cppi41_channel *c)
        cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
 }
 
-static void pending_desc(struct cppi41_channel *c)
+/*
+ * Caller must hold cdd->lock to prevent push_desc_queue()
+ * getting called out of order. We have both cppi41_dma_issue_pending()
+ * and cppi41_runtime_resume() call this function.
+ */
+static void cppi41_run_queue(struct cppi41_dd *cdd)
 {
-       struct cppi41_dd *cdd = c->cdd;
-       unsigned long flags;
+       struct cppi41_channel *c, *_c;
 
-       spin_lock_irqsave(&cdd->lock, flags);
-       list_add_tail(&c->node, &cdd->pending);
-       spin_unlock_irqrestore(&cdd->lock, flags);
+       list_for_each_entry_safe(c, _c, &cdd->pending, node) {
+               push_desc_queue(c);
+               list_del(&c->node);
+       }
 }
 
 static void cppi41_dma_issue_pending(struct dma_chan *chan)
 {
        struct cppi41_channel *c = to_cpp41_chan(chan);
        struct cppi41_dd *cdd = c->cdd;
+       unsigned long flags;
        int error;
 
        error = pm_runtime_get(cdd->ddev.dev);
@@ -482,10 +500,11 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
                return;
        }
 
-       if (likely(pm_runtime_active(cdd->ddev.dev)))
-               push_desc_queue(c);
-       else
-               pending_desc(c);
+       spin_lock_irqsave(&cdd->lock, flags);
+       list_add_tail(&c->node, &cdd->pending);
+       if (!cdd->is_suspended)
+               cppi41_run_queue(cdd);
+       spin_unlock_irqrestore(&cdd->lock, flags);
 
        pm_runtime_mark_last_busy(cdd->ddev.dev);
        pm_runtime_put_autosuspend(cdd->ddev.dev);
@@ -705,6 +724,9 @@ static int cppi41_stop_chan(struct dma_chan *chan)
        WARN_ON(!cdd->chan_busy[desc_num]);
        cdd->chan_busy[desc_num] = NULL;
 
+       /* Usecount for chan_busy[], paired with push_desc_queue() */
+       pm_runtime_put(cdd->ddev.dev);
+
        return 0;
 }
 
@@ -1150,8 +1172,12 @@ static int __maybe_unused cppi41_resume(struct device *dev)
 static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
 {
        struct cppi41_dd *cdd = dev_get_drvdata(dev);
+       unsigned long flags;
 
+       spin_lock_irqsave(&cdd->lock, flags);
+       cdd->is_suspended = true;
        WARN_ON(!list_empty(&cdd->pending));
+       spin_unlock_irqrestore(&cdd->lock, flags);
 
        return 0;
 }
@@ -1159,14 +1185,11 @@ static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
 static int __maybe_unused cppi41_runtime_resume(struct device *dev)
 {
        struct cppi41_dd *cdd = dev_get_drvdata(dev);
-       struct cppi41_channel *c, *_c;
        unsigned long flags;
 
        spin_lock_irqsave(&cdd->lock, flags);
-       list_for_each_entry_safe(c, _c, &cdd->pending, node) {
-               push_desc_queue(c);
-               list_del(&c->node);
-       }
+       cdd->is_suspended = false;
+       cppi41_run_queue(cdd);
        spin_unlock_irqrestore(&cdd->lock, flags);
 
        return 0;
index 740bbb942594873b08deecb59c801460dcc868ab..7539f73df9e0edbf71b424a66bcabc7d8df01e44 100644 (file)
@@ -1699,7 +1699,6 @@ static bool _chan_ns(const struct pl330_dmac *pl330, int i)
 static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
 {
        struct pl330_thread *thrd = NULL;
-       unsigned long flags;
        int chans, i;
 
        if (pl330->state == DYING)
@@ -1707,8 +1706,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
 
        chans = pl330->pcfg.num_chan;
 
-       spin_lock_irqsave(&pl330->lock, flags);
-
        for (i = 0; i < chans; i++) {
                thrd = &pl330->channels[i];
                if ((thrd->free) && (!_manager_ns(thrd) ||
@@ -1726,8 +1723,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
                thrd = NULL;
        }
 
-       spin_unlock_irqrestore(&pl330->lock, flags);
-
        return thrd;
 }
 
@@ -1745,7 +1740,6 @@ static inline void _free_event(struct pl330_thread *thrd, int ev)
 static void pl330_release_channel(struct pl330_thread *thrd)
 {
        struct pl330_dmac *pl330;
-       unsigned long flags;
 
        if (!thrd || thrd->free)
                return;
@@ -1757,10 +1751,8 @@ static void pl330_release_channel(struct pl330_thread *thrd)
 
        pl330 = thrd->dmac;
 
-       spin_lock_irqsave(&pl330->lock, flags);
        _free_event(thrd, thrd->ev);
        thrd->free = true;
-       spin_unlock_irqrestore(&pl330->lock, flags);
 }
 
 /* Initialize the structure for PL330 configuration, that can be used
@@ -2122,20 +2114,20 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
        struct pl330_dmac *pl330 = pch->dmac;
        unsigned long flags;
 
-       spin_lock_irqsave(&pch->lock, flags);
+       spin_lock_irqsave(&pl330->lock, flags);
 
        dma_cookie_init(chan);
        pch->cyclic = false;
 
        pch->thread = pl330_request_channel(pl330);
        if (!pch->thread) {
-               spin_unlock_irqrestore(&pch->lock, flags);
+               spin_unlock_irqrestore(&pl330->lock, flags);
                return -ENOMEM;
        }
 
        tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
 
-       spin_unlock_irqrestore(&pch->lock, flags);
+       spin_unlock_irqrestore(&pl330->lock, flags);
 
        return 1;
 }
@@ -2238,12 +2230,13 @@ static int pl330_pause(struct dma_chan *chan)
 static void pl330_free_chan_resources(struct dma_chan *chan)
 {
        struct dma_pl330_chan *pch = to_pchan(chan);
+       struct pl330_dmac *pl330 = pch->dmac;
        unsigned long flags;
 
        tasklet_kill(&pch->task);
 
        pm_runtime_get_sync(pch->dmac->ddma.dev);
-       spin_lock_irqsave(&pch->lock, flags);
+       spin_lock_irqsave(&pl330->lock, flags);
 
        pl330_release_channel(pch->thread);
        pch->thread = NULL;
@@ -2251,7 +2244,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
        if (pch->cyclic)
                list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
 
-       spin_unlock_irqrestore(&pch->lock, flags);
+       spin_unlock_irqrestore(&pl330->lock, flags);
        pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
        pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
 }
index 921dfa047202952c9064cd39971e68e0e3c28b49..260c4b4b492ec38735715859522068da40c21381 100644 (file)
@@ -187,6 +187,7 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
 struct exit_boot_struct {
        efi_memory_desc_t *runtime_map;
        int *runtime_entry_count;
+       void *new_fdt_addr;
 };
 
 static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
@@ -202,7 +203,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
        efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
                        p->runtime_map, p->runtime_entry_count);
 
-       return EFI_SUCCESS;
+       return update_fdt_memmap(p->new_fdt_addr, map);
 }
 
 /*
@@ -300,22 +301,13 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
 
        priv.runtime_map = runtime_map;
        priv.runtime_entry_count = &runtime_entry_count;
+       priv.new_fdt_addr = (void *)*new_fdt_addr;
        status = efi_exit_boot_services(sys_table, handle, &map, &priv,
                                        exit_boot_func);
 
        if (status == EFI_SUCCESS) {
                efi_set_virtual_address_map_t *svam;
 
-               status = update_fdt_memmap((void *)*new_fdt_addr, &map);
-               if (status != EFI_SUCCESS) {
-                       /*
-                        * The kernel won't get far without the memory map, but
-                        * may still be able to print something meaningful so
-                        * return success here.
-                        */
-                       return EFI_SUCCESS;
-               }
-
                /* Install the new virtual address map */
                svam = sys_table->runtime->set_virtual_address_map;
                status = svam(runtime_entry_count * desc_size, desc_size,
index e2b0b1646f995fd94d12e17a8cb1258bec34061f..0635829b18cf3aed41239079e4208336b00cda0f 100644 (file)
@@ -254,6 +254,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
        }
        WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
 
+       if (adev->mode_info.num_crtc)
+               amdgpu_display_set_vga_render_state(adev, false);
+
        gmc_v6_0_mc_stop(adev, &save);
 
        if (gmc_v6_0_wait_for_idle((void *)adev)) {
@@ -283,7 +286,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
                dev_warn(adev->dev, "Wait for MC idle timedout !\n");
        }
        gmc_v6_0_mc_resume(adev, &save);
-       amdgpu_display_set_vga_render_state(adev, false);
 }
 
 static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
index 50f5cf7b69d1dc55fd427efa61e7e8e5eeff80d0..fdfb1ec17e660efa0b1f2c1f7273fa8d6fd8567a 100644 (file)
@@ -2032,13 +2032,16 @@ static void complete_crtc_signaling(struct drm_device *dev,
        }
 
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               struct drm_pending_vblank_event *event = crtc_state->event;
                /*
-                * TEST_ONLY and PAGE_FLIP_EVENT are mutually
-                * exclusive, if they weren't, this code should be
-                * called on success for TEST_ONLY too.
+                * Free the allocated event. drm_atomic_helper_setup_commit
+                * can allocate an event too, so only free it if it's ours
+                * to prevent a double free in drm_atomic_state_clear.
                 */
-               if (crtc_state->event)
-                       drm_event_cancel_free(dev, &crtc_state->event->base);
+               if (event && (event->base.fence || event->base.file_priv)) {
+                       drm_event_cancel_free(dev, &event->base);
+                       crtc_state->event = NULL;
+               }
        }
 
        if (!fence_state)
index 34f757bcabae8d88382f18392467e67f4e0f6100..4594477dee00bc0ffb7847da556985848df717e7 100644 (file)
@@ -1666,9 +1666,6 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
 
                funcs = plane->helper_private;
 
-               if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
-                       continue;
-
                if (funcs->prepare_fb) {
                        ret = funcs->prepare_fb(plane, plane_state);
                        if (ret)
@@ -1685,9 +1682,6 @@ fail:
                if (j >= i)
                        continue;
 
-               if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
-                       continue;
-
                funcs = plane->helper_private;
 
                if (funcs->cleanup_fb)
@@ -1954,9 +1948,6 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
        for_each_plane_in_state(old_state, plane, plane_state, i) {
                const struct drm_plane_helper_funcs *funcs;
 
-               if (!drm_atomic_helper_framebuffer_changed(dev, old_state, plane_state->crtc))
-                       continue;
-
                funcs = plane->helper_private;
 
                if (funcs->cleanup_fb)
index 5a452628939272969c54239498b9a1ca86cd4a67..7a7019ac93884eeeba046ad62b3c81037796beb7 100644 (file)
@@ -225,6 +225,7 @@ int drm_connector_init(struct drm_device *dev,
 
        INIT_LIST_HEAD(&connector->probed_modes);
        INIT_LIST_HEAD(&connector->modes);
+       mutex_init(&connector->mutex);
        connector->edid_blob_ptr = NULL;
        connector->status = connector_status_unknown;
 
@@ -359,6 +360,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
                connector->funcs->atomic_destroy_state(connector,
                                                       connector->state);
 
+       mutex_destroy(&connector->mutex);
+
        memset(connector, 0, sizeof(*connector));
 }
 EXPORT_SYMBOL(drm_connector_cleanup);
@@ -374,14 +377,18 @@ EXPORT_SYMBOL(drm_connector_cleanup);
  */
 int drm_connector_register(struct drm_connector *connector)
 {
-       int ret;
+       int ret = 0;
 
-       if (connector->registered)
+       if (!connector->dev->registered)
                return 0;
 
+       mutex_lock(&connector->mutex);
+       if (connector->registered)
+               goto unlock;
+
        ret = drm_sysfs_connector_add(connector);
        if (ret)
-               return ret;
+               goto unlock;
 
        ret = drm_debugfs_connector_add(connector);
        if (ret) {
@@ -397,12 +404,14 @@ int drm_connector_register(struct drm_connector *connector)
        drm_mode_object_register(connector->dev, &connector->base);
 
        connector->registered = true;
-       return 0;
+       goto unlock;
 
 err_debugfs:
        drm_debugfs_connector_remove(connector);
 err_sysfs:
        drm_sysfs_connector_remove(connector);
+unlock:
+       mutex_unlock(&connector->mutex);
        return ret;
 }
 EXPORT_SYMBOL(drm_connector_register);
@@ -415,8 +424,11 @@ EXPORT_SYMBOL(drm_connector_register);
  */
 void drm_connector_unregister(struct drm_connector *connector)
 {
-       if (!connector->registered)
+       mutex_lock(&connector->mutex);
+       if (!connector->registered) {
+               mutex_unlock(&connector->mutex);
                return;
+       }
 
        if (connector->funcs->early_unregister)
                connector->funcs->early_unregister(connector);
@@ -425,6 +437,7 @@ void drm_connector_unregister(struct drm_connector *connector)
        drm_debugfs_connector_remove(connector);
 
        connector->registered = false;
+       mutex_unlock(&connector->mutex);
 }
 EXPORT_SYMBOL(drm_connector_unregister);
 
index a525751b4559e9f3cc99850d5aac6e5c3eb6f16c..6594b4088f11bc8e5aa6a6c308ba3fdf633921a3 100644 (file)
@@ -745,6 +745,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
        if (ret)
                goto err_minors;
 
+       dev->registered = true;
+
        if (dev->driver->load) {
                ret = dev->driver->load(dev, flags);
                if (ret)
@@ -785,6 +787,8 @@ void drm_dev_unregister(struct drm_device *dev)
 
        drm_lastclose(dev);
 
+       dev->registered = false;
+
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                drm_modeset_unregister_all(dev);
 
index b2c4a0b8a627e39c5828922083b78dfd667ad047..728ca3ea74d2c85df8734ddaa9285e126bc2c82c 100644 (file)
@@ -213,7 +213,8 @@ static void intel_detect_pch(struct drm_device *dev)
                        } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_KBP;
                                DRM_DEBUG_KMS("Found KabyPoint PCH\n");
-                               WARN_ON(!IS_KABYLAKE(dev_priv));
+                               WARN_ON(!IS_SKYLAKE(dev_priv) &&
+                                       !IS_KABYLAKE(dev_priv));
                        } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
                                   (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
                                   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
@@ -2427,6 +2428,7 @@ static int intel_runtime_resume(struct device *kdev)
         * we can do is to hope that things will still work (and disable RPM).
         */
        i915_gem_init_swizzling(dev_priv);
+       i915_gem_restore_fences(dev_priv);
 
        intel_runtime_pm_enable_interrupts(dev_priv);
 
index 69bc3b0c43905eccf19ad142cf32c7064a09feb0..8493e19b563a134ba588e9df39c3fb5785a25a69 100644 (file)
@@ -1012,6 +1012,8 @@ struct intel_fbc {
        struct work_struct underrun_work;
 
        struct intel_fbc_state_cache {
+               struct i915_vma *vma;
+
                struct {
                        unsigned int mode_flags;
                        uint32_t hsw_bdw_pixel_rate;
@@ -1025,15 +1027,14 @@ struct intel_fbc {
                } plane;
 
                struct {
-                       u64 ilk_ggtt_offset;
                        uint32_t pixel_format;
                        unsigned int stride;
-                       int fence_reg;
-                       unsigned int tiling_mode;
                } fb;
        } state_cache;
 
        struct intel_fbc_reg_params {
+               struct i915_vma *vma;
+
                struct {
                        enum pipe pipe;
                        enum plane plane;
@@ -1041,10 +1042,8 @@ struct intel_fbc {
                } crtc;
 
                struct {
-                       u64 ggtt_offset;
                        uint32_t pixel_format;
                        unsigned int stride;
-                       int fence_reg;
                } fb;
 
                int cfb_size;
@@ -3168,13 +3167,6 @@ i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj,
        return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view);
 }
 
-static inline unsigned long
-i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
-                           const struct i915_ggtt_view *view)
-{
-       return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view));
-}
-
 /* i915_gem_fence_reg.c */
 int __must_check i915_vma_get_fence(struct i915_vma *vma);
 int __must_check i915_vma_put_fence(struct i915_vma *vma);
index 4b23a78147135d2b056fb904eb0c2a2d9359f975..24b5b046754b37e8b8e3ab2c04a9f890b41eecf9 100644 (file)
@@ -2010,8 +2010,16 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
 
-               if (WARN_ON(reg->pin_count))
-                       continue;
+               /* Ideally we want to assert that the fence register is not
+                * live at this point (i.e. that no piece of code will be
+                * trying to write through fence + GTT, as that both violates
+                * our tracking of activity and associated locking/barriers,
+                * but also is illegal given that the hw is powered down).
+                *
+                * Previously we used reg->pin_count as a "liveness" indicator.
+                * That is not sufficient, and we need a more fine-grained
+                * tool if we want to have a sanity check here.
+                */
 
                if (!reg->vma)
                        continue;
@@ -3478,7 +3486,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
        vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
 
        /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
-       if (obj->cache_dirty) {
+       if (obj->cache_dirty || obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
                i915_gem_clflush_object(obj, true);
                intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
        }
index 097d9d8c2315e061a3600615532c6d51531ee42c..b8b877c91b0a9b36b1a9ac318b2e5255b096a46e 100644 (file)
@@ -1181,14 +1181,14 @@ validate_exec_list(struct drm_device *dev,
                        if (exec[i].offset !=
                            gen8_canonical_addr(exec[i].offset & PAGE_MASK))
                                return -EINVAL;
-
-                       /* From drm_mm perspective address space is continuous,
-                        * so from this point we're always using non-canonical
-                        * form internally.
-                        */
-                       exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
                }
 
+               /* From drm_mm perspective address space is continuous,
+                * so from this point we're always using non-canonical
+                * form internally.
+                */
+               exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
+
                if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
                        return -EINVAL;
 
index 4b3ff3e5b911167557880228d5da44bb3d2616bf..d09c74973cb37a6c8d599c007ea4a53c0e0994e0 100644 (file)
@@ -66,8 +66,16 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 
        max_order = MAX_ORDER;
 #ifdef CONFIG_SWIOTLB
-       if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */
-               max_order = min(max_order, ilog2(IO_TLB_SEGPAGES));
+       if (swiotlb_nr_tbl()) {
+               unsigned int max_segment;
+
+               max_segment = swiotlb_max_segment();
+               if (max_segment) {
+                       max_segment = max_t(unsigned int, max_segment,
+                                           PAGE_SIZE) >> PAGE_SHIFT;
+                       max_order = min(max_order, ilog2(max_segment));
+               }
+       }
 #endif
 
        gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
index dbe9fb41ae535449f996ab36e51bd626be6651fc..8d3e515f27bade27acda9544a30d975628ac8ab3 100644 (file)
@@ -85,6 +85,8 @@ intel_plane_duplicate_state(struct drm_plane *plane)
 
        __drm_atomic_helper_plane_duplicate_state(plane, state);
 
+       intel_state->vma = NULL;
+
        return state;
 }
 
@@ -100,6 +102,24 @@ void
 intel_plane_destroy_state(struct drm_plane *plane,
                          struct drm_plane_state *state)
 {
+       struct i915_vma *vma;
+
+       vma = fetch_and_zero(&to_intel_plane_state(state)->vma);
+
+       /*
+        * FIXME: Normally intel_cleanup_plane_fb handles destruction of vma.
+        * We currently don't clear all planes during driver unload, so we have
+        * to be able to unpin vma here for now.
+        *
+        * Normally this can only happen during unload when kmscon is disabled
+        * and userspace doesn't attempt to set a framebuffer at all.
+        */
+       if (vma) {
+               mutex_lock(&plane->dev->struct_mutex);
+               intel_unpin_fb_vma(vma);
+               mutex_unlock(&plane->dev->struct_mutex);
+       }
+
        drm_atomic_helper_plane_destroy_state(plane, state);
 }
 
index f0b9aa7a0483d1928aecec439d342fa2c85b78d9..891c86aef99dfe1b939d5641d0b604bf9b3a49de 100644 (file)
@@ -2235,27 +2235,22 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
                        i915_vma_pin_fence(vma);
        }
 
+       i915_vma_get(vma);
 err:
        intel_runtime_pm_put(dev_priv);
        return vma;
 }
 
-void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
+void intel_unpin_fb_vma(struct i915_vma *vma)
 {
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-       struct i915_ggtt_view view;
-       struct i915_vma *vma;
-
-       WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
-
-       intel_fill_fb_ggtt_view(&view, fb, rotation);
-       vma = i915_gem_object_to_ggtt(obj, &view);
+       lockdep_assert_held(&vma->vm->dev->struct_mutex);
 
        if (WARN_ON_ONCE(!vma))
                return;
 
        i915_vma_unpin_fence(vma);
        i915_gem_object_unpin_from_display_plane(vma);
+       i915_vma_put(vma);
 }
 
 static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
@@ -2750,7 +2745,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *c;
-       struct intel_crtc *i;
        struct drm_i915_gem_object *obj;
        struct drm_plane *primary = intel_crtc->base.primary;
        struct drm_plane_state *plane_state = primary->state;
@@ -2775,20 +2769,20 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
         * an fb with another CRTC instead
         */
        for_each_crtc(dev, c) {
-               i = to_intel_crtc(c);
+               struct intel_plane_state *state;
 
                if (c == &intel_crtc->base)
                        continue;
 
-               if (!i->active)
+               if (!to_intel_crtc(c)->active)
                        continue;
 
-               fb = c->primary->fb;
-               if (!fb)
+               state = to_intel_plane_state(c->primary->state);
+               if (!state->vma)
                        continue;
 
-               obj = intel_fb_obj(fb);
-               if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) {
+               if (intel_plane_ggtt_offset(state) == plane_config->base) {
+                       fb = c->primary->fb;
                        drm_framebuffer_reference(fb);
                        goto valid_fb;
                }
@@ -2809,6 +2803,19 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
        return;
 
 valid_fb:
+       mutex_lock(&dev->struct_mutex);
+       intel_state->vma =
+               intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
+       mutex_unlock(&dev->struct_mutex);
+       if (IS_ERR(intel_state->vma)) {
+               DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
+                         intel_crtc->pipe, PTR_ERR(intel_state->vma));
+
+               intel_state->vma = NULL;
+               drm_framebuffer_unreference(fb);
+               return;
+       }
+
        plane_state->src_x = 0;
        plane_state->src_y = 0;
        plane_state->src_w = fb->width << 16;
@@ -3104,13 +3111,13 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
        I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
        if (INTEL_GEN(dev_priv) >= 4) {
                I915_WRITE(DSPSURF(plane),
-                          intel_fb_gtt_offset(fb, rotation) +
+                          intel_plane_ggtt_offset(plane_state) +
                           intel_crtc->dspaddr_offset);
                I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
                I915_WRITE(DSPLINOFF(plane), linear_offset);
        } else {
                I915_WRITE(DSPADDR(plane),
-                          intel_fb_gtt_offset(fb, rotation) +
+                          intel_plane_ggtt_offset(plane_state) +
                           intel_crtc->dspaddr_offset);
        }
        POSTING_READ(reg);
@@ -3207,7 +3214,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
 
        I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
        I915_WRITE(DSPSURF(plane),
-                  intel_fb_gtt_offset(fb, rotation) +
+                  intel_plane_ggtt_offset(plane_state) +
                   intel_crtc->dspaddr_offset);
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
@@ -3230,23 +3237,6 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
        }
 }
 
-u32 intel_fb_gtt_offset(struct drm_framebuffer *fb,
-                       unsigned int rotation)
-{
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-       struct i915_ggtt_view view;
-       struct i915_vma *vma;
-
-       intel_fill_fb_ggtt_view(&view, fb, rotation);
-
-       vma = i915_gem_object_to_ggtt(obj, &view);
-       if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
-                view.type))
-               return -1;
-
-       return i915_ggtt_offset(vma);
-}
-
 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
 {
        struct drm_device *dev = intel_crtc->base.dev;
@@ -3441,7 +3431,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
        }
 
        I915_WRITE(PLANE_SURF(pipe, 0),
-                  intel_fb_gtt_offset(fb, rotation) + surf_addr);
+                  intel_plane_ggtt_offset(plane_state) + surf_addr);
 
        POSTING_READ(PLANE_SURF(pipe, 0));
 }
@@ -4272,10 +4262,10 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
        drm_crtc_vblank_put(&intel_crtc->base);
 
        wake_up_all(&dev_priv->pending_flip_queue);
-       queue_work(dev_priv->wq, &work->unpin_work);
-
        trace_i915_flip_complete(intel_crtc->plane,
                                 work->pending_flip_obj);
+
+       queue_work(dev_priv->wq, &work->unpin_work);
 }
 
 static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
@@ -11536,7 +11526,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
                flush_work(&work->mmio_work);
 
        mutex_lock(&dev->struct_mutex);
-       intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
+       intel_unpin_fb_vma(work->old_vma);
        i915_gem_object_put(work->pending_flip_obj);
        mutex_unlock(&dev->struct_mutex);
 
@@ -12246,8 +12236,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                goto cleanup_pending;
        }
 
-       work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation);
-       work->gtt_offset += intel_crtc->dspaddr_offset;
+       work->old_vma = to_intel_plane_state(primary->state)->vma;
+       to_intel_plane_state(primary->state)->vma = vma;
+
+       work->gtt_offset = i915_ggtt_offset(vma) + intel_crtc->dspaddr_offset;
        work->rotation = crtc->primary->state->rotation;
 
        /*
@@ -12301,7 +12293,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 cleanup_request:
        i915_add_request_no_flush(request);
 cleanup_unpin:
-       intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
+       to_intel_plane_state(primary->state)->vma = work->old_vma;
+       intel_unpin_fb_vma(vma);
 cleanup_pending:
        atomic_dec(&intel_crtc->unpin_work_count);
 unlock:
@@ -14794,6 +14787,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
                        DRM_DEBUG_KMS("failed to pin object\n");
                        return PTR_ERR(vma);
                }
+
+               to_intel_plane_state(new_state)->vma = vma;
        }
 
        return 0;
@@ -14812,19 +14807,12 @@ void
 intel_cleanup_plane_fb(struct drm_plane *plane,
                       struct drm_plane_state *old_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(plane->dev);
-       struct intel_plane_state *old_intel_state;
-       struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
-       struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
-
-       old_intel_state = to_intel_plane_state(old_state);
-
-       if (!obj && !old_obj)
-               return;
+       struct i915_vma *vma;
 
-       if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
-           !INTEL_INFO(dev_priv)->cursor_needs_physical))
-               intel_unpin_fb_obj(old_state->fb, old_state->rotation);
+       /* Should only be called after a successful intel_prepare_plane_fb()! */
+       vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma);
+       if (vma)
+               intel_unpin_fb_vma(vma);
 }
 
 int
@@ -15166,7 +15154,7 @@ intel_update_cursor_plane(struct drm_plane *plane,
        if (!obj)
                addr = 0;
        else if (!INTEL_INFO(dev_priv)->cursor_needs_physical)
-               addr = i915_gem_object_ggtt_offset(obj, NULL);
+               addr = intel_plane_ggtt_offset(state);
        else
                addr = obj->phys_handle->busaddr;
 
@@ -17066,41 +17054,12 @@ void intel_display_resume(struct drm_device *dev)
 void intel_modeset_gem_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_crtc *c;
-       struct drm_i915_gem_object *obj;
 
        intel_init_gt_powersave(dev_priv);
 
        intel_modeset_init_hw(dev);
 
        intel_setup_overlay(dev_priv);
-
-       /*
-        * Make sure any fbs we allocated at startup are properly
-        * pinned & fenced.  When we do the allocation it's too early
-        * for this.
-        */
-       for_each_crtc(dev, c) {
-               struct i915_vma *vma;
-
-               obj = intel_fb_obj(c->primary->fb);
-               if (obj == NULL)
-                       continue;
-
-               mutex_lock(&dev->struct_mutex);
-               vma = intel_pin_and_fence_fb_obj(c->primary->fb,
-                                                c->primary->state->rotation);
-               mutex_unlock(&dev->struct_mutex);
-               if (IS_ERR(vma)) {
-                       DRM_ERROR("failed to pin boot fb on pipe %d\n",
-                                 to_intel_crtc(c)->pipe);
-                       drm_framebuffer_unreference(c->primary->fb);
-                       c->primary->fb = NULL;
-                       c->primary->crtc = c->primary->state->crtc = NULL;
-                       update_state_fb(c->primary);
-                       c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
-               }
-       }
 }
 
 int intel_connector_register(struct drm_connector *connector)
index 58a756f2f2244d054f80baba8ef9afb6892fc21e..a2f0e070d38d6c10001d34a8ae3969fe1e3d363c 100644 (file)
@@ -1730,7 +1730,8 @@ bxt_get_dpll(struct intel_crtc *crtc,
                return NULL;
 
        if ((encoder->type == INTEL_OUTPUT_DP ||
-            encoder->type == INTEL_OUTPUT_EDP) &&
+            encoder->type == INTEL_OUTPUT_EDP ||
+            encoder->type == INTEL_OUTPUT_DP_MST) &&
            !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
                return NULL;
 
index cd72ae171eeb673de11f1ca6dcc6f1ab06fde81c..03a2112004f91e1d5ac011cabc255a36867e0e71 100644 (file)
@@ -377,6 +377,7 @@ struct intel_atomic_state {
 struct intel_plane_state {
        struct drm_plane_state base;
        struct drm_rect clip;
+       struct i915_vma *vma;
 
        struct {
                u32 offset;
@@ -1046,6 +1047,7 @@ struct intel_flip_work {
        struct work_struct mmio_work;
 
        struct drm_crtc *crtc;
+       struct i915_vma *old_vma;
        struct drm_framebuffer *old_fb;
        struct drm_i915_gem_object *pending_flip_obj;
        struct drm_pending_vblank_event *event;
@@ -1273,7 +1275,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
                                    struct drm_modeset_acquire_ctx *ctx);
 struct i915_vma *
 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
-void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
+void intel_unpin_fb_vma(struct i915_vma *vma);
 struct drm_framebuffer *
 __intel_framebuffer_create(struct drm_device *dev,
                           struct drm_mode_fb_cmd2 *mode_cmd,
@@ -1362,7 +1364,10 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
 int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
 
-u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, unsigned int rotation);
+static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
+{
+       return i915_ggtt_offset(state->vma);
+}
 
 u32 skl_plane_ctl_format(uint32_t pixel_format);
 u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
index 62f215b12eb5274b8251d3f46d2a4fdbfc590e96..f3a1d6a5cabe9fcf76f5812dc526781b678f7e41 100644 (file)
@@ -173,7 +173,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
        if (IS_I945GM(dev_priv))
                fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
        fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
-       fbc_ctl |= params->fb.fence_reg;
+       fbc_ctl |= params->vma->fence->id;
        I915_WRITE(FBC_CONTROL, fbc_ctl);
 }
 
@@ -193,8 +193,8 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
        else
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;
 
-       if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
-               dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;
+       if (params->vma->fence) {
+               dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
                I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
        } else {
                I915_WRITE(DPFC_FENCE_YOFF, 0);
@@ -251,13 +251,14 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
                break;
        }
 
-       if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+       if (params->vma->fence) {
                dpfc_ctl |= DPFC_CTL_FENCE_EN;
                if (IS_GEN5(dev_priv))
-                       dpfc_ctl |= params->fb.fence_reg;
+                       dpfc_ctl |= params->vma->fence->id;
                if (IS_GEN6(dev_priv)) {
                        I915_WRITE(SNB_DPFC_CTL_SA,
-                                  SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+                                  SNB_CPU_FENCE_ENABLE |
+                                  params->vma->fence->id);
                        I915_WRITE(DPFC_CPU_FENCE_OFFSET,
                                   params->crtc.fence_y_offset);
                }
@@ -269,7 +270,8 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
        }
 
        I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
-       I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID);
+       I915_WRITE(ILK_FBC_RT_BASE,
+                  i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
        /* enable it... */
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
@@ -319,10 +321,11 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
                break;
        }
 
-       if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+       if (params->vma->fence) {
                dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
                I915_WRITE(SNB_DPFC_CTL_SA,
-                          SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+                          SNB_CPU_FENCE_ENABLE |
+                          params->vma->fence->id);
                I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
        } else {
                I915_WRITE(SNB_DPFC_CTL_SA,0);
@@ -727,14 +730,6 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
        return effective_w <= max_w && effective_h <= max_h;
 }
 
-/* XXX replace me when we have VMA tracking for intel_plane_state */
-static int get_fence_id(struct drm_framebuffer *fb)
-{
-       struct i915_vma *vma = i915_gem_object_to_ggtt(intel_fb_obj(fb), NULL);
-
-       return vma && vma->fence ? vma->fence->id : I915_FENCE_REG_NONE;
-}
-
 static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
                                         struct intel_crtc_state *crtc_state,
                                         struct intel_plane_state *plane_state)
@@ -743,7 +738,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_state_cache *cache = &fbc->state_cache;
        struct drm_framebuffer *fb = plane_state->base.fb;
-       struct drm_i915_gem_object *obj;
+
+       cache->vma = NULL;
 
        cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -758,16 +754,10 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
        if (!cache->plane.visible)
                return;
 
-       obj = intel_fb_obj(fb);
-
-       /* FIXME: We lack the proper locking here, so only run this on the
-        * platforms that need. */
-       if (IS_GEN(dev_priv, 5, 6))
-               cache->fb.ilk_ggtt_offset = i915_gem_object_ggtt_offset(obj, NULL);
        cache->fb.pixel_format = fb->pixel_format;
        cache->fb.stride = fb->pitches[0];
-       cache->fb.fence_reg = get_fence_id(fb);
-       cache->fb.tiling_mode = i915_gem_object_get_tiling(obj);
+
+       cache->vma = plane_state->vma;
 }
 
 static bool intel_fbc_can_activate(struct intel_crtc *crtc)
@@ -784,7 +774,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
                return false;
        }
 
-       if (!cache->plane.visible) {
+       if (!cache->vma) {
                fbc->no_fbc_reason = "primary plane not visible";
                return false;
        }
@@ -807,8 +797,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
         * so have no fence associated with it) due to aperture constaints
         * at the time of pinning.
         */
-       if (cache->fb.tiling_mode != I915_TILING_X ||
-           cache->fb.fence_reg == I915_FENCE_REG_NONE) {
+       if (!cache->vma->fence) {
                fbc->no_fbc_reason = "framebuffer not tiled or fenced";
                return false;
        }
@@ -888,17 +877,16 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
         * zero. */
        memset(params, 0, sizeof(*params));
 
+       params->vma = cache->vma;
+
        params->crtc.pipe = crtc->pipe;
        params->crtc.plane = crtc->plane;
        params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);
 
        params->fb.pixel_format = cache->fb.pixel_format;
        params->fb.stride = cache->fb.stride;
-       params->fb.fence_reg = cache->fb.fence_reg;
 
        params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
-
-       params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset;
 }
 
 static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
index 8cf2d80f22540a35dc4245a842d8473ce7d852d9..f4a8c4fc57c4e654a1af91903275189c841b35bd 100644 (file)
@@ -284,7 +284,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 out_destroy_fbi:
        drm_fb_helper_release_fbi(helper);
 out_unpin:
-       intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
+       intel_unpin_fb_vma(vma);
 out_unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
@@ -549,7 +549,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
 
        if (ifbdev->fb) {
                mutex_lock(&ifbdev->helper.dev->struct_mutex);
-               intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
+               intel_unpin_fb_vma(ifbdev->vma);
                mutex_unlock(&ifbdev->helper.dev->struct_mutex);
 
                drm_framebuffer_remove(&ifbdev->fb->base);
index 8f131a08d440cf02cbcd9ffe255b94e3f4eb6077..242a73e66d82862bea3ba881474efe327cd02d6e 100644 (file)
@@ -273,7 +273,7 @@ skl_update_plane(struct drm_plane *drm_plane,
 
        I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
        I915_WRITE(PLANE_SURF(pipe, plane),
-                  intel_fb_gtt_offset(fb, rotation) + surf_addr);
+                  intel_plane_ggtt_offset(plane_state) + surf_addr);
        POSTING_READ(PLANE_SURF(pipe, plane));
 }
 
@@ -458,7 +458,7 @@ vlv_update_plane(struct drm_plane *dplane,
        I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
        I915_WRITE(SPCNTR(pipe, plane), sprctl);
        I915_WRITE(SPSURF(pipe, plane),
-                  intel_fb_gtt_offset(fb, rotation) + sprsurf_offset);
+                  intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
        POSTING_READ(SPSURF(pipe, plane));
 }
 
@@ -594,7 +594,7 @@ ivb_update_plane(struct drm_plane *plane,
                I915_WRITE(SPRSCALE(pipe), sprscale);
        I915_WRITE(SPRCTL(pipe), sprctl);
        I915_WRITE(SPRSURF(pipe),
-                  intel_fb_gtt_offset(fb, rotation) + sprsurf_offset);
+                  intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
        POSTING_READ(SPRSURF(pipe));
 }
 
@@ -721,7 +721,7 @@ ilk_update_plane(struct drm_plane *plane,
        I915_WRITE(DVSSCALE(pipe), dvsscale);
        I915_WRITE(DVSCNTR(pipe), dvscntr);
        I915_WRITE(DVSSURF(pipe),
-                  intel_fb_gtt_offset(fb, rotation) + dvssurf_offset);
+                  intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
        POSTING_READ(DVSSURF(pipe));
 }
 
index 74856a8b8f35943b08a59f8aed5a546e98058d3c..e64f52464ecf55b83a17f25b434cb9e1474c10e1 100644 (file)
@@ -222,6 +222,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
                uint32_t mpllP;
 
                pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
+               mpllP = (mpllP >> 8) & 0xf;
                if (!mpllP)
                        mpllP = 4;
 
@@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
                uint32_t clock;
 
                pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
-               return clock;
+               return clock / 1000;
        }
 
        ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
index ccdce1b4eec4b8bf183235ebae2eb5395a306430..d5e58a38f160182354b8c9a126bdcbb5d459f40a 100644 (file)
@@ -99,6 +99,7 @@ struct nv84_fence_priv {
        struct nouveau_bo *bo;
        struct nouveau_bo *bo_gart;
        u32 *suspend;
+       struct mutex mutex;
 };
 
 int  nv84_fence_context_new(struct nouveau_channel *);
index 187ecdb8200273baa77c41a42fbc65c3bcb93db6..21a5775028cc612e9a6c81e280777329f18233fd 100644 (file)
@@ -42,7 +42,7 @@ nouveau_led(struct drm_device *dev)
 }
 
 /* nouveau_led.c */
-#if IS_ENABLED(CONFIG_LEDS_CLASS)
+#if IS_REACHABLE(CONFIG_LEDS_CLASS)
 int  nouveau_led_init(struct drm_device *dev);
 void nouveau_led_suspend(struct drm_device *dev);
 void nouveau_led_resume(struct drm_device *dev);
index 08f9c6fa0f7f210d3e3fd5a0fbe8f11ff40b1972..1fba3862274474f0001deaec9fefaf6b0fd324b4 100644 (file)
@@ -313,7 +313,8 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
        if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) {
                /* block access to objects not created via this interface */
                owner = argv->v0.owner;
-               if (argv->v0.object == 0ULL)
+               if (argv->v0.object == 0ULL &&
+                   argv->v0.type != NVIF_IOCTL_V0_DEL)
                        argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
                else
                        argv->v0.owner = NVDRM_OBJECT_USIF;
index 2c2c645076614b4f9c187d24a5e9d2667ea27778..32097fd615fd1e3a5954019c02dec36bd2e8db8c 100644 (file)
@@ -4052,6 +4052,11 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
                }
        }
 
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               if (crtc->state->event)
+                       drm_crtc_vblank_get(crtc);
+       }
+
        /* Update plane(s). */
        for_each_plane_in_state(state, plane, plane_state, i) {
                struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state);
@@ -4101,6 +4106,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
                        drm_crtc_send_vblank_event(crtc, crtc->state->event);
                        spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
                        crtc->state->event = NULL;
+                       drm_crtc_vblank_put(crtc);
                }
        }
 
index 52b87ae83e7b4d0df54e003d58783eddd8deb6f5..f0b322bec7df22de23bbae372333fb484ac6d9ed 100644 (file)
@@ -107,8 +107,10 @@ nv84_fence_context_del(struct nouveau_channel *chan)
        struct nv84_fence_chan *fctx = chan->fence;
 
        nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
+       mutex_lock(&priv->mutex);
        nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
        nouveau_bo_vma_del(priv->bo, &fctx->vma);
+       mutex_unlock(&priv->mutex);
        nouveau_fence_context_del(&fctx->base);
        chan->fence = NULL;
        nouveau_fence_context_free(&fctx->base);
@@ -134,11 +136,13 @@ nv84_fence_context_new(struct nouveau_channel *chan)
        fctx->base.sync32 = nv84_fence_sync32;
        fctx->base.sequence = nv84_fence_read(chan);
 
+       mutex_lock(&priv->mutex);
        ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
        if (ret == 0) {
                ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
                                        &fctx->vma_gart);
        }
+       mutex_unlock(&priv->mutex);
 
        if (ret)
                nv84_fence_context_del(chan);
@@ -212,6 +216,8 @@ nv84_fence_create(struct nouveau_drm *drm)
        priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
        priv->base.uevent = true;
 
+       mutex_init(&priv->mutex);
+
        /* Use VRAM if there is any ; otherwise fallback to system memory */
        domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
                         /*
index 6f0436df021953337ba29f0ff9c02c507214b07b..f8f2f16c22a2a2502bf283c63a5d1fc124d0ff89 100644 (file)
@@ -59,7 +59,7 @@ gt215_hda_eld(NV50_DISP_MTHD_V1)
                        );
                }
                for (i = 0; i < size; i++)
-                       nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[0]);
+                       nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[i]);
                for (; i < 0x60; i++)
                        nvkm_wr32(device, 0x61c440 + soff, (i << 8));
                nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003);
index 567466f93cd5d9645020e3b89c282808d88397e4..0db8efbf1c2e2e9cd84b689098bdba9d666843bc 100644 (file)
@@ -433,8 +433,6 @@ nv50_disp_dptmds_war(struct nvkm_device *device)
        case 0x94:
        case 0x96:
        case 0x98:
-       case 0xaa:
-       case 0xac:
                return true;
        default:
                break;
index e0c143b865f39cb36074b5e524638530cadeede9..30bd4a6a9d466e11755bf95cee6929473ae65adf 100644 (file)
  *   2.46.0 - Add PFP_SYNC_ME support on evergreen
  *   2.47.0 - Add UVD_NO_OP register support
  *   2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
+ *   2.49.0 - DRM_RADEON_GEM_INFO ioctl returns correct vram_size/visible values
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       48
+#define KMS_DRIVER_MINOR       49
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
index 0bcffd8a7bd3ceac0de37cd0b44344011963be77..96683f5b2b1b722db08de97550aa8cdf99444a1c 100644 (file)
@@ -220,8 +220,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
 
        man = &rdev->mman.bdev.man[TTM_PL_VRAM];
 
-       args->vram_size = rdev->mc.real_vram_size;
-       args->vram_visible = (u64)man->size << PAGE_SHIFT;
+       args->vram_size = (u64)man->size << PAGE_SHIFT;
+       args->vram_visible = rdev->mc.visible_vram_size;
        args->vram_visible -= rdev->vram_pin_size;
        args->gart_size = rdev->mc.gtt_size;
        args->gart_size -= rdev->gart_pin_size;
index 881bf489478b01b34e9e4df6013fe608c42215ee..686cdd3c86f2e9178768282a0dd173850e0bf063 100644 (file)
@@ -858,7 +858,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
                }
        }
        plane = &vc4_plane->base;
-       ret = drm_universal_plane_init(dev, plane, 0xff,
+       ret = drm_universal_plane_init(dev, plane, 0,
                                       &vc4_plane_funcs,
                                       formats, num_formats,
                                       type, NULL);
index 723fd763da8e3c49de716207cc68eb46080552c1..7a96798b9c0ac611f588c5b8d80269ed23dc157e 100644 (file)
@@ -481,8 +481,7 @@ static int vmw_fb_kms_framebuffer(struct fb_info *info)
        mode_cmd.height = var->yres;
        mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
        mode_cmd.pixel_format =
-               drm_mode_legacy_fb_format(var->bits_per_pixel,
-                       ((var->bits_per_pixel + 7) / 8) * mode_cmd.width);
+               drm_mode_legacy_fb_format(var->bits_per_pixel, depth);
 
        cur_fb = par->set_fb;
        if (cur_fb && cur_fb->width == mode_cmd.width &&
index f31a778b085148fea4a52599c6b89be203889781..b22d0f83f8e38a9ee0d0eb7381e95d6b90442b61 100644 (file)
@@ -168,7 +168,7 @@ struct cp2112_device {
        atomic_t xfer_avail;
        struct gpio_chip gc;
        u8 *in_out_buffer;
-       spinlock_t lock;
+       struct mutex lock;
 
        struct gpio_desc *desc[8];
        bool gpio_poll;
@@ -186,10 +186,9 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
        struct cp2112_device *dev = gpiochip_get_data(chip);
        struct hid_device *hdev = dev->hdev;
        u8 *buf = dev->in_out_buffer;
-       unsigned long flags;
        int ret;
 
-       spin_lock_irqsave(&dev->lock, flags);
+       mutex_lock(&dev->lock);
 
        ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
                                 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
@@ -213,8 +212,8 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
        ret = 0;
 
 exit:
-       spin_unlock_irqrestore(&dev->lock, flags);
-       return ret <= 0 ? ret : -EIO;
+       mutex_unlock(&dev->lock);
+       return ret < 0 ? ret : -EIO;
 }
 
 static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
@@ -222,10 +221,9 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
        struct cp2112_device *dev = gpiochip_get_data(chip);
        struct hid_device *hdev = dev->hdev;
        u8 *buf = dev->in_out_buffer;
-       unsigned long flags;
        int ret;
 
-       spin_lock_irqsave(&dev->lock, flags);
+       mutex_lock(&dev->lock);
 
        buf[0] = CP2112_GPIO_SET;
        buf[1] = value ? 0xff : 0;
@@ -237,7 +235,7 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
        if (ret < 0)
                hid_err(hdev, "error setting GPIO values: %d\n", ret);
 
-       spin_unlock_irqrestore(&dev->lock, flags);
+       mutex_unlock(&dev->lock);
 }
 
 static int cp2112_gpio_get_all(struct gpio_chip *chip)
@@ -245,10 +243,9 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip)
        struct cp2112_device *dev = gpiochip_get_data(chip);
        struct hid_device *hdev = dev->hdev;
        u8 *buf = dev->in_out_buffer;
-       unsigned long flags;
        int ret;
 
-       spin_lock_irqsave(&dev->lock, flags);
+       mutex_lock(&dev->lock);
 
        ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf,
                                 CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,
@@ -262,7 +259,7 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip)
        ret = buf[1];
 
 exit:
-       spin_unlock_irqrestore(&dev->lock, flags);
+       mutex_unlock(&dev->lock);
 
        return ret;
 }
@@ -284,10 +281,9 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
        struct cp2112_device *dev = gpiochip_get_data(chip);
        struct hid_device *hdev = dev->hdev;
        u8 *buf = dev->in_out_buffer;
-       unsigned long flags;
        int ret;
 
-       spin_lock_irqsave(&dev->lock, flags);
+       mutex_lock(&dev->lock);
 
        ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
                                 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
@@ -308,7 +304,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
                goto fail;
        }
 
-       spin_unlock_irqrestore(&dev->lock, flags);
+       mutex_unlock(&dev->lock);
 
        /*
         * Set gpio value when output direction is already set,
@@ -319,7 +315,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
        return 0;
 
 fail:
-       spin_unlock_irqrestore(&dev->lock, flags);
+       mutex_unlock(&dev->lock);
        return ret < 0 ? ret : -EIO;
 }
 
@@ -1235,7 +1231,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
        if (!dev->in_out_buffer)
                return -ENOMEM;
 
-       spin_lock_init(&dev->lock);
+       mutex_init(&dev->lock);
 
        ret = hid_parse(hdev);
        if (ret) {
index f46f2c5117fae76a1c87105363e5c8db4c8673a3..350accfee8e85b2e545c0171f6582f4e62655b50 100644 (file)
@@ -76,6 +76,9 @@
 #define USB_VENDOR_ID_ALPS_JP          0x044E
 #define HID_DEVICE_ID_ALPS_U1_DUAL     0x120B
 
+#define USB_VENDOR_ID_AMI              0x046b
+#define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE      0xff10
+
 #define USB_VENDOR_ID_ANTON            0x1130
 #define USB_DEVICE_ID_ANTON_TOUCH_PAD  0x3101
 
index c5c5fbe9d60577f44085d86a7fb5cf60efb6acd3..52026dc94d5c4b0306ce585be293cbe2cb1910d9 100644 (file)
@@ -872,7 +872,7 @@ static const struct hid_device_id lg_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG),
                .driver_data = LG_NOGET | LG_FF4 },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2),
-               .driver_data = LG_FF2 },
+               .driver_data = LG_NOGET | LG_FF2 },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940),
                .driver_data = LG_FF3 },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR),
index e9d6cc7cdfc5c8019422d45914dc0363448bcb12..30a2977e26454f10fd72b855ec0b634aa868dcae 100644 (file)
@@ -57,6 +57,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
index 0884dc9554fdf632e684aa3689292368d5fb7e3b..672145b0d8f584d8fce50a74e799b36319448cb4 100644 (file)
@@ -166,19 +166,21 @@ static int wacom_pl_irq(struct wacom_wac *wacom)
                wacom->id[0] = STYLUS_DEVICE_ID;
        }
 
-       pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
-       if (features->pressure_max > 255)
-               pressure = (pressure << 1) | ((data[4] >> 6) & 1);
-       pressure += (features->pressure_max + 1) / 2;
-
-       input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
-       input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
-       input_report_abs(input, ABS_PRESSURE, pressure);
-
-       input_report_key(input, BTN_TOUCH, data[4] & 0x08);
-       input_report_key(input, BTN_STYLUS, data[4] & 0x10);
-       /* Only allow the stylus2 button to be reported for the pen tool. */
-       input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
+       if (prox) {
+               pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
+               if (features->pressure_max > 255)
+                       pressure = (pressure << 1) | ((data[4] >> 6) & 1);
+               pressure += (features->pressure_max + 1) / 2;
+
+               input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
+               input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
+               input_report_abs(input, ABS_PRESSURE, pressure);
+
+               input_report_key(input, BTN_TOUCH, data[4] & 0x08);
+               input_report_key(input, BTN_STYLUS, data[4] & 0x10);
+               /* Only allow the stylus2 button to be reported for the pen tool. */
+               input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
+       }
 
        if (!prox)
                wacom->id[0] = 0;
index cd49cb17eb7fb385ddbf507ef14b2ea2be090159..308dbda700ebdaeb02f222aa46dc7bb79c24c0da 100644 (file)
@@ -383,6 +383,7 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
                return ret;
        }
 
+       init_cached_read_index(channel);
        next_read_location = hv_get_next_read_location(inring_info);
        next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
                                                    sizeof(desc),
index e34d82e79b988a781010cad1e0f283617dfb8471..c21ca7bf2efe4f02d420ef53c679b321eceb8fe6 100644 (file)
@@ -58,7 +58,7 @@
 #define SMBSLVDAT      (0xC + piix4_smba)
 
 /* count for request_region */
-#define SMBIOSIZE      8
+#define SMBIOSIZE      9
 
 /* PCI Address Constants */
 #define SMBBA          0x090
@@ -592,6 +592,8 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
        u8 port;
        int retval;
 
+       mutex_lock(&piix4_mutex_sb800);
+
        /* Request the SMBUS semaphore, avoid conflicts with the IMC */
        smbslvcnt  = inb_p(SMBSLVCNT);
        do {
@@ -605,10 +607,10 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
                usleep_range(1000, 2000);
        } while (--retries);
        /* SMBus is still owned by the IMC, we give up */
-       if (!retries)
+       if (!retries) {
+               mutex_unlock(&piix4_mutex_sb800);
                return -EBUSY;
-
-       mutex_lock(&piix4_mutex_sb800);
+       }
 
        outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
        smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
@@ -623,11 +625,11 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
 
        outb_p(smba_en_lo, SB800_PIIX4_SMB_IDX + 1);
 
-       mutex_unlock(&piix4_mutex_sb800);
-
        /* Release the semaphore */
        outb_p(smbslvcnt | 0x20, SMBSLVCNT);
 
+       mutex_unlock(&piix4_mutex_sb800);
+
        return retval;
 }
 
index 2bbf0c521bebb5c44840a8ce5296024910d8d73d..7d61b566e148dddd21ce2986cba233567802f93c 100644 (file)
@@ -775,7 +775,7 @@ static int palmas_adc_wakeup_reset(struct palmas_gpadc *adc)
 
 static int palmas_gpadc_suspend(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct palmas_gpadc *adc = iio_priv(indio_dev);
        int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
        int ret;
@@ -798,7 +798,7 @@ static int palmas_gpadc_suspend(struct device *dev)
 
 static int palmas_gpadc_resume(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct palmas_gpadc *adc = iio_priv(indio_dev);
        int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
        int ret;
index 9a081465c42f4225d26747a734c5daceea68c842..6bb23a49e81eb8cf304a2f81f50c5e36a91e2d48 100644 (file)
@@ -422,7 +422,7 @@ MODULE_DEVICE_TABLE(of, afe4403_of_match);
 
 static int __maybe_unused afe4403_suspend(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
        struct afe4403_data *afe = iio_priv(indio_dev);
        int ret;
 
@@ -443,7 +443,7 @@ static int __maybe_unused afe4403_suspend(struct device *dev)
 
 static int __maybe_unused afe4403_resume(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
        struct afe4403_data *afe = iio_priv(indio_dev);
        int ret;
 
index 45266404f7e3b5bd7c7790a0b05ff298fc24d5a9..964f5231a831c437c277e4bcfe292720104043f7 100644 (file)
@@ -428,7 +428,7 @@ MODULE_DEVICE_TABLE(of, afe4404_of_match);
 
 static int __maybe_unused afe4404_suspend(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
        struct afe4404_data *afe = iio_priv(indio_dev);
        int ret;
 
@@ -449,7 +449,7 @@ static int __maybe_unused afe4404_suspend(struct device *dev)
 
 static int __maybe_unused afe4404_resume(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
        struct afe4404_data *afe = iio_priv(indio_dev);
        int ret;
 
index 90ab8a2d2846f8a8591ee6b1615dce2c984020ef..183c14329d6e350f6325b0e77a95dcb56de892dd 100644 (file)
@@ -238,7 +238,7 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private)
 
        mutex_lock(&data->lock);
 
-       while (cnt || (cnt = max30100_fifo_count(data) > 0)) {
+       while (cnt || (cnt = max30100_fifo_count(data)) > 0) {
                ret = max30100_read_measurement(data);
                if (ret)
                        break;
index 9c47bc98f3acdea4cb4b56b23a72e582c34a413b..2a22ad92033306d02eec60f288c00a65d5186c37 100644 (file)
@@ -71,7 +71,8 @@
  * a) select an implementation using busy loop polling on those systems
  * b) use the checksum to do some probabilistic decoding
  */
-#define DHT11_START_TRANSMISSION       18  /* ms */
+#define DHT11_START_TRANSMISSION_MIN   18000  /* us */
+#define DHT11_START_TRANSMISSION_MAX   20000  /* us */
 #define DHT11_MIN_TIMERES      34000  /* ns */
 #define DHT11_THRESHOLD                49000  /* ns */
 #define DHT11_AMBIG_LOW                23000  /* ns */
@@ -228,7 +229,8 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
                ret = gpio_direction_output(dht11->gpio, 0);
                if (ret)
                        goto err;
-               msleep(DHT11_START_TRANSMISSION);
+               usleep_range(DHT11_START_TRANSMISSION_MIN,
+                            DHT11_START_TRANSMISSION_MAX);
                ret = gpio_direction_input(dht11->gpio);
                if (ret)
                        goto err;
index d0faca294006f4f53a40ebcf40839f9758b52c09..86a6585b847df90f07256dd4027eeab426be7f77 100644 (file)
@@ -59,9 +59,11 @@ int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
 
        case RXE_MEM_TYPE_MR:
        case RXE_MEM_TYPE_FMR:
-               return ((iova < mem->iova) ||
-                       ((iova + length) > (mem->iova + mem->length))) ?
-                       -EFAULT : 0;
+               if (iova < mem->iova ||
+                   length > mem->length ||
+                   iova > mem->iova + mem->length - length)
+                       return -EFAULT;
+               return 0;
 
        default:
                return -EFAULT;
index 3435efff879960cece0c7e122b5960a057f2d4a1..5bcf073289729bd881960e8f797af1aa82e3b5bc 100644 (file)
@@ -479,7 +479,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
                                goto err2;
                        }
 
-                       resid = mtu;
+                       qp->resp.resid = mtu;
                } else {
                        if (pktlen != resid) {
                                state = RESPST_ERR_LENGTH;
index 92595b98e7ede7b96dc4c9dd44f4629262f6e3a6..022be0e22eba97b10b95e653f48cee44f1d4ba36 100644 (file)
@@ -263,13 +263,21 @@ static int uinput_create_device(struct uinput_device *udev)
                return -EINVAL;
        }
 
-       if (test_bit(ABS_MT_SLOT, dev->absbit)) {
-               nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1;
-               error = input_mt_init_slots(dev, nslot, 0);
-               if (error)
+       if (test_bit(EV_ABS, dev->evbit)) {
+               input_alloc_absinfo(dev);
+               if (!dev->absinfo) {
+                       error = -EINVAL;
                        goto fail1;
-       } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
-               input_set_events_per_packet(dev, 60);
+               }
+
+               if (test_bit(ABS_MT_SLOT, dev->absbit)) {
+                       nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1;
+                       error = input_mt_init_slots(dev, nslot, 0);
+                       if (error)
+                               goto fail1;
+               } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
+                       input_set_events_per_packet(dev, 60);
+               }
        }
 
        if (test_bit(EV_FF, dev->evbit) && !udev->ff_effects_max) {
index 8993983e3fe4892b748ff5d77c1597880c63e2c9..bb7762bf2879b3a7383b2d326a859833adbbf360 100644 (file)
@@ -42,13 +42,19 @@ config RMI4_SMB
 config RMI4_F03
         bool "RMI4 Function 03 (PS2 Guest)"
        depends on RMI4_CORE
-       depends on SERIO=y || RMI4_CORE=SERIO
         help
           Say Y here if you want to add support for RMI4 function 03.
 
           Function 03 provides PS2 guest support for RMI4 devices. This
           includes support for TrackPoints on TouchPads.
 
+config RMI4_F03_SERIO
+       tristate
+       depends on RMI4_CORE
+       depends on RMI4_F03
+       default RMI4_CORE
+       select SERIO
+
 config RMI4_2D_SENSOR
        bool
        depends on RMI4_CORE
index 11447ab1055cd4beadf7eca752bdf9494d76cef1..bf5c36e229bacd63dd7e77d028aa65fb4555ce89 100644 (file)
@@ -901,7 +901,7 @@ void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
        data->enabled = true;
        if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) {
                retval = disable_irq_wake(irq);
-               if (!retval)
+               if (retval)
                        dev_warn(&rmi_dev->dev,
                                 "Failed to disable irq for wake: %d\n",
                                 retval);
@@ -936,7 +936,7 @@ void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake)
        disable_irq(irq);
        if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) {
                retval = enable_irq_wake(irq);
-               if (!retval)
+               if (retval)
                        dev_warn(&rmi_dev->dev,
                                 "Failed to enable irq for wake: %d\n",
                                 retval);
index 83cf11312fd971e0cacc16bd70eb474dd9c13b52..c9d1c91e1887094f2ef740d9eee3af8d16ee6f82 100644 (file)
@@ -682,7 +682,7 @@ static int wm97xx_probe(struct device *dev)
        }
        platform_set_drvdata(wm->battery_dev, wm);
        wm->battery_dev->dev.parent = dev;
-       wm->battery_dev->dev.platform_data = pdata->batt_pdata;
+       wm->battery_dev->dev.platform_data = pdata ? pdata->batt_pdata : NULL;
        ret = platform_device_add(wm->battery_dev);
        if (ret < 0)
                goto batt_reg_err;
index 54a5e870a8f56d627f9622a4a9fb03e07347aa0a..efbcf8435185244cfbfac87f3f36661d88a13ceb 100644 (file)
@@ -19,9 +19,9 @@
 #include <linux/bitops.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
+#include <linux/interrupt.h>
 #include <linux/irqdomain.h>
 #include <linux/irqchip.h>
-#include <linux/irqchip/chained_irq.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/mfd/syscon.h>
@@ -39,6 +39,7 @@ struct keystone_irq_device {
        struct irq_domain       *irqd;
        struct regmap           *devctrl_regs;
        u32                     devctrl_offset;
+       raw_spinlock_t          wa_lock;
 };
 
 static inline u32 keystone_irq_readl(struct keystone_irq_device *kirq)
@@ -83,17 +84,15 @@ static void keystone_irq_ack(struct irq_data *d)
        /* nothing to do here */
 }
 
-static void keystone_irq_handler(struct irq_desc *desc)
+static irqreturn_t keystone_irq_handler(int irq, void *keystone_irq)
 {
-       unsigned int irq = irq_desc_get_irq(desc);
-       struct keystone_irq_device *kirq = irq_desc_get_handler_data(desc);
+       struct keystone_irq_device *kirq = keystone_irq;
+       unsigned long wa_lock_flags;
        unsigned long pending;
        int src, virq;
 
        dev_dbg(kirq->dev, "start irq %d\n", irq);
 
-       chained_irq_enter(irq_desc_get_chip(desc), desc);
-
        pending = keystone_irq_readl(kirq);
        keystone_irq_writel(kirq, pending);
 
@@ -111,13 +110,15 @@ static void keystone_irq_handler(struct irq_desc *desc)
                        if (!virq)
                                dev_warn(kirq->dev, "spurious irq detected hwirq %d, virq %d\n",
                                         src, virq);
+                       raw_spin_lock_irqsave(&kirq->wa_lock, wa_lock_flags);
                        generic_handle_irq(virq);
+                       raw_spin_unlock_irqrestore(&kirq->wa_lock,
+                                                  wa_lock_flags);
                }
        }
 
-       chained_irq_exit(irq_desc_get_chip(desc), desc);
-
        dev_dbg(kirq->dev, "end irq %d\n", irq);
+       return IRQ_HANDLED;
 }
 
 static int keystone_irq_map(struct irq_domain *h, unsigned int virq,
@@ -182,9 +183,16 @@ static int keystone_irq_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
+       raw_spin_lock_init(&kirq->wa_lock);
+
        platform_set_drvdata(pdev, kirq);
 
-       irq_set_chained_handler_and_data(kirq->irq, keystone_irq_handler, kirq);
+       ret = request_irq(kirq->irq, keystone_irq_handler,
+                         0, dev_name(dev), kirq);
+       if (ret) {
+               irq_domain_remove(kirq->irqd);
+               return ret;
+       }
 
        /* clear all source bits */
        keystone_irq_writel(kirq, ~0x0);
@@ -199,6 +207,8 @@ static int keystone_irq_remove(struct platform_device *pdev)
        struct keystone_irq_device *kirq = platform_get_drvdata(pdev);
        int hwirq;
 
+       free_irq(kirq->irq, kirq);
+
        for (hwirq = 0; hwirq < KEYSTONE_N_IRQ; hwirq++)
                irq_dispose_mapping(irq_find_mapping(kirq->irqd, hwirq));
 
index 17304705f2cf9443b99690565ece5b537e3e61c4..05fa9f7af53cd78732802dfbec28c27770e6f01f 100644 (file)
@@ -131,12 +131,16 @@ static struct irq_chip mxs_icoll_chip = {
        .irq_ack = icoll_ack_irq,
        .irq_mask = icoll_mask_irq,
        .irq_unmask = icoll_unmask_irq,
+       .flags = IRQCHIP_MASK_ON_SUSPEND |
+                IRQCHIP_SKIP_SET_WAKE,
 };
 
 static struct irq_chip asm9260_icoll_chip = {
        .irq_ack = icoll_ack_irq,
        .irq_mask = asm9260_mask_irq,
        .irq_unmask = asm9260_unmask_irq,
+       .flags = IRQCHIP_MASK_ON_SUSPEND |
+                IRQCHIP_SKIP_SET_WAKE,
 };
 
 asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
index 7c6c57216bf29f301690d270ca5f05a55dea3d3f..8a9f742d8ed72c810d49115ff802706d2b9d7e7f 100644 (file)
@@ -1534,18 +1534,18 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
                return PTR_ERR(key);
        }
 
-       rcu_read_lock();
+       down_read(&key->sem);
 
        ukp = user_key_payload(key);
        if (!ukp) {
-               rcu_read_unlock();
+               up_read(&key->sem);
                key_put(key);
                kzfree(new_key_string);
                return -EKEYREVOKED;
        }
 
        if (cc->key_size != ukp->datalen) {
-               rcu_read_unlock();
+               up_read(&key->sem);
                key_put(key);
                kzfree(new_key_string);
                return -EINVAL;
@@ -1553,7 +1553,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
 
        memcpy(cc->key, ukp->data, cc->key_size);
 
-       rcu_read_unlock();
+       up_read(&key->sem);
        key_put(key);
 
        /* clear the flag since following operations may invalidate previously valid key */
index 6400cffb986df21be7289dce8d2a4d4ff228a9a2..3570bcb7a4a4e5cade63c39b9071ed610da1e4ae 100644 (file)
@@ -427,7 +427,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
        unsigned long flags;
        struct priority_group *pg;
        struct pgpath *pgpath;
-       bool bypassed = true;
+       unsigned bypassed = 1;
 
        if (!atomic_read(&m->nr_valid_paths)) {
                clear_bit(MPATHF_QUEUE_IO, &m->flags);
@@ -466,7 +466,7 @@ check_current_pg:
         */
        do {
                list_for_each_entry(pg, &m->priority_groups, list) {
-                       if (pg->bypassed == bypassed)
+                       if (pg->bypassed == !!bypassed)
                                continue;
                        pgpath = choose_path_in_pg(m, pg, nr_bytes);
                        if (!IS_ERR_OR_NULL(pgpath)) {
index 9d7275fb541ad422b171cbe0ee43335e3e1efc27..6e702fc69a83cb27f6bc1792d4871ad18d708f71 100644 (file)
@@ -779,6 +779,10 @@ static void dm_old_request_fn(struct request_queue *q)
                int srcu_idx;
                struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 
+               if (unlikely(!map)) {
+                       dm_put_live_table(md, srcu_idx);
+                       return;
+               }
                ti = dm_table_find_target(map, pos);
                dm_put_live_table(md, srcu_idx);
        }
index ebb5e391b800e0bbbca2fd9da926a4ff191839e0..87a6b65ed3af5f72f176625bcc2bfad7e21b0885 100644 (file)
@@ -1206,7 +1206,7 @@ static int cec_config_thread_func(void *arg)
                las->log_addr[i] = CEC_LOG_ADDR_INVALID;
                if (last_la == CEC_LOG_ADDR_INVALID ||
                    last_la == CEC_LOG_ADDR_UNREGISTERED ||
-                   !(last_la & type2mask[type]))
+                   !((1 << last_la) & type2mask[type]))
                        last_la = la_list[0];
 
                err = cec_config_log_addr(adap, i, last_la);
index 01a804792f3007c1b8512b0cbdf4aea1a000cb82..b5972440c1bf606e677ad312a9a391576f859f70 100644 (file)
@@ -1023,7 +1023,12 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
                if (!host->busy_status && busy_resp &&
                    !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
                    (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
-                       /* Unmask the busy IRQ */
+
+                       /* Clear the busy start IRQ */
+                       writel(host->variant->busy_detect_mask,
+                              host->base + MMCICLEAR);
+
+                       /* Unmask the busy end IRQ */
                        writel(readl(base + MMCIMASK0) |
                               host->variant->busy_detect_mask,
                               base + MMCIMASK0);
@@ -1038,10 +1043,14 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
 
                /*
                 * At this point we are not busy with a command, we have
-                * not received a new busy request, mask the busy IRQ and
-                * fall through to process the IRQ.
+                * not received a new busy request, clear and mask the busy
+                * end IRQ and fall through to process the IRQ.
                 */
                if (host->busy_status) {
+
+                       writel(host->variant->busy_detect_mask,
+                              host->base + MMCICLEAR);
+
                        writel(readl(base + MMCIMASK0) &
                               ~host->variant->busy_detect_mask,
                               base + MMCIMASK0);
@@ -1283,12 +1292,21 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
                }
 
                /*
-                * We intentionally clear the MCI_ST_CARDBUSY IRQ here (if it's
-                * enabled) since the HW seems to be triggering the IRQ on both
-                * edges while monitoring DAT0 for busy completion.
+                * We intentionally clear the MCI_ST_CARDBUSY IRQ (if it's
+                * enabled) in mmci_cmd_irq() function where ST Micro busy
+                * detection variant is handled. Considering the HW seems to be
+                * triggering the IRQ on both edges while monitoring DAT0 for
+                * busy completion and that same status bit is used to monitor
+                * start and end of busy detection, special care must be taken
+                * to make sure that both start and end interrupts are always
+                * cleared one after the other.
                 */
                status &= readl(host->base + MMCIMASK0);
-               writel(status, host->base + MMCICLEAR);
+               if (host->variant->busy_detect)
+                       writel(status & ~host->variant->busy_detect_mask,
+                              host->base + MMCICLEAR);
+               else
+                       writel(status, host->base + MMCICLEAR);
 
                dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
 
index 23909804ffb840d3187f21f67180a634a769425e..0def99590d162ebcfb86a16a6b9d5adf96f19cb6 100644 (file)
@@ -2733,7 +2733,8 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
                if (intmask & SDHCI_INT_RETUNE)
                        mmc_retune_needed(host->mmc);
 
-               if (intmask & SDHCI_INT_CARD_INT) {
+               if ((intmask & SDHCI_INT_CARD_INT) &&
+                   (host->ier & SDHCI_INT_CARD_INT)) {
                        sdhci_enable_sdio_irq_nolock(host, false);
                        host->thread_isr |= SDHCI_INT_CARD_INT;
                        result = IRQ_WAKE_THREAD;
index c12d2618eebf76397b2e71eaeb6f2fafa938fea6..3872ab96b80a39eecbb1d0b8150a2e8288915e46 100644 (file)
@@ -1152,6 +1152,12 @@ static void init_ring(struct net_device *dev)
                if (skb == NULL)
                        break;
                np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+               if (pci_dma_mapping_error(np->pci_dev,
+                                         np->rx_info[i].mapping)) {
+                       dev_kfree_skb(skb);
+                       np->rx_info[i].skb = NULL;
+                       break;
+               }
                /* Grrr, we cannot offset to correctly align the IP header. */
                np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
        }
@@ -1182,8 +1188,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 {
        struct netdev_private *np = netdev_priv(dev);
        unsigned int entry;
+       unsigned int prev_tx;
        u32 status;
-       int i;
+       int i, j;
 
        /*
         * be cautious here, wrapping the queue has weird semantics
@@ -1201,6 +1208,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
        }
 #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
 
+       prev_tx = np->cur_tx;
        entry = np->cur_tx % TX_RING_SIZE;
        for (i = 0; i < skb_num_frags(skb); i++) {
                int wrap_ring = 0;
@@ -1234,6 +1242,11 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
                                               skb_frag_size(this_frag),
                                               PCI_DMA_TODEVICE);
                }
+               if (pci_dma_mapping_error(np->pci_dev,
+                                         np->tx_info[entry].mapping)) {
+                       dev->stats.tx_dropped++;
+                       goto err_out;
+               }
 
                np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
                np->tx_ring[entry].status = cpu_to_le32(status);
@@ -1268,8 +1281,30 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
                netif_stop_queue(dev);
 
        return NETDEV_TX_OK;
-}
 
+err_out:
+       entry = prev_tx % TX_RING_SIZE;
+       np->tx_info[entry].skb = NULL;
+       if (i > 0) {
+               pci_unmap_single(np->pci_dev,
+                                np->tx_info[entry].mapping,
+                                skb_first_frag_len(skb),
+                                PCI_DMA_TODEVICE);
+               np->tx_info[entry].mapping = 0;
+               entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
+               for (j = 1; j < i; j++) {
+                       pci_unmap_single(np->pci_dev,
+                                        np->tx_info[entry].mapping,
+                                        skb_frag_size(
+                                               &skb_shinfo(skb)->frags[j-1]),
+                                        PCI_DMA_TODEVICE);
+                       entry++;
+               }
+       }
+       dev_kfree_skb_any(skb);
+       np->cur_tx = prev_tx;
+       return NETDEV_TX_OK;
+}
 
 /* The interrupt handler does all of the Rx thread work and cleans up
    after the Tx thread. */
@@ -1569,6 +1604,12 @@ static void refill_rx_ring(struct net_device *dev)
                                break;  /* Better luck next round. */
                        np->rx_info[entry].mapping =
                                pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+                       if (pci_dma_mapping_error(np->pci_dev,
+                                               np->rx_info[entry].mapping)) {
+                               dev_kfree_skb(skb);
+                               np->rx_info[entry].skb = NULL;
+                               break;
+                       }
                        np->rx_ring[entry].rxaddr =
                                cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
                }
index c0fb80acc2dad4b91d3b3cb8198be50dc9597fbd..baba2db9d9c25988da94cb323e5d1a6832a12b51 100644 (file)
 #define DEFAULT_RX_RING_SIZE   512 /* must be power of 2 */
 #define MIN_RX_RING_SIZE       64
 #define MAX_RX_RING_SIZE       8192
-#define RX_RING_BYTES(bp)      (sizeof(struct macb_dma_desc)   \
+#define RX_RING_BYTES(bp)      (macb_dma_desc_get_size(bp)     \
                                 * (bp)->rx_ring_size)
 
 #define DEFAULT_TX_RING_SIZE   512 /* must be power of 2 */
 #define MIN_TX_RING_SIZE       64
 #define MAX_TX_RING_SIZE       4096
-#define TX_RING_BYTES(bp)      (sizeof(struct macb_dma_desc)   \
+#define TX_RING_BYTES(bp)      (macb_dma_desc_get_size(bp)     \
                                 * (bp)->tx_ring_size)
 
 /* level of occupied TX descriptors under which we wake up TX process */
  */
 #define MACB_HALT_TIMEOUT      1230
 
+/* DMA buffer descriptor might be different size
+ * depends on hardware configuration.
+ */
+static unsigned int macb_dma_desc_get_size(struct macb *bp)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+       if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+               return sizeof(struct macb_dma_desc) + sizeof(struct macb_dma_desc_64);
+#endif
+       return sizeof(struct macb_dma_desc);
+}
+
+static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int idx)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+       /* Dma buffer descriptor is 4 words length (instead of 2 words)
+        * for 64b GEM.
+        */
+       if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+               idx <<= 1;
+#endif
+       return idx;
+}
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
+{
+       return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
+}
+#endif
+
 /* Ring buffer accessors */
 static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
 {
@@ -87,7 +118,9 @@ static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
                                          unsigned int index)
 {
-       return &queue->tx_ring[macb_tx_ring_wrap(queue->bp, index)];
+       index = macb_tx_ring_wrap(queue->bp, index);
+       index = macb_adj_dma_desc_idx(queue->bp, index);
+       return &queue->tx_ring[index];
 }
 
 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
@@ -101,7 +134,7 @@ static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
        dma_addr_t offset;
 
        offset = macb_tx_ring_wrap(queue->bp, index) *
-                sizeof(struct macb_dma_desc);
+                       macb_dma_desc_get_size(queue->bp);
 
        return queue->tx_ring_dma + offset;
 }
@@ -113,7 +146,9 @@ static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
 
 static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
 {
-       return &bp->rx_ring[macb_rx_ring_wrap(bp, index)];
+       index = macb_rx_ring_wrap(bp, index);
+       index = macb_adj_dma_desc_idx(bp, index);
+       return &bp->rx_ring[index];
 }
 
 static void *macb_rx_buffer(struct macb *bp, unsigned int index)
@@ -560,12 +595,32 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
        }
 }
 
-static inline void macb_set_addr(struct macb_dma_desc *desc, dma_addr_t addr)
+static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
 {
-       desc->addr = (u32)addr;
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-       desc->addrh = (u32)(addr >> 32);
+       struct macb_dma_desc_64 *desc_64;
+
+       if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
+               desc_64 = macb_64b_desc(bp, desc);
+               desc_64->addrh = upper_32_bits(addr);
+       }
 #endif
+       desc->addr = lower_32_bits(addr);
+}
+
+static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
+{
+       dma_addr_t addr = 0;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+       struct macb_dma_desc_64 *desc_64;
+
+       if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
+               desc_64 = macb_64b_desc(bp, desc);
+               addr = ((u64)(desc_64->addrh) << 32);
+       }
+#endif
+       addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
+       return addr;
 }
 
 static void macb_tx_error_task(struct work_struct *work)
@@ -649,16 +704,17 @@ static void macb_tx_error_task(struct work_struct *work)
 
        /* Set end of TX queue */
        desc = macb_tx_desc(queue, 0);
-       macb_set_addr(desc, 0);
+       macb_set_addr(bp, desc, 0);
        desc->ctrl = MACB_BIT(TX_USED);
 
        /* Make descriptor updates visible to hardware */
        wmb();
 
        /* Reinitialize the TX desc queue */
-       queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
+       queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-       queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
+       if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+               queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
 #endif
        /* Make TX ring reflect state of hardware */
        queue->tx_head = 0;
@@ -750,6 +806,7 @@ static void gem_rx_refill(struct macb *bp)
        unsigned int            entry;
        struct sk_buff          *skb;
        dma_addr_t              paddr;
+       struct macb_dma_desc *desc;
 
        while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
                          bp->rx_ring_size) > 0) {
@@ -759,6 +816,7 @@ static void gem_rx_refill(struct macb *bp)
                rmb();
 
                bp->rx_prepared_head++;
+               desc = macb_rx_desc(bp, entry);
 
                if (!bp->rx_skbuff[entry]) {
                        /* allocate sk_buff for this free entry in ring */
@@ -782,14 +840,14 @@ static void gem_rx_refill(struct macb *bp)
 
                        if (entry == bp->rx_ring_size - 1)
                                paddr |= MACB_BIT(RX_WRAP);
-                       macb_set_addr(&(bp->rx_ring[entry]), paddr);
-                       bp->rx_ring[entry].ctrl = 0;
+                       macb_set_addr(bp, desc, paddr);
+                       desc->ctrl = 0;
 
                        /* properly align Ethernet header */
                        skb_reserve(skb, NET_IP_ALIGN);
                } else {
-                       bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED);
-                       bp->rx_ring[entry].ctrl = 0;
+                       desc->addr &= ~MACB_BIT(RX_USED);
+                       desc->ctrl = 0;
                }
        }
 
@@ -835,16 +893,13 @@ static int gem_rx(struct macb *bp, int budget)
                bool rxused;
 
                entry = macb_rx_ring_wrap(bp, bp->rx_tail);
-               desc = &bp->rx_ring[entry];
+               desc = macb_rx_desc(bp, entry);
 
                /* Make hw descriptor updates visible to CPU */
                rmb();
 
                rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
-               addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-               addr |= ((u64)(desc->addrh) << 32);
-#endif
+               addr = macb_get_addr(bp, desc);
                ctrl = desc->ctrl;
 
                if (!rxused)
@@ -987,15 +1042,17 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 static inline void macb_init_rx_ring(struct macb *bp)
 {
        dma_addr_t addr;
+       struct macb_dma_desc *desc = NULL;
        int i;
 
        addr = bp->rx_buffers_dma;
        for (i = 0; i < bp->rx_ring_size; i++) {
-               bp->rx_ring[i].addr = addr;
-               bp->rx_ring[i].ctrl = 0;
+               desc = macb_rx_desc(bp, i);
+               macb_set_addr(bp, desc, addr);
+               desc->ctrl = 0;
                addr += bp->rx_buffer_size;
        }
-       bp->rx_ring[bp->rx_ring_size - 1].addr |= MACB_BIT(RX_WRAP);
+       desc->addr |= MACB_BIT(RX_WRAP);
        bp->rx_tail = 0;
 }
 
@@ -1008,15 +1065,14 @@ static int macb_rx(struct macb *bp, int budget)
 
        for (tail = bp->rx_tail; budget > 0; tail++) {
                struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
-               u32 addr, ctrl;
+               u32 ctrl;
 
                /* Make hw descriptor updates visible to CPU */
                rmb();
 
-               addr = desc->addr;
                ctrl = desc->ctrl;
 
-               if (!(addr & MACB_BIT(RX_USED)))
+               if (!(desc->addr & MACB_BIT(RX_USED)))
                        break;
 
                if (ctrl & MACB_BIT(RX_SOF)) {
@@ -1336,7 +1392,7 @@ static unsigned int macb_tx_map(struct macb *bp,
        i = tx_head;
        entry = macb_tx_ring_wrap(bp, i);
        ctrl = MACB_BIT(TX_USED);
-       desc = &queue->tx_ring[entry];
+       desc = macb_tx_desc(queue, entry);
        desc->ctrl = ctrl;
 
        if (lso_ctrl) {
@@ -1358,7 +1414,7 @@ static unsigned int macb_tx_map(struct macb *bp,
                i--;
                entry = macb_tx_ring_wrap(bp, i);
                tx_skb = &queue->tx_skb[entry];
-               desc = &queue->tx_ring[entry];
+               desc = macb_tx_desc(queue, entry);
 
                ctrl = (u32)tx_skb->size;
                if (eof) {
@@ -1379,7 +1435,7 @@ static unsigned int macb_tx_map(struct macb *bp,
                        ctrl |= MACB_BF(MSS_MFS, mss_mfs);
 
                /* Set TX buffer descriptor */
-               macb_set_addr(desc, tx_skb->mapping);
+               macb_set_addr(bp, desc, tx_skb->mapping);
                /* desc->addr must be visible to hardware before clearing
                 * 'TX_USED' bit in desc->ctrl.
                 */
@@ -1586,11 +1642,9 @@ static void gem_free_rx_buffers(struct macb *bp)
                if (!skb)
                        continue;
 
-               desc = &bp->rx_ring[i];
-               addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-               addr |= ((u64)(desc->addrh) << 32);
-#endif
+               desc = macb_rx_desc(bp, i);
+               addr = macb_get_addr(bp, desc);
+
                dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
@@ -1711,15 +1765,17 @@ out_err:
 static void gem_init_rings(struct macb *bp)
 {
        struct macb_queue *queue;
+       struct macb_dma_desc *desc = NULL;
        unsigned int q;
        int i;
 
        for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
                for (i = 0; i < bp->tx_ring_size; i++) {
-                       queue->tx_ring[i].addr = 0;
-                       queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+                       desc = macb_tx_desc(queue, i);
+                       macb_set_addr(bp, desc, 0);
+                       desc->ctrl = MACB_BIT(TX_USED);
                }
-               queue->tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
+               desc->ctrl |= MACB_BIT(TX_WRAP);
                queue->tx_head = 0;
                queue->tx_tail = 0;
        }
@@ -1733,16 +1789,18 @@ static void gem_init_rings(struct macb *bp)
 static void macb_init_rings(struct macb *bp)
 {
        int i;
+       struct macb_dma_desc *desc = NULL;
 
        macb_init_rx_ring(bp);
 
        for (i = 0; i < bp->tx_ring_size; i++) {
-               bp->queues[0].tx_ring[i].addr = 0;
-               bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
+               desc = macb_tx_desc(&bp->queues[0], i);
+               macb_set_addr(bp, desc, 0);
+               desc->ctrl = MACB_BIT(TX_USED);
        }
        bp->queues[0].tx_head = 0;
        bp->queues[0].tx_tail = 0;
-       bp->queues[0].tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
+       desc->ctrl |= MACB_BIT(TX_WRAP);
 }
 
 static void macb_reset_hw(struct macb *bp)
@@ -1863,7 +1921,8 @@ static void macb_configure_dma(struct macb *bp)
                        dmacfg &= ~GEM_BIT(TXCOEN);
 
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-               dmacfg |= GEM_BIT(ADDR64);
+               if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+                       dmacfg |= GEM_BIT(ADDR64);
 #endif
                netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
                           dmacfg);
@@ -1910,14 +1969,16 @@ static void macb_init_hw(struct macb *bp)
        macb_configure_dma(bp);
 
        /* Initialize TX and RX buffers */
-       macb_writel(bp, RBQP, (u32)(bp->rx_ring_dma));
+       macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-       macb_writel(bp, RBQPH, (u32)(bp->rx_ring_dma >> 32));
+       if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+               macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
 #endif
        for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-               queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
+               queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-               queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
+               if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+                       queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
 #endif
 
                /* Enable interrupts */
@@ -2627,7 +2688,8 @@ static int macb_init(struct platform_device *pdev)
                        queue->IMR  = GEM_IMR(hw_q - 1);
                        queue->TBQP = GEM_TBQP(hw_q - 1);
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-                       queue->TBQPH = GEM_TBQPH(hw_q -1);
+                       if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+                               queue->TBQPH = GEM_TBQPH(hw_q - 1);
 #endif
                } else {
                        /* queue0 uses legacy registers */
@@ -2637,7 +2699,8 @@ static int macb_init(struct platform_device *pdev)
                        queue->IMR  = MACB_IMR;
                        queue->TBQP = MACB_TBQP;
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-                       queue->TBQPH = MACB_TBQPH;
+                       if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+                               queue->TBQPH = MACB_TBQPH;
 #endif
                }
 
@@ -2730,13 +2793,14 @@ static int macb_init(struct platform_device *pdev)
 static int at91ether_start(struct net_device *dev)
 {
        struct macb *lp = netdev_priv(dev);
+       struct macb_dma_desc *desc;
        dma_addr_t addr;
        u32 ctl;
        int i;
 
        lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
                                         (AT91ETHER_MAX_RX_DESCR *
-                                         sizeof(struct macb_dma_desc)),
+                                         macb_dma_desc_get_size(lp)),
                                         &lp->rx_ring_dma, GFP_KERNEL);
        if (!lp->rx_ring)
                return -ENOMEM;
@@ -2748,7 +2812,7 @@ static int at91ether_start(struct net_device *dev)
        if (!lp->rx_buffers) {
                dma_free_coherent(&lp->pdev->dev,
                                  AT91ETHER_MAX_RX_DESCR *
-                                 sizeof(struct macb_dma_desc),
+                                 macb_dma_desc_get_size(lp),
                                  lp->rx_ring, lp->rx_ring_dma);
                lp->rx_ring = NULL;
                return -ENOMEM;
@@ -2756,13 +2820,14 @@ static int at91ether_start(struct net_device *dev)
 
        addr = lp->rx_buffers_dma;
        for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
-               lp->rx_ring[i].addr = addr;
-               lp->rx_ring[i].ctrl = 0;
+               desc = macb_rx_desc(lp, i);
+               macb_set_addr(lp, desc, addr);
+               desc->ctrl = 0;
                addr += AT91ETHER_MAX_RBUFF_SZ;
        }
 
        /* Set the Wrap bit on the last descriptor */
-       lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
+       desc->addr |= MACB_BIT(RX_WRAP);
 
        /* Reset buffer index */
        lp->rx_tail = 0;
@@ -2834,7 +2899,7 @@ static int at91ether_close(struct net_device *dev)
 
        dma_free_coherent(&lp->pdev->dev,
                          AT91ETHER_MAX_RX_DESCR *
-                         sizeof(struct macb_dma_desc),
+                         macb_dma_desc_get_size(lp),
                          lp->rx_ring, lp->rx_ring_dma);
        lp->rx_ring = NULL;
 
@@ -2885,13 +2950,15 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static void at91ether_rx(struct net_device *dev)
 {
        struct macb *lp = netdev_priv(dev);
+       struct macb_dma_desc *desc;
        unsigned char *p_recv;
        struct sk_buff *skb;
        unsigned int pktlen;
 
-       while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
+       desc = macb_rx_desc(lp, lp->rx_tail);
+       while (desc->addr & MACB_BIT(RX_USED)) {
                p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
-               pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
+               pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
                skb = netdev_alloc_skb(dev, pktlen + 2);
                if (skb) {
                        skb_reserve(skb, 2);
@@ -2905,17 +2972,19 @@ static void at91ether_rx(struct net_device *dev)
                        lp->stats.rx_dropped++;
                }
 
-               if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
+               if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
                        lp->stats.multicast++;
 
                /* reset ownership bit */
-               lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
+               desc->addr &= ~MACB_BIT(RX_USED);
 
                /* wrap after last buffer */
                if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
                        lp->rx_tail = 0;
                else
                        lp->rx_tail++;
+
+               desc = macb_rx_desc(lp, lp->rx_tail);
        }
 }
 
@@ -3211,8 +3280,11 @@ static int macb_probe(struct platform_device *pdev)
        device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
 
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-       if (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1)) > GEM_DBW32)
+       if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
                dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+               bp->hw_dma_cap = HW_DMA_CAP_64B;
+       } else
+               bp->hw_dma_cap = HW_DMA_CAP_32B;
 #endif
 
        spin_lock_init(&bp->lock);
index d67adad67be1c097a339d993a866597b4d202f4d..fc8550a5d47f75df540521c27f07f5c2d03995f9 100644 (file)
 /* Bitfields in DCFG6. */
 #define GEM_PBUF_LSO_OFFSET                    27
 #define GEM_PBUF_LSO_SIZE                      1
+#define GEM_DAW64_OFFSET                       23
+#define GEM_DAW64_SIZE                         1
 
 /* Constants for CLK */
 #define MACB_CLK_DIV8                          0
 struct macb_dma_desc {
        u32     addr;
        u32     ctrl;
+};
+
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-       u32     addrh;
-       u32     resvd;
-#endif
+enum macb_hw_dma_cap {
+       HW_DMA_CAP_32B,
+       HW_DMA_CAP_64B,
 };
 
+struct macb_dma_desc_64 {
+       u32 addrh;
+       u32 resvd;
+};
+#endif
+
 /* DMA descriptor bitfields */
 #define MACB_RX_USED_OFFSET                    0
 #define MACB_RX_USED_SIZE                      1
@@ -874,6 +884,10 @@ struct macb {
        unsigned int            jumbo_max_len;
 
        u32                     wol;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+       enum macb_hw_dma_cap hw_dma_cap;
+#endif
 };
 
 static inline bool macb_is_gem(struct macb *bp)
index 2f85b64f01fa06d708e52bde776708f96b55c30e..1e4695270da6cc422c441542783a0ae24dd943a6 100644 (file)
@@ -31,6 +31,7 @@ struct lmac {
        u8                      lmac_type;
        u8                      lane_to_sds;
        bool                    use_training;
+       bool                    autoneg;
        bool                    link_up;
        int                     lmacid; /* ID within BGX */
        int                     lmacid_bd; /* ID on board */
@@ -461,7 +462,17 @@ static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac)
        /* power down, reset autoneg, autoneg enable */
        cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
        cfg &= ~PCS_MRX_CTL_PWR_DN;
-       cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
+       cfg |= PCS_MRX_CTL_RST_AN;
+       if (lmac->phydev) {
+               cfg |= PCS_MRX_CTL_AN_EN;
+       } else {
+               /* In scenarios where PHY driver is not present or it's a
+                * non-standard PHY, FW sets AN_EN to inform Linux driver
+                * to do auto-neg and link polling or not.
+                */
+               if (cfg & PCS_MRX_CTL_AN_EN)
+                       lmac->autoneg = true;
+       }
        bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
 
        if (lmac->lmac_type == BGX_MODE_QSGMII) {
@@ -472,7 +483,7 @@ static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac)
                return 0;
        }
 
-       if (lmac->lmac_type == BGX_MODE_SGMII) {
+       if ((lmac->lmac_type == BGX_MODE_SGMII) && lmac->phydev) {
                if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
                                 PCS_MRX_STATUS_AN_CPT, false)) {
                        dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
@@ -678,12 +689,71 @@ static int bgx_xaui_check_link(struct lmac *lmac)
        return -1;
 }
 
+static void bgx_poll_for_sgmii_link(struct lmac *lmac)
+{
+       u64 pcs_link, an_result;
+       u8 speed;
+
+       pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
+                               BGX_GMP_PCS_MRX_STATUS);
+
+       /* Link state bit is sticky, read it again */
+       if (!(pcs_link & PCS_MRX_STATUS_LINK))
+               pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
+                                       BGX_GMP_PCS_MRX_STATUS);
+
+       if (bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_GMP_PCS_MRX_STATUS,
+                        PCS_MRX_STATUS_AN_CPT, false)) {
+               lmac->link_up = false;
+               lmac->last_speed = SPEED_UNKNOWN;
+               lmac->last_duplex = DUPLEX_UNKNOWN;
+               goto next_poll;
+       }
+
+       lmac->link_up = ((pcs_link & PCS_MRX_STATUS_LINK) != 0) ? true : false;
+       an_result = bgx_reg_read(lmac->bgx, lmac->lmacid,
+                                BGX_GMP_PCS_ANX_AN_RESULTS);
+
+       speed = (an_result >> 3) & 0x3;
+       lmac->last_duplex = (an_result >> 1) & 0x1;
+       switch (speed) {
+       case 0:
+               lmac->last_speed = 10;
+               break;
+       case 1:
+               lmac->last_speed = 100;
+               break;
+       case 2:
+               lmac->last_speed = 1000;
+               break;
+       default:
+               lmac->link_up = false;
+               lmac->last_speed = SPEED_UNKNOWN;
+               lmac->last_duplex = DUPLEX_UNKNOWN;
+               break;
+       }
+
+next_poll:
+
+       if (lmac->last_link != lmac->link_up) {
+               if (lmac->link_up)
+                       bgx_sgmii_change_link_state(lmac);
+               lmac->last_link = lmac->link_up;
+       }
+
+       queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 3);
+}
+
 static void bgx_poll_for_link(struct work_struct *work)
 {
        struct lmac *lmac;
        u64 spu_link, smu_link;
 
        lmac = container_of(work, struct lmac, dwork.work);
+       if (lmac->is_sgmii) {
+               bgx_poll_for_sgmii_link(lmac);
+               return;
+       }
 
        /* Receive link is latching low. Force it high and verify it */
        bgx_reg_modify(lmac->bgx, lmac->lmacid,
@@ -775,9 +845,21 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
            (lmac->lmac_type != BGX_MODE_XLAUI) &&
            (lmac->lmac_type != BGX_MODE_40G_KR) &&
            (lmac->lmac_type != BGX_MODE_10G_KR)) {
-               if (!lmac->phydev)
-                       return -ENODEV;
-
+               if (!lmac->phydev) {
+                       if (lmac->autoneg) {
+                               bgx_reg_write(bgx, lmacid,
+                                             BGX_GMP_PCS_LINKX_TIMER,
+                                             PCS_LINKX_TIMER_COUNT);
+                               goto poll;
+                       } else {
+                               /* Default to below link speed and duplex */
+                               lmac->link_up = true;
+                               lmac->last_speed = 1000;
+                               lmac->last_duplex = 1;
+                               bgx_sgmii_change_link_state(lmac);
+                               return 0;
+                       }
+               }
                lmac->phydev->dev_flags = 0;
 
                if (phy_connect_direct(&lmac->netdev, lmac->phydev,
@@ -786,15 +868,17 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
                        return -ENODEV;
 
                phy_start_aneg(lmac->phydev);
-       } else {
-               lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
-                                                  WQ_MEM_RECLAIM, 1);
-               if (!lmac->check_link)
-                       return -ENOMEM;
-               INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
-               queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
+               return 0;
        }
 
+poll:
+       lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
+                                          WQ_MEM_RECLAIM, 1);
+       if (!lmac->check_link)
+               return -ENOMEM;
+       INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
+       queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
+
        return 0;
 }
 
index c18ebfeb203919ea9b16e406bb678bd05a98a656..a60f189429bb658cb5ab8383982f86ddd9090fc3 100644 (file)
 #define         PCS_MRX_CTL_LOOPBACK1                  BIT_ULL(14)
 #define         PCS_MRX_CTL_RESET                      BIT_ULL(15)
 #define BGX_GMP_PCS_MRX_STATUS         0x30008
+#define         PCS_MRX_STATUS_LINK                    BIT_ULL(2)
 #define         PCS_MRX_STATUS_AN_CPT                  BIT_ULL(5)
+#define BGX_GMP_PCS_ANX_ADV            0x30010
 #define BGX_GMP_PCS_ANX_AN_RESULTS     0x30020
+#define BGX_GMP_PCS_LINKX_TIMER                0x30040
+#define PCS_LINKX_TIMER_COUNT                  0x1E84
 #define BGX_GMP_PCS_SGM_AN_ADV         0x30068
 #define BGX_GMP_PCS_MISCX_CTL          0x30078
+#define  PCS_MISC_CTL_MODE                     BIT_ULL(8)
 #define  PCS_MISC_CTL_DISP_EN                  BIT_ULL(13)
 #define  PCS_MISC_CTL_GMX_ENO                  BIT_ULL(11)
 #define  PCS_MISC_CTL_SAMP_PT_MASK     0x7Full
index 67befedef7098ddbde763738eb7940116efa283e..578c7f8f11bf23add2ac4d3c2263e371b4509136 100644 (file)
@@ -116,8 +116,7 @@ void xcv_setup_link(bool link_up, int link_speed)
        int speed = 2;
 
        if (!xcv) {
-               dev_err(&xcv->pdev->dev,
-                       "XCV init not done, probe may have failed\n");
+               pr_err("XCV init not done, probe may have failed\n");
                return;
        }
 
index 1a7f8ad7b9c6111ea2f8839a5d28c82af1ef13a8..cd49a54c538d5202f1bb0cb632b8fdb306a66989 100644 (file)
@@ -362,8 +362,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
                status = -EPERM;
                goto err;
        }
-done:
+
+       /* Remember currently programmed MAC */
        ether_addr_copy(adapter->dev_mac, addr->sa_data);
+done:
        ether_addr_copy(netdev->dev_addr, addr->sa_data);
        dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
        return 0;
@@ -3618,8 +3620,10 @@ static void be_disable_if_filters(struct be_adapter *adapter)
 {
        /* Don't delete MAC on BE3 VFs without FILTMGMT privilege  */
        if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
-           check_privilege(adapter, BE_PRIV_FILTMGMT))
+           check_privilege(adapter, BE_PRIV_FILTMGMT)) {
                be_dev_mac_del(adapter, adapter->pmac_id[0]);
+               eth_zero_addr(adapter->dev_mac);
+       }
 
        be_clear_uc_list(adapter);
        be_clear_mc_list(adapter);
@@ -3773,12 +3777,27 @@ static int be_enable_if_filters(struct be_adapter *adapter)
        if (status)
                return status;
 
-       /* Don't add MAC on BE3 VFs without FILTMGMT privilege */
-       if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
-           check_privilege(adapter, BE_PRIV_FILTMGMT)) {
+       /* Normally this condition is true, as ->dev_mac is zeroed.
+        * But on BE3 VFs the initial MAC is pre-programmed by PF and
+        * subsequent be_dev_mac_add() can fail (after fresh boot)
+        */
+       if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
+               int old_pmac_id = -1;
+
+               /* Remember old programmed MAC if any - can happen on BE3 VF */
+               if (!is_zero_ether_addr(adapter->dev_mac))
+                       old_pmac_id = adapter->pmac_id[0];
+
                status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
                if (status)
                        return status;
+
+               /* Delete the old programmed MAC as we successfully programmed
+                * a new MAC
+                */
+               if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
+                       be_dev_mac_del(adapter, old_pmac_id);
+
                ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
        }
 
@@ -4552,6 +4571,10 @@ static int be_mac_setup(struct be_adapter *adapter)
 
                memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
                memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+
+               /* Initial MAC for BE3 VFs is already programmed by PF */
+               if (BEx_chip(adapter) && be_virtfn(adapter))
+                       memcpy(adapter->dev_mac, mac, ETH_ALEN);
        }
 
        return 0;
index c1b6716679208a69bc66f017a68e5b6e35c60064..957bfc220978479a5ccee32b58ae26d4236fe939 100644 (file)
@@ -2010,8 +2010,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
                if (!rxb->page)
                        continue;
 
-               dma_unmap_single(rx_queue->dev, rxb->dma,
-                                PAGE_SIZE, DMA_FROM_DEVICE);
+               dma_unmap_page(rx_queue->dev, rxb->dma,
+                              PAGE_SIZE, DMA_FROM_DEVICE);
                __free_page(rxb->page);
 
                rxb->page = NULL;
index 87226685f74215a2093e59a99bd8042a2e2585c5..8fa18fc17cd2e25f2e3458e608abe6f5a96b60d9 100644 (file)
 
 static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
 {
-       u8 __iomem *reg_addr = ACCESS_ONCE(base);
-
-       writel(value, reg_addr + reg);
+       writel(value, base + reg);
 }
 
 #define dsaf_write_dev(a, reg, value) \
@@ -1024,9 +1022,7 @@ static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
 
 static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg)
 {
-       u8 __iomem *reg_addr = ACCESS_ONCE(base);
-
-       return readl(reg_addr + reg);
+       return readl(base + reg);
 }
 
 static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value)
index 672b64606321c3a1e2ab8eb9be8666ad04713d5f..8aed72860e7c0eece690c97ae38b1fbedfa58557 100644 (file)
@@ -305,8 +305,8 @@ int hns_nic_net_xmit_hw(struct net_device *ndev,
                        struct hns_nic_ring_data *ring_data)
 {
        struct hns_nic_priv *priv = netdev_priv(ndev);
-       struct device *dev = priv->dev;
        struct hnae_ring *ring = ring_data->ring;
+       struct device *dev = ring_to_dev(ring);
        struct netdev_queue *dev_queue;
        struct skb_frag_struct *frag;
        int buf_num;
index c7e939945259dc876b66cfedd0d85f9d7e90a914..53daa6ca5d83b60f7ad8632694658922921f82f5 100644 (file)
@@ -158,7 +158,7 @@ static int mlx4_reset_slave(struct mlx4_dev *dev)
        return -ETIMEDOUT;
 }
 
-static int mlx4_comm_internal_err(u32 slave_read)
+int mlx4_comm_internal_err(u32 slave_read)
 {
        return (u32)COMM_CHAN_EVENT_INTERNAL_ERR ==
                (slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0;
index d5a9372ed84d3127cf16c07bc43666651fe2486d..9aa4226919542f6496fedce45a09e09685433efe 100644 (file)
@@ -1099,7 +1099,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
        memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
        new_prof.tx_ring_size = tx_size;
        new_prof.rx_ring_size = rx_size;
-       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
        if (err)
                goto out;
 
@@ -1774,7 +1774,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
        new_prof.tx_ring_num[TX_XDP] = xdp_count;
        new_prof.rx_ring_num = channel->rx_count;
 
-       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
        if (err)
                goto out;
 
index 761f8b12399cab245abccc0f7d7f84fde742c14d..3b4961a8e8e44d6987ebd23f9239e747c7fc6cd5 100644 (file)
@@ -2042,6 +2042,8 @@ static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
                        if (priv->tx_cq[t] && priv->tx_cq[t][i])
                                mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
                }
+               kfree(priv->tx_ring[t]);
+               kfree(priv->tx_cq[t]);
        }
 
        for (i = 0; i < priv->rx_ring_num; i++) {
@@ -2184,9 +2186,11 @@ static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
 
 int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
                                struct mlx4_en_priv *tmp,
-                               struct mlx4_en_port_profile *prof)
+                               struct mlx4_en_port_profile *prof,
+                               bool carry_xdp_prog)
 {
-       int t;
+       struct bpf_prog *xdp_prog;
+       int i, t;
 
        mlx4_en_copy_priv(tmp, priv, prof);
 
@@ -2200,6 +2204,23 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
                }
                return -ENOMEM;
        }
+
+       /* All rx_rings have the same xdp_prog.  Pick the first one. */
+       xdp_prog = rcu_dereference_protected(
+               priv->rx_ring[0]->xdp_prog,
+               lockdep_is_held(&priv->mdev->state_lock));
+
+       if (xdp_prog && carry_xdp_prog) {
+               xdp_prog = bpf_prog_add(xdp_prog, tmp->rx_ring_num);
+               if (IS_ERR(xdp_prog)) {
+                       mlx4_en_free_resources(tmp);
+                       return PTR_ERR(xdp_prog);
+               }
+               for (i = 0; i < tmp->rx_ring_num; i++)
+                       rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog,
+                                          xdp_prog);
+       }
+
        return 0;
 }
 
@@ -2214,7 +2235,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
-       int t;
 
        en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
 
@@ -2248,11 +2268,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
        mlx4_en_free_resources(priv);
        mutex_unlock(&mdev->state_lock);
 
-       for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
-               kfree(priv->tx_ring[t]);
-               kfree(priv->tx_cq[t]);
-       }
-
        free_netdev(dev);
 }
 
@@ -2755,7 +2770,7 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
                en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n");
        }
 
-       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, false);
        if (err) {
                if (prog)
                        bpf_prog_sub(prog, priv->rx_ring_num - 1);
@@ -3499,7 +3514,7 @@ int mlx4_en_reset_config(struct net_device *dev,
        memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
        memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
 
-       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
        if (err)
                goto out;
 
index eac527e25ec902c2a586e9952272b9e8e599e2c8..cc003fdf0ed929a981b1403f6a7d0099825fec4b 100644 (file)
@@ -514,8 +514,11 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
                return;
 
        for (ring = 0; ring < priv->rx_ring_num; ring++) {
-               if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
+               if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
+                       local_bh_disable();
                        napi_reschedule(&priv->rx_cq[ring]->napi);
+                       local_bh_enable();
+               }
        }
 }
 
index 0e8b7c44931f907ed881d093077e93b92ae0305d..8258d08acd8c2029a8bcb812dd5efd85d8c7b0f2 100644 (file)
@@ -222,6 +222,18 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
                return;
 
        mlx4_stop_catas_poll(dev);
+       if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION &&
+           mlx4_is_slave(dev)) {
+               /* In mlx4_remove_one on a VF */
+               u32 slave_read =
+                       swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read));
+
+               if (mlx4_comm_internal_err(slave_read)) {
+                       mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n",
+                                __func__);
+                       mlx4_enter_error_state(dev->persist);
+               }
+       }
        mutex_lock(&intf_mutex);
 
        list_for_each_entry(intf, &intf_list, list)
index 88ee7d8a59231a47d6b7aca2006f9780dbefa578..086920b615af7180e891893ffd00928c0bd0238f 100644 (file)
@@ -1220,6 +1220,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
 void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
 
 void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
+int mlx4_comm_internal_err(u32 slave_read);
 
 int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
                    enum mlx4_port_type *type);
index ba1c6cd0cc79590075f4420a930b613c9fdedc62..cec59bc264c9ac197048fd7c98bcd5cf25de0efd 100644 (file)
@@ -679,7 +679,8 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
 
 int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
                                struct mlx4_en_priv *tmp,
-                               struct mlx4_en_port_profile *prof);
+                               struct mlx4_en_port_profile *prof,
+                               bool carry_xdp_prog);
 void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
                                    struct mlx4_en_priv *tmp);
 
index 3797cc7c1288078298ec655921f9fc9f804df97e..caa837e5e2b991fc3666776d2050fe20b1c6c7f6 100644 (file)
@@ -1728,7 +1728,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
        if (cmd->cmdif_rev > CMD_IF_REV) {
                dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
                        CMD_IF_REV, cmd->cmdif_rev);
-               err = -ENOTSUPP;
+               err = -EOPNOTSUPP;
                goto err_free_page;
        }
 
index 951dbd58594dcd3b32b680f752c1105132d85ba8..d5ecb8f53fd43684f185d590c8dc5553a4f25ab4 100644 (file)
@@ -791,7 +791,8 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
 int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
 
 int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+                                   enum mlx5e_traffic_types tt);
 
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
@@ -863,12 +864,12 @@ static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
 
 static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
 {
-       return -ENOTSUPP;
+       return -EOPNOTSUPP;
 }
 
 static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
 {
-       return -ENOTSUPP;
+       return -EOPNOTSUPP;
 }
 #else
 int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
index f0b460f47f2992caad4eec7ea0d655296a46e99c..0523ed47f597c715296c5ea843245625bf3dac62 100644 (file)
@@ -89,7 +89,7 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
        int i;
 
        if (!MLX5_CAP_GEN(priv->mdev, ets))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
        for (i = 0; i < ets->ets_cap; i++) {
@@ -236,7 +236,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
        int err;
 
        if (!MLX5_CAP_GEN(priv->mdev, ets))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        err = mlx5e_dbcnl_validate_ets(netdev, ets);
        if (err)
@@ -402,7 +402,7 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
        struct mlx5_core_dev *mdev = priv->mdev;
        struct ieee_ets ets;
        struct ieee_pfc pfc;
-       int err = -ENOTSUPP;
+       int err = -EOPNOTSUPP;
        int i;
 
        if (!MLX5_CAP_GEN(mdev, ets))
@@ -511,6 +511,11 @@ static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
 
+       if (!MLX5_CAP_GEN(priv->mdev, ets)) {
+               netdev_err(netdev, "%s, ets is not supported\n", __func__);
+               return;
+       }
+
        if (priority >= CEE_DCBX_MAX_PRIO) {
                netdev_err(netdev,
                           "%s, priority is out of range\n", __func__);
index 5197817e4b2f8a6b24af61b4fb08c271e69d1a5f..bb67863aa361168a8566349ef356d9a991d411be 100644 (file)
@@ -595,7 +595,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
        struct mlx5e_priv *priv = netdev_priv(netdev);
 
        if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        coal->rx_coalesce_usecs       = priv->params.rx_cq_moderation.usec;
        coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts;
@@ -620,7 +620,7 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
        int i;
 
        if (!MLX5_CAP_GEN(mdev, cq_moderation))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        mutex_lock(&priv->state_lock);
 
@@ -980,15 +980,18 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
 
 static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
 {
-       struct mlx5_core_dev *mdev = priv->mdev;
        void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
-       int i;
+       struct mlx5_core_dev *mdev = priv->mdev;
+       int ctxlen = MLX5_ST_SZ_BYTES(tirc);
+       int tt;
 
        MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
-       mlx5e_build_tir_ctx_hash(tirc, priv);
 
-       for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
-               mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen);
+       for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+               memset(tirc, 0, ctxlen);
+               mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
+               mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
+       }
 }
 
 static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
@@ -996,6 +999,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
        int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+       bool hash_changed = false;
        void *in;
 
        if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
@@ -1017,14 +1021,21 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
                mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
        }
 
-       if (key)
+       if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
+           hfunc != priv->params.rss_hfunc) {
+               priv->params.rss_hfunc = hfunc;
+               hash_changed = true;
+       }
+
+       if (key) {
                memcpy(priv->params.toeplitz_hash_key, key,
                       sizeof(priv->params.toeplitz_hash_key));
+               hash_changed = hash_changed ||
+                              priv->params.rss_hfunc == ETH_RSS_HASH_TOP;
+       }
 
-       if (hfunc != ETH_RSS_HASH_NO_CHANGE)
-               priv->params.rss_hfunc = hfunc;
-
-       mlx5e_modify_tirs_hash(priv, in, inlen);
+       if (hash_changed)
+               mlx5e_modify_tirs_hash(priv, in, inlen);
 
        mutex_unlock(&priv->state_lock);
 
@@ -1296,7 +1307,7 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
        u32 mlx5_wol_mode;
 
        if (!wol_supported)
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        if (wol->wolopts & ~wol_supported)
                return -EINVAL;
@@ -1426,7 +1437,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
 
        if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
            !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        if (!rx_mode_changed)
                return 0;
@@ -1452,7 +1463,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
        bool reset;
 
        if (!MLX5_CAP_GEN(mdev, cqe_compression))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
                netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n");
index 1fe80de5d68f1f3cf09c6e32530cd32114580051..a0e5a69402b30a349b196eaa72ce1a413b5479b2 100644 (file)
@@ -1089,7 +1089,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
                                               MLX5_FLOW_NAMESPACE_KERNEL);
 
        if (!priv->fs.ns)
-               return -EINVAL;
+               return -EOPNOTSUPP;
 
        err = mlx5e_arfs_create_tables(priv);
        if (err) {
index d088effd7160355849faacead1326f2198d12e8d..f33f72d0237c1bafc702f4066dab31ab22963a47 100644 (file)
@@ -92,7 +92,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
        ns = mlx5_get_flow_namespace(priv->mdev,
                                     MLX5_FLOW_NAMESPACE_ETHTOOL);
        if (!ns)
-               return ERR_PTR(-ENOTSUPP);
+               return ERR_PTR(-EOPNOTSUPP);
 
        table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
                                                       flow_table_properties_nic_receive.log_max_ft_size)),
index 2b7dd315020cd9e1a21b28643621122695cd06dd..f14ca3385fdd683b12f434e289cc8e264040c1ed 100644 (file)
@@ -2022,8 +2022,23 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
        MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
 }
 
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+                                   enum mlx5e_traffic_types tt)
 {
+       void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+                                MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+                                MLX5_HASH_FIELD_SEL_DST_IP   |\
+                                MLX5_HASH_FIELD_SEL_L4_SPORT |\
+                                MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+                                MLX5_HASH_FIELD_SEL_DST_IP   |\
+                                MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+
        MLX5_SET(tirc, tirc, rx_hash_fn,
                 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
        if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
@@ -2035,6 +2050,88 @@ void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
                MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
                memcpy(rss_key, priv->params.toeplitz_hash_key, len);
        }
+
+       switch (tt) {
+       case MLX5E_TT_IPV4_TCP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                        MLX5_L4_PROT_TYPE_TCP);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_L4PORTS);
+               break;
+
+       case MLX5E_TT_IPV6_TCP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                        MLX5_L4_PROT_TYPE_TCP);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_L4PORTS);
+               break;
+
+       case MLX5E_TT_IPV4_UDP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                        MLX5_L4_PROT_TYPE_UDP);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_L4PORTS);
+               break;
+
+       case MLX5E_TT_IPV6_UDP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                        MLX5_L4_PROT_TYPE_UDP);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_L4PORTS);
+               break;
+
+       case MLX5E_TT_IPV4_IPSEC_AH:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_IPSEC_SPI);
+               break;
+
+       case MLX5E_TT_IPV6_IPSEC_AH:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_IPSEC_SPI);
+               break;
+
+       case MLX5E_TT_IPV4_IPSEC_ESP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_IPSEC_SPI);
+               break;
+
+       case MLX5E_TT_IPV6_IPSEC_ESP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_IPSEC_SPI);
+               break;
+
+       case MLX5E_TT_IPV4:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP);
+               break;
+
+       case MLX5E_TT_IPV6:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP);
+               break;
+       default:
+               WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
+       }
 }
 
 static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
@@ -2404,110 +2501,13 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
 static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
                                      enum mlx5e_traffic_types tt)
 {
-       void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
-
        MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
 
-#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-                                MLX5_HASH_FIELD_SEL_DST_IP)
-
-#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-                                MLX5_HASH_FIELD_SEL_DST_IP   |\
-                                MLX5_HASH_FIELD_SEL_L4_SPORT |\
-                                MLX5_HASH_FIELD_SEL_L4_DPORT)
-
-#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-                                MLX5_HASH_FIELD_SEL_DST_IP   |\
-                                MLX5_HASH_FIELD_SEL_IPSEC_SPI)
-
        mlx5e_build_tir_ctx_lro(tirc, priv);
 
        MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
        MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
-       mlx5e_build_tir_ctx_hash(tirc, priv);
-
-       switch (tt) {
-       case MLX5E_TT_IPV4_TCP:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV4);
-               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-                        MLX5_L4_PROT_TYPE_TCP);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_L4PORTS);
-               break;
-
-       case MLX5E_TT_IPV6_TCP:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV6);
-               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-                        MLX5_L4_PROT_TYPE_TCP);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_L4PORTS);
-               break;
-
-       case MLX5E_TT_IPV4_UDP:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV4);
-               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-                        MLX5_L4_PROT_TYPE_UDP);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_L4PORTS);
-               break;
-
-       case MLX5E_TT_IPV6_UDP:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV6);
-               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-                        MLX5_L4_PROT_TYPE_UDP);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_L4PORTS);
-               break;
-
-       case MLX5E_TT_IPV4_IPSEC_AH:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV4);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_IPSEC_SPI);
-               break;
-
-       case MLX5E_TT_IPV6_IPSEC_AH:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV6);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_IPSEC_SPI);
-               break;
-
-       case MLX5E_TT_IPV4_IPSEC_ESP:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV4);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_IPSEC_SPI);
-               break;
-
-       case MLX5E_TT_IPV6_IPSEC_ESP:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV6);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_IPSEC_SPI);
-               break;
-
-       case MLX5E_TT_IPV4:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV4);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP);
-               break;
-
-       case MLX5E_TT_IPV6:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV6);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP);
-               break;
-       default:
-               WARN_ONCE(true,
-                         "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
-       }
+       mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
 }
 
 static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
@@ -3331,7 +3331,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
 static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
 {
        if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
        if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
            !MLX5_CAP_GEN(mdev, nic_flow_table) ||
            !MLX5_CAP_ETH(mdev, csum_cap) ||
@@ -3343,7 +3343,7 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
                               < 3) {
                mlx5_core_warn(mdev,
                               "Not creating net device, some required device capabilities are missing\n");
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
        }
        if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
                mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
index 46bef6a26a8cdbebf268b6275271367c4109a77d..c5282b6aba8baf6a3c8f345c55764110cdfd6ff5 100644 (file)
@@ -663,6 +663,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
                                   __be32 *saddr,
                                   int *out_ttl)
 {
+       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct rtable *rt;
        struct neighbour *n = NULL;
        int ttl;
@@ -677,12 +678,11 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 #else
        return -EOPNOTSUPP;
 #endif
-
-       if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
-               pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__);
-               ip_rt_put(rt);
-               return -EOPNOTSUPP;
-       }
+       /* if the egress device isn't on the same HW e-switch, we use the uplink */
+       if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
+               *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+       else
+               *out_dev = rt->dst.dev;
 
        ttl = ip4_dst_hoplimit(&rt->dst);
        n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
@@ -693,7 +693,6 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
        *out_n = n;
        *saddr = fl4->saddr;
        *out_ttl = ttl;
-       *out_dev = rt->dst.dev;
 
        return 0;
 }
index f14d9c9ba77394b83aea50564afd3c762613467a..d0c8bf014453ea38736182c03ba7b2d9c5bcd4d7 100644 (file)
@@ -133,7 +133,7 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
 
        if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
            !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
                  vport, vlan, qos, set_flags);
@@ -353,7 +353,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
        root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
        if (!root_ns) {
                esw_warn(dev, "Failed to get FDB flow namespace\n");
-               return -ENOMEM;
+               return -EOPNOTSUPP;
        }
 
        flow_group_in = mlx5_vzalloc(inlen);
@@ -962,7 +962,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
        root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
        if (!root_ns) {
                esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
-               return -EIO;
+               return -EOPNOTSUPP;
        }
 
        flow_group_in = mlx5_vzalloc(inlen);
@@ -1079,7 +1079,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
        root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
        if (!root_ns) {
                esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
-               return -EIO;
+               return -EOPNOTSUPP;
        }
 
        flow_group_in = mlx5_vzalloc(inlen);
@@ -1630,7 +1630,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
        if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
            !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
                esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
        }
 
        if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
index 03293ed1cc22d2716ff5708dc2312b7291cc1899..595f7c7383b399440aedec593ae0fb0c37bb6748 100644 (file)
@@ -166,7 +166,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
        return 0;
 
 out_notsupp:
-       return -ENOTSUPP;
+       return -EOPNOTSUPP;
 }
 
 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
@@ -424,6 +424,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
        root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
        if (!root_ns) {
                esw_warn(dev, "Failed to get FDB flow namespace\n");
+               err = -EOPNOTSUPP;
                goto ns_err;
        }
 
@@ -535,7 +536,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
        ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
        if (!ns) {
                esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
-               return -ENOMEM;
+               return -EOPNOTSUPP;
        }
 
        ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
@@ -655,7 +656,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
                esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
                err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
                if (err1)
-                       esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
+                       esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
        }
        if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
                if (mlx5_eswitch_inline_mode_get(esw,
@@ -674,9 +675,14 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
        int vport;
        int err;
 
+       /* disable PF RoCE so missed packets don't go through RoCE steering */
+       mlx5_dev_list_lock();
+       mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+       mlx5_dev_list_unlock();
+
        err = esw_create_offloads_fdb_table(esw, nvports);
        if (err)
-               return err;
+               goto create_fdb_err;
 
        err = esw_create_offloads_table(esw);
        if (err)
@@ -696,11 +702,6 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
                        goto err_reps;
        }
 
-       /* disable PF RoCE so missed packets don't go through RoCE steering */
-       mlx5_dev_list_lock();
-       mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-       mlx5_dev_list_unlock();
-
        return 0;
 
 err_reps:
@@ -717,6 +718,13 @@ create_fg_err:
 
 create_ft_err:
        esw_destroy_offloads_fdb_table(esw);
+
+create_fdb_err:
+       /* enable back PF RoCE */
+       mlx5_dev_list_lock();
+       mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+       mlx5_dev_list_unlock();
+
        return err;
 }
 
@@ -724,11 +732,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
 {
        int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
 
-       /* enable back PF RoCE */
-       mlx5_dev_list_lock();
-       mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-       mlx5_dev_list_unlock();
-
        mlx5_eswitch_disable_sriov(esw);
        err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
        if (err) {
@@ -738,6 +741,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
                        esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
        }
 
+       /* enable back PF RoCE */
+       mlx5_dev_list_lock();
+       mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+       mlx5_dev_list_unlock();
+
        return err;
 }
 
index c4478ecd8056e42de2c359eb7a2abfd9e6400090..b53fc85a2375778ddd02ac07d21d88b56c49e432 100644 (file)
@@ -322,7 +322,7 @@ int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
                                                flow_table_properties_nic_receive.
                                                flow_modify_en);
        if (!atomic_mod_cap)
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
        opmod = 1;
 
        return  mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
index 0ac7a2fc916c438bc535b20d45964009747f0b33..6346a8f5883bcc911ef422cf572fd1891ddf73c9 100644 (file)
@@ -1822,7 +1822,7 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
        struct mlx5_flow_table *ft;
 
        ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
-       if (!ns)
+       if (WARN_ON(!ns))
                return -EINVAL;
        ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL, 0);
        if (IS_ERR(ft)) {
index d01e9f21d4691ea497aa7ea0666c83e330c078bb..3c315eb8d270f6f94ecaea2c8ee4d78ed1244658 100644 (file)
@@ -807,7 +807,7 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
                return 0;
        }
 
-       return -ENOTSUPP;
+       return -EOPNOTSUPP;
 }
 
 
index d2ec9d232a70727df71d0c733f60c78a55415392..fd12e0a377a567c693c7f174d7762dd6071ff925 100644 (file)
@@ -620,7 +620,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
        u32 out[MLX5_ST_SZ_DW(qtct_reg)];
 
        if (!MLX5_CAP_GEN(mdev, ets))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        return mlx5_core_access_reg(mdev, in, inlen, out, sizeof(out),
                                    MLX5_REG_QETCR, 0, 1);
@@ -632,7 +632,7 @@ static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
        u32 in[MLX5_ST_SZ_DW(qtct_reg)];
 
        if (!MLX5_CAP_GEN(mdev, ets))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        memset(in, 0, sizeof(in));
        return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen,
index 269e4401c342d1375e70a40ba9905dddf9b65cef..7129c30a2ab477d23be1b8b8d34e7190618e0f9f 100644 (file)
@@ -532,7 +532,7 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
        if (!MLX5_CAP_GEN(mdev, vport_group_manager))
                return -EACCES;
        if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        in = mlx5_vzalloc(inlen);
        if (!in)
index be3c91c7f211d94ad7386b77de73676933a46dcd..5484fd726d5af7f5f10708c57d062b9992be655d 100644 (file)
@@ -305,8 +305,12 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
 {
        void __iomem *ioaddr = hw->pcsr;
        u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+       u32 intr_mask = readl(ioaddr + GMAC_INT_MASK);
        int ret = 0;
 
+       /* Discard masked bits */
+       intr_status &= ~intr_mask;
+
        /* Not used events (e.g. MMC interrupts) are not handled. */
        if ((intr_status & GMAC_INT_STATUS_MMCTIS))
                x->mmc_tx_irq_n++;
index ece59c54a653348014b686882e28e5bf1b335e37..4a40a3d825b41c2758a086a40cb216d6165f1cfa 100644 (file)
@@ -648,8 +648,8 @@ static void ax_setup(struct net_device *dev)
 {
        /* Finish setting up the DEVICE info. */
        dev->mtu             = AX_MTU;
-       dev->hard_header_len = 0;
-       dev->addr_len        = 0;
+       dev->hard_header_len = AX25_MAX_HEADER_LEN;
+       dev->addr_len        = AX25_ADDR_LEN;
        dev->type            = ARPHRD_AX25;
        dev->tx_queue_len    = 10;
        dev->header_ops      = &ax25_header_ops;
index 5a1cc089acb7fd2e79c18876cd7951f6dfb6e747..86e5749226ef4cf65d6070bca1ab0d4be35bf2e0 100644 (file)
@@ -1295,6 +1295,9 @@ void netvsc_channel_cb(void *context)
        ndev = hv_get_drvdata(device);
        buffer = get_per_channel_state(channel);
 
+       /* commit_rd_index() -> hv_signal_on_read() needs this. */
+       init_cached_read_index(channel);
+
        do {
                desc = get_next_pkt_raw(channel);
                if (desc != NULL) {
@@ -1347,6 +1350,9 @@ void netvsc_channel_cb(void *context)
 
                        bufferlen = bytes_recvd;
                }
+
+               init_cached_read_index(channel);
+
        } while (1);
 
        if (bufferlen > NETVSC_PACKET_SIZE)
index 1e05b7c2d157a22e06d115bcdd1d268876be03a7..0844f849641346b092e83923c1d196709b355e4c 100644 (file)
@@ -164,6 +164,7 @@ static void loopback_setup(struct net_device *dev)
 {
        dev->mtu                = 64 * 1024;
        dev->hard_header_len    = ETH_HLEN;     /* 14   */
+       dev->min_header_len     = ETH_HLEN;     /* 14   */
        dev->addr_len           = ETH_ALEN;     /* 6    */
        dev->type               = ARPHRD_LOOPBACK;      /* 0x0001*/
        dev->flags              = IFF_LOOPBACK;
index 4026185658381df004a7d641e2be7bcb9a45b509..c27011bbe30c52d2eb892ab0d86f8cf3d6f4deb9 100644 (file)
@@ -681,7 +681,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
        size_t linear;
 
        if (q->flags & IFF_VNET_HDR) {
-               vnet_hdr_len = q->vnet_hdr_sz;
+               vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
 
                err = -EINVAL;
                if (len < vnet_hdr_len)
@@ -820,7 +820,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
 
        if (q->flags & IFF_VNET_HDR) {
                struct virtio_net_hdr vnet_hdr;
-               vnet_hdr_len = q->vnet_hdr_sz;
+               vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
                if (iov_iter_count(iter) < vnet_hdr_len)
                        return -EINVAL;
 
index c0b4e65267af8b541974bd3a246897e1c38a9ac9..46fe1ae919a30a9a9b7644b5862f4a5bfa0b56ef 100644 (file)
@@ -81,8 +81,6 @@ static int iproc_mdio_read(struct mii_bus *bus, int phy_id, int reg)
        if (rc)
                return rc;
 
-       iproc_mdio_config_clk(priv->base);
-
        /* Prepare the read operation */
        cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) |
                (reg << MII_DATA_RA_SHIFT) |
@@ -112,8 +110,6 @@ static int iproc_mdio_write(struct mii_bus *bus, int phy_id,
        if (rc)
                return rc;
 
-       iproc_mdio_config_clk(priv->base);
-
        /* Prepare the write operation */
        cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) |
                (reg << MII_DATA_RA_SHIFT) |
@@ -163,6 +159,8 @@ static int iproc_mdio_probe(struct platform_device *pdev)
        bus->read = iproc_mdio_read;
        bus->write = iproc_mdio_write;
 
+       iproc_mdio_config_clk(priv->base);
+
        rc = of_mdiobus_register(bus, pdev->dev.of_node);
        if (rc) {
                dev_err(&pdev->dev, "MDIO bus registration failed\n");
index e55809c5beb71a6c1a3a0a60420cb3c263f6a33b..6742070ca676f57694a9a6cb11364941deb520a0 100644 (file)
@@ -1012,7 +1012,7 @@ static struct phy_driver ksphy_driver[] = {
        .phy_id         = PHY_ID_KSZ8795,
        .phy_id_mask    = MICREL_PHY_ID_MASK,
        .name           = "Micrel KSZ8795",
-       .features       = (SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+       .features       = PHY_BASIC_FEATURES,
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
        .config_init    = kszphy_config_init,
        .config_aneg    = ksz8873mll_config_aneg,
index 92b08383cafa8b88e8d5b79ea3a5c0da9998770f..8c8e15b8739dec0ae96d72b514045a99eb1ed7e1 100644 (file)
@@ -908,6 +908,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
        struct module *ndev_owner = dev->dev.parent->driver->owner;
        struct mii_bus *bus = phydev->mdio.bus;
        struct device *d = &phydev->mdio.dev;
+       bool using_genphy = false;
        int err;
 
        /* For Ethernet device drivers that register their own MDIO bus, we
@@ -933,12 +934,22 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
                        d->driver =
                                &genphy_driver[GENPHY_DRV_1G].mdiodrv.driver;
 
+               using_genphy = true;
+       }
+
+       if (!try_module_get(d->driver->owner)) {
+               dev_err(&dev->dev, "failed to get the device driver module\n");
+               err = -EIO;
+               goto error_put_device;
+       }
+
+       if (using_genphy) {
                err = d->driver->probe(d);
                if (err >= 0)
                        err = device_bind_driver(d);
 
                if (err)
-                       goto error;
+                       goto error_module_put;
        }
 
        if (phydev->attached_dev) {
@@ -975,7 +986,13 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
        return err;
 
 error:
+       /* phy_detach() does all of the cleanup below */
        phy_detach(phydev);
+       return err;
+
+error_module_put:
+       module_put(d->driver->owner);
+error_put_device:
        put_device(d);
        if (ndev_owner != bus->owner)
                module_put(bus->owner);
@@ -1039,6 +1056,8 @@ void phy_detach(struct phy_device *phydev)
 
        phy_led_triggers_unregister(phydev);
 
+       module_put(phydev->mdio.dev.driver->owner);
+
        /* If the device had no specific driver before (i.e. - it
         * was using the generic driver), we unbind the device
         * from the generic driver so that there's a chance a
index 2cd10b26b65012ab882c67a433946dadda83b8f7..bfabe180053e414dee777e0e56b24eceef05c918 100644 (file)
@@ -1170,9 +1170,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        }
 
        if (tun->flags & IFF_VNET_HDR) {
-               if (len < tun->vnet_hdr_sz)
+               int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
+
+               if (len < vnet_hdr_sz)
                        return -EINVAL;
-               len -= tun->vnet_hdr_sz;
+               len -= vnet_hdr_sz;
 
                if (!copy_from_iter_full(&gso, sizeof(gso), from))
                        return -EFAULT;
@@ -1183,7 +1185,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 
                if (tun16_to_cpu(tun, gso.hdr_len) > len)
                        return -EINVAL;
-               iov_iter_advance(from, tun->vnet_hdr_sz - sizeof(gso));
+               iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
        }
 
        if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
@@ -1335,7 +1337,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
                vlan_hlen = VLAN_HLEN;
 
        if (tun->flags & IFF_VNET_HDR)
-               vnet_hdr_sz = tun->vnet_hdr_sz;
+               vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
 
        total = skb->len + vlan_hlen + vnet_hdr_sz;
 
index 3daa41bdd4eae0e5d44451458cb456175f8aedb2..0acc9b640419a2e94bc9a2d3d43a5a2a65800c8b 100644 (file)
@@ -776,7 +776,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
        struct net_device *netdev;
        struct catc *catc;
        u8 broadcast[ETH_ALEN];
-       int i, pktsz;
+       int pktsz, ret;
 
        if (usb_set_interface(usbdev,
                        intf->altsetting->desc.bInterfaceNumber, 1)) {
@@ -811,12 +811,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
        if ((!catc->ctrl_urb) || (!catc->tx_urb) || 
            (!catc->rx_urb) || (!catc->irq_urb)) {
                dev_err(&intf->dev, "No free urbs available.\n");
-               usb_free_urb(catc->ctrl_urb);
-               usb_free_urb(catc->tx_urb);
-               usb_free_urb(catc->rx_urb);
-               usb_free_urb(catc->irq_urb);
-               free_netdev(netdev);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto fail_free;
        }
 
        /* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */
@@ -844,15 +840,24 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
                 catc->irq_buf, 2, catc_irq_done, catc, 1);
 
        if (!catc->is_f5u011) {
+               u32 *buf;
+               int i;
+
                dev_dbg(dev, "Checking memory size\n");
 
-               i = 0x12345678;
-               catc_write_mem(catc, 0x7a80, &i, 4);
-               i = 0x87654321; 
-               catc_write_mem(catc, 0xfa80, &i, 4);
-               catc_read_mem(catc, 0x7a80, &i, 4);
+               buf = kmalloc(4, GFP_KERNEL);
+               if (!buf) {
+                       ret = -ENOMEM;
+                       goto fail_free;
+               }
+
+               *buf = 0x12345678;
+               catc_write_mem(catc, 0x7a80, buf, 4);
+               *buf = 0x87654321;
+               catc_write_mem(catc, 0xfa80, buf, 4);
+               catc_read_mem(catc, 0x7a80, buf, 4);
          
-               switch (i) {
+               switch (*buf) {
                case 0x12345678:
                        catc_set_reg(catc, TxBufCount, 8);
                        catc_set_reg(catc, RxBufCount, 32);
@@ -867,6 +872,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
                        dev_dbg(dev, "32k Memory\n");
                        break;
                }
+
+               kfree(buf);
          
                dev_dbg(dev, "Getting MAC from SEEROM.\n");
          
@@ -913,16 +920,21 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
        usb_set_intfdata(intf, catc);
 
        SET_NETDEV_DEV(netdev, &intf->dev);
-       if (register_netdev(netdev) != 0) {
-               usb_set_intfdata(intf, NULL);
-               usb_free_urb(catc->ctrl_urb);
-               usb_free_urb(catc->tx_urb);
-               usb_free_urb(catc->rx_urb);
-               usb_free_urb(catc->irq_urb);
-               free_netdev(netdev);
-               return -EIO;
-       }
+       ret = register_netdev(netdev);
+       if (ret)
+               goto fail_clear_intfdata;
+
        return 0;
+
+fail_clear_intfdata:
+       usb_set_intfdata(intf, NULL);
+fail_free:
+       usb_free_urb(catc->ctrl_urb);
+       usb_free_urb(catc->tx_urb);
+       usb_free_urb(catc->rx_urb);
+       usb_free_urb(catc->irq_urb);
+       free_netdev(netdev);
+       return ret;
 }
 
 static void catc_disconnect(struct usb_interface *intf)
index 24e803fe9a534c2e23dee6418496dd2755ff8de9..36674484c6fb9b73011619824f7bc60c50b9c1ad 100644 (file)
@@ -126,40 +126,61 @@ static void async_ctrl_callback(struct urb *urb)
 
 static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
 {
+       u8 *buf;
        int ret;
 
+       buf = kmalloc(size, GFP_NOIO);
+       if (!buf)
+               return -ENOMEM;
+
        ret = usb_control_msg(pegasus->usb, usb_rcvctrlpipe(pegasus->usb, 0),
                              PEGASUS_REQ_GET_REGS, PEGASUS_REQT_READ, 0,
-                             indx, data, size, 1000);
+                             indx, buf, size, 1000);
        if (ret < 0)
                netif_dbg(pegasus, drv, pegasus->net,
                          "%s returned %d\n", __func__, ret);
+       else if (ret <= size)
+               memcpy(data, buf, ret);
+       kfree(buf);
        return ret;
 }
 
-static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
+static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
+                        const void *data)
 {
+       u8 *buf;
        int ret;
 
+       buf = kmemdup(data, size, GFP_NOIO);
+       if (!buf)
+               return -ENOMEM;
+
        ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
                              PEGASUS_REQ_SET_REGS, PEGASUS_REQT_WRITE, 0,
-                             indx, data, size, 100);
+                             indx, buf, size, 100);
        if (ret < 0)
                netif_dbg(pegasus, drv, pegasus->net,
                          "%s returned %d\n", __func__, ret);
+       kfree(buf);
        return ret;
 }
 
 static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
 {
+       u8 *buf;
        int ret;
 
+       buf = kmemdup(&data, 1, GFP_NOIO);
+       if (!buf)
+               return -ENOMEM;
+
        ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
                              PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data,
-                             indx, &data, 1, 1000);
+                             indx, buf, 1, 1000);
        if (ret < 0)
                netif_dbg(pegasus, drv, pegasus->net,
                          "%s returned %d\n", __func__, ret);
+       kfree(buf);
        return ret;
 }
 
index 95b7bd0d7abcac85482da6067626e034a57b56b3..c81c79110cefca9443d614679d8e7cdd4b3295c3 100644 (file)
@@ -155,16 +155,36 @@ static const char driver_name [] = "rtl8150";
 */
 static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
 {
-       return usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
-                              RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
-                              indx, 0, data, size, 500);
+       void *buf;
+       int ret;
+
+       buf = kmalloc(size, GFP_NOIO);
+       if (!buf)
+               return -ENOMEM;
+
+       ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+                             RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
+                             indx, 0, buf, size, 500);
+       if (ret > 0 && ret <= size)
+               memcpy(data, buf, ret);
+       kfree(buf);
+       return ret;
 }
 
-static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
+static int set_registers(rtl8150_t * dev, u16 indx, u16 size, const void *data)
 {
-       return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
-                              RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
-                              indx, 0, data, size, 500);
+       void *buf;
+       int ret;
+
+       buf = kmemdup(data, size, GFP_NOIO);
+       if (!buf)
+               return -ENOMEM;
+
+       ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+                             RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
+                             indx, 0, buf, size, 500);
+       kfree(buf);
+       return ret;
 }
 
 static void async_set_reg_cb(struct urb *urb)
index 12071f1582df2c9b4e867e432f972659d3f78dc4..d9440bc022f2c40d965f6a6dd804f7ba74944cbc 100644 (file)
@@ -73,8 +73,6 @@ static        atomic_t iface_counter = ATOMIC_INIT(0);
 /* Private data structure */
 struct sierra_net_data {
 
-       u8 ethr_hdr_tmpl[ETH_HLEN]; /* ethernet header template for rx'd pkts */
-
        u16 link_up;            /* air link up or down */
        u8 tx_hdr_template[4];  /* part of HIP hdr for tx'd packets */
 
@@ -122,6 +120,7 @@ struct param {
 
 /* LSI Protocol types */
 #define SIERRA_NET_PROTOCOL_UMTS      0x01
+#define SIERRA_NET_PROTOCOL_UMTS_DS   0x04
 /* LSI Coverage */
 #define SIERRA_NET_COVERAGE_NONE      0x00
 #define SIERRA_NET_COVERAGE_NOPACKET  0x01
@@ -129,7 +128,8 @@ struct param {
 /* LSI Session */
 #define SIERRA_NET_SESSION_IDLE       0x00
 /* LSI Link types */
-#define SIERRA_NET_AS_LINK_TYPE_IPv4  0x00
+#define SIERRA_NET_AS_LINK_TYPE_IPV4  0x00
+#define SIERRA_NET_AS_LINK_TYPE_IPV6  0x02
 
 struct lsi_umts {
        u8 protocol;
@@ -137,9 +137,14 @@ struct lsi_umts {
        __be16 length;
        /* eventually use a union for the rest - assume umts for now */
        u8 coverage;
-       u8 unused2[41];
+       u8 network_len; /* network name len */
+       u8 network[40]; /* network name (UCS2, bigendian) */
        u8 session_state;
        u8 unused3[33];
+} __packed;
+
+struct lsi_umts_single {
+       struct lsi_umts lsi;
        u8 link_type;
        u8 pdp_addr_len; /* NW-supplied PDP address len */
        u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian)) */
@@ -158,10 +163,31 @@ struct lsi_umts {
        u8 reserved[8];
 } __packed;
 
+struct lsi_umts_dual {
+       struct lsi_umts lsi;
+       u8 pdp_addr4_len; /* NW-supplied PDP IPv4 address len */
+       u8 pdp_addr4[4];  /* NW-supplied PDP IPv4 address (bigendian)) */
+       u8 pdp_addr6_len; /* NW-supplied PDP IPv6 address len */
+       u8 pdp_addr6[16]; /* NW-supplied PDP IPv6 address (bigendian)) */
+       u8 unused4[23];
+       u8 dns1_addr4_len; /* NW-supplied 1st DNS v4 address len (bigendian) */
+       u8 dns1_addr4[4];  /* NW-supplied 1st DNS v4 address */
+       u8 dns1_addr6_len; /* NW-supplied 1st DNS v6 address len */
+       u8 dns1_addr6[16]; /* NW-supplied 1st DNS v6 address (bigendian)*/
+       u8 dns2_addr4_len; /* NW-supplied 2nd DNS v4 address len (bigendian) */
+       u8 dns2_addr4[4];  /* NW-supplied 2nd DNS v4 address */
+       u8 dns2_addr6_len; /* NW-supplied 2nd DNS v6 address len */
+       u8 dns2_addr6[16]; /* NW-supplied 2nd DNS v6 address (bigendian)*/
+       u8 unused5[68];
+} __packed;
+
 #define SIERRA_NET_LSI_COMMON_LEN      4
-#define SIERRA_NET_LSI_UMTS_LEN        (sizeof(struct lsi_umts))
+#define SIERRA_NET_LSI_UMTS_LEN        (sizeof(struct lsi_umts_single))
 #define SIERRA_NET_LSI_UMTS_STATUS_LEN \
        (SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN)
+#define SIERRA_NET_LSI_UMTS_DS_LEN     (sizeof(struct lsi_umts_dual))
+#define SIERRA_NET_LSI_UMTS_DS_STATUS_LEN \
+       (SIERRA_NET_LSI_UMTS_DS_LEN - SIERRA_NET_LSI_COMMON_LEN)
 
 /* Forward definitions */
 static void sierra_sync_timer(unsigned long syncdata);
@@ -190,10 +216,11 @@ static inline void sierra_net_set_private(struct usbnet *dev,
        dev->data[0] = (unsigned long)priv;
 }
 
-/* is packet IPv4 */
+/* is packet IPv4/IPv6 */
 static inline int is_ip(struct sk_buff *skb)
 {
-       return skb->protocol == cpu_to_be16(ETH_P_IP);
+       return skb->protocol == cpu_to_be16(ETH_P_IP) ||
+              skb->protocol == cpu_to_be16(ETH_P_IPV6);
 }
 
 /*
@@ -349,49 +376,54 @@ static inline int sierra_net_is_valid_addrlen(u8 len)
 static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen)
 {
        struct lsi_umts *lsi = (struct lsi_umts *)data;
+       u32 expected_length;
 
-       if (datalen < sizeof(struct lsi_umts)) {
-               netdev_err(dev->net, "%s: Data length %d, exp %Zu\n",
-                               __func__, datalen,
-                               sizeof(struct lsi_umts));
+       if (datalen < sizeof(struct lsi_umts_single)) {
+               netdev_err(dev->net, "%s: Data length %d, exp >= %Zu\n",
+                          __func__, datalen, sizeof(struct lsi_umts_single));
                return -1;
        }
 
-       if (lsi->length != cpu_to_be16(SIERRA_NET_LSI_UMTS_STATUS_LEN)) {
-               netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
-                               __func__, be16_to_cpu(lsi->length),
-                               (u32)SIERRA_NET_LSI_UMTS_STATUS_LEN);
-               return -1;
+       /* Validate the session state */
+       if (lsi->session_state == SIERRA_NET_SESSION_IDLE) {
+               netdev_err(dev->net, "Session idle, 0x%02x\n",
+                          lsi->session_state);
+               return 0;
        }
 
        /* Validate the protocol  - only support UMTS for now */
-       if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) {
+       if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS) {
+               struct lsi_umts_single *single = (struct lsi_umts_single *)lsi;
+
+               /* Validate the link type */
+               if (single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV4 &&
+                   single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV6) {
+                       netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
+                                  single->link_type);
+                       return -1;
+               }
+               expected_length = SIERRA_NET_LSI_UMTS_STATUS_LEN;
+       } else if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS_DS) {
+               expected_length = SIERRA_NET_LSI_UMTS_DS_STATUS_LEN;
+       } else {
                netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
-                       lsi->protocol);
+                          lsi->protocol);
                return -1;
        }
 
-       /* Validate the link type */
-       if (lsi->link_type != SIERRA_NET_AS_LINK_TYPE_IPv4) {
-               netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
-                       lsi->link_type);
+       if (be16_to_cpu(lsi->length) != expected_length) {
+               netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
+                          __func__, be16_to_cpu(lsi->length), expected_length);
                return -1;
        }
 
        /* Validate the coverage */
-       if (lsi->coverage == SIERRA_NET_COVERAGE_NONE
-          || lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
+       if (lsi->coverage == SIERRA_NET_COVERAGE_NONE ||
+           lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
                netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
                return 0;
        }
 
-       /* Validate the session state */
-       if (lsi->session_state == SIERRA_NET_SESSION_IDLE) {
-               netdev_err(dev->net, "Session idle, 0x%02x\n",
-                       lsi->session_state);
-               return 0;
-       }
-
        /* Set link_sense true */
        return 1;
 }
@@ -652,7 +684,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
        u8      numendpoints;
        u16     fwattr = 0;
        int     status;
-       struct ethhdr *eth;
        struct sierra_net_data *priv;
        static const u8 sync_tmplate[sizeof(priv->sync_msg)] = {
                0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00};
@@ -690,11 +721,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
        dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
        dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
 
-       /* we will have to manufacture ethernet headers, prepare template */
-       eth = (struct ethhdr *)priv->ethr_hdr_tmpl;
-       memcpy(&eth->h_dest, dev->net->dev_addr, ETH_ALEN);
-       eth->h_proto = cpu_to_be16(ETH_P_IP);
-
        /* prepare shutdown message template */
        memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg));
        /* set context index initially to 0 - prepares tx hdr template */
@@ -824,9 +850,14 @@ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 
                skb_pull(skb, hh.hdrlen);
 
-               /* We are going to accept this packet, prepare it */
-               memcpy(skb->data, sierra_net_get_private(dev)->ethr_hdr_tmpl,
-                       ETH_HLEN);
+               /* We are going to accept this packet, prepare it.
+                * In case protocol is IPv6, keep it, otherwise force IPv4.
+                */
+               skb_reset_mac_header(skb);
+               if (eth_hdr(skb)->h_proto != cpu_to_be16(ETH_P_IPV6))
+                       eth_hdr(skb)->h_proto = cpu_to_be16(ETH_P_IP);
+               eth_zero_addr(eth_hdr(skb)->h_source);
+               memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
 
                /* Last packet in batch handled by usbnet */
                if (hh.payload_len.word == skb->len)
index d02ca1491d16cede66389540f8cb92dda5749ff3..8d3e53fac1dabc01ed875b6f8c2863bb908f770c 100644 (file)
@@ -91,7 +91,7 @@
 
 #define IWL8000_FW_PRE "iwlwifi-8000C-"
 #define IWL8000_MODULE_FIRMWARE(api) \
-       IWL8000_FW_PRE "-" __stringify(api) ".ucode"
+       IWL8000_FW_PRE __stringify(api) ".ucode"
 
 #define IWL8265_FW_PRE "iwlwifi-8265-"
 #define IWL8265_MODULE_FIRMWARE(api) \
index 636c8b03e31892bd30e3a3d7a6b1e9b8a8eb02ea..09e9e2e3ed040202f0cb40c1e326584b0fa7465a 100644 (file)
@@ -1164,9 +1164,10 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
                .frame_limit = IWL_FRAME_LIMIT,
        };
 
-       /* Make sure reserved queue is still marked as such (or allocated) */
-       mvm->queue_info[mvm_sta->reserved_queue].status =
-               IWL_MVM_QUEUE_RESERVED;
+       /* Make sure reserved queue is still marked as such (if allocated) */
+       if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
+               mvm->queue_info[mvm_sta->reserved_queue].status =
+                       IWL_MVM_QUEUE_RESERVED;
 
        for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
                struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
index 63a051be832ed44b30978b4d031464b15765df20..bec7d9c46087d3c8fed48d5858f4f116797eceed 100644 (file)
@@ -843,8 +843,10 @@ static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm)
                return;
 
        IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n");
-       thermal_zone_device_unregister(mvm->tz_device.tzone);
-       mvm->tz_device.tzone = NULL;
+       if (mvm->tz_device.tzone) {
+               thermal_zone_device_unregister(mvm->tz_device.tzone);
+               mvm->tz_device.tzone = NULL;
+       }
 }
 
 static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
@@ -853,8 +855,10 @@ static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
                return;
 
        IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n");
-       thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
-       mvm->cooling_dev.cdev = NULL;
+       if (mvm->cooling_dev.cdev) {
+               thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
+               mvm->cooling_dev.cdev = NULL;
+       }
 }
 #endif /* CONFIG_THERMAL */
 
index 691ddef1ae28eab7d2a193fca3a72baa7871e480..a33a06d58a9ae8496e9baa2cd7a6e2ac1f3087e2 100644 (file)
@@ -92,7 +92,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       char *fw_name = "rtlwifi/rtl8192cfwU.bin";
+       char *fw_name;
 
        rtl8192ce_bt_reg_init(hw);
 
@@ -164,8 +164,13 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
        }
 
        /* request fw */
-       if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
+       if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
+           !IS_92C_SERIAL(rtlhal->version))
+               fw_name = "rtlwifi/rtl8192cfwU.bin";
+       else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
                fw_name = "rtlwifi/rtl8192cfwU_B.bin";
+       else
+               fw_name = "rtlwifi/rtl8192cfw.bin";
 
        rtlpriv->max_fw_size = 0x4000;
        pr_info("Using firmware %s\n", fw_name);
index 8315fe73ecd04d355b216edc9e4abbda4fc81bf8..1e4125a98291245f5e806a79247a92ca1418092b 100644 (file)
@@ -281,6 +281,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 {
        RING_IDX req_prod = queue->rx.req_prod_pvt;
        int notify;
+       int err = 0;
 
        if (unlikely(!netif_carrier_ok(queue->info->netdev)))
                return;
@@ -295,8 +296,10 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
                struct xen_netif_rx_request *req;
 
                skb = xennet_alloc_one_rx_buffer(queue);
-               if (!skb)
+               if (!skb) {
+                       err = -ENOMEM;
                        break;
+               }
 
                id = xennet_rxidx(req_prod);
 
@@ -320,8 +323,13 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 
        queue->rx.req_prod_pvt = req_prod;
 
-       /* Not enough requests? Try again later. */
-       if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) {
+       /* Try again later if there are not enough requests or skb allocation
+        * failed.
+        * Enough requests is quantified as the sum of newly created slots and
+        * the unconsumed slots at the backend.
+        */
+       if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
+           unlikely(err)) {
                mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
                return;
        }
@@ -1379,6 +1387,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
        for (i = 0; i < num_queues && info->queues; ++i) {
                struct netfront_queue *queue = &info->queues[i];
 
+               del_timer_sync(&queue->rx_refill_timer);
+
                if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
                        unbind_from_irqhandler(queue->tx_irq, queue);
                if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
@@ -1733,7 +1743,6 @@ static void xennet_destroy_queues(struct netfront_info *info)
 
                if (netif_running(info->netdev))
                        napi_disable(&queue->napi);
-               del_timer_sync(&queue->rx_refill_timer);
                netif_napi_del(&queue->napi);
        }
 
@@ -1822,27 +1831,19 @@ static int talk_to_netback(struct xenbus_device *dev,
                xennet_destroy_queues(info);
 
        err = xennet_create_queues(info, &num_queues);
-       if (err < 0)
-               goto destroy_ring;
+       if (err < 0) {
+               xenbus_dev_fatal(dev, err, "creating queues");
+               kfree(info->queues);
+               info->queues = NULL;
+               goto out;
+       }
 
        /* Create shared ring, alloc event channel -- for each queue */
        for (i = 0; i < num_queues; ++i) {
                queue = &info->queues[i];
                err = setup_netfront(dev, queue, feature_split_evtchn);
-               if (err) {
-                       /* setup_netfront() will tidy up the current
-                        * queue on error, but we need to clean up
-                        * those already allocated.
-                        */
-                       if (i > 0) {
-                               rtnl_lock();
-                               netif_set_real_num_tx_queues(info->netdev, i);
-                               rtnl_unlock();
-                               goto destroy_ring;
-                       } else {
-                               goto out;
-                       }
-               }
+               if (err)
+                       goto destroy_ring;
        }
 
 again:
@@ -1932,9 +1933,10 @@ abort_transaction_no_dev_fatal:
        xenbus_transaction_end(xbt, 1);
  destroy_ring:
        xennet_disconnect_backend(info);
-       kfree(info->queues);
-       info->queues = NULL;
+       xennet_destroy_queues(info);
  out:
+       unregister_netdev(info->netdev);
+       xennet_free_netdev(info->netdev);
        return err;
 }
 
index a518cb1b59d4238b675fccd695f45003af380296..ce3e8dfa10ad5ccc5285621e5c5b27b5c557e16b 100644 (file)
@@ -52,17 +52,17 @@ static void namespace_blk_release(struct device *dev)
        kfree(nsblk);
 }
 
-static struct device_type namespace_io_device_type = {
+static const struct device_type namespace_io_device_type = {
        .name = "nd_namespace_io",
        .release = namespace_io_release,
 };
 
-static struct device_type namespace_pmem_device_type = {
+static const struct device_type namespace_pmem_device_type = {
        .name = "nd_namespace_pmem",
        .release = namespace_pmem_release,
 };
 
-static struct device_type namespace_blk_device_type = {
+static const struct device_type namespace_blk_device_type = {
        .name = "nd_namespace_blk",
        .release = namespace_blk_release,
 };
@@ -962,8 +962,8 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
        struct nvdimm_drvdata *ndd;
        struct nd_label_id label_id;
        u32 flags = 0, remainder;
+       int rc, i, id = -1;
        u8 *uuid = NULL;
-       int rc, i;
 
        if (dev->driver || ndns->claim)
                return -EBUSY;
@@ -972,11 +972,13 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
 
                uuid = nspm->uuid;
+               id = nspm->id;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
 
                uuid = nsblk->uuid;
                flags = NSLABEL_FLAG_LOCAL;
+               id = nsblk->id;
        }
 
        /*
@@ -1039,10 +1041,11 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
 
        /*
         * Try to delete the namespace if we deleted all of its
-        * allocation, this is not the seed device for the region, and
-        * it is not actively claimed by a btt instance.
+        * allocation, this is not the seed or 0th device for the
+        * region, and it is not actively claimed by a btt, pfn, or dax
+        * instance.
         */
-       if (val == 0 && nd_region->ns_seed != dev && !ndns->claim)
+       if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim)
                nd_device_unregister(dev, ND_ASYNC);
 
        return rc;
index a2ac9e641aa9341f2fcca9fa8b968fd874e80a90..6c033c9a2f06921feb49e3ba396d6cf150e19801 100644 (file)
@@ -627,15 +627,12 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
        size = resource_size(&nsio->res);
        npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K;
        if (nd_pfn->mode == PFN_MODE_PMEM) {
-               unsigned long memmap_size;
-
                /*
                 * vmemmap_populate_hugepages() allocates the memmap array in
                 * HPAGE_SIZE chunks.
                 */
-               memmap_size = ALIGN(64 * npfns, HPAGE_SIZE);
-               offset = ALIGN(start + SZ_8K + memmap_size + dax_label_reserve,
-                               nd_pfn->align) - start;
+               offset = ALIGN(start + SZ_8K + 64 * npfns + dax_label_reserve,
+                               max(nd_pfn->align, HPAGE_SIZE)) - start;
        } else if (nd_pfn->mode == PFN_MODE_RAM)
                offset = ALIGN(start + SZ_8K + dax_label_reserve,
                                nd_pfn->align) - start;
index 10c9c0ba8ff2394dc8b43d640e5dbd4d53c5e84e..ec0b4c11ccd9dc95cdb738d03ae292744e34127e 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/slab.h>
-#include <linux/pm_runtime.h>
 #include <linux/pci.h>
 #include "../pci.h"
 #include "pciehp.h"
@@ -99,7 +98,6 @@ static int board_added(struct slot *p_slot)
        pciehp_green_led_blink(p_slot);
 
        /* Check link training status */
-       pm_runtime_get_sync(&ctrl->pcie->port->dev);
        retval = pciehp_check_link_status(ctrl);
        if (retval) {
                ctrl_err(ctrl, "Failed to check link status\n");
@@ -120,14 +118,12 @@ static int board_added(struct slot *p_slot)
                if (retval != -EEXIST)
                        goto err_exit;
        }
-       pm_runtime_put(&ctrl->pcie->port->dev);
 
        pciehp_green_led_on(p_slot);
        pciehp_set_attention_status(p_slot, 0);
        return 0;
 
 err_exit:
-       pm_runtime_put(&ctrl->pcie->port->dev);
        set_slot_off(ctrl, p_slot);
        return retval;
 }
@@ -141,9 +137,7 @@ static int remove_board(struct slot *p_slot)
        int retval;
        struct controller *ctrl = p_slot->ctrl;
 
-       pm_runtime_get_sync(&ctrl->pcie->port->dev);
        retval = pciehp_unconfigure_device(p_slot);
-       pm_runtime_put(&ctrl->pcie->port->dev);
        if (retval)
                return retval;
 
index 50c5003295ca535036b056d7855caf0c96473f13..7f73bacf13ed9ef212ef5c51d0ab301310be3a75 100644 (file)
@@ -1206,6 +1206,16 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
        if (flags & PCI_IRQ_AFFINITY) {
                if (!affd)
                        affd = &msi_default_affd;
+
+               if (affd->pre_vectors + affd->post_vectors > min_vecs)
+                       return -EINVAL;
+
+               /*
+                * If there aren't any vectors left after applying the pre/post
+                * vectors don't bother with assigning affinity.
+                */
+               if (affd->pre_vectors + affd->post_vectors == min_vecs)
+                       affd = NULL;
        } else {
                if (WARN_ON(affd))
                        affd = NULL;
index a881c0d3d2e87e023bd9f68eb35b9d57ac2e3a9f..7904d02ffdb97e5f23d915e9c83edbf599ddf58b 100644 (file)
@@ -2241,10 +2241,13 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge)
                        return false;
 
                /*
-                * Hotplug ports handled by firmware in System Management Mode
+                * Hotplug interrupts cannot be delivered if the link is down,
+                * so parents of a hotplug port must stay awake. In addition,
+                * hotplug ports handled by firmware in System Management Mode
                 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
+                * For simplicity, disallow in general for now.
                 */
-               if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
+               if (bridge->is_hotplug_bridge)
                        return false;
 
                if (pci_bridge_d3_force)
@@ -2276,10 +2279,7 @@ static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
             !pci_pme_capable(dev, PCI_D3cold)) ||
 
            /* If it is a bridge it must be allowed to go to D3. */
-           !pci_power_manageable(dev) ||
-
-           /* Hotplug interrupts cannot be delivered if the link is down. */
-           dev->is_hotplug_bridge)
+           !pci_power_manageable(dev))
 
                *d3cold_ok = false;
 
index 17ac1dce32867051298a5489841de8b636835a68..3dd8bcbb3011babd4ad4271d6f6f64733bd9b3f1 100644 (file)
@@ -532,25 +532,32 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
        link = kzalloc(sizeof(*link), GFP_KERNEL);
        if (!link)
                return NULL;
+
        INIT_LIST_HEAD(&link->sibling);
        INIT_LIST_HEAD(&link->children);
        INIT_LIST_HEAD(&link->link);
        link->pdev = pdev;
-       if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) {
+
+       /*
+        * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
+        * hierarchies.
+        */
+       if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
+           pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE) {
+               link->root = link;
+       } else {
                struct pcie_link_state *parent;
+
                parent = pdev->bus->parent->self->link_state;
                if (!parent) {
                        kfree(link);
                        return NULL;
                }
+
                link->parent = parent;
+               link->root = link->parent->root;
                list_add(&link->link, &parent->children);
        }
-       /* Setup a pointer to the root port link */
-       if (!link->parent)
-               link->root = link;
-       else
-               link->root = link->parent->root;
 
        list_add(&link->sibling, &link_list);
        pdev->link_state = link;
index 09172043d5890735127b0f79275a98704cdbd2b6..c617ec49e9edeeebb1f33b78fa6b7214fb23207c 100644 (file)
@@ -217,7 +217,7 @@ static const struct berlin_desc_group berlin4ct_soc_pinctrl_groups[] = {
        BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15,
                        BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */
                        BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */
-                       BERLIN_PINCTRL_FUNCTION(0x1, "sd1a")), /* DAT3 */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* DAT3 */
        BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18,
                        BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */
                        BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */
index c123488266ce74883ed8ba972b43103d136bb66e..d94aef17348b4b88f3952670886b863b952cfc60 100644 (file)
@@ -731,16 +731,23 @@ static void __iomem *byt_gpio_reg(struct byt_gpio *vg, unsigned int offset,
                                  int reg)
 {
        struct byt_community *comm = byt_get_community(vg, offset);
-       u32 reg_offset = 0;
+       u32 reg_offset;
 
        if (!comm)
                return NULL;
 
        offset -= comm->pin_base;
-       if (reg == BYT_INT_STAT_REG)
+       switch (reg) {
+       case BYT_INT_STAT_REG:
                reg_offset = (offset / 32) * 4;
-       else
+               break;
+       case BYT_DEBOUNCE_REG:
+               reg_offset = 0;
+               break;
+       default:
                reg_offset = comm->pad_map[offset] * 16;
+               break;
+       }
 
        return comm->reg_base + reg_offset + reg;
 }
@@ -1243,10 +1250,12 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
                        debounce = readl(db_reg);
                        debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
 
+                       if (arg)
+                               conf |= BYT_DEBOUNCE_EN;
+                       else
+                               conf &= ~BYT_DEBOUNCE_EN;
+
                        switch (arg) {
-                       case 0:
-                               conf &= BYT_DEBOUNCE_EN;
-                               break;
                        case 375:
                                debounce |= BYT_DEBOUNCE_PULSE_375US;
                                break;
@@ -1269,7 +1278,9 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
                                debounce |= BYT_DEBOUNCE_PULSE_24MS;
                                break;
                        default:
-                               ret = -EINVAL;
+                               if (arg)
+                                       ret = -EINVAL;
+                               break;
                        }
 
                        if (!ret)
@@ -1612,7 +1623,9 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
                        continue;
                }
 
+               raw_spin_lock(&vg->lock);
                pending = readl(reg);
+               raw_spin_unlock(&vg->lock);
                for_each_set_bit(pin, &pending, 32) {
                        virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
                        generic_handle_irq(virq);
index b21896126f760a5cbae044ab13cba527e9dbfeff..4d4ef42a39b5faaa1969d20a5aeeedffef90074c 100644 (file)
@@ -794,6 +794,9 @@ static int mrfld_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
        unsigned int i;
        int ret;
 
+       if (!mrfld_buf_available(mp, pin))
+               return -ENOTSUPP;
+
        for (i = 0; i < nconfigs; i++) {
                switch (pinconf_to_config_param(configs[i])) {
                case PIN_CONFIG_BIAS_DISABLE:
index 0eb51e33cb1be5412ab11d10e7cdb474a2faa061..207a8de4e1ed851cf542aa4af008e8f74102cad3 100644 (file)
@@ -564,8 +564,7 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
                        val = arg / 10 - 1;
                        break;
                case PIN_CONFIG_BIAS_DISABLE:
-                       val = 0;
-                       break;
+                       continue;
                case PIN_CONFIG_BIAS_PULL_UP:
                        if (arg == 0)
                                return -EINVAL;
index e6a512ebeae2762812212ac5b9264f92bb8252be..a3ade9e4ef478ed90311365a4e86db0cbbc394d5 100644 (file)
@@ -272,7 +272,7 @@ static const struct regulator_desc axp806_regulators[] = {
                        64, AXP806_DCDCD_V_CTRL, 0x3f, AXP806_PWR_OUT_CTRL1,
                        BIT(3)),
        AXP_DESC(AXP806, DCDCE, "dcdce", "vine", 1100, 3400, 100,
-                AXP806_DCDCB_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)),
+                AXP806_DCDCE_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)),
        AXP_DESC(AXP806, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
                 AXP806_ALDO1_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(5)),
        AXP_DESC(AXP806, ALDO2, "aldo2", "aldoin", 700, 3400, 100,
index a43b0e8a438d305a959d3d745c7c65bf796ca9f9..988a7472c2ab568c3d1c03d1092c0713073a6d28 100644 (file)
@@ -30,9 +30,6 @@
 #include <linux/of_gpio.h>
 #include <linux/regulator/of_regulator.h>
 #include <linux/regulator/machine.h>
-#include <linux/acpi.h>
-#include <linux/property.h>
-#include <linux/gpio/consumer.h>
 
 struct fixed_voltage_data {
        struct regulator_desc desc;
@@ -97,44 +94,6 @@ of_get_fixed_voltage_config(struct device *dev,
        return config;
 }
 
-/**
- * acpi_get_fixed_voltage_config - extract fixed_voltage_config structure info
- * @dev: device requesting for fixed_voltage_config
- * @desc: regulator description
- *
- * Populates fixed_voltage_config structure by extracting data through ACPI
- * interface, returns a pointer to the populated structure of NULL if memory
- * alloc fails.
- */
-static struct fixed_voltage_config *
-acpi_get_fixed_voltage_config(struct device *dev,
-                             const struct regulator_desc *desc)
-{
-       struct fixed_voltage_config *config;
-       const char *supply_name;
-       struct gpio_desc *gpiod;
-       int ret;
-
-       config = devm_kzalloc(dev, sizeof(*config), GFP_KERNEL);
-       if (!config)
-               return ERR_PTR(-ENOMEM);
-
-       ret = device_property_read_string(dev, "supply-name", &supply_name);
-       if (!ret)
-               config->supply_name = supply_name;
-
-       gpiod = gpiod_get(dev, "gpio", GPIOD_ASIS);
-       if (IS_ERR(gpiod))
-               return ERR_PTR(-ENODEV);
-
-       config->gpio = desc_to_gpio(gpiod);
-       config->enable_high = device_property_read_bool(dev,
-                                                       "enable-active-high");
-       gpiod_put(gpiod);
-
-       return config;
-}
-
 static struct regulator_ops fixed_voltage_ops = {
 };
 
@@ -155,11 +114,6 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
                                                     &drvdata->desc);
                if (IS_ERR(config))
                        return PTR_ERR(config);
-       } else if (ACPI_HANDLE(&pdev->dev)) {
-               config = acpi_get_fixed_voltage_config(&pdev->dev,
-                                                      &drvdata->desc);
-               if (IS_ERR(config))
-                       return PTR_ERR(config);
        } else {
                config = dev_get_platdata(&pdev->dev);
        }
index 4864b9d742c0f7915cc792aaacd692c2a7f305b0..716191046a70782b0007033dda6e1402c0d68ea3 100644 (file)
@@ -452,7 +452,7 @@ static int twl6030smps_map_voltage(struct regulator_dev *rdev, int min_uV,
                        vsel = 62;
                else if ((min_uV > 1800000) && (min_uV <= 1900000))
                        vsel = 61;
-               else if ((min_uV > 1350000) && (min_uV <= 1800000))
+               else if ((min_uV > 1500000) && (min_uV <= 1800000))
                        vsel = 60;
                else if ((min_uV > 1350000) && (min_uV <= 1500000))
                        vsel = 59;
index c93c5a8fba32925584dbc28c60610786328d09ff..5dc673dc948785a79da8d070954323b9357385c1 100644 (file)
@@ -1551,12 +1551,15 @@ config RTC_DRV_MPC5121
          will be called rtc-mpc5121.
 
 config RTC_DRV_JZ4740
-       bool "Ingenic JZ4740 SoC"
+       tristate "Ingenic JZ4740 SoC"
        depends on MACH_INGENIC || COMPILE_TEST
        help
          If you say yes here you get support for the Ingenic JZ47xx SoCs RTC
          controllers.
 
+         This driver can also be buillt as a module. If so, the module
+         will be called rtc-jz4740.
+
 config RTC_DRV_LPC24XX
        tristate "NXP RTC for LPC178x/18xx/408x/43xx"
        depends on ARCH_LPC18XX || COMPILE_TEST
index 72918c1ba0928d4fc78d921db621c489fafb9701..64989afffa3daada4b062321c527f18bca142bbb 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/reboot.h>
@@ -294,7 +295,7 @@ static void jz4740_rtc_power_off(void)
                             JZ_REG_RTC_RESET_COUNTER, reset_counter_ticks);
 
        jz4740_rtc_poweroff(dev_for_power_off);
-       machine_halt();
+       kernel_halt();
 }
 
 static const struct of_device_id jz4740_rtc_of_match[] = {
@@ -302,6 +303,7 @@ static const struct of_device_id jz4740_rtc_of_match[] = {
        { .compatible = "ingenic,jz4780-rtc", .data = (void *)ID_JZ4780 },
        {},
 };
+MODULE_DEVICE_TABLE(of, jz4740_rtc_of_match);
 
 static int jz4740_rtc_probe(struct platform_device *pdev)
 {
@@ -429,6 +431,7 @@ static const struct platform_device_id jz4740_rtc_ids[] = {
        { "jz4780-rtc", ID_JZ4780 },
        {}
 };
+MODULE_DEVICE_TABLE(platform, jz4740_rtc_ids);
 
 static struct platform_driver jz4740_rtc_driver = {
        .probe   = jz4740_rtc_probe,
@@ -440,4 +443,9 @@ static struct platform_driver jz4740_rtc_driver = {
        .id_table = jz4740_rtc_ids,
 };
 
-builtin_platform_driver(jz4740_rtc_driver);
+module_platform_driver(jz4740_rtc_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RTC driver for the JZ4740 SoC\n");
+MODULE_ALIAS("platform:jz4740-rtc");
index 75f820ca17b79b0574e3afd91df18998a3438c30..27ff38f839fc1c752385f430f55020fb934ed6de 100644 (file)
@@ -1583,7 +1583,7 @@ out:
 int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
 {
        struct zfcp_qdio *qdio = wka_port->adapter->qdio;
-       struct zfcp_fsf_req *req = NULL;
+       struct zfcp_fsf_req *req;
        int retval = -EIO;
 
        spin_lock_irq(&qdio->req_q_lock);
@@ -1612,7 +1612,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
                zfcp_fsf_req_free(req);
 out:
        spin_unlock_irq(&qdio->req_q_lock);
-       if (req && !IS_ERR(req))
+       if (!retval)
                zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
        return retval;
 }
@@ -1638,7 +1638,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
 int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
 {
        struct zfcp_qdio *qdio = wka_port->adapter->qdio;
-       struct zfcp_fsf_req *req = NULL;
+       struct zfcp_fsf_req *req;
        int retval = -EIO;
 
        spin_lock_irq(&qdio->req_q_lock);
@@ -1667,7 +1667,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
                zfcp_fsf_req_free(req);
 out:
        spin_unlock_irq(&qdio->req_q_lock);
-       if (req && !IS_ERR(req))
+       if (!retval)
                zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
        return retval;
 }
index 4f56b1003cc7d9d8c56dcb98ead00b0fb5348788..5b48bedd7c385f3b73adb97d45f691d540914f42 100644 (file)
@@ -50,9 +50,13 @@ struct aac_common aac_config = {
 
 static inline int aac_is_msix_mode(struct aac_dev *dev)
 {
-       u32 status;
+       u32 status = 0;
 
-       status = src_readl(dev, MUnit.OMR);
+       if (dev->pdev->device == PMC_DEVICE_S6 ||
+               dev->pdev->device == PMC_DEVICE_S7 ||
+               dev->pdev->device == PMC_DEVICE_S8) {
+               status = src_readl(dev, MUnit.OMR);
+       }
        return (status & AAC_INT_MODE_MSIX);
 }
 
index 99b747cedbebc517a78714db321743f0837834b6..0f807798c6245f8ba64b5ec3cced694d6a821c31 100644 (file)
@@ -3816,6 +3816,7 @@ static struct configfs_attribute *ibmvscsis_tpg_attrs[] = {
 static const struct target_core_fabric_ops ibmvscsis_ops = {
        .module                         = THIS_MODULE,
        .name                           = "ibmvscsis",
+       .max_data_sg_nents              = MAX_TXU / PAGE_SIZE,
        .get_fabric_name                = ibmvscsis_get_fabric_name,
        .tpg_get_wwn                    = ibmvscsis_get_fabric_wwn,
        .tpg_get_tag                    = ibmvscsis_get_tag,
index 75f3fce1c86773299704347fc0960fb5148ea53f..0b5b423b1db0d19a1a1dfb6fc38e168af4559d71 100644 (file)
@@ -51,6 +51,7 @@
 #include <linux/workqueue.h>
 #include <linux/delay.h>
 #include <linux/pci.h>
+#include <linux/pci-aspm.h>
 #include <linux/interrupt.h>
 #include <linux/aer.h>
 #include <linux/raid_class.h>
@@ -4657,6 +4658,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
        struct MPT3SAS_DEVICE *sas_device_priv_data;
        u32 response_code = 0;
        unsigned long flags;
+       unsigned int sector_sz;
 
        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
        scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
@@ -4715,6 +4717,20 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
        }
 
        xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
+
+       /* In case of bogus fw or device, we could end up having
+        * unaligned partial completion. We can force alignment here,
+        * then scsi-ml does not need to handle this misbehavior.
+        */
+       sector_sz = scmd->device->sector_size;
+       if (unlikely(scmd->request->cmd_type == REQ_TYPE_FS && sector_sz &&
+                    xfer_cnt % sector_sz)) {
+               sdev_printk(KERN_INFO, scmd->device,
+                   "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n",
+                           xfer_cnt, sector_sz);
+               xfer_cnt = round_down(xfer_cnt, sector_sz);
+       }
+
        scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
        if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
                log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
@@ -8746,6 +8762,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        switch (hba_mpi_version) {
        case MPI2_VERSION:
+               pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
+                       PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
                /* Use mpt2sas driver host template for SAS 2.0 HBA's */
                shost = scsi_host_alloc(&mpt2sas_driver_template,
                  sizeof(struct MPT3SAS_ADAPTER));
index dc88a09f9043c9359cba9c276e523571235c1b50..a94b0b6bd0306379b4707b29be1347d1ce89e6b8 100644 (file)
@@ -3242,7 +3242,7 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
         * from a probe failure context.
         */
        if (!ha->rsp_q_map || !ha->rsp_q_map[0])
-               return;
+               goto free_irqs;
        rsp = ha->rsp_q_map[0];
 
        if (ha->flags.msix_enabled) {
@@ -3262,6 +3262,7 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
                free_irq(pci_irq_vector(ha->pdev, 0), rsp);
        }
 
+free_irqs:
        pci_free_irq_vectors(ha->pdev);
 }
 
index 0a000ecf0881411d4c01c1a95245d1eb9d9da771..40660461a4b5c3e56e61b124385ee044547a4741 100644 (file)
@@ -1616,7 +1616,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
                                /* Don't abort commands in adapter during EEH
                                 * recovery as it's not accessible/responding.
                                 */
-                               if (!ha->flags.eeh_busy) {
+                               if (GET_CMD_SP(sp) && !ha->flags.eeh_busy) {
                                        /* Get a reference to the sp and drop the lock.
                                         * The reference ensures this sp->done() call
                                         * - and not the call in qla2xxx_eh_abort() -
index ec91bd07f00a307337283cbea6f72c6e370a0170..c680d76413116c00b80193f5e7db9de2e13441b1 100644 (file)
@@ -534,7 +534,9 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
 {
        struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
        struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
+       unsigned long flags;
        int req_size;
+       int ret;
 
        BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
 
@@ -562,8 +564,15 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
                req_size = sizeof(cmd->req.cmd);
        }
 
-       if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0)
+       ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd));
+       if (ret == -EIO) {
+               cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
+               spin_lock_irqsave(&req_vq->vq_lock, flags);
+               virtscsi_complete_cmd(vscsi, cmd);
+               spin_unlock_irqrestore(&req_vq->vq_lock, flags);
+       } else if (ret != 0) {
                return SCSI_MLQUEUE_HOST_BUSY;
+       }
        return 0;
 }
 
index 113f3d6c4b3a6cdeda3fce3abe729fbc927fd9d9..27f75b17679b8f19a5f6100a769dd7b1b0f77455 100644 (file)
@@ -45,12 +45,18 @@ u32 gb_timesync_platform_get_clock_rate(void)
 
 int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata)
 {
+       if (!arche_platform_change_state_cb)
+               return 0;
+
        return arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_TIME_SYNC,
                                              pdata);
 }
 
 void gb_timesync_platform_unlock_bus(void)
 {
+       if (!arche_platform_change_state_cb)
+               return;
+
        arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_ACTIVE, NULL);
 }
 
index ee01f20d8b1110bec42a3aebf82ef69b79e63d80..9afa6bec3e6f44e22f1bf4fbcbd76e4476348fb3 100644 (file)
@@ -390,15 +390,13 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
                result = VM_FAULT_LOCKED;
                break;
        case -ENODATA:
+       case -EAGAIN:
        case -EFAULT:
                result = VM_FAULT_NOPAGE;
                break;
        case -ENOMEM:
                result = VM_FAULT_OOM;
                break;
-       case -EAGAIN:
-               result = VM_FAULT_RETRY;
-               break;
        default:
                result = VM_FAULT_SIGBUS;
                break;
index 1ebd13ef7bd333c5cbc488f7543eda58e29a2123..26929c44d70316d19a5691a400c0f87b4f63ff9d 100644 (file)
@@ -352,7 +352,15 @@ int core_enable_device_list_for_node(
                        kfree(new);
                        return -EINVAL;
                }
-               BUG_ON(orig->se_lun_acl != NULL);
+               if (orig->se_lun_acl != NULL) {
+                       pr_warn_ratelimited("Detected existing explicit"
+                               " se_lun_acl->se_lun_group reference for %s"
+                               " mapped_lun: %llu, failing\n",
+                                nacl->initiatorname, mapped_lun);
+                       mutex_unlock(&nacl->lun_entry_mutex);
+                       kfree(new);
+                       return -EINVAL;
+               }
 
                rcu_assign_pointer(new->se_lun, lun);
                rcu_assign_pointer(new->se_lun_acl, lun_acl);
index 4879e70e2eefb68ddc229effbe4a9822f369ce3f..df7b6e95c019dd91ee56671d93e03a3356fb51b9 100644 (file)
@@ -451,6 +451,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
                                             int *post_ret)
 {
        struct se_device *dev = cmd->se_dev;
+       sense_reason_t ret = TCM_NO_SENSE;
 
        /*
         * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
@@ -458,9 +459,12 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
         * sent to the backend driver.
         */
        spin_lock_irq(&cmd->t_state_lock);
-       if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
+       if (cmd->transport_state & CMD_T_SENT) {
                cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
                *post_ret = 1;
+
+               if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
+                       ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
        spin_unlock_irq(&cmd->t_state_lock);
 
@@ -470,7 +474,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
         */
        up(&dev->caw_sem);
 
-       return TCM_NO_SENSE;
+       return ret;
 }
 
 static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
index 1cadc9eefa21a47e783160b874dbd2ce02f8f05f..437591bc7c0855d85102be0ca42d093531004343 100644 (file)
@@ -457,8 +457,20 @@ static void target_complete_nacl(struct kref *kref)
 {
        struct se_node_acl *nacl = container_of(kref,
                                struct se_node_acl, acl_kref);
+       struct se_portal_group *se_tpg = nacl->se_tpg;
 
-       complete(&nacl->acl_free_comp);
+       if (!nacl->dynamic_stop) {
+               complete(&nacl->acl_free_comp);
+               return;
+       }
+
+       mutex_lock(&se_tpg->acl_node_mutex);
+       list_del(&nacl->acl_list);
+       mutex_unlock(&se_tpg->acl_node_mutex);
+
+       core_tpg_wait_for_nacl_pr_ref(nacl);
+       core_free_device_list_for_node(nacl, se_tpg);
+       kfree(nacl);
 }
 
 void target_put_nacl(struct se_node_acl *nacl)
@@ -499,12 +511,39 @@ EXPORT_SYMBOL(transport_deregister_session_configfs);
 void transport_free_session(struct se_session *se_sess)
 {
        struct se_node_acl *se_nacl = se_sess->se_node_acl;
+
        /*
         * Drop the se_node_acl->nacl_kref obtained from within
         * core_tpg_get_initiator_node_acl().
         */
        if (se_nacl) {
+               struct se_portal_group *se_tpg = se_nacl->se_tpg;
+               const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
+               unsigned long flags;
+
                se_sess->se_node_acl = NULL;
+
+               /*
+                * Also determine if we need to drop the extra ->cmd_kref if
+                * it had been previously dynamically generated, and
+                * the endpoint is not caching dynamic ACLs.
+                */
+               mutex_lock(&se_tpg->acl_node_mutex);
+               if (se_nacl->dynamic_node_acl &&
+                   !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
+                       spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
+                       if (list_empty(&se_nacl->acl_sess_list))
+                               se_nacl->dynamic_stop = true;
+                       spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
+
+                       if (se_nacl->dynamic_stop)
+                               list_del(&se_nacl->acl_list);
+               }
+               mutex_unlock(&se_tpg->acl_node_mutex);
+
+               if (se_nacl->dynamic_stop)
+                       target_put_nacl(se_nacl);
+
                target_put_nacl(se_nacl);
        }
        if (se_sess->sess_cmd_map) {
@@ -518,16 +557,12 @@ EXPORT_SYMBOL(transport_free_session);
 void transport_deregister_session(struct se_session *se_sess)
 {
        struct se_portal_group *se_tpg = se_sess->se_tpg;
-       const struct target_core_fabric_ops *se_tfo;
-       struct se_node_acl *se_nacl;
        unsigned long flags;
-       bool drop_nacl = false;
 
        if (!se_tpg) {
                transport_free_session(se_sess);
                return;
        }
-       se_tfo = se_tpg->se_tpg_tfo;
 
        spin_lock_irqsave(&se_tpg->session_lock, flags);
        list_del(&se_sess->sess_list);
@@ -535,33 +570,15 @@ void transport_deregister_session(struct se_session *se_sess)
        se_sess->fabric_sess_ptr = NULL;
        spin_unlock_irqrestore(&se_tpg->session_lock, flags);
 
-       /*
-        * Determine if we need to do extra work for this initiator node's
-        * struct se_node_acl if it had been previously dynamically generated.
-        */
-       se_nacl = se_sess->se_node_acl;
-
-       mutex_lock(&se_tpg->acl_node_mutex);
-       if (se_nacl && se_nacl->dynamic_node_acl) {
-               if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
-                       list_del(&se_nacl->acl_list);
-                       drop_nacl = true;
-               }
-       }
-       mutex_unlock(&se_tpg->acl_node_mutex);
-
-       if (drop_nacl) {
-               core_tpg_wait_for_nacl_pr_ref(se_nacl);
-               core_free_device_list_for_node(se_nacl, se_tpg);
-               se_sess->se_node_acl = NULL;
-               kfree(se_nacl);
-       }
        pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
                se_tpg->se_tpg_tfo->get_fabric_name());
        /*
         * If last kref is dropping now for an explicit NodeACL, awake sleeping
         * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
         * removal context from within transport_free_session() code.
+        *
+        * For dynamic ACL, target_put_nacl() uses target_complete_nacl()
+        * to release all remaining generate_node_acl=1 created ACL resources.
         */
 
        transport_free_session(se_sess);
@@ -3110,7 +3127,6 @@ static void target_tmr_work(struct work_struct *work)
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                goto check_stop;
        }
-       cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
        cmd->se_tfo->queue_tm_rsp(cmd);
@@ -3123,11 +3139,25 @@ int transport_generic_handle_tmr(
        struct se_cmd *cmd)
 {
        unsigned long flags;
+       bool aborted = false;
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
-       cmd->transport_state |= CMD_T_ACTIVE;
+       if (cmd->transport_state & CMD_T_ABORTED) {
+               aborted = true;
+       } else {
+               cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
+               cmd->transport_state |= CMD_T_ACTIVE;
+       }
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
+       if (aborted) {
+               pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d"
+                       "ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function,
+                       cmd->se_tmr_req->ref_task_tag, cmd->tag);
+               transport_cmd_check_stop_to_fabric(cmd);
+               return 0;
+       }
+
        INIT_WORK(&cmd->work, target_tmr_work);
        queue_work(cmd->se_dev->tmr_wq, &cmd->work);
        return 0;
index d828b3b5000bf421826b9823efcbac8d6b2d58a3..cac5a20a4de07ba554151c6099182c9e675efbed 100644 (file)
@@ -864,7 +864,7 @@ out:
                        " CHECK_CONDITION -> sending response\n", rc);
                ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
        }
-       target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
+       target_complete_cmd(ec_cmd, ec_cmd->scsi_status);
 }
 
 sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
index d2e50a27140c9254be2a80b6c6ae69bc71a93b4a..24f9f98968a5d860f83920287a5b7deb4c98bed6 100644 (file)
@@ -37,6 +37,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* CBM - Flash disk */
        { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* WORLDE easy key (easykey.25) MIDI controller  */
+       { USB_DEVICE(0x0218, 0x0401), .driver_info =
+                       USB_QUIRK_CONFIG_INTF_STRINGS },
+
        /* HP 5300/5370C scanner */
        { USB_DEVICE(0x03f0, 0x0701), .driver_info =
                        USB_QUIRK_STRING_FETCH_255 },
index 5490fc51638ede3c565eff9036ff3beaf884d3a9..fd80c1b9c8234cf4de8371c7ca4e528bc4712fc3 100644 (file)
@@ -2269,6 +2269,8 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
                if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)
                        return -EINVAL;
                length = le32_to_cpu(d->dwSize);
+               if (len < length)
+                       return -EINVAL;
                type = le32_to_cpu(d->dwPropertyDataType);
                if (type < USB_EXT_PROP_UNICODE ||
                    type > USB_EXT_PROP_UNICODE_MULTI) {
@@ -2277,6 +2279,11 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
                        return -EINVAL;
                }
                pnl = le16_to_cpu(d->wPropertyNameLength);
+               if (length < 14 + pnl) {
+                       pr_vdebug("invalid os descriptor length: %d pnl:%d (descriptor %d)\n",
+                                 length, pnl, type);
+                       return -EINVAL;
+               }
                pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl));
                if (length != 14 + pnl + pdl) {
                        pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
@@ -2363,6 +2370,9 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
                }
        }
        if (flags & (1 << i)) {
+               if (len < 4) {
+                       goto error;
+               }
                os_descs_count = get_unaligned_le32(data);
                data += 4;
                len -= 4;
@@ -2435,7 +2445,8 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
 
        ENTER();
 
-       if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
+       if (unlikely(len < 16 ||
+                    get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
                     get_unaligned_le32(data + 4) != len))
                goto error;
        str_count  = get_unaligned_le32(data + 8);
index fca288bbc8009580ba96198ce1a2a49330074d20..772f1582124255d749ab470a42394498af8f9741 100644 (file)
@@ -594,11 +594,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
                                                | MUSB_PORT_STAT_RESUME;
                                musb->rh_timer = jiffies
                                        + msecs_to_jiffies(USB_RESUME_TIMEOUT);
-                               musb->need_finish_resume = 1;
-
                                musb->xceiv->otg->state = OTG_STATE_A_HOST;
                                musb->is_active = 1;
                                musb_host_resume_root_hub(musb);
+                               schedule_delayed_work(&musb->finish_resume_work,
+                                       msecs_to_jiffies(USB_RESUME_TIMEOUT));
                                break;
                        case OTG_STATE_B_WAIT_ACON:
                                musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
@@ -1925,6 +1925,14 @@ static void musb_pm_runtime_check_session(struct musb *musb)
 static void musb_irq_work(struct work_struct *data)
 {
        struct musb *musb = container_of(data, struct musb, irq_work.work);
+       int error;
+
+       error = pm_runtime_get_sync(musb->controller);
+       if (error < 0) {
+               dev_err(musb->controller, "Could not enable: %i\n", error);
+
+               return;
+       }
 
        musb_pm_runtime_check_session(musb);
 
@@ -1932,6 +1940,9 @@ static void musb_irq_work(struct work_struct *data)
                musb->xceiv_old_state = musb->xceiv->otg->state;
                sysfs_notify(&musb->controller->kobj, NULL, "mode");
        }
+
+       pm_runtime_mark_last_busy(musb->controller);
+       pm_runtime_put_autosuspend(musb->controller);
 }
 
 static void musb_recover_from_babble(struct musb *musb)
@@ -2710,11 +2721,6 @@ static int musb_resume(struct device *dev)
        mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV;
        if ((devctl & mask) != (musb->context.devctl & mask))
                musb->port1_status = 0;
-       if (musb->need_finish_resume) {
-               musb->need_finish_resume = 0;
-               schedule_delayed_work(&musb->finish_resume_work,
-                                     msecs_to_jiffies(USB_RESUME_TIMEOUT));
-       }
 
        /*
         * The USB HUB code expects the device to be in RPM_ACTIVE once it came
@@ -2766,12 +2772,6 @@ static int musb_runtime_resume(struct device *dev)
 
        musb_restore_context(musb);
 
-       if (musb->need_finish_resume) {
-               musb->need_finish_resume = 0;
-               schedule_delayed_work(&musb->finish_resume_work,
-                               msecs_to_jiffies(USB_RESUME_TIMEOUT));
-       }
-
        spin_lock_irqsave(&musb->lock, flags);
        error = musb_run_resume_work(musb);
        if (error)
index ade902ea1221e18543de05a5188c715d86af7398..ce5a18c98c6d1134231a29fa9229cf6804a0ae6e 100644 (file)
@@ -410,7 +410,6 @@ struct musb {
 
        /* is_suspended means USB B_PERIPHERAL suspend */
        unsigned                is_suspended:1;
-       unsigned                need_finish_resume :1;
 
        /* may_wakeup means remote wakeup is enabled */
        unsigned                may_wakeup:1;
index 7ce31a4c7e7fd3d186e8e05b20b9a3ca52700b6c..42cc72e54c051b2115c358bcee8bfc534258d206 100644 (file)
@@ -2007,6 +2007,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
index 46fca6b7584686744a9e79aae0bd78db08192813..1db4b61bdf7bd710d7be6e3ff81b97102d76fbe9 100644 (file)
@@ -49,6 +49,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
        { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
        { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
+       { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
        { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
        { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
        { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID_UCSGT) },
index e3b7af8adfb73ccefa92d4ba3c2c927a044dfa43..09d9be88209e1ce6b1f53dc052a53c5e4c491336 100644 (file)
@@ -27,6 +27,7 @@
 #define ATEN_VENDOR_ID         0x0557
 #define ATEN_VENDOR_ID2                0x0547
 #define ATEN_PRODUCT_ID                0x2008
+#define ATEN_PRODUCT_ID2       0x2118
 
 #define IODATA_VENDOR_ID       0x04bb
 #define IODATA_PRODUCT_ID      0x0a03
index 1bc6089b90083a05e0ef3e4ff71c0ef752e3831a..696458db7e3c45e661a9825d05df0fe25dc0a832 100644 (file)
@@ -124,6 +124,7 @@ static const struct usb_device_id id_table[] = {
        {USB_DEVICE(0x1410, 0xa021)},   /* Novatel Gobi 3000 Composite */
        {USB_DEVICE(0x413c, 0x8193)},   /* Dell Gobi 3000 QDL */
        {USB_DEVICE(0x413c, 0x8194)},   /* Dell Gobi 3000 Composite */
+       {USB_DEVICE(0x413c, 0x81a6)},   /* Dell DW5570 QDL (MC8805) */
        {USB_DEVICE(0x1199, 0x68a4)},   /* Sierra Wireless QDL */
        {USB_DEVICE(0x1199, 0x68a5)},   /* Sierra Wireless Modem */
        {USB_DEVICE(0x1199, 0x68a8)},   /* Sierra Wireless QDL */
index 128d10282d1632693dc40819ff8b39485ba1e1de..59b3f62a2d64ebd85be990630e09a48c7c7f2f1c 100644 (file)
@@ -1123,12 +1123,11 @@ static long tce_iommu_ioctl(void *iommu_data,
                mutex_lock(&container->lock);
 
                ret = tce_iommu_create_default_window(container);
-               if (ret)
-                       return ret;
-
-               ret = tce_iommu_create_window(container, create.page_shift,
-                               create.window_size, create.levels,
-                               &create.start_addr);
+               if (!ret)
+                       ret = tce_iommu_create_window(container,
+                                       create.page_shift,
+                                       create.window_size, create.levels,
+                                       &create.start_addr);
 
                mutex_unlock(&container->lock);
 
@@ -1246,6 +1245,8 @@ static void tce_iommu_release_ownership_ddw(struct tce_container *container,
 static long tce_iommu_take_ownership_ddw(struct tce_container *container,
                struct iommu_table_group *table_group)
 {
+       long i, ret = 0;
+
        if (!table_group->ops->create_table || !table_group->ops->set_window ||
                        !table_group->ops->release_ownership) {
                WARN_ON_ONCE(1);
@@ -1254,7 +1255,27 @@ static long tce_iommu_take_ownership_ddw(struct tce_container *container,
 
        table_group->ops->take_ownership(table_group);
 
+       /* Set all windows to the new group */
+       for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
+               struct iommu_table *tbl = container->tables[i];
+
+               if (!tbl)
+                       continue;
+
+               ret = table_group->ops->set_window(table_group, i, tbl);
+               if (ret)
+                       goto release_exit;
+       }
+
        return 0;
+
+release_exit:
+       for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
+               table_group->ops->unset_window(table_group, i);
+
+       table_group->ops->release_ownership(table_group);
+
+       return ret;
 }
 
 static int tce_iommu_attach_group(void *iommu_data,
index d6432603880c1343ea2451eba6df1973e6d61822..8f99fe08de02e7b48725a99d682055c03056b82a 100644 (file)
@@ -130,14 +130,14 @@ static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
 
 static void vhost_init_is_le(struct vhost_virtqueue *vq)
 {
-       if (vhost_has_feature(vq, VIRTIO_F_VERSION_1))
-               vq->is_le = true;
+       vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
+               || virtio_legacy_is_little_endian();
 }
 #endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
 
 static void vhost_reset_is_le(struct vhost_virtqueue *vq)
 {
-       vq->is_le = virtio_legacy_is_little_endian();
+       vhost_init_is_le(vq);
 }
 
 struct vhost_flush_struct {
@@ -1714,10 +1714,8 @@ int vhost_vq_init_access(struct vhost_virtqueue *vq)
        int r;
        bool is_le = vq->is_le;
 
-       if (!vq->private_data) {
-               vhost_reset_is_le(vq);
+       if (!vq->private_data)
                return 0;
-       }
 
        vhost_init_is_le(vq);
 
index 7e38ed79c3fc0f2c095164d480f75b31630a6694..409aeaa49246a0edd7c6da07ca38b58c3f876109 100644 (file)
@@ -159,13 +159,6 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
        if (xen_domain())
                return true;
 
-       /*
-        * On ARM-based machines, the DMA ops will do the right thing,
-        * so always use them with legacy devices.
-        */
-       if (IS_ENABLED(CONFIG_ARM) || IS_ENABLED(CONFIG_ARM64))
-               return !virtio_has_feature(vdev, VIRTIO_F_VERSION_1);
-
        return false;
 }
 
index 7f390849343b3e42b9399c2ac58b948c87944902..c4444d6f439f676cee59a20322d0f88fcb3cc4a3 100644 (file)
@@ -1024,6 +1024,7 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
        unsigned long buf_offset;
        unsigned long current_buf_start;
        unsigned long start_byte;
+       unsigned long prev_start_byte;
        unsigned long working_bytes = total_out - buf_start;
        unsigned long bytes;
        char *kaddr;
@@ -1071,26 +1072,34 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
                if (!bio->bi_iter.bi_size)
                        return 0;
                bvec = bio_iter_iovec(bio, bio->bi_iter);
-
+               prev_start_byte = start_byte;
                start_byte = page_offset(bvec.bv_page) - disk_start;
 
                /*
-                * make sure our new page is covered by this
-                * working buffer
+                * We need to make sure we're only adjusting
+                * our offset into compression working buffer when
+                * we're switching pages.  Otherwise we can incorrectly
+                * keep copying when we were actually done.
                 */
-               if (total_out <= start_byte)
-                       return 1;
+               if (start_byte != prev_start_byte) {
+                       /*
+                        * make sure our new page is covered by this
+                        * working buffer
+                        */
+                       if (total_out <= start_byte)
+                               return 1;
 
-               /*
-                * the next page in the biovec might not be adjacent
-                * to the last page, but it might still be found
-                * inside this working buffer. bump our offset pointer
-                */
-               if (total_out > start_byte &&
-                   current_buf_start < start_byte) {
-                       buf_offset = start_byte - buf_start;
-                       working_bytes = total_out - start_byte;
-                       current_buf_start = buf_start + buf_offset;
+                       /*
+                        * the next page in the biovec might not be adjacent
+                        * to the last page, but it might still be found
+                        * inside this working buffer. bump our offset pointer
+                        */
+                       if (total_out > start_byte &&
+                           current_buf_start < start_byte) {
+                               buf_offset = start_byte - buf_start;
+                               working_bytes = total_out - start_byte;
+                               current_buf_start = buf_start + buf_offset;
+                       }
                }
        }
 
index 33f967d30b2ad1d555015baea2bccc5eba05d1d5..21e51b0ba188a37be6b71cab9218a8b9457b86f8 100644 (file)
@@ -5653,6 +5653,10 @@ long btrfs_ioctl(struct file *file, unsigned int
 #ifdef CONFIG_COMPAT
 long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
+       /*
+        * These all access 32-bit values anyway so no further
+        * handling is necessary.
+        */
        switch (cmd) {
        case FS_IOC32_GETFLAGS:
                cmd = FS_IOC_GETFLAGS;
@@ -5663,8 +5667,6 @@ long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        case FS_IOC32_GETVERSION:
                cmd = FS_IOC_GETVERSION;
                break;
-       default:
-               return -ENOIOCTLCMD;
        }
 
        return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
index 8f6a2a5863b9d9275bfb6afb00fc16b867101275..a27fc8791551cc86ca14d2d65e7870990a393f1e 100644 (file)
@@ -285,6 +285,7 @@ initiate_cifs_search(const unsigned int xid, struct file *file)
                        rc = -ENOMEM;
                        goto error_exit;
                }
+               spin_lock_init(&cifsFile->file_info_lock);
                file->private_data = cifsFile;
                cifsFile->tlink = cifs_get_tlink(tlink);
                tcon = tlink_tcon(tlink);
index 3af2da5e64ce77fa8ae4b3f294c82882d350120f..c45598b912e14c981fdeb002b01c1535218c0ff2 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1031,6 +1031,11 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                struct blk_dax_ctl dax = { 0 };
                ssize_t map_len;
 
+               if (fatal_signal_pending(current)) {
+                       ret = -EINTR;
+                       break;
+               }
+
                dax.sector = dax_iomap_sector(iomap, pos);
                dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
                map_len = dax_map_atomic(iomap->bdev, &dax);
index 4304072161aa08c14d24291bf24eb2481c567874..40d61077bead88e39abff93bcdb185941462bfcb 100644 (file)
@@ -542,6 +542,7 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
                hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
                        if (invalidate)
                                set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
+                       clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
                        fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
                }
        } else {
@@ -560,6 +561,10 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
                wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
                                 TASK_UNINTERRUPTIBLE);
 
+       /* Make sure any pending writes are cancelled. */
+       if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX)
+               fscache_invalidate_writes(cookie);
+
        /* Reset the cookie state if it wasn't relinquished */
        if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) {
                atomic_inc(&cookie->n_active);
index 9b28649df3a1fdc6f0f0c23b58b03db94dd69eb2..a8aa00be44442f59d6cf08516ab7403d0c02ab9e 100644 (file)
@@ -48,6 +48,7 @@ int __fscache_register_netfs(struct fscache_netfs *netfs)
        cookie->flags           = 1 << FSCACHE_COOKIE_ENABLED;
 
        spin_lock_init(&cookie->lock);
+       spin_lock_init(&cookie->stores_lock);
        INIT_HLIST_HEAD(&cookie->backing_objects);
 
        /* check the netfs type is not already present */
index 9e792e30f4db47b38c6db644487c440a2e12febb..7a182c87f37805f1a5fa6719f5cc06cf3dd38552 100644 (file)
@@ -30,6 +30,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
 static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
 static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
 static const struct fscache_state *fscache_update_object(struct fscache_object *, int);
+static const struct fscache_state *fscache_object_dead(struct fscache_object *, int);
 
 #define __STATE_NAME(n) fscache_osm_##n
 #define STATE(n) (&__STATE_NAME(n))
@@ -91,7 +92,7 @@ static WORK_STATE(LOOKUP_FAILURE,     "LCFL", fscache_lookup_failure);
 static WORK_STATE(KILL_OBJECT,         "KILL", fscache_kill_object);
 static WORK_STATE(KILL_DEPENDENTS,     "KDEP", fscache_kill_dependents);
 static WORK_STATE(DROP_OBJECT,         "DROP", fscache_drop_object);
-static WORK_STATE(OBJECT_DEAD,         "DEAD", (void*)2UL);
+static WORK_STATE(OBJECT_DEAD,         "DEAD", fscache_object_dead);
 
 static WAIT_STATE(WAIT_FOR_INIT,       "?INI",
                  TRANSIT_TO(INIT_OBJECT,       1 << FSCACHE_OBJECT_EV_NEW_CHILD));
@@ -229,6 +230,10 @@ execute_work_state:
        event = -1;
        if (new_state == NO_TRANSIT) {
                _debug("{OBJ%x} %s notrans", object->debug_id, state->name);
+               if (unlikely(state == STATE(OBJECT_DEAD))) {
+                       _leave(" [dead]");
+                       return;
+               }
                fscache_enqueue_object(object);
                event_mask = object->oob_event_mask;
                goto unmask_events;
@@ -239,7 +244,7 @@ execute_work_state:
        object->state = state = new_state;
 
        if (state->work) {
-               if (unlikely(state->work == ((void *)2UL))) {
+               if (unlikely(state == STATE(OBJECT_DEAD))) {
                        _leave(" [dead]");
                        return;
                }
@@ -645,6 +650,12 @@ static const struct fscache_state *fscache_kill_object(struct fscache_object *ob
        fscache_mark_object_dead(object);
        object->oob_event_mask = 0;
 
+       if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) {
+               /* Reject any new read/write ops and abort any that are pending. */
+               clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
+               fscache_cancel_all_ops(object);
+       }
+
        if (list_empty(&object->dependents) &&
            object->n_ops == 0 &&
            object->n_children == 0)
@@ -1077,3 +1088,20 @@ void fscache_object_mark_killed(struct fscache_object *object,
        }
 }
 EXPORT_SYMBOL(fscache_object_mark_killed);
+
+/*
+ * The object is dead.  We can get here if an object gets queued by an event
+ * that would lead to its death (such as EV_KILL) when the dispatcher is
+ * already running (and so can be requeued) but hasn't yet cleared the event
+ * mask.
+ */
+static const struct fscache_state *fscache_object_dead(struct fscache_object *object,
+                                                      int event)
+{
+       if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD,
+                             &object->flags))
+               return NO_TRANSIT;
+
+       WARN(true, "FS-Cache object redispatched after death");
+       return NO_TRANSIT;
+}
index 354a123f170e534a016f74ca7006458e3b823ef8..a51cb4c07d4d8cd3a09715361c84126cecae2ca2 100644 (file)
@@ -114,6 +114,9 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
 
        BUG_ON(pos + len > iomap->offset + iomap->length);
 
+       if (fatal_signal_pending(current))
+               return -EINTR;
+
        page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
        if (!page)
                return -ENOMEM;
index 596205d939a1f43f1faa292d31680377045a8589..1fc07a9c70e9c6028342e8c97d183dfe914a343b 100644 (file)
@@ -223,10 +223,11 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
        struct nfs4_layout_stateid *ls;
        struct nfs4_stid *stp;
 
-       stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache);
+       stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache,
+                                       nfsd4_free_layout_stateid);
        if (!stp)
                return NULL;
-       stp->sc_free = nfsd4_free_layout_stateid;
+
        get_nfs4_file(fp);
        stp->sc_file = fp;
 
index 4b4beaaa4eaac01233f874c7dfdb8d1a6d7cd3d6..a0dee8ae9f97f16a18e40ba19f8e84a45ad1a02b 100644 (file)
@@ -633,8 +633,8 @@ out:
        return co;
 }
 
-struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
-                                        struct kmem_cache *slab)
+struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
+                                 void (*sc_free)(struct nfs4_stid *))
 {
        struct nfs4_stid *stid;
        int new_id;
@@ -650,6 +650,8 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
        idr_preload_end();
        if (new_id < 0)
                goto out_free;
+
+       stid->sc_free = sc_free;
        stid->sc_client = cl;
        stid->sc_stateid.si_opaque.so_id = new_id;
        stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
@@ -675,15 +677,12 @@ out_free:
 static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
 {
        struct nfs4_stid *stid;
-       struct nfs4_ol_stateid *stp;
 
-       stid = nfs4_alloc_stid(clp, stateid_slab);
+       stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
        if (!stid)
                return NULL;
 
-       stp = openlockstateid(stid);
-       stp->st_stid.sc_free = nfs4_free_ol_stateid;
-       return stp;
+       return openlockstateid(stid);
 }
 
 static void nfs4_free_deleg(struct nfs4_stid *stid)
@@ -781,11 +780,10 @@ alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
                goto out_dec;
        if (delegation_blocked(&current_fh->fh_handle))
                goto out_dec;
-       dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
+       dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
        if (dp == NULL)
                goto out_dec;
 
-       dp->dl_stid.sc_free = nfs4_free_deleg;
        /*
         * delegation seqid's are never incremented.  The 4.1 special
         * meaning of seqid 0 isn't meaningful, really, but let's avoid
@@ -5580,7 +5578,6 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
        stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
        get_nfs4_file(fp);
        stp->st_stid.sc_file = fp;
-       stp->st_stid.sc_free = nfs4_free_lock_stateid;
        stp->st_access_bmap = 0;
        stp->st_deny_bmap = open_stp->st_deny_bmap;
        stp->st_openstp = open_stp;
@@ -5623,7 +5620,7 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
        lst = find_lock_stateid(lo, fi);
        if (lst == NULL) {
                spin_unlock(&clp->cl_lock);
-               ns = nfs4_alloc_stid(clp, stateid_slab);
+               ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
                if (ns == NULL)
                        return NULL;
 
index c9399366f9dfc73b343d079fbad2dc2127927aae..4516e8b7d776305d94fb89f86256ee3fc54dec27 100644 (file)
@@ -603,8 +603,8 @@ extern __be32 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
 __be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
                     stateid_t *stateid, unsigned char typemask,
                     struct nfs4_stid **s, struct nfsd_net *nn);
-struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
-               struct kmem_cache *slab);
+struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
+                                 void (*sc_free)(struct nfs4_stid *));
 void nfs4_unhash_stid(struct nfs4_stid *s);
 void nfs4_put_stid(struct nfs4_stid *s);
 void nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid);
index a2066e6dee9058ac41b3c51cfcad83f0796e0598..2726536489b19a30394226a27e7a0183690e6b4c 100644 (file)
@@ -173,7 +173,8 @@ u64 stable_page_flags(struct page *page)
        u |= kpf_copy_bit(k, KPF_ACTIVE,        PG_active);
        u |= kpf_copy_bit(k, KPF_RECLAIM,       PG_reclaim);
 
-       u |= kpf_copy_bit(k, KPF_SWAPCACHE,     PG_swapcache);
+       if (PageSwapCache(page))
+               u |= 1 << KPF_SWAPCACHE;
        u |= kpf_copy_bit(k, KPF_SWAPBACKED,    PG_swapbacked);
 
        u |= kpf_copy_bit(k, KPF_UNEVICTABLE,   PG_unevictable);
index 27c059e1760a8918a0dde19cb884c5ff4563606c..1d887efaaf718c497a233b63286914cd9a498a00 100644 (file)
@@ -280,7 +280,7 @@ static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
                                           1, id, type, PSTORE_TYPE_PMSG, 0);
 
        /* ftrace is last since it may want to dynamically allocate memory. */
-       if (!prz_ok(prz)) {
+       if (!prz_ok(prz) && cxt->fprzs) {
                if (!(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)) {
                        prz = ramoops_get_next_prz(cxt->fprzs,
                                        &cxt->ftrace_read_cnt, 1, id, type,
index 63554e9f6e0c68595943e27d2734ad4fa8271007..719db1968d8177a91028fd5e8bd6068f5d71c491 100644 (file)
@@ -9,18 +9,15 @@
 #ifndef KSYM_ALIGN
 #define KSYM_ALIGN 8
 #endif
-#ifndef KCRC_ALIGN
-#define KCRC_ALIGN 8
-#endif
 #else
 #define __put .long
 #ifndef KSYM_ALIGN
 #define KSYM_ALIGN 4
 #endif
+#endif
 #ifndef KCRC_ALIGN
 #define KCRC_ALIGN 4
 #endif
-#endif
 
 #ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
 #define KSYM(name) _##name
@@ -52,7 +49,11 @@ KSYM(__kstrtab_\name):
        .section ___kcrctab\sec+\name,"a"
        .balign KCRC_ALIGN
 KSYM(__kcrctab_\name):
-       __put KSYM(__crc_\name)
+#if defined(CONFIG_MODULE_REL_CRCS)
+       .long KSYM(__crc_\name) - .
+#else
+       .long KSYM(__crc_\name)
+#endif
        .weak KSYM(__crc_\name)
        .previous
 #endif
index 192016e2b5183c7a22fd13fd21372de5862f44d8..9c4ee144b5f6b799baf92a9722cddcaee3eb2b4f 100644 (file)
@@ -517,6 +517,7 @@ struct drm_device {
        struct drm_minor *control;              /**< Control node */
        struct drm_minor *primary;              /**< Primary node */
        struct drm_minor *render;               /**< Render node */
+       bool registered;
 
        /* currently active master for this device. Protected by master_mutex */
        struct drm_master *master;
index a9b95246e26efcf3d44cd5afc85f7031f0fa77fe..045a97cbeba24f44eb1b1d5582b4145801fcafdd 100644 (file)
@@ -381,6 +381,8 @@ struct drm_connector_funcs {
         * core drm connector interfaces. Everything added from this callback
         * should be unregistered in the early_unregister callback.
         *
+        * This is called while holding drm_connector->mutex.
+        *
         * Returns:
         *
         * 0 on success, or a negative error code on failure.
@@ -395,6 +397,8 @@ struct drm_connector_funcs {
         * late_register(). It is called from drm_connector_unregister(),
         * early in the driver unload sequence to disable userspace access
         * before data structures are torndown.
+        *
+        * This is called while holding drm_connector->mutex.
         */
        void (*early_unregister)(struct drm_connector *connector);
 
@@ -559,7 +563,6 @@ struct drm_cmdline_mode {
  * @interlace_allowed: can this connector handle interlaced modes?
  * @doublescan_allowed: can this connector handle doublescan?
  * @stereo_allowed: can this connector handle stereo modes?
- * @registered: is this connector exposed (registered) with userspace?
  * @modes: modes available on this connector (from fill_modes() + user)
  * @status: one of the drm_connector_status enums (connected, not, or unknown)
  * @probed_modes: list of modes derived directly from the display
@@ -607,6 +610,13 @@ struct drm_connector {
 
        char *name;
 
+       /**
+        * @mutex: Lock for general connector state, but currently only protects
+        * @registered. Most of the connector state is still protected by the
+        * mutex in &drm_mode_config.
+        */
+       struct mutex mutex;
+
        /**
         * @index: Compacted connector index, which matches the position inside
         * the mode_config.list for drivers not supporting hot-add/removing. Can
@@ -620,6 +630,10 @@ struct drm_connector {
        bool interlace_allowed;
        bool doublescan_allowed;
        bool stereo_allowed;
+       /**
+        * @registered: Is this connector exposed (registered) with userspace?
+        * Protected by @mutex.
+        */
        bool registered;
        struct list_head modes; /* list of modes on this connector */
 
index d67ab83823adc81234b0380feca85877996817db..79591c3660cc1dc61e1e3f4b501c470161c41eb9 100644 (file)
@@ -243,12 +243,10 @@ static inline int block_page_mkwrite_return(int err)
 {
        if (err == 0)
                return VM_FAULT_LOCKED;
-       if (err == -EFAULT)
+       if (err == -EFAULT || err == -EAGAIN)
                return VM_FAULT_NOPAGE;
        if (err == -ENOMEM)
                return VM_FAULT_OOM;
-       if (err == -EAGAIN)
-               return VM_FAULT_RETRY;
        /* -ENOSPC, -EDQUOT, -EIO ... */
        return VM_FAULT_SIGBUS;
 }
index a0875001b13c84ad70a9b2909654e9ffb6824c58..df08a41d5be5f26cfa4cdc74935f5eae7fa51385 100644 (file)
@@ -45,10 +45,9 @@ struct can_proto {
 extern int  can_proto_register(const struct can_proto *cp);
 extern void can_proto_unregister(const struct can_proto *cp);
 
-extern int  can_rx_register(struct net_device *dev, canid_t can_id,
-                           canid_t mask,
-                           void (*func)(struct sk_buff *, void *),
-                           void *data, char *ident);
+int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
+                   void (*func)(struct sk_buff *, void *),
+                   void *data, char *ident, struct sock *sk);
 
 extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
                              canid_t mask,
index d936a0021839cca651e19ec43e71b8f21cb69cf0..921acaaa16017979df0722fb9803b204d77c0be0 100644 (file)
@@ -8,9 +8,7 @@ enum cpuhp_state {
        CPUHP_CREATE_THREADS,
        CPUHP_PERF_PREPARE,
        CPUHP_PERF_X86_PREPARE,
-       CPUHP_PERF_X86_UNCORE_PREP,
        CPUHP_PERF_X86_AMD_UNCORE_PREP,
-       CPUHP_PERF_X86_RAPL_PREP,
        CPUHP_PERF_BFIN,
        CPUHP_PERF_POWER,
        CPUHP_PERF_SUPERH,
@@ -86,7 +84,6 @@ enum cpuhp_state {
        CPUHP_AP_IRQ_ARMADA_XP_STARTING,
        CPUHP_AP_IRQ_BCM2836_STARTING,
        CPUHP_AP_ARM_MVEBU_COHERENCY,
-       CPUHP_AP_PERF_X86_UNCORE_STARTING,
        CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
        CPUHP_AP_PERF_X86_STARTING,
        CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
index c717f5ea88cb7e7f04471f34ac14bcf9bc75a4d1..b3d2c1a89ac48fb8257c32cf070e551f1cc22c4e 100644 (file)
@@ -560,7 +560,7 @@ static inline void cpumask_copy(struct cpumask *dstp,
 static inline int cpumask_parse_user(const char __user *buf, int len,
                                     struct cpumask *dstp)
 {
-       return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids);
+       return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
 }
 
 /**
@@ -575,7 +575,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
                                     struct cpumask *dstp)
 {
        return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
-                                    nr_cpu_ids);
+                                    nr_cpumask_bits);
 }
 
 /**
@@ -590,7 +590,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
        char *nl = strchr(buf, '\n');
        unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf);
 
-       return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids);
+       return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
 }
 
 /**
@@ -602,7 +602,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
  */
 static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
 {
-       return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpu_ids);
+       return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
 }
 
 /**
index 2a0f61fbc7310e61f5927c31250e208d217c3e26..1a1dfdb2a5c6d8806d11e2cd58722e304f293054 100644 (file)
@@ -43,12 +43,19 @@ extern struct module __this_module;
 #ifdef CONFIG_MODVERSIONS
 /* Mark the CRC weak since genksyms apparently decides not to
  * generate a checksums for some symbols */
+#if defined(CONFIG_MODULE_REL_CRCS)
 #define __CRC_SYMBOL(sym, sec)                                         \
-       extern __visible void *__crc_##sym __attribute__((weak));       \
-       static const unsigned long __kcrctab_##sym                      \
-       __used                                                          \
-       __attribute__((section("___kcrctab" sec "+" #sym), used))       \
-       = (unsigned long) &__crc_##sym;
+       asm("   .section \"___kcrctab" sec "+" #sym "\", \"a\"  \n"     \
+           "   .weak   " VMLINUX_SYMBOL_STR(__crc_##sym) "     \n"     \
+           "   .long   " VMLINUX_SYMBOL_STR(__crc_##sym) " - . \n"     \
+           "   .previous                                       \n");
+#else
+#define __CRC_SYMBOL(sym, sec)                                         \
+       asm("   .section \"___kcrctab" sec "+" #sym "\", \"a\"  \n"     \
+           "   .weak   " VMLINUX_SYMBOL_STR(__crc_##sym) "     \n"     \
+           "   .long   " VMLINUX_SYMBOL_STR(__crc_##sym) "     \n"     \
+           "   .previous                                       \n");
+#endif
 #else
 #define __CRC_SYMBOL(sym, sec)
 #endif
index 13ba552e6c094e82ee8b952b26d9f0bf5eeebc39..4c467ef50159db533ecb567a86eeaf6e1e81e632 100644 (file)
@@ -360,6 +360,7 @@ struct fscache_object {
 #define FSCACHE_OBJECT_IS_AVAILABLE    5       /* T if object has become active */
 #define FSCACHE_OBJECT_RETIRED         6       /* T if object was retired on relinquishment */
 #define FSCACHE_OBJECT_KILLED_BY_CACHE 7       /* T if object was killed by the cache */
+#define FSCACHE_OBJECT_RUN_AFTER_DEAD  8       /* T if object has been dispatched after death */
 
        struct list_head        cache_link;     /* link in cache->object_list */
        struct hlist_node       cookie_link;    /* link in cookie->backing_objects */
index 42fe43fb0c80605f9553c746f04310f43914f683..183efde54269e18c5d4d1eda7dc448717fe85800 100644 (file)
@@ -128,6 +128,7 @@ struct hv_ring_buffer_info {
        u32 ring_data_startoffset;
        u32 priv_write_index;
        u32 priv_read_index;
+       u32 cached_read_index;
 };
 
 /*
@@ -180,6 +181,19 @@ static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi)
        return write;
 }
 
+static inline u32 hv_get_cached_bytes_to_write(
+       const struct hv_ring_buffer_info *rbi)
+{
+       u32 read_loc, write_loc, dsize, write;
+
+       dsize = rbi->ring_datasize;
+       read_loc = rbi->cached_read_index;
+       write_loc = rbi->ring_buffer->write_index;
+
+       write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
+               read_loc - write_loc;
+       return write;
+}
 /*
  * VMBUS version is 32 bit entity broken up into
  * two 16 bit quantities: major_number. minor_number.
@@ -1488,7 +1502,7 @@ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
 
 static inline  void hv_signal_on_read(struct vmbus_channel *channel)
 {
-       u32 cur_write_sz;
+       u32 cur_write_sz, cached_write_sz;
        u32 pending_sz;
        struct hv_ring_buffer_info *rbi = &channel->inbound;
 
@@ -1512,12 +1526,24 @@ static inline  void hv_signal_on_read(struct vmbus_channel *channel)
 
        cur_write_sz = hv_get_bytes_to_write(rbi);
 
-       if (cur_write_sz >= pending_sz)
+       if (cur_write_sz < pending_sz)
+               return;
+
+       cached_write_sz = hv_get_cached_bytes_to_write(rbi);
+       if (cached_write_sz < pending_sz)
                vmbus_setevent(channel);
 
        return;
 }
 
+static inline void
+init_cached_read_index(struct vmbus_channel *channel)
+{
+       struct hv_ring_buffer_info *rbi = &channel->inbound;
+
+       rbi->cached_read_index = rbi->ring_buffer->read_index;
+}
+
 /*
  * An API to support in-place processing of incoming VMBUS packets.
  */
@@ -1569,6 +1595,8 @@ static inline void put_pkt_raw(struct vmbus_channel *channel,
  * This call commits the read index and potentially signals the host.
  * Here is the pattern for using the "in-place" consumption APIs:
  *
+ * init_cached_read_index();
+ *
  * while (get_next_pkt_raw() {
  *     process the packet "in-place";
  *     put_pkt_raw();
index e79875574b393f33ed183fdc8f44277a49c26ba8..39e3254e5769d7da1d0f74cdfdf90bd9b40cfc8a 100644 (file)
@@ -184,6 +184,7 @@ struct irq_data {
  *
  * IRQD_TRIGGER_MASK           - Mask for the trigger type bits
  * IRQD_SETAFFINITY_PENDING    - Affinity setting is pending
+ * IRQD_ACTIVATED              - Interrupt has already been activated
  * IRQD_NO_BALANCING           - Balancing disabled for this IRQ
  * IRQD_PER_CPU                        - Interrupt is per cpu
  * IRQD_AFFINITY_SET           - Interrupt affinity was set
@@ -202,6 +203,7 @@ struct irq_data {
 enum {
        IRQD_TRIGGER_MASK               = 0xf,
        IRQD_SETAFFINITY_PENDING        = (1 <<  8),
+       IRQD_ACTIVATED                  = (1 <<  9),
        IRQD_NO_BALANCING               = (1 << 10),
        IRQD_PER_CPU                    = (1 << 11),
        IRQD_AFFINITY_SET               = (1 << 12),
@@ -312,6 +314,21 @@ static inline bool irqd_affinity_is_managed(struct irq_data *d)
        return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
 }
 
+static inline bool irqd_is_activated(struct irq_data *d)
+{
+       return __irqd_to_state(d) & IRQD_ACTIVATED;
+}
+
+static inline void irqd_set_activated(struct irq_data *d)
+{
+       __irqd_to_state(d) |= IRQD_ACTIVATED;
+}
+
+static inline void irqd_clr_activated(struct irq_data *d)
+{
+       __irqd_to_state(d) &= ~IRQD_ACTIVATED;
+}
+
 #undef __irqd_to_state
 
 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
index fd7ff3d91e6a920ff084beca09d10b5b9abba981..ef3d4f67118ce0f60789e6e749a4773754e01e87 100644 (file)
@@ -203,6 +203,17 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
  *  ... and so on.
  */
 
-#define order_base_2(n) ilog2(roundup_pow_of_two(n))
+static inline __attribute_const__
+int __order_base_2(unsigned long n)
+{
+       return n > 1 ? ilog2(n - 1) + 1 : 0;
+}
 
+#define order_base_2(n)                                \
+(                                              \
+       __builtin_constant_p(n) ? (             \
+               ((n) == 0 || (n) == 1) ? 0 :    \
+               ilog2((n) - 1) + 1) :           \
+       __order_base_2(n)                       \
+)
 #endif /* _LINUX_LOG2_H */
index c1784c0b4f3585e0d20ca8253813c31d47f11c04..134a2f69c21abf7921181af0adff033bb459edc5 100644 (file)
@@ -85,7 +85,8 @@ extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
 extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
 /* VM interface that may be used by firmware interface */
 extern int online_pages(unsigned long, unsigned long, int);
-extern int test_pages_in_a_zone(unsigned long, unsigned long);
+extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
+       unsigned long *valid_start, unsigned long *valid_end);
 extern void __offline_isolated_pages(unsigned long, unsigned long);
 
 typedef void (*online_page_callback_t)(struct page *page);
index 7c84273d60b963d44c032761cac62194e82d1198..cc7cba219b207de5536f6f9e9353d1a20201e4af 100644 (file)
@@ -346,7 +346,7 @@ struct module {
 
        /* Exported symbols */
        const struct kernel_symbol *syms;
-       const unsigned long *crcs;
+       const s32 *crcs;
        unsigned int num_syms;
 
        /* Kernel parameters. */
@@ -359,18 +359,18 @@ struct module {
        /* GPL-only exported symbols. */
        unsigned int num_gpl_syms;
        const struct kernel_symbol *gpl_syms;
-       const unsigned long *gpl_crcs;
+       const s32 *gpl_crcs;
 
 #ifdef CONFIG_UNUSED_SYMBOLS
        /* unused exported symbols. */
        const struct kernel_symbol *unused_syms;
-       const unsigned long *unused_crcs;
+       const s32 *unused_crcs;
        unsigned int num_unused_syms;
 
        /* GPL-only, unused exported symbols. */
        unsigned int num_unused_gpl_syms;
        const struct kernel_symbol *unused_gpl_syms;
-       const unsigned long *unused_gpl_crcs;
+       const s32 *unused_gpl_crcs;
 #endif
 
 #ifdef CONFIG_MODULE_SIG
@@ -382,7 +382,7 @@ struct module {
 
        /* symbols that will be GPL-only in the near future. */
        const struct kernel_symbol *gpl_future_syms;
-       const unsigned long *gpl_future_crcs;
+       const s32 *gpl_future_crcs;
        unsigned int num_gpl_future_syms;
 
        /* Exception table */
@@ -523,7 +523,7 @@ struct module *find_module(const char *name);
 
 struct symsearch {
        const struct kernel_symbol *start, *stop;
-       const unsigned long *crcs;
+       const s32 *crcs;
        enum {
                NOT_GPL_ONLY,
                GPL_ONLY,
@@ -539,7 +539,7 @@ struct symsearch {
  */
 const struct kernel_symbol *find_symbol(const char *name,
                                        struct module **owner,
-                                       const unsigned long **crc,
+                                       const s32 **crc,
                                        bool gplok,
                                        bool warn);
 
index 9bde9558b59672a866bd763039d326bde2af0f81..27914672602d9d573e6a3da271cec33ccef51b16 100644 (file)
@@ -866,11 +866,15 @@ struct netdev_xdp {
  *     of useless work if you return NETDEV_TX_BUSY.
  *     Required; cannot be NULL.
  *
- * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
- *             netdev_features_t features);
- *     Adjusts the requested feature flags according to device-specific
- *     constraints, and returns the resulting flags. Must not modify
- *     the device state.
+ * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
+ *                                        struct net_device *dev
+ *                                        netdev_features_t features);
+ *     Called by core transmit path to determine if device is capable of
+ *     performing offload operations on a given packet. This is to give
+ *     the device an opportunity to implement any restrictions that cannot
+ *     be otherwise expressed by feature flags. The check is called with
+ *     the set of features that the stack has calculated and it returns
+ *     those the driver believes to be appropriate.
  *
  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
  *                         void *accel_priv, select_queue_fallback_t fallback);
@@ -1028,6 +1032,12 @@ struct netdev_xdp {
  *     Called to release previously enslaved netdev.
  *
  *      Feature/offload setting functions.
+ * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
+ *             netdev_features_t features);
+ *     Adjusts the requested feature flags according to device-specific
+ *     constraints, and returns the resulting flags. Must not modify
+ *     the device state.
+ *
  * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
  *     Called to update device configuration to new features. Passed
  *     feature set might be less than what was returned by ndo_fix_features()).
@@ -1100,15 +1110,6 @@ struct netdev_xdp {
  *     Callback to use for xmit over the accelerated station. This
  *     is used in place of ndo_start_xmit on accelerated net
  *     devices.
- * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
- *                                        struct net_device *dev
- *                                        netdev_features_t features);
- *     Called by core transmit path to determine if device is capable of
- *     performing offload operations on a given packet. This is to give
- *     the device an opportunity to implement any restrictions that cannot
- *     be otherwise expressed by feature flags. The check is called with
- *     the set of features that the stack has calculated and it returns
- *     those the driver believes to be appropriate.
  * int (*ndo_set_tx_maxrate)(struct net_device *dev,
  *                          int queue_index, u32 maxrate);
  *     Called when a user wants to set a max-rate limitation of specific
@@ -1510,6 +1511,7 @@ enum netdev_priv_flags {
  *     @max_mtu:       Interface Maximum MTU value
  *     @type:          Interface hardware type
  *     @hard_header_len: Maximum hardware header length.
+ *     @min_header_len:  Minimum hardware header length
  *
  *     @needed_headroom: Extra headroom the hardware may need, but not in all
  *                       cases can this be guaranteed
@@ -1727,6 +1729,7 @@ struct net_device {
        unsigned int            max_mtu;
        unsigned short          type;
        unsigned short          hard_header_len;
+       unsigned short          min_header_len;
 
        unsigned short          needed_headroom;
        unsigned short          needed_tailroom;
@@ -2693,6 +2696,8 @@ static inline bool dev_validate_header(const struct net_device *dev,
 {
        if (likely(len >= dev->hard_header_len))
                return true;
+       if (len < dev->min_header_len)
+               return false;
 
        if (capable(CAP_SYS_RAWIO)) {
                memset(ll_header + len, 0, dev->hard_header_len - len);
index 1c7eec09e5eba7ae8c0cc8e82172791f992bb361..3a481a49546ef1c85d8f88bf7668f8a2c8e8c0f1 100644 (file)
@@ -204,7 +204,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
 static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 {
        unsigned long __percpu *percpu_count;
-       int ret;
+       bool ret;
 
        rcu_read_lock_sched();
 
@@ -238,7 +238,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 {
        unsigned long __percpu *percpu_count;
-       int ret = false;
+       bool ret = false;
 
        rcu_read_lock_sched();
 
index 3ebb168b9afc68ad639b5d32f6182a845c83d759..a34b141f125f0032662f147b598c9fef4fb4bcef 100644 (file)
@@ -309,6 +309,10 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
        }
 
        for (opt_iter = 6; opt_iter < opt_len;) {
+               if (opt_iter + 1 == opt_len) {
+                       err_offset = opt_iter;
+                       goto out;
+               }
                tag_len = opt[opt_iter + 1];
                if ((tag_len == 0) || (tag_len > (opt_len - opt_iter))) {
                        err_offset = opt_iter + 1;
index 7afe991e900e25838c3e66f2ff185a5226f790ff..dbf0abba33b8da21be05abf6e719f69542da80fc 100644 (file)
@@ -776,6 +776,11 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
 {
        u32 hash;
 
+       /* @flowlabel may include more than a flow label, eg, the traffic class.
+        * Here we want only the flow label value.
+        */
+       flowlabel &= IPV6_FLOWLABEL_MASK;
+
        if (flowlabel ||
            net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
            (!autolabel &&
index 73dd8764746069d48cd9ed6863f54c53864bb47a..0388b9c5f5e2c7257cc0eb19be469974becab5ed 100644 (file)
@@ -178,7 +178,10 @@ static inline int lwtunnel_valid_encap_type(u16 encap_type)
 }
 static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len)
 {
-       return -EOPNOTSUPP;
+       /* return 0 since we are not walking attr looking for
+        * RTA_ENCAP_TYPE attribute on nexthops.
+        */
+       return 0;
 }
 
 static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
index f0e867f58722f344494c98cbdbe49d7ec875099a..c4f5e6fca17cf4e0029080410202cb66ce0fad37 100644 (file)
@@ -2006,7 +2006,9 @@ void sk_reset_timer(struct sock *sk, struct timer_list *timer,
 void sk_stop_timer(struct sock *sk, struct timer_list *timer);
 
 int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
-                       unsigned int flags);
+                       unsigned int flags,
+                       void (*destructor)(struct sock *sk,
+                                          struct sk_buff *skb));
 int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 
index 43edf82e54fffce7b3d8bfed8c832aebfc60c79b..da854fb4530f1f75e9f46a8ad57ec0251217fd7b 100644 (file)
@@ -538,6 +538,7 @@ struct se_node_acl {
        char                    initiatorname[TRANSPORT_IQN_LEN];
        /* Used to signal demo mode created ACL, disabled by default */
        bool                    dynamic_node_acl;
+       bool                    dynamic_stop;
        u32                     queue_depth;
        u32                     acl_index;
        enum target_prot_type   saved_prot_type;
index f0db7788f887b9947e0e1aa78d48a9980e80bddf..3dc91a46e8b8da0b243a12a168bbf205e5a87916 100644 (file)
@@ -1384,6 +1384,8 @@ enum ethtool_link_mode_bit_indices {
        ETHTOOL_LINK_MODE_10000baseLR_Full_BIT  = 44,
        ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45,
        ETHTOOL_LINK_MODE_10000baseER_Full_BIT  = 46,
+       ETHTOOL_LINK_MODE_2500baseT_Full_BIT    = 47,
+       ETHTOOL_LINK_MODE_5000baseT_Full_BIT    = 48,
 
 
        /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
@@ -1393,7 +1395,7 @@ enum ethtool_link_mode_bit_indices {
         */
 
        __ETHTOOL_LINK_MODE_LAST
-         = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
+         = ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
 };
 
 #define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name)     \
index c396a8052f73cd91c12b676733f93b68416cd50a..052799e4d751c805de01bc8ed47c3d0a1ecde936 100644 (file)
@@ -23,14 +23,12 @@ struct ipv6_sr_hdr {
        __u8    type;
        __u8    segments_left;
        __u8    first_segment;
-       __u8    flag_1;
-       __u8    flag_2;
-       __u8    reserved;
+       __u8    flags;
+       __u16   reserved;
 
        struct in6_addr segments[0];
 };
 
-#define SR6_FLAG1_CLEANUP      (1 << 7)
 #define SR6_FLAG1_PROTECTED    (1 << 6)
 #define SR6_FLAG1_OAM          (1 << 5)
 #define SR6_FLAG1_ALERT                (1 << 4)
@@ -42,8 +40,7 @@ struct ipv6_sr_hdr {
 #define SR6_TLV_PADDING                4
 #define SR6_TLV_HMAC           5
 
-#define sr_has_cleanup(srh) ((srh)->flag_1 & SR6_FLAG1_CLEANUP)
-#define sr_has_hmac(srh) ((srh)->flag_1 & SR6_FLAG1_HMAC)
+#define sr_has_hmac(srh) ((srh)->flags & SR6_FLAG1_HMAC)
 
 struct sr6_tlv {
        __u8 type;
index dfdfe4e92d3118c0ef42b5c5d64f240eba50dee5..f4f87cff6dc6c5fc9f1a91e32c9e8d3a601f10c8 100644 (file)
@@ -37,7 +37,6 @@
 #define IB_USER_VERBS_H
 
 #include <linux/types.h>
-#include <rdma/ib_verbs.h>
 
 /*
  * Increment this value if any changes that break userspace ABI
@@ -548,11 +547,17 @@ enum {
 };
 
 enum {
-       IB_USER_LEGACY_LAST_QP_ATTR_MASK = IB_QP_DEST_QPN
+       /*
+        * This value is equal to IB_QP_DEST_QPN.
+        */
+       IB_USER_LEGACY_LAST_QP_ATTR_MASK = 1ULL << 20,
 };
 
 enum {
-       IB_USER_LAST_QP_ATTR_MASK = IB_QP_RATE_LIMIT
+       /*
+        * This value is equal to IB_QP_RATE_LIMIT.
+        */
+       IB_USER_LAST_QP_ATTR_MASK = 1ULL << 25,
 };
 
 struct ib_uverbs_ex_create_qp {
index e1a937348a3ed2bb3a76820e1ffa6a542f6aa9fb..4dd8bd232a1d4efd012fab8757887426ece9c0aa 100644 (file)
@@ -1987,6 +1987,10 @@ config MODVERSIONS
          make them incompatible with the kernel you are running.  If
          unsure, say N.
 
+config MODULE_REL_CRCS
+       bool
+       depends on MODVERSIONS
+
 config MODULE_SRCVERSION_ALL
        bool "Source checksum for all modules"
        help
index 2ee9ec3051b20774b118a57e4609f30e87bf82be..688dd02af9857e6fe739e0b4258060820acb41e3 100644 (file)
@@ -5221,6 +5221,11 @@ err_free_css:
        return ERR_PTR(err);
 }
 
+/*
+ * The returned cgroup is fully initialized including its control mask, but
+ * it isn't associated with its kernfs_node and doesn't have the control
+ * mask applied.
+ */
 static struct cgroup *cgroup_create(struct cgroup *parent)
 {
        struct cgroup_root *root = parent->root;
@@ -5288,11 +5293,6 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
 
        cgroup_propagate_control(cgrp);
 
-       /* @cgrp doesn't have dir yet so the following will only create csses */
-       ret = cgroup_apply_control_enable(cgrp);
-       if (ret)
-               goto out_destroy;
-
        return cgrp;
 
 out_cancel_ref:
@@ -5300,9 +5300,6 @@ out_cancel_ref:
 out_free_cgrp:
        kfree(cgrp);
        return ERR_PTR(ret);
-out_destroy:
-       cgroup_destroy_locked(cgrp);
-       return ERR_PTR(ret);
 }
 
 static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
index 110b38a58493ee4ba4c19763d2678dae8815e1af..e235bb991bdd8fd0d1c7bef06c2f3b3dcaa4ddaf 100644 (file)
@@ -1469,7 +1469,6 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
 static void
 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 {
-
        lockdep_assert_held(&ctx->lock);
 
        WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
@@ -1624,6 +1623,8 @@ static void perf_group_attach(struct perf_event *event)
 {
        struct perf_event *group_leader = event->group_leader, *pos;
 
+       lockdep_assert_held(&event->ctx->lock);
+
        /*
         * We can have double attach due to group movement in perf_event_open.
         */
@@ -1697,6 +1698,8 @@ static void perf_group_detach(struct perf_event *event)
        struct perf_event *sibling, *tmp;
        struct list_head *list = NULL;
 
+       lockdep_assert_held(&event->ctx->lock);
+
        /*
         * We can have double detach due to exit/hot-unplug + close.
         */
@@ -1895,9 +1898,29 @@ __perf_remove_from_context(struct perf_event *event,
  */
 static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
 {
-       lockdep_assert_held(&event->ctx->mutex);
+       struct perf_event_context *ctx = event->ctx;
+
+       lockdep_assert_held(&ctx->mutex);
 
        event_function_call(event, __perf_remove_from_context, (void *)flags);
+
+       /*
+        * The above event_function_call() can NO-OP when it hits
+        * TASK_TOMBSTONE. In that case we must already have been detached
+        * from the context (by perf_event_exit_event()) but the grouping
+        * might still be in-tact.
+        */
+       WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
+       if ((flags & DETACH_GROUP) &&
+           (event->attach_state & PERF_ATTACH_GROUP)) {
+               /*
+                * Since in that case we cannot possibly be scheduled, simply
+                * detach now.
+                */
+               raw_spin_lock_irq(&ctx->lock);
+               perf_group_detach(event);
+               raw_spin_unlock_irq(&ctx->lock);
+       }
 }
 
 /*
@@ -3464,14 +3487,15 @@ struct perf_read_data {
        int ret;
 };
 
-static int find_cpu_to_read(struct perf_event *event, int local_cpu)
+static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
 {
-       int event_cpu = event->oncpu;
        u16 local_pkg, event_pkg;
 
        if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
-               event_pkg =  topology_physical_package_id(event_cpu);
-               local_pkg =  topology_physical_package_id(local_cpu);
+               int local_cpu = smp_processor_id();
+
+               event_pkg = topology_physical_package_id(event_cpu);
+               local_pkg = topology_physical_package_id(local_cpu);
 
                if (event_pkg == local_pkg)
                        return local_cpu;
@@ -3601,7 +3625,7 @@ u64 perf_event_read_local(struct perf_event *event)
 
 static int perf_event_read(struct perf_event *event, bool group)
 {
-       int ret = 0, cpu_to_read, local_cpu;
+       int event_cpu, ret = 0;
 
        /*
         * If event is enabled and currently active on a CPU, update the
@@ -3614,21 +3638,25 @@ static int perf_event_read(struct perf_event *event, bool group)
                        .ret = 0,
                };
 
-               local_cpu = get_cpu();
-               cpu_to_read = find_cpu_to_read(event, local_cpu);
-               put_cpu();
+               event_cpu = READ_ONCE(event->oncpu);
+               if ((unsigned)event_cpu >= nr_cpu_ids)
+                       return 0;
+
+               preempt_disable();
+               event_cpu = __perf_event_read_cpu(event, event_cpu);
 
                /*
                 * Purposely ignore the smp_call_function_single() return
                 * value.
                 *
-                * If event->oncpu isn't a valid CPU it means the event got
+                * If event_cpu isn't a valid CPU it means the event got
                 * scheduled out and that will have updated the event count.
                 *
                 * Therefore, either way, we'll have an up-to-date event count
                 * after this.
                 */
-               (void)smp_call_function_single(cpu_to_read, __perf_event_read, &data, 1);
+               (void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
+               preempt_enable();
                ret = data.ret;
        } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
                struct perf_event_context *ctx = event->ctx;
@@ -6609,6 +6637,27 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
        char *buf = NULL;
        char *name;
 
+       if (vma->vm_flags & VM_READ)
+               prot |= PROT_READ;
+       if (vma->vm_flags & VM_WRITE)
+               prot |= PROT_WRITE;
+       if (vma->vm_flags & VM_EXEC)
+               prot |= PROT_EXEC;
+
+       if (vma->vm_flags & VM_MAYSHARE)
+               flags = MAP_SHARED;
+       else
+               flags = MAP_PRIVATE;
+
+       if (vma->vm_flags & VM_DENYWRITE)
+               flags |= MAP_DENYWRITE;
+       if (vma->vm_flags & VM_MAYEXEC)
+               flags |= MAP_EXECUTABLE;
+       if (vma->vm_flags & VM_LOCKED)
+               flags |= MAP_LOCKED;
+       if (vma->vm_flags & VM_HUGETLB)
+               flags |= MAP_HUGETLB;
+
        if (file) {
                struct inode *inode;
                dev_t dev;
@@ -6635,27 +6684,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
                maj = MAJOR(dev);
                min = MINOR(dev);
 
-               if (vma->vm_flags & VM_READ)
-                       prot |= PROT_READ;
-               if (vma->vm_flags & VM_WRITE)
-                       prot |= PROT_WRITE;
-               if (vma->vm_flags & VM_EXEC)
-                       prot |= PROT_EXEC;
-
-               if (vma->vm_flags & VM_MAYSHARE)
-                       flags = MAP_SHARED;
-               else
-                       flags = MAP_PRIVATE;
-
-               if (vma->vm_flags & VM_DENYWRITE)
-                       flags |= MAP_DENYWRITE;
-               if (vma->vm_flags & VM_MAYEXEC)
-                       flags |= MAP_EXECUTABLE;
-               if (vma->vm_flags & VM_LOCKED)
-                       flags |= MAP_LOCKED;
-               if (vma->vm_flags & VM_HUGETLB)
-                       flags |= MAP_HUGETLB;
-
                goto got_name;
        } else {
                if (vma->vm_ops && vma->vm_ops->name) {
index 8c0a0ae43521c7f8b9e97964912cc3ab5e10fd15..b59e6768c5e94ad831e78b539437cfd99576e780 100644 (file)
@@ -1346,6 +1346,30 @@ void irq_domain_free_irqs_parent(struct irq_domain *domain,
 }
 EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
 
+static void __irq_domain_activate_irq(struct irq_data *irq_data)
+{
+       if (irq_data && irq_data->domain) {
+               struct irq_domain *domain = irq_data->domain;
+
+               if (irq_data->parent_data)
+                       __irq_domain_activate_irq(irq_data->parent_data);
+               if (domain->ops->activate)
+                       domain->ops->activate(domain, irq_data);
+       }
+}
+
+static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
+{
+       if (irq_data && irq_data->domain) {
+               struct irq_domain *domain = irq_data->domain;
+
+               if (domain->ops->deactivate)
+                       domain->ops->deactivate(domain, irq_data);
+               if (irq_data->parent_data)
+                       __irq_domain_deactivate_irq(irq_data->parent_data);
+       }
+}
+
 /**
  * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
  *                          interrupt
@@ -1356,13 +1380,9 @@ EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
  */
 void irq_domain_activate_irq(struct irq_data *irq_data)
 {
-       if (irq_data && irq_data->domain) {
-               struct irq_domain *domain = irq_data->domain;
-
-               if (irq_data->parent_data)
-                       irq_domain_activate_irq(irq_data->parent_data);
-               if (domain->ops->activate)
-                       domain->ops->activate(domain, irq_data);
+       if (!irqd_is_activated(irq_data)) {
+               __irq_domain_activate_irq(irq_data);
+               irqd_set_activated(irq_data);
        }
 }
 
@@ -1376,13 +1396,9 @@ void irq_domain_activate_irq(struct irq_data *irq_data)
  */
 void irq_domain_deactivate_irq(struct irq_data *irq_data)
 {
-       if (irq_data && irq_data->domain) {
-               struct irq_domain *domain = irq_data->domain;
-
-               if (domain->ops->deactivate)
-                       domain->ops->deactivate(domain, irq_data);
-               if (irq_data->parent_data)
-                       irq_domain_deactivate_irq(irq_data->parent_data);
+       if (irqd_is_activated(irq_data)) {
+               __irq_domain_deactivate_irq(irq_data);
+               irqd_clr_activated(irq_data);
        }
 }
 
index 38d4270925d4d13619d725052aa3f9844f23bc96..3d8f126208e3ae04eeff3fd1b1e00044c0e3d0d2 100644 (file)
@@ -389,16 +389,16 @@ extern const struct kernel_symbol __start___ksymtab_gpl[];
 extern const struct kernel_symbol __stop___ksymtab_gpl[];
 extern const struct kernel_symbol __start___ksymtab_gpl_future[];
 extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
-extern const unsigned long __start___kcrctab[];
-extern const unsigned long __start___kcrctab_gpl[];
-extern const unsigned long __start___kcrctab_gpl_future[];
+extern const s32 __start___kcrctab[];
+extern const s32 __start___kcrctab_gpl[];
+extern const s32 __start___kcrctab_gpl_future[];
 #ifdef CONFIG_UNUSED_SYMBOLS
 extern const struct kernel_symbol __start___ksymtab_unused[];
 extern const struct kernel_symbol __stop___ksymtab_unused[];
 extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
 extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
-extern const unsigned long __start___kcrctab_unused[];
-extern const unsigned long __start___kcrctab_unused_gpl[];
+extern const s32 __start___kcrctab_unused[];
+extern const s32 __start___kcrctab_unused_gpl[];
 #endif
 
 #ifndef CONFIG_MODVERSIONS
@@ -497,7 +497,7 @@ struct find_symbol_arg {
 
        /* Output */
        struct module *owner;
-       const unsigned long *crc;
+       const s32 *crc;
        const struct kernel_symbol *sym;
 };
 
@@ -563,7 +563,7 @@ static bool find_symbol_in_section(const struct symsearch *syms,
  * (optional) module which owns it.  Needs preempt disabled or module_mutex. */
 const struct kernel_symbol *find_symbol(const char *name,
                                        struct module **owner,
-                                       const unsigned long **crc,
+                                       const s32 **crc,
                                        bool gplok,
                                        bool warn)
 {
@@ -1249,23 +1249,17 @@ static int try_to_force_load(struct module *mod, const char *reason)
 }
 
 #ifdef CONFIG_MODVERSIONS
-/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
-static unsigned long maybe_relocated(unsigned long crc,
-                                    const struct module *crc_owner)
+
+static u32 resolve_rel_crc(const s32 *crc)
 {
-#ifdef ARCH_RELOCATES_KCRCTAB
-       if (crc_owner == NULL)
-               return crc - (unsigned long)reloc_start;
-#endif
-       return crc;
+       return *(u32 *)((void *)crc + *crc);
 }
 
 static int check_version(Elf_Shdr *sechdrs,
                         unsigned int versindex,
                         const char *symname,
                         struct module *mod,
-                        const unsigned long *crc,
-                        const struct module *crc_owner)
+                        const s32 *crc)
 {
        unsigned int i, num_versions;
        struct modversion_info *versions;
@@ -1283,13 +1277,19 @@ static int check_version(Elf_Shdr *sechdrs,
                / sizeof(struct modversion_info);
 
        for (i = 0; i < num_versions; i++) {
+               u32 crcval;
+
                if (strcmp(versions[i].name, symname) != 0)
                        continue;
 
-               if (versions[i].crc == maybe_relocated(*crc, crc_owner))
+               if (IS_ENABLED(CONFIG_MODULE_REL_CRCS))
+                       crcval = resolve_rel_crc(crc);
+               else
+                       crcval = *crc;
+               if (versions[i].crc == crcval)
                        return 1;
-               pr_debug("Found checksum %lX vs module %lX\n",
-                      maybe_relocated(*crc, crc_owner), versions[i].crc);
+               pr_debug("Found checksum %X vs module %lX\n",
+                        crcval, versions[i].crc);
                goto bad_version;
        }
 
@@ -1307,7 +1307,7 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
                                          unsigned int versindex,
                                          struct module *mod)
 {
-       const unsigned long *crc;
+       const s32 *crc;
 
        /*
         * Since this should be found in kernel (which can't be removed), no
@@ -1321,8 +1321,7 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
        }
        preempt_enable();
        return check_version(sechdrs, versindex,
-                            VMLINUX_SYMBOL_STR(module_layout), mod, crc,
-                            NULL);
+                            VMLINUX_SYMBOL_STR(module_layout), mod, crc);
 }
 
 /* First part is kernel version, which we ignore if module has crcs. */
@@ -1340,8 +1339,7 @@ static inline int check_version(Elf_Shdr *sechdrs,
                                unsigned int versindex,
                                const char *symname,
                                struct module *mod,
-                               const unsigned long *crc,
-                               const struct module *crc_owner)
+                               const s32 *crc)
 {
        return 1;
 }
@@ -1368,7 +1366,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod,
 {
        struct module *owner;
        const struct kernel_symbol *sym;
-       const unsigned long *crc;
+       const s32 *crc;
        int err;
 
        /*
@@ -1383,8 +1381,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod,
        if (!sym)
                goto unlock;
 
-       if (!check_version(info->sechdrs, info->index.vers, name, mod, crc,
-                          owner)) {
+       if (!check_version(info->sechdrs, info->index.vers, name, mod, crc)) {
                sym = ERR_PTR(-EINVAL);
                goto getname;
        }
index b6e4c16377c708027c1c8c52914106fb71e3ca93..9c15a9124e83b50661414fa425393cdb51866313 100644 (file)
@@ -18,10 +18,8 @@ void print_stack_trace(struct stack_trace *trace, int spaces)
        if (WARN_ON(!trace->entries))
                return;
 
-       for (i = 0; i < trace->nr_entries; i++) {
-               printk("%*c", 1 + spaces, ' ');
-               print_ip_sym(trace->entries[i]);
-       }
+       for (i = 0; i < trace->nr_entries; i++)
+               printk("%*c%pS\n", 1 + spaces, ' ', (void *)trace->entries[i]);
 }
 EXPORT_SYMBOL_GPL(print_stack_trace);
 
@@ -29,7 +27,6 @@ int snprint_stack_trace(char *buf, size_t size,
                        struct stack_trace *trace, int spaces)
 {
        int i;
-       unsigned long ip;
        int generated;
        int total = 0;
 
@@ -37,9 +34,8 @@ int snprint_stack_trace(char *buf, size_t size,
                return 0;
 
        for (i = 0; i < trace->nr_entries; i++) {
-               ip = trace->entries[i];
-               generated = snprintf(buf, size, "%*c[<%p>] %pS\n",
-                               1 + spaces, ' ', (void *) ip, (void *) ip);
+               generated = snprintf(buf, size, "%*c%pS\n", 1 + spaces, ' ',
+                                    (void *)trace->entries[i]);
 
                total += generated;
 
index 74e0388cc88d4d17b340d102ec8e053257d6ef55..fc6f740d02771284d18791633c6740861574a105 100644 (file)
@@ -725,6 +725,11 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
                 */
                if (delta == 0) {
                        tick_nohz_restart(ts, now);
+                       /*
+                        * Make sure next tick stop doesn't get fooled by past
+                        * clock deadline
+                        */
+                       ts->next_tick = 0;
                        goto out;
                }
        }
index 775569ec50d03fbf0ca4f755f79d106a63acaf1a..af344a1bf0d0e6270e5e659ffa160753e148cdd9 100644 (file)
@@ -266,7 +266,7 @@ out:
 static struct cpumask save_cpumask;
 static bool disable_migrate;
 
-static void move_to_next_cpu(void)
+static void move_to_next_cpu(bool initmask)
 {
        static struct cpumask *current_mask;
        int next_cpu;
@@ -275,7 +275,7 @@ static void move_to_next_cpu(void)
                return;
 
        /* Just pick the first CPU on first iteration */
-       if (!current_mask) {
+       if (initmask) {
                current_mask = &save_cpumask;
                get_online_cpus();
                cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
@@ -330,10 +330,12 @@ static void move_to_next_cpu(void)
 static int kthread_fn(void *data)
 {
        u64 interval;
+       bool initmask = true;
 
        while (!kthread_should_stop()) {
 
-               move_to_next_cpu();
+               move_to_next_cpu(initmask);
+               initmask = false;
 
                local_irq_disable();
                get_sample();
index a133ecd741e437d938ca377cf5e4358bcb586aa4..7ad9e53ad174bc6cdb0f99490f87e419e9381b02 100644 (file)
@@ -1372,7 +1372,7 @@ kprobe_trace_selftest_target(int a1, int a2, int a3, int a4, int a5, int a6)
        return a1 + a2 + a3 + a4 + a5 + a6;
 }
 
-static struct __init trace_event_file *
+static __init struct trace_event_file *
 find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
 {
        struct trace_event_file *file;
index 4bbd38ec37886d3d104e3d37dc80d101ab3767ac..95c6336fc2b33c6ea47611e9f9974d8521bc08fc 100644 (file)
@@ -227,11 +227,10 @@ static __init int user_namespace_sysctl_init(void)
         * properly.
         */
        user_header = register_sysctl("user", empty);
+       kmemleak_ignore(user_header);
        BUG_ON(!user_header);
        BUG_ON(!setup_userns_sysctls(&init_user_ns));
 #endif
        return 0;
 }
 subsys_initcall(user_namespace_sysctl_init);
-
-
index b772a33ef640ab0d6770bb3d249a6fe6f16eeebc..3f9afded581be1a013bda4db2c0ec3a721323364 100644 (file)
@@ -1791,6 +1791,11 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
 
                cond_resched();
 find_page:
+               if (fatal_signal_pending(current)) {
+                       error = -EINTR;
+                       goto out;
+               }
+
                page = find_get_page(mapping, index);
                if (!page) {
                        page_cache_sync_readahead(mapping,
index b82b3e2151574ae1abbb2cd57624114727975410..f479365530b6484bbd5cae42064521fed362961e 100644 (file)
@@ -13,6 +13,7 @@
  *
  */
 
+#include <linux/ftrace.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/printk.h>
@@ -300,6 +301,8 @@ void kasan_report(unsigned long addr, size_t size,
        if (likely(!kasan_report_enabled()))
                return;
 
+       disable_trace_on_warning();
+
        info.access_addr = (void *)addr;
        info.access_size = size;
        info.is_write = is_write;
index ca2723d4733849eab01b323a50e6b1bc609e308c..b8c11e063ff0746316fb792f4fe1dde0094cb828 100644 (file)
@@ -1483,17 +1483,20 @@ bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
 }
 
 /*
- * Confirm all pages in a range [start, end) is belongs to the same zone.
+ * Confirm all pages in a range [start, end) belong to the same zone.
+ * When true, return its valid [start, end).
  */
-int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
+int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
+                        unsigned long *valid_start, unsigned long *valid_end)
 {
        unsigned long pfn, sec_end_pfn;
+       unsigned long start, end;
        struct zone *zone = NULL;
        struct page *page;
        int i;
-       for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn);
+       for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
             pfn < end_pfn;
-            pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) {
+            pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
                /* Make sure the memory section is present first */
                if (!present_section_nr(pfn_to_section_nr(pfn)))
                        continue;
@@ -1509,10 +1512,20 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
                        page = pfn_to_page(pfn + i);
                        if (zone && page_zone(page) != zone)
                                return 0;
+                       if (!zone)
+                               start = pfn + i;
                        zone = page_zone(page);
+                       end = pfn + MAX_ORDER_NR_PAGES;
                }
        }
-       return 1;
+
+       if (zone) {
+               *valid_start = start;
+               *valid_end = end;
+               return 1;
+       } else {
+               return 0;
+       }
 }
 
 /*
@@ -1839,6 +1852,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
        long offlined_pages;
        int ret, drain, retry_max, node;
        unsigned long flags;
+       unsigned long valid_start, valid_end;
        struct zone *zone;
        struct memory_notify arg;
 
@@ -1849,10 +1863,10 @@ static int __ref __offline_pages(unsigned long start_pfn,
                return -EINVAL;
        /* This makes hotplug much easier...and readable.
           we assume this for now. .*/
-       if (!test_pages_in_a_zone(start_pfn, end_pfn))
+       if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
                return -EINVAL;
 
-       zone = page_zone(pfn_to_page(start_pfn));
+       zone = page_zone(pfn_to_page(valid_start));
        node = zone_to_nid(zone);
        nr_pages = end_pfn - start_pfn;
 
index bb53285a1d99666676e85697330f1a052f7c3cc0..3a7587a0314dc73fb4929a824a74f9b8948ea502 100644 (file)
@@ -415,6 +415,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
                struct shrink_control *sc, unsigned long nr_to_split)
 {
        LIST_HEAD(list), *pos, *next;
+       LIST_HEAD(to_remove);
        struct inode *inode;
        struct shmem_inode_info *info;
        struct page *page;
@@ -441,9 +442,8 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
                /* Check if there's anything to gain */
                if (round_up(inode->i_size, PAGE_SIZE) ==
                                round_up(inode->i_size, HPAGE_PMD_SIZE)) {
-                       list_del_init(&info->shrinklist);
+                       list_move(&info->shrinklist, &to_remove);
                        removed++;
-                       iput(inode);
                        goto next;
                }
 
@@ -454,6 +454,13 @@ next:
        }
        spin_unlock(&sbinfo->shrinklist_lock);
 
+       list_for_each_safe(pos, next, &to_remove) {
+               info = list_entry(pos, struct shmem_inode_info, shrinklist);
+               inode = &info->vfs_inode;
+               list_del_init(&info->shrinklist);
+               iput(inode);
+       }
+
        list_for_each_safe(pos, next, &list) {
                int ret;
 
index 7aa6f433f4de554d308e774d9e9b40507c6ab48a..7ec0a965c6a3a366550b2500f9880f65e5025138 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1422,6 +1422,10 @@ static int init_cache_random_seq(struct kmem_cache *s)
        int err;
        unsigned long i, count = oo_objects(s->oo);
 
+       /* Bailout if already initialised */
+       if (s->random_seq)
+               return 0;
+
        err = cache_random_seq_create(s, count, GFP_KERNEL);
        if (err) {
                pr_err("SLUB: Unable to initialize free list for %s\n",
index 067a0d62f31841d16913d36e38531a277ab59b01..cabf09e0128beebdee2b8a959361fe6464fb3469 100644 (file)
@@ -78,7 +78,13 @@ static u64 zswap_duplicate_entry;
 
 /* Enable/disable zswap (disabled by default) */
 static bool zswap_enabled;
-module_param_named(enabled, zswap_enabled, bool, 0644);
+static int zswap_enabled_param_set(const char *,
+                                  const struct kernel_param *);
+static struct kernel_param_ops zswap_enabled_param_ops = {
+       .set =          zswap_enabled_param_set,
+       .get =          param_get_bool,
+};
+module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
 
 /* Crypto compressor to use */
 #define ZSWAP_COMPRESSOR_DEFAULT "lzo"
@@ -176,6 +182,9 @@ static atomic_t zswap_pools_count = ATOMIC_INIT(0);
 /* used by param callback function */
 static bool zswap_init_started;
 
+/* fatal error during init */
+static bool zswap_init_failed;
+
 /*********************************
 * helpers and fwd declarations
 **********************************/
@@ -624,6 +633,11 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
        char *s = strstrip((char *)val);
        int ret;
 
+       if (zswap_init_failed) {
+               pr_err("can't set param, initialization failed\n");
+               return -ENODEV;
+       }
+
        /* no change required */
        if (!strcmp(s, *(char **)kp->arg))
                return 0;
@@ -703,6 +717,17 @@ static int zswap_zpool_param_set(const char *val,
        return __zswap_param_set(val, kp, NULL, zswap_compressor);
 }
 
+static int zswap_enabled_param_set(const char *val,
+                                  const struct kernel_param *kp)
+{
+       if (zswap_init_failed) {
+               pr_err("can't enable, initialization failed\n");
+               return -ENODEV;
+       }
+
+       return param_set_bool(val, kp);
+}
+
 /*********************************
 * writeback code
 **********************************/
@@ -1201,6 +1226,9 @@ hp_fail:
 dstmem_fail:
        zswap_entry_cache_destroy();
 cache_fail:
+       /* if built-in, we aren't unloaded on failure; don't allow use */
+       zswap_init_failed = true;
+       zswap_enabled = false;
        return -ENOMEM;
 }
 /* must be late so crypto has time to come up */
index 1108079d934f8383a599d7997b08100fca0465e9..5488e4a6ccd062e6f6e7e2b841dde5ef055d4337 100644 (file)
@@ -445,6 +445,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
  * @func: callback function on filter match
  * @data: returned parameter for callback function
  * @ident: string for calling module identification
+ * @sk: socket pointer (might be NULL)
  *
  * Description:
  *  Invokes the callback function with the received sk_buff and the given
@@ -468,7 +469,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
  */
 int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
                    void (*func)(struct sk_buff *, void *), void *data,
-                   char *ident)
+                   char *ident, struct sock *sk)
 {
        struct receiver *r;
        struct hlist_head *rl;
@@ -496,6 +497,7 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
                r->func    = func;
                r->data    = data;
                r->ident   = ident;
+               r->sk      = sk;
 
                hlist_add_head_rcu(&r->list, rl);
                d->entries++;
@@ -520,8 +522,11 @@ EXPORT_SYMBOL(can_rx_register);
 static void can_rx_delete_receiver(struct rcu_head *rp)
 {
        struct receiver *r = container_of(rp, struct receiver, rcu);
+       struct sock *sk = r->sk;
 
        kmem_cache_free(rcv_cache, r);
+       if (sk)
+               sock_put(sk);
 }
 
 /**
@@ -596,8 +601,11 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
        spin_unlock(&can_rcvlists_lock);
 
        /* schedule the receiver item for deletion */
-       if (r)
+       if (r) {
+               if (r->sk)
+                       sock_hold(r->sk);
                call_rcu(&r->rcu, can_rx_delete_receiver);
+       }
 }
 EXPORT_SYMBOL(can_rx_unregister);
 
index fca0fe9fc45a497cdf3da82d5414e846e7cc61b7..b86f5129e8385fe84ef671bb914e8e05c2977ca0 100644 (file)
 
 struct receiver {
        struct hlist_node list;
-       struct rcu_head rcu;
        canid_t can_id;
        canid_t mask;
        unsigned long matches;
        void (*func)(struct sk_buff *, void *);
        void *data;
        char *ident;
+       struct sock *sk;
+       struct rcu_head rcu;
 };
 
 #define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
index 21ac75390e3d64f795faad074b515d34ce0bbfa3..95d13b233c65161cf3595a8b0036207f5c2892e3 100644 (file)
@@ -734,14 +734,23 @@ static struct bcm_op *bcm_find_op(struct list_head *ops,
 
 static void bcm_remove_op(struct bcm_op *op)
 {
-       hrtimer_cancel(&op->timer);
-       hrtimer_cancel(&op->thrtimer);
-
-       if (op->tsklet.func)
-               tasklet_kill(&op->tsklet);
+       if (op->tsklet.func) {
+               while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) ||
+                      test_bit(TASKLET_STATE_RUN, &op->tsklet.state) ||
+                      hrtimer_active(&op->timer)) {
+                       hrtimer_cancel(&op->timer);
+                       tasklet_kill(&op->tsklet);
+               }
+       }
 
-       if (op->thrtsklet.func)
-               tasklet_kill(&op->thrtsklet);
+       if (op->thrtsklet.func) {
+               while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) ||
+                      test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) ||
+                      hrtimer_active(&op->thrtimer)) {
+                       hrtimer_cancel(&op->thrtimer);
+                       tasklet_kill(&op->thrtsklet);
+               }
+       }
 
        if ((op->frames) && (op->frames != &op->sframe))
                kfree(op->frames);
@@ -1216,7 +1225,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
                                err = can_rx_register(dev, op->can_id,
                                                      REGMASK(op->can_id),
                                                      bcm_rx_handler, op,
-                                                     "bcm");
+                                                     "bcm", sk);
 
                                op->rx_reg_dev = dev;
                                dev_put(dev);
@@ -1225,7 +1234,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
                } else
                        err = can_rx_register(NULL, op->can_id,
                                              REGMASK(op->can_id),
-                                             bcm_rx_handler, op, "bcm");
+                                             bcm_rx_handler, op, "bcm", sk);
                if (err) {
                        /* this bcm rx op is broken -> remove it */
                        list_del(&op->list);
index a54ab0c821048ab2034bf32cef3c1f35e0dc82a5..7056a1a2bb70098e691ce557f05e5bc1f27cb42f 100644 (file)
@@ -442,7 +442,7 @@ static inline int cgw_register_filter(struct cgw_job *gwj)
 {
        return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id,
                               gwj->ccgw.filter.can_mask, can_can_gw_rcv,
-                              gwj, "gw");
+                              gwj, "gw", NULL);
 }
 
 static inline void cgw_unregister_filter(struct cgw_job *gwj)
index b075f028d7e23958e9433a4b19f4475ad930b547..6dc546a06673ff41fc121c546ebd0567bb0da05f 100644 (file)
@@ -190,7 +190,7 @@ static int raw_enable_filters(struct net_device *dev, struct sock *sk,
        for (i = 0; i < count; i++) {
                err = can_rx_register(dev, filter[i].can_id,
                                      filter[i].can_mask,
-                                     raw_rcv, sk, "raw");
+                                     raw_rcv, sk, "raw", sk);
                if (err) {
                        /* clean up successfully registered filters */
                        while (--i >= 0)
@@ -211,7 +211,7 @@ static int raw_enable_errfilter(struct net_device *dev, struct sock *sk,
 
        if (err_mask)
                err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG,
-                                     raw_rcv, sk, "raw");
+                                     raw_rcv, sk, "raw", sk);
 
        return err;
 }
index 662bea5871656f190a61e35b3c5cd21c2f132441..ea633342ab0d046cbc49e55b679440ef9e015c2d 100644 (file)
@@ -332,7 +332,9 @@ void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len)
 EXPORT_SYMBOL(__skb_free_datagram_locked);
 
 int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
-                       unsigned int flags)
+                       unsigned int flags,
+                       void (*destructor)(struct sock *sk,
+                                          struct sk_buff *skb))
 {
        int err = 0;
 
@@ -342,6 +344,8 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
                if (skb == skb_peek(&sk->sk_receive_queue)) {
                        __skb_unlink(skb, &sk->sk_receive_queue);
                        atomic_dec(&skb->users);
+                       if (destructor)
+                               destructor(sk, skb);
                        err = 0;
                }
                spin_unlock_bh(&sk->sk_receive_queue.lock);
@@ -375,7 +379,7 @@ EXPORT_SYMBOL(__sk_queue_drop_skb);
 
 int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
 {
-       int err = __sk_queue_drop_skb(sk, skb, flags);
+       int err = __sk_queue_drop_skb(sk, skb, flags, NULL);
 
        kfree_skb(skb);
        sk_mem_reclaim_partial(sk);
index 7f218e095361520d11c243d650e053321ea7274f..29101c98399f40b6b8e42c31a255d8f1fb6bd7a1 100644 (file)
@@ -1695,24 +1695,19 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue);
 
 static struct static_key netstamp_needed __read_mostly;
 #ifdef HAVE_JUMP_LABEL
-/* We are not allowed to call static_key_slow_dec() from irq context
- * If net_disable_timestamp() is called from irq context, defer the
- * static_key_slow_dec() calls.
- */
 static atomic_t netstamp_needed_deferred;
-#endif
-
-void net_enable_timestamp(void)
+static void netstamp_clear(struct work_struct *work)
 {
-#ifdef HAVE_JUMP_LABEL
        int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
 
-       if (deferred) {
-               while (--deferred)
-                       static_key_slow_dec(&netstamp_needed);
-               return;
-       }
+       while (deferred--)
+               static_key_slow_dec(&netstamp_needed);
+}
+static DECLARE_WORK(netstamp_work, netstamp_clear);
 #endif
+
+void net_enable_timestamp(void)
+{
        static_key_slow_inc(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_enable_timestamp);
@@ -1720,12 +1715,12 @@ EXPORT_SYMBOL(net_enable_timestamp);
 void net_disable_timestamp(void)
 {
 #ifdef HAVE_JUMP_LABEL
-       if (in_interrupt()) {
-               atomic_inc(&netstamp_needed_deferred);
-               return;
-       }
-#endif
+       /* net_disable_timestamp() can be called from non process context */
+       atomic_inc(&netstamp_needed_deferred);
+       schedule_work(&netstamp_work);
+#else
        static_key_slow_dec(&netstamp_needed);
+#endif
 }
 EXPORT_SYMBOL(net_disable_timestamp);
 
index 236a21e3c878e73fbd97d74de3694caaeab5e762..d92de0a1f0a49d51ec8329c65d46a4f2ae304ebd 100644 (file)
@@ -1405,9 +1405,12 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
        if (regs.len > reglen)
                regs.len = reglen;
 
-       regbuf = vzalloc(reglen);
-       if (reglen && !regbuf)
-               return -ENOMEM;
+       regbuf = NULL;
+       if (reglen) {
+               regbuf = vzalloc(reglen);
+               if (!regbuf)
+                       return -ENOMEM;
+       }
 
        ops->get_regs(dev, &regs, regbuf);
 
index da38621245458bae2506b0c030d92315f1be5775..0f99297b2fb3517942bf74fee08ed3e61d65a4f0 100644 (file)
@@ -273,6 +273,7 @@ static int dsa_user_port_apply(struct device_node *port, u32 index,
        if (err) {
                dev_warn(ds->dev, "Failed to create slave %d: %d\n",
                         index, err);
+               ds->ports[index].netdev = NULL;
                return err;
        }
 
index 8c5a479681ca9ed8c2686208fc570e819a4db31a..516c87e75de7009e9e4f0cbdc80f8160c318fdf4 100644 (file)
@@ -356,6 +356,7 @@ void ether_setup(struct net_device *dev)
        dev->header_ops         = &eth_header_ops;
        dev->type               = ARPHRD_ETHER;
        dev->hard_header_len    = ETH_HLEN;
+       dev->min_header_len     = ETH_HLEN;
        dev->mtu                = ETH_DATA_LEN;
        dev->min_mtu            = ETH_MIN_MTU;
        dev->max_mtu            = ETH_DATA_LEN;
index 72d6f056d863603c959e1d04b9f863909a37c758..ae206163c273381ba6e8bd8a24fa050619a4a6ae 100644 (file)
@@ -1587,6 +1587,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
                                goto validate_return_locked;
                        }
 
+               if (opt_iter + 1 == opt_len) {
+                       err_offset = opt_iter;
+                       goto validate_return_locked;
+               }
                tag_len = tag[1];
                if (tag_len > (opt_len - opt_iter)) {
                        err_offset = opt_iter + 1;
index 5b15459955f84cfc26dd2b12f129b1ee4014e62b..44fd86de2823dd17de16276a8ec01b190e69b8b4 100644 (file)
@@ -1172,6 +1172,7 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
                                psf->sf_crcount = im->crcount;
                }
                in_dev_put(pmc->interface);
+               kfree(pmc);
        }
        spin_unlock_bh(&im->lock);
 }
index 53ae0c6315ad03e46f93ae68cb930fff5848edcd..900011709e3b8e4807daaa6bf537c3871a7d9306 100644 (file)
@@ -1238,7 +1238,14 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
                pktinfo->ipi_ifindex = 0;
                pktinfo->ipi_spec_dst.s_addr = 0;
        }
-       skb_dst_drop(skb);
+       /* We need to keep the dst for __ip_options_echo()
+        * We could restrict the test to opt.ts_needtime || opt.srr,
+        * but the following is good enough as IP options are not often used.
+        */
+       if (unlikely(IPCB(skb)->opt.optlen))
+               skb_dst_force(skb);
+       else
+               skb_dst_drop(skb);
 }
 
 int ip_setsockopt(struct sock *sk, int level,
index 86cca610f4c2c368476dee3602d1f74ac2cd5794..68d77b1f1495bb8dace1f6aa9c0e9a6ee5b2e5dd 100644 (file)
@@ -642,6 +642,8 @@ static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
 {
        struct sk_buff *skb = skb_peek(&sk->sk_write_queue);
 
+       if (!skb)
+               return 0;
        pfh->wcheck = csum_partial((char *)&pfh->icmph,
                sizeof(struct icmphdr), pfh->wcheck);
        pfh->icmph.checksum = csum_fold(pfh->wcheck);
index 4a044964da6670829e5c47fef52d2cd76360b59f..0efb4c7f6704f662b6c762e48698a41564add2a4 100644 (file)
@@ -770,6 +770,12 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
                                ret = -EAGAIN;
                                break;
                        }
+                       /* if __tcp_splice_read() got nothing while we have
+                        * an skb in receive queue, we do not want to loop.
+                        * This might happen with URG data.
+                        */
+                       if (!skb_queue_empty(&sk->sk_receive_queue))
+                               break;
                        sk_wait_data(sk, &timeo, NULL);
                        if (signal_pending(current)) {
                                ret = sock_intr_errno(timeo);
index 1d5331a1b1dc2677316148ba9852c191e7ed0fd4..8ce50dc3ab8cac821b8a2c3e0d31f0aa42f5c9d5 100644 (file)
@@ -2518,9 +2518,11 @@ u32 __tcp_select_window(struct sock *sk)
        int full_space = min_t(int, tp->window_clamp, allowed_space);
        int window;
 
-       if (mss > full_space)
+       if (unlikely(mss > full_space)) {
                mss = full_space;
-
+               if (mss <= 0)
+                       return 0;
+       }
        if (free_space < (full_space >> 1)) {
                icsk->icsk_ack.quick = 0;
 
index 1307a7c2e5445d37d1d1c4f5fbfd5bf7c29040d7..8aab7d78d25bc6eaa42dcc960cdbd5086f614cad 100644 (file)
@@ -1501,7 +1501,7 @@ try_again:
        return err;
 
 csum_copy_err:
-       if (!__sk_queue_drop_skb(sk, skb, flags)) {
+       if (!__sk_queue_drop_skb(sk, skb, flags, udp_skb_destructor)) {
                UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
                UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
        }
index f60e88e56255297e662ffc2e028f543d09271f74..a7bcc0ab5e99543a08410abe6ff3dbfc9b3753b7 100644 (file)
@@ -3386,9 +3386,15 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                        }
 
                        if (idev) {
-                               if (idev->if_flags & IF_READY)
-                                       /* device is already configured. */
+                               if (idev->if_flags & IF_READY) {
+                                       /* device is already configured -
+                                        * but resend MLD reports, we might
+                                        * have roamed and need to update
+                                        * multicast snooping switches
+                                        */
+                                       ipv6_mc_up(idev);
                                        break;
+                               }
                                idev->if_flags |= IF_READY;
                        }
 
@@ -4009,6 +4015,12 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id)
 
        if (bump_id)
                rt_genid_bump_ipv6(dev_net(dev));
+
+       /* Make sure that a new temporary address will be created
+        * before this temporary address becomes deprecated.
+        */
+       if (ifp->flags & IFA_F_TEMPORARY)
+               addrconf_verify_rtnl();
 }
 
 static void addrconf_dad_run(struct inet6_dev *idev)
index e4198502fd98ce55c1ab6d4d9767b8506b2cf707..275cac628a95066f0a27e93f5015ddeb0172c28c 100644 (file)
@@ -327,7 +327,6 @@ static int ipv6_srh_rcv(struct sk_buff *skb)
        struct ipv6_sr_hdr *hdr;
        struct inet6_dev *idev;
        struct in6_addr *addr;
-       bool cleanup = false;
        int accept_seg6;
 
        hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);
@@ -351,11 +350,7 @@ static int ipv6_srh_rcv(struct sk_buff *skb)
 #endif
 
 looped_back:
-       if (hdr->segments_left > 0) {
-               if (hdr->nexthdr != NEXTHDR_IPV6 && hdr->segments_left == 1 &&
-                   sr_has_cleanup(hdr))
-                       cleanup = true;
-       } else {
+       if (hdr->segments_left == 0) {
                if (hdr->nexthdr == NEXTHDR_IPV6) {
                        int offset = (hdr->hdrlen + 1) << 3;
 
@@ -418,21 +413,6 @@ looped_back:
 
        ipv6_hdr(skb)->daddr = *addr;
 
-       if (cleanup) {
-               int srhlen = (hdr->hdrlen + 1) << 3;
-               int nh = hdr->nexthdr;
-
-               skb_pull_rcsum(skb, sizeof(struct ipv6hdr) + srhlen);
-               memmove(skb_network_header(skb) + srhlen,
-                       skb_network_header(skb),
-                       (unsigned char *)hdr - skb_network_header(skb));
-               skb->network_header += srhlen;
-               ipv6_hdr(skb)->nexthdr = nh;
-               ipv6_hdr(skb)->payload_len = htons(skb->len -
-                                                  sizeof(struct ipv6hdr));
-               skb_push_rcsum(skb, sizeof(struct ipv6hdr));
-       }
-
        skb_dst_drop(skb);
 
        ip6_route_input(skb);
@@ -453,13 +433,8 @@ looped_back:
                }
                ipv6_hdr(skb)->hop_limit--;
 
-               /* be sure that srh is still present before reinjecting */
-               if (!cleanup) {
-                       skb_pull(skb, sizeof(struct ipv6hdr));
-                       goto looped_back;
-               }
-               skb_set_transport_header(skb, sizeof(struct ipv6hdr));
-               IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
+               skb_pull(skb, sizeof(struct ipv6hdr));
+               goto looped_back;
        }
 
        dst_input(skb);
index 558631860d91bbcb1321a4b566b6e92ddcaf7163..630b73be599977599c0021849fc6eb689cfefad7 100644 (file)
@@ -367,35 +367,37 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
 
 
 static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-               u8 type, u8 code, int offset, __be32 info)
+                      u8 type, u8 code, int offset, __be32 info)
 {
-       const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
-       __be16 *p = (__be16 *)(skb->data + offset);
-       int grehlen = offset + 4;
+       const struct gre_base_hdr *greh;
+       const struct ipv6hdr *ipv6h;
+       int grehlen = sizeof(*greh);
        struct ip6_tnl *t;
+       int key_off = 0;
        __be16 flags;
+       __be32 key;
 
-       flags = p[0];
-       if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
-               if (flags&(GRE_VERSION|GRE_ROUTING))
-                       return;
-               if (flags&GRE_KEY) {
-                       grehlen += 4;
-                       if (flags&GRE_CSUM)
-                               grehlen += 4;
-               }
+       if (!pskb_may_pull(skb, offset + grehlen))
+               return;
+       greh = (const struct gre_base_hdr *)(skb->data + offset);
+       flags = greh->flags;
+       if (flags & (GRE_VERSION | GRE_ROUTING))
+               return;
+       if (flags & GRE_CSUM)
+               grehlen += 4;
+       if (flags & GRE_KEY) {
+               key_off = grehlen + offset;
+               grehlen += 4;
        }
 
-       /* If only 8 bytes returned, keyed message will be dropped here */
-       if (!pskb_may_pull(skb, grehlen))
+       if (!pskb_may_pull(skb, offset + grehlen))
                return;
        ipv6h = (const struct ipv6hdr *)skb->data;
-       p = (__be16 *)(skb->data + offset);
+       greh = (const struct gre_base_hdr *)(skb->data + offset);
+       key = key_off ? *(__be32 *)(skb->data + key_off) : 0;
 
        t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
-                               flags & GRE_KEY ?
-                               *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
-                               p[1]);
+                                key, greh->protocol);
        if (!t)
                return;
 
index 2c0df09e90365ad38b5362f77c6e33a24fc062f0..b6a94ff0bbd0d5f42c78b7bcd6ea9fbe5ac9a595 100644 (file)
@@ -1344,7 +1344,7 @@ emsgsize:
         */
        if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
            headersize == sizeof(struct ipv6hdr) &&
-           length < mtu - headersize &&
+           length <= mtu - headersize &&
            !(flags & MSG_MORE) &&
            rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
                csummode = CHECKSUM_PARTIAL;
index ff8ee06491c335d209e86bb15f2526ab1915df3b..75fac933c209a0f430279dea10b5dd2426a7ed31 100644 (file)
@@ -441,7 +441,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
                                if (i + sizeof(*tel) > optlen)
                                        break;
 
-                               tel = (struct ipv6_tlv_tnl_enc_lim *) skb->data + off + i;
+                               tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
                                /* return index of option if found and valid */
                                if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
                                    tel->length == 1)
index 7139fffd61b6f764a9d0ae02ed41365afa3ab55c..1bdc703cb9668bd77690c3d8f1ec0062d7b88c43 100644 (file)
@@ -779,6 +779,7 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
                                psf->sf_crcount = im->mca_crcount;
                }
                in6_dev_put(pmc->idev);
+               kfree(pmc);
        }
        spin_unlock_bh(&im->mca_lock);
 }
index 03a064803626890ade73073cc12735aec777f9e5..6ef3dfb6e811642f1fc9b680e0b255a9399bb024 100644 (file)
@@ -174,7 +174,7 @@ int seg6_hmac_compute(struct seg6_hmac_info *hinfo, struct ipv6_sr_hdr *hdr,
         * hash function (RadioGatun) with up to 1216 bits
         */
 
-       /* saddr(16) + first_seg(1) + cleanup(1) + keyid(4) + seglist(16n) */
+       /* saddr(16) + first_seg(1) + flags(1) + keyid(4) + seglist(16n) */
        plen = 16 + 1 + 1 + 4 + (hdr->first_segment + 1) * 16;
 
        /* this limit allows for 14 segments */
@@ -186,7 +186,7 @@ int seg6_hmac_compute(struct seg6_hmac_info *hinfo, struct ipv6_sr_hdr *hdr,
         *
         * 1. Source IPv6 address (128 bits)
         * 2. first_segment value (8 bits)
-        * 3. cleanup flag (8 bits: highest bit is cleanup value, others are 0)
+        * 3. Flags (8 bits)
         * 4. HMAC Key ID (32 bits)
         * 5. All segments in the segments list (n * 128 bits)
         */
@@ -202,8 +202,8 @@ int seg6_hmac_compute(struct seg6_hmac_info *hinfo, struct ipv6_sr_hdr *hdr,
        /* first_segment value */
        *off++ = hdr->first_segment;
 
-       /* cleanup flag */
-       *off++ = !!(sr_has_cleanup(hdr)) << 7;
+       /* flags */
+       *off++ = hdr->flags;
 
        /* HMAC Key ID */
        memcpy(off, &hmackeyid, 4);
index fad992ad4bc83e8fa0dbdae194a4f8b54e28efa2..99853c6e33a8c3def99ecb56e288cce4a38a997b 100644 (file)
@@ -1380,6 +1380,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
        err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
        if (err) {
                free_percpu(dev->tstats);
+               dev->tstats = NULL;
                return err;
        }
 
index cb8929681dc7597eebcc46026e4b6548f4bedadb..eaad72c3d7462b4af09d632fe88466148964e679 100644 (file)
@@ -991,6 +991,16 @@ drop:
        return 0; /* don't send reset */
 }
 
+static void tcp_v6_restore_cb(struct sk_buff *skb)
+{
+       /* We need to move header back to the beginning if xfrm6_policy_check()
+        * and tcp_v6_fill_cb() are going to be called again.
+        * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
+        */
+       memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
+               sizeof(struct inet6_skb_parm));
+}
+
 static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
                                         struct request_sock *req,
                                         struct dst_entry *dst,
@@ -1182,8 +1192,10 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
                                                      sk_gfp_mask(sk, GFP_ATOMIC));
                        consume_skb(ireq->pktopts);
                        ireq->pktopts = NULL;
-                       if (newnp->pktoptions)
+                       if (newnp->pktoptions) {
+                               tcp_v6_restore_cb(newnp->pktoptions);
                                skb_set_owner_r(newnp->pktoptions, newsk);
+                       }
                }
        }
 
@@ -1198,16 +1210,6 @@ out:
        return NULL;
 }
 
-static void tcp_v6_restore_cb(struct sk_buff *skb)
-{
-       /* We need to move header back to the beginning if xfrm6_policy_check()
-        * and tcp_v6_fill_cb() are going to be called again.
-        * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
-        */
-       memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
-               sizeof(struct inet6_skb_parm));
-}
-
 /* The socket must have it's spinlock held when we get
  * here, unless it is a TCP_LISTEN socket.
  *
index 4d5c4eee4b3f506cf030bb9bce20c5b94086e011..8990856f5101eaabaf14d4017df522f37845083b 100644 (file)
@@ -441,7 +441,7 @@ try_again:
        return err;
 
 csum_copy_err:
-       if (!__sk_queue_drop_skb(sk, skb, flags)) {
+       if (!__sk_queue_drop_skb(sk, skb, flags, udp_skb_destructor)) {
                if (is_udp4) {
                        UDP_INC_STATS(sock_net(sk),
                                      UDP_MIB_CSUMERRORS, is_udplite);
index 7e08a4d3d77d0650e9f0c50cd0ab9a8568147f37..64f0e8531af056b822409f234d8806f463c4d5e5 100644 (file)
@@ -929,23 +929,25 @@ static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
                        goto out_error;
        }
 
-       /* New message, alloc head skb */
-       head = alloc_skb(0, sk->sk_allocation);
-       while (!head) {
-               kcm_push(kcm);
-               err = sk_stream_wait_memory(sk, &timeo);
-               if (err)
-                       goto out_error;
-
+       if (msg_data_left(msg)) {
+               /* New message, alloc head skb */
                head = alloc_skb(0, sk->sk_allocation);
-       }
+               while (!head) {
+                       kcm_push(kcm);
+                       err = sk_stream_wait_memory(sk, &timeo);
+                       if (err)
+                               goto out_error;
 
-       skb = head;
+                       head = alloc_skb(0, sk->sk_allocation);
+               }
 
-       /* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
-        * csum_and_copy_from_iter from skb_do_copy_data_nocache.
-        */
-       skb->ip_summed = CHECKSUM_UNNECESSARY;
+               skb = head;
+
+               /* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
+                * csum_and_copy_from_iter from skb_do_copy_data_nocache.
+                */
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+       }
 
 start:
        while (msg_data_left(msg)) {
@@ -1018,10 +1020,12 @@ wait_for_memory:
        if (eor) {
                bool not_busy = skb_queue_empty(&sk->sk_write_queue);
 
-               /* Message complete, queue it on send buffer */
-               __skb_queue_tail(&sk->sk_write_queue, head);
-               kcm->seq_skb = NULL;
-               KCM_STATS_INCR(kcm->stats.tx_msgs);
+               if (head) {
+                       /* Message complete, queue it on send buffer */
+                       __skb_queue_tail(&sk->sk_write_queue, head);
+                       kcm->seq_skb = NULL;
+                       KCM_STATS_INCR(kcm->stats.tx_msgs);
+               }
 
                if (msg->msg_flags & MSG_BATCH) {
                        kcm->tx_wait_more = true;
index 8f560f7140a05694c13904d9b171ba67d9d11292..aebf281d09eeb31c531eb624bd2ddd78cab8da9b 100644 (file)
@@ -263,6 +263,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb,
 int l2tp_nl_register_ops(enum l2tp_pwtype pw_type,
                         const struct l2tp_nl_cmd_ops *ops);
 void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
+int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg);
 
 /* Session reference counts. Incremented when code obtains a reference
  * to a session.
index 3d73278b86ca34bfbd774dc8f52e490169445e1b..28c21546d5b60dcd07bbf6347389e97c918bf40f 100644 (file)
@@ -11,6 +11,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <asm/ioctls.h>
 #include <linux/icmp.h>
 #include <linux/module.h>
 #include <linux/skbuff.h>
@@ -553,6 +554,30 @@ out:
        return err ? err : copied;
 }
 
+int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg)
+{
+       struct sk_buff *skb;
+       int amount;
+
+       switch (cmd) {
+       case SIOCOUTQ:
+               amount = sk_wmem_alloc_get(sk);
+               break;
+       case SIOCINQ:
+               spin_lock_bh(&sk->sk_receive_queue.lock);
+               skb = skb_peek(&sk->sk_receive_queue);
+               amount = skb ? skb->len : 0;
+               spin_unlock_bh(&sk->sk_receive_queue.lock);
+               break;
+
+       default:
+               return -ENOIOCTLCMD;
+       }
+
+       return put_user(amount, (int __user *)arg);
+}
+EXPORT_SYMBOL(l2tp_ioctl);
+
 static struct proto l2tp_ip_prot = {
        .name              = "L2TP/IP",
        .owner             = THIS_MODULE,
@@ -561,7 +586,7 @@ static struct proto l2tp_ip_prot = {
        .bind              = l2tp_ip_bind,
        .connect           = l2tp_ip_connect,
        .disconnect        = l2tp_ip_disconnect,
-       .ioctl             = udp_ioctl,
+       .ioctl             = l2tp_ioctl,
        .destroy           = l2tp_ip_destroy_sock,
        .setsockopt        = ip_setsockopt,
        .getsockopt        = ip_getsockopt,
index 331ccf5a7bad80e011997e071489d7775b0c68c6..f47c45250f86c9189e0a6bbfd92b21cbe2069406 100644 (file)
@@ -722,7 +722,7 @@ static struct proto l2tp_ip6_prot = {
        .bind              = l2tp_ip6_bind,
        .connect           = l2tp_ip6_connect,
        .disconnect        = l2tp_ip6_disconnect,
-       .ioctl             = udp_ioctl,
+       .ioctl             = l2tp_ioctl,
        .destroy           = l2tp_ip6_destroy_sock,
        .setsockopt        = ipv6_setsockopt,
        .getsockopt        = ipv6_getsockopt,
index ecfdd97758a386ed9d642a3fc141f2ec6da13db2..5c3af5eb405232167bbd62a13b4d1f37370d6bc0 100644 (file)
@@ -124,7 +124,7 @@ static int aes_siv_encrypt(const u8 *key, size_t key_len,
 
        /* CTR */
 
-       tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, 0);
+       tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm2)) {
                kfree(tmp);
                return PTR_ERR(tfm2);
@@ -183,7 +183,7 @@ static int aes_siv_decrypt(const u8 *key, size_t key_len,
 
        /* CTR */
 
-       tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, 0);
+       tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm2))
                return PTR_ERR(tfm2);
        /* K2 for CTR */
@@ -272,7 +272,7 @@ int fils_encrypt_assoc_req(struct sk_buff *skb,
        crypt_len = skb->data + skb->len - encr;
        skb_put(skb, AES_BLOCK_SIZE);
        return aes_siv_encrypt(assoc_data->fils_kek, assoc_data->fils_kek_len,
-                              encr, crypt_len, 1, addr, len, encr);
+                              encr, crypt_len, 5, addr, len, encr);
 }
 
 int fils_decrypt_assoc_resp(struct ieee80211_sub_if_data *sdata,
index 42120d965263d2ec1719211da37b7900814e4122..50e1b7f78bd49605d2dbca4c215befecc1d8d001 100644 (file)
@@ -339,7 +339,7 @@ int mesh_add_vendor_ies(struct ieee80211_sub_if_data *sdata,
        /* fast-forward to vendor IEs */
        offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0);
 
-       if (offset) {
+       if (offset < ifmsh->ie_len) {
                len = ifmsh->ie_len - offset;
                data = ifmsh->ie + offset;
                if (skb_tailroom(skb) < len)
index 3d555c79a7b571161f88bb8df426ea5e6bf1f9dd..d56ee46b11fc9524e457e5fe8adf10c105a66ab6 100644 (file)
@@ -2755,7 +2755,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        struct virtio_net_hdr vnet_hdr = { 0 };
        int offset = 0;
        struct packet_sock *po = pkt_sk(sk);
-       int hlen, tlen;
+       int hlen, tlen, linear;
        int extra_len = 0;
 
        /*
@@ -2816,8 +2816,9 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        err = -ENOBUFS;
        hlen = LL_RESERVED_SPACE(dev);
        tlen = dev->needed_tailroom;
-       skb = packet_alloc_skb(sk, hlen + tlen, hlen, len,
-                              __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len),
+       linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
+       linear = max(linear, min_t(int, len, dev->hard_header_len));
+       skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
                               msg->msg_flags & MSG_DONTWAIT, &err);
        if (skb == NULL)
                goto out_unlock;
index 970db7a41684aa2a494b97663f91ca932308de05..5752789acc135250c312199c2d6e5e15d05fdea0 100644 (file)
@@ -568,9 +568,9 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
                               &mask->icmp.type,
                               TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
                               sizeof(key->icmp.type));
-               fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
+               fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
                               &mask->icmp.code,
-                              TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
+                              TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
                               sizeof(key->icmp.code));
        }
 
index f935429bd5ef1fcbe6a4272876b76e2ebb574c4b..b12bc2abea931a7defd1e23eb86a20fe09e76388 100644 (file)
 #include <net/sch_generic.h>
 #include <net/pkt_cls.h>
 
-struct cls_mall_filter {
+struct cls_mall_head {
        struct tcf_exts exts;
        struct tcf_result res;
        u32 handle;
-       struct rcu_head rcu;
        u32 flags;
-};
-
-struct cls_mall_head {
-       struct cls_mall_filter *filter;
        struct rcu_head rcu;
 };
 
@@ -33,38 +28,29 @@ static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                         struct tcf_result *res)
 {
        struct cls_mall_head *head = rcu_dereference_bh(tp->root);
-       struct cls_mall_filter *f = head->filter;
 
-       if (tc_skip_sw(f->flags))
+       if (tc_skip_sw(head->flags))
                return -1;
 
-       return tcf_exts_exec(skb, &f->exts, res);
+       return tcf_exts_exec(skb, &head->exts, res);
 }
 
 static int mall_init(struct tcf_proto *tp)
 {
-       struct cls_mall_head *head;
-
-       head = kzalloc(sizeof(*head), GFP_KERNEL);
-       if (!head)
-               return -ENOBUFS;
-
-       rcu_assign_pointer(tp->root, head);
-
        return 0;
 }
 
-static void mall_destroy_filter(struct rcu_head *head)
+static void mall_destroy_rcu(struct rcu_head *rcu)
 {
-       struct cls_mall_filter *f = container_of(head, struct cls_mall_filter, rcu);
+       struct cls_mall_head *head = container_of(rcu, struct cls_mall_head,
+                                                 rcu);
 
-       tcf_exts_destroy(&f->exts);
-
-       kfree(f);
+       tcf_exts_destroy(&head->exts);
+       kfree(head);
 }
 
 static int mall_replace_hw_filter(struct tcf_proto *tp,
-                                 struct cls_mall_filter *f,
+                                 struct cls_mall_head *head,
                                  unsigned long cookie)
 {
        struct net_device *dev = tp->q->dev_queue->dev;
@@ -74,7 +60,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
        offload.type = TC_SETUP_MATCHALL;
        offload.cls_mall = &mall_offload;
        offload.cls_mall->command = TC_CLSMATCHALL_REPLACE;
-       offload.cls_mall->exts = &f->exts;
+       offload.cls_mall->exts = &head->exts;
        offload.cls_mall->cookie = cookie;
 
        return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
@@ -82,7 +68,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
 }
 
 static void mall_destroy_hw_filter(struct tcf_proto *tp,
-                                  struct cls_mall_filter *f,
+                                  struct cls_mall_head *head,
                                   unsigned long cookie)
 {
        struct net_device *dev = tp->q->dev_queue->dev;
@@ -103,29 +89,20 @@ static bool mall_destroy(struct tcf_proto *tp, bool force)
 {
        struct cls_mall_head *head = rtnl_dereference(tp->root);
        struct net_device *dev = tp->q->dev_queue->dev;
-       struct cls_mall_filter *f = head->filter;
 
-       if (!force && f)
-               return false;
+       if (!head)
+               return true;
 
-       if (f) {
-               if (tc_should_offload(dev, tp, f->flags))
-                       mall_destroy_hw_filter(tp, f, (unsigned long) f);
+       if (tc_should_offload(dev, tp, head->flags))
+               mall_destroy_hw_filter(tp, head, (unsigned long) head);
 
-               call_rcu(&f->rcu, mall_destroy_filter);
-       }
-       kfree_rcu(head, rcu);
+       call_rcu(&head->rcu, mall_destroy_rcu);
        return true;
 }
 
 static unsigned long mall_get(struct tcf_proto *tp, u32 handle)
 {
-       struct cls_mall_head *head = rtnl_dereference(tp->root);
-       struct cls_mall_filter *f = head->filter;
-
-       if (f && f->handle == handle)
-               return (unsigned long) f;
-       return 0;
+       return 0UL;
 }
 
 static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
@@ -134,7 +111,7 @@ static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
 };
 
 static int mall_set_parms(struct net *net, struct tcf_proto *tp,
-                         struct cls_mall_filter *f,
+                         struct cls_mall_head *head,
                          unsigned long base, struct nlattr **tb,
                          struct nlattr *est, bool ovr)
 {
@@ -147,11 +124,11 @@ static int mall_set_parms(struct net *net, struct tcf_proto *tp,
                return err;
 
        if (tb[TCA_MATCHALL_CLASSID]) {
-               f->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
-               tcf_bind_filter(tp, &f->res, base);
+               head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
+               tcf_bind_filter(tp, &head->res, base);
        }
 
-       tcf_exts_change(tp, &f->exts, &e);
+       tcf_exts_change(tp, &head->exts, &e);
 
        return 0;
 }
@@ -162,21 +139,17 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
                       unsigned long *arg, bool ovr)
 {
        struct cls_mall_head *head = rtnl_dereference(tp->root);
-       struct cls_mall_filter *fold = (struct cls_mall_filter *) *arg;
        struct net_device *dev = tp->q->dev_queue->dev;
-       struct cls_mall_filter *f;
        struct nlattr *tb[TCA_MATCHALL_MAX + 1];
+       struct cls_mall_head *new;
        u32 flags = 0;
        int err;
 
        if (!tca[TCA_OPTIONS])
                return -EINVAL;
 
-       if (head->filter)
-               return -EBUSY;
-
-       if (fold)
-               return -EINVAL;
+       if (head)
+               return -EEXIST;
 
        err = nla_parse_nested(tb, TCA_MATCHALL_MAX,
                               tca[TCA_OPTIONS], mall_policy);
@@ -189,23 +162,23 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
                        return -EINVAL;
        }
 
-       f = kzalloc(sizeof(*f), GFP_KERNEL);
-       if (!f)
+       new = kzalloc(sizeof(*new), GFP_KERNEL);
+       if (!new)
                return -ENOBUFS;
 
-       tcf_exts_init(&f->exts, TCA_MATCHALL_ACT, 0);
+       tcf_exts_init(&new->exts, TCA_MATCHALL_ACT, 0);
 
        if (!handle)
                handle = 1;
-       f->handle = handle;
-       f->flags = flags;
+       new->handle = handle;
+       new->flags = flags;
 
-       err = mall_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr);
+       err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr);
        if (err)
                goto errout;
 
        if (tc_should_offload(dev, tp, flags)) {
-               err = mall_replace_hw_filter(tp, f, (unsigned long) f);
+               err = mall_replace_hw_filter(tp, new, (unsigned long) new);
                if (err) {
                        if (tc_skip_sw(flags))
                                goto errout;
@@ -214,39 +187,29 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
                }
        }
 
-       *arg = (unsigned long) f;
-       rcu_assign_pointer(head->filter, f);
-
+       *arg = (unsigned long) head;
+       rcu_assign_pointer(tp->root, new);
+       if (head)
+               call_rcu(&head->rcu, mall_destroy_rcu);
        return 0;
 
 errout:
-       kfree(f);
+       kfree(new);
        return err;
 }
 
 static int mall_delete(struct tcf_proto *tp, unsigned long arg)
 {
-       struct cls_mall_head *head = rtnl_dereference(tp->root);
-       struct cls_mall_filter *f = (struct cls_mall_filter *) arg;
-       struct net_device *dev = tp->q->dev_queue->dev;
-
-       if (tc_should_offload(dev, tp, f->flags))
-               mall_destroy_hw_filter(tp, f, (unsigned long) f);
-
-       RCU_INIT_POINTER(head->filter, NULL);
-       tcf_unbind_filter(tp, &f->res);
-       call_rcu(&f->rcu, mall_destroy_filter);
-       return 0;
+       return -EOPNOTSUPP;
 }
 
 static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 {
        struct cls_mall_head *head = rtnl_dereference(tp->root);
-       struct cls_mall_filter *f = head->filter;
 
        if (arg->count < arg->skip)
                goto skip;
-       if (arg->fn(tp, (unsigned long) f, arg) < 0)
+       if (arg->fn(tp, (unsigned long) head, arg) < 0)
                arg->stop = 1;
 skip:
        arg->count++;
@@ -255,28 +218,28 @@ skip:
 static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
                     struct sk_buff *skb, struct tcmsg *t)
 {
-       struct cls_mall_filter *f = (struct cls_mall_filter *) fh;
+       struct cls_mall_head *head = (struct cls_mall_head *) fh;
        struct nlattr *nest;
 
-       if (!f)
+       if (!head)
                return skb->len;
 
-       t->tcm_handle = f->handle;
+       t->tcm_handle = head->handle;
 
        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (!nest)
                goto nla_put_failure;
 
-       if (f->res.classid &&
-           nla_put_u32(skb, TCA_MATCHALL_CLASSID, f->res.classid))
+       if (head->res.classid &&
+           nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
                goto nla_put_failure;
 
-       if (tcf_exts_dump(skb, &f->exts))
+       if (tcf_exts_dump(skb, &head->exts))
                goto nla_put_failure;
 
        nla_nest_end(skb, nest);
 
-       if (tcf_exts_dump_stats(skb, &f->exts) < 0)
+       if (tcf_exts_dump_stats(skb, &head->exts) < 0)
                goto nla_put_failure;
 
        return skb->len;
index 37eeab7899fc235a56bd2f4ccdb3e6c338a8d48e..1b5d669e30292a57ed57dd920d81be2a57f97b22 100644 (file)
@@ -239,7 +239,7 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
        union sctp_addr *laddr = (union sctp_addr *)addr;
        struct sctp_transport *transport;
 
-       if (sctp_verify_addr(sk, laddr, af->sockaddr_len))
+       if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len))
                return NULL;
 
        addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
@@ -7426,7 +7426,8 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
                 */
                release_sock(sk);
                current_timeo = schedule_timeout(current_timeo);
-               BUG_ON(sk != asoc->base.sk);
+               if (sk != asoc->base.sk)
+                       goto do_error;
                lock_sock(sk);
 
                *timeo_p = current_timeo;
index dc6fb79a361f1ca3ab9869fc02ba05c1a533ad9b..25d9a9cf7b66b7f4e501d38d91f6a1908830972e 100644 (file)
@@ -260,7 +260,7 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
        if (!oa->data)
                return -ENOMEM;
 
-       creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL);
+       creds = kzalloc(sizeof(struct svc_cred), GFP_KERNEL);
        if (!creds) {
                kfree(oa->data);
                return -ENOMEM;
index 5c1b267e22beefe7cfa83e3541783fab702a092a..aee396b9f190bb4454844c7282fd4d3f5d85b5e7 100644 (file)
@@ -5916,6 +5916,7 @@ do {                                                                          \
                        break;
                }
                cfg->ht_opmode = ht_opmode;
+               mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1));
        }
        FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout,
                                  1, 65535, mask,
index eadcd4d359d91fc7823a75263c44c520e05f900b..d883116ebaa452d9c2f6c657de53121ebd9d50bd 100644 (file)
@@ -164,6 +164,7 @@ cmd_gensymtypes_c =                                                         \
     $(CPP) -D__GENKSYMS__ $(c_flags) $< |                                   \
     $(GENKSYMS) $(if $(1), -T $(2))                                         \
      $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX))             \
+     $(patsubst y,-R,$(CONFIG_MODULE_REL_CRCS))                             \
      $(if $(KBUILD_PRESERVE),-p)                                            \
      -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null))
 
@@ -337,6 +338,7 @@ cmd_gensymtypes_S =                                                         \
     $(CPP) -D__GENKSYMS__ $(c_flags) -xc - |                                \
     $(GENKSYMS) $(if $(1), -T $(2))                                         \
      $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX))             \
+     $(patsubst y,-R,$(CONFIG_MODULE_REL_CRCS))                             \
      $(if $(KBUILD_PRESERVE),-p)                                            \
      -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null))
 
index 06121ce524a76006072459d352d727b4aebdf203..c9235d8340f1e7ba33eacfaee94642c18f5fd211 100644 (file)
@@ -44,7 +44,7 @@ char *cur_filename, *source_file;
 int in_source_file;
 
 static int flag_debug, flag_dump_defs, flag_reference, flag_dump_types,
-          flag_preserve, flag_warnings;
+          flag_preserve, flag_warnings, flag_rel_crcs;
 static const char *mod_prefix = "";
 
 static int errors;
@@ -693,7 +693,10 @@ void export_symbol(const char *name)
                        fputs(">\n", debugfile);
 
                /* Used as a linker script. */
-               printf("%s__crc_%s = 0x%08lx ;\n", mod_prefix, name, crc);
+               printf(!flag_rel_crcs ? "%s__crc_%s = 0x%08lx;\n" :
+                      "SECTIONS { .rodata : ALIGN(4) { "
+                      "%s__crc_%s = .; LONG(0x%08lx); } }\n",
+                      mod_prefix, name, crc);
        }
 }
 
@@ -730,7 +733,7 @@ void error_with_pos(const char *fmt, ...)
 
 static void genksyms_usage(void)
 {
-       fputs("Usage:\n" "genksyms [-adDTwqhV] > /path/to/.tmp_obj.ver\n" "\n"
+       fputs("Usage:\n" "genksyms [-adDTwqhVR] > /path/to/.tmp_obj.ver\n" "\n"
 #ifdef __GNU_LIBRARY__
              "  -s, --symbol-prefix   Select symbol prefix\n"
              "  -d, --debug           Increment the debug level (repeatable)\n"
@@ -742,6 +745,7 @@ static void genksyms_usage(void)
              "  -q, --quiet           Disable warnings (default)\n"
              "  -h, --help            Print this message\n"
              "  -V, --version         Print the release version\n"
+             "  -R, --relative-crc    Emit section relative symbol CRCs\n"
 #else                          /* __GNU_LIBRARY__ */
              "  -s                    Select symbol prefix\n"
              "  -d                    Increment the debug level (repeatable)\n"
@@ -753,6 +757,7 @@ static void genksyms_usage(void)
              "  -q                    Disable warnings (default)\n"
              "  -h                    Print this message\n"
              "  -V                    Print the release version\n"
+             "  -R                    Emit section relative symbol CRCs\n"
 #endif                         /* __GNU_LIBRARY__ */
              , stderr);
 }
@@ -774,13 +779,14 @@ int main(int argc, char **argv)
                {"preserve", 0, 0, 'p'},
                {"version", 0, 0, 'V'},
                {"help", 0, 0, 'h'},
+               {"relative-crc", 0, 0, 'R'},
                {0, 0, 0, 0}
        };
 
-       while ((o = getopt_long(argc, argv, "s:dwqVDr:T:ph",
+       while ((o = getopt_long(argc, argv, "s:dwqVDr:T:phR",
                                &long_opts[0], NULL)) != EOF)
 #else                          /* __GNU_LIBRARY__ */
-       while ((o = getopt(argc, argv, "s:dwqVDr:T:ph")) != EOF)
+       while ((o = getopt(argc, argv, "s:dwqVDr:T:phR")) != EOF)
 #endif                         /* __GNU_LIBRARY__ */
                switch (o) {
                case 's':
@@ -823,6 +829,9 @@ int main(int argc, char **argv)
                case 'h':
                        genksyms_usage();
                        return 0;
+               case 'R':
+                       flag_rel_crcs = 1;
+                       break;
                default:
                        genksyms_usage();
                        return 1;
index 299b92ca1ae092d82e9a0e3bffaec45988ebcc37..5d554419170b7d54ec82ddb1d31093d3eab0aa7d 100644 (file)
@@ -219,6 +219,10 @@ static int symbol_valid(struct sym_entry *s)
                "_SDA2_BASE_",          /* ppc */
                NULL };
 
+       static char *special_prefixes[] = {
+               "__crc_",               /* modversions */
+               NULL };
+
        static char *special_suffixes[] = {
                "_veneer",              /* arm */
                "_from_arm",            /* arm */
@@ -259,6 +263,14 @@ static int symbol_valid(struct sym_entry *s)
                if (strcmp(sym_name, special_symbols[i]) == 0)
                        return 0;
 
+       for (i = 0; special_prefixes[i]; i++) {
+               int l = strlen(special_prefixes[i]);
+
+               if (l <= strlen(sym_name) &&
+                   strncmp(sym_name, special_prefixes[i], l) == 0)
+                       return 0;
+       }
+
        for (i = 0; special_suffixes[i]; i++) {
                int l = strlen(sym_name) - strlen(special_suffixes[i]);
 
index 29c89a6bad3d3ac34e539189e83769f1c63ddab3..4dedd0d3d3a7fda58af2bc6150b9f6b6195d2cac 100644 (file)
@@ -621,6 +621,16 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
        if (strncmp(symname, CRC_PFX, strlen(CRC_PFX)) == 0) {
                is_crc = true;
                crc = (unsigned int) sym->st_value;
+               if (sym->st_shndx != SHN_UNDEF && sym->st_shndx != SHN_ABS) {
+                       unsigned int *crcp;
+
+                       /* symbol points to the CRC in the ELF object */
+                       crcp = (void *)info->hdr + sym->st_value +
+                              info->sechdrs[sym->st_shndx].sh_offset -
+                              (info->hdr->e_type != ET_REL ?
+                               info->sechdrs[sym->st_shndx].sh_addr : 0);
+                       crc = *crcp;
+               }
                sym_update_crc(symname + strlen(CRC_PFX), mod, crc,
                                export);
        }
index c7c6619431d5fb4922dd729e9e49dd910d7967d2..d98550abe16d40250be4327197c48866953b5645 100644 (file)
@@ -5887,7 +5887,7 @@ static int selinux_setprocattr(struct task_struct *p,
                return error;
 
        /* Obtain a SID for the context, if one was specified. */
-       if (size && str[1] && str[1] != '\n') {
+       if (size && str[0] && str[0] != '\n') {
                if (str[size-1] == '\n') {
                        str[size-1] = 0;
                        size--;
index c850345c43b53dd5616b155f34f741d0ca30701c..dfa5156f35856324d86f05315edfb2ef9cf4e74e 100644 (file)
@@ -419,7 +419,6 @@ int snd_seq_pool_done(struct snd_seq_pool *pool)
 {
        unsigned long flags;
        struct snd_seq_event_cell *ptr;
-       int max_count = 5 * HZ;
 
        if (snd_BUG_ON(!pool))
                return -EINVAL;
@@ -432,14 +431,8 @@ int snd_seq_pool_done(struct snd_seq_pool *pool)
        if (waitqueue_active(&pool->output_sleep))
                wake_up(&pool->output_sleep);
 
-       while (atomic_read(&pool->counter) > 0) {
-               if (max_count == 0) {
-                       pr_warn("ALSA: snd_seq_pool_done timeout: %d cells remain\n", atomic_read(&pool->counter));
-                       break;
-               }
+       while (atomic_read(&pool->counter) > 0)
                schedule_timeout_uninterruptible(1);
-               max_count--;
-       }
        
        /* release all resources */
        spin_lock_irqsave(&pool->lock, flags);
index 0bec02e89d5118b3dffe1e22e88e423baa037491..450c5187eecb6bb083736d2d2a1aad43b98c7c3f 100644 (file)
@@ -181,6 +181,8 @@ void __exit snd_seq_queues_delete(void)
        }
 }
 
+static void queue_use(struct snd_seq_queue *queue, int client, int use);
+
 /* allocate a new queue -
  * return queue index value or negative value for error
  */
@@ -192,11 +194,11 @@ int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
        if (q == NULL)
                return -ENOMEM;
        q->info_flags = info_flags;
+       queue_use(q, client, 1);
        if (queue_list_add(q) < 0) {
                queue_delete(q);
                return -ENOMEM;
        }
-       snd_seq_queue_use(q->queue, client, 1); /* use this queue */
        return q->queue;
 }
 
@@ -502,19 +504,9 @@ int snd_seq_queue_timer_set_tempo(int queueid, int client,
        return result;
 }
 
-
-/* use or unuse this queue -
- * if it is the first client, starts the timer.
- * if it is not longer used by any clients, stop the timer.
- */
-int snd_seq_queue_use(int queueid, int client, int use)
+/* use or unuse this queue */
+static void queue_use(struct snd_seq_queue *queue, int client, int use)
 {
-       struct snd_seq_queue *queue;
-
-       queue = queueptr(queueid);
-       if (queue == NULL)
-               return -EINVAL;
-       mutex_lock(&queue->timer_mutex);
        if (use) {
                if (!test_and_set_bit(client, queue->clients_bitmap))
                        queue->clients++;
@@ -529,6 +521,21 @@ int snd_seq_queue_use(int queueid, int client, int use)
        } else {
                snd_seq_timer_close(queue);
        }
+}
+
+/* use or unuse this queue -
+ * if it is the first client, starts the timer.
+ * if it is not longer used by any clients, stop the timer.
+ */
+int snd_seq_queue_use(int queueid, int client, int use)
+{
+       struct snd_seq_queue *queue;
+
+       queue = queueptr(queueid);
+       if (queue == NULL)
+               return -EINVAL;
+       mutex_lock(&queue->timer_mutex);
+       queue_use(queue, client, use);
        mutex_unlock(&queue->timer_mutex);
        queuefree(queue);
        return 0;
index cf9bc042fe966361588b8dc92b66e308fd50e657..3fc201c3b95a33380217950d91285c83d3d7fb8a 100644 (file)
@@ -3639,6 +3639,7 @@ HDA_CODEC_ENTRY(0x10de0070, "GPU 70 HDMI/DP",     patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de0080, "GPU 80 HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de0082, "GPU 82 HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de0083, "GPU 83 HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI",      patch_nvhdmi_2ch),
index 90009c0b3a92e42f2598e05b451a0cbc0d79c8bf..ab3c280a23d1fa430c39f55816296cf453ad2613 100644 (file)
@@ -754,8 +754,9 @@ int line6_probe(struct usb_interface *interface,
                goto error;
        }
 
+       line6_get_interval(line6);
+
        if (properties->capabilities & LINE6_CAP_CONTROL) {
-               line6_get_interval(line6);
                ret = line6_init_cap_control(line6);
                if (ret < 0)
                        goto error;
index 5e0dea2cdc01f65849f49f10392293a21b3a468d..039636ffb6c8a3edb6c14fd9a2b3a854ab84f982 100644 (file)
@@ -150,9 +150,9 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
                *type = INSN_RETURN;
                break;
 
-       case 0xc5: /* iret */
        case 0xca: /* retf */
        case 0xcb: /* retf */
+       case 0xcf: /* iret */
                *type = INSN_CONTEXT_SWITCH;
                break;
 
index 9ff0db4e2d0cd1bcc35d2ea137871093ebfcb405..933aeec46f4a128754ef586c07df8e6d7d81eae0 100644 (file)
@@ -1199,7 +1199,7 @@ static int ui_init(void)
                BUG_ON(1);
        }
 
-       perf_hpp__register_sort_field(fmt);
+       perf_hpp__prepend_sort_field(fmt);
        return 0;
 }
 
index 37388397b5bc051b66dca810c65a4434eea07bb6..18cfcdc90356f89f75b328870cc5fcc621b2005f 100644 (file)
@@ -521,6 +521,12 @@ void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
        list_add_tail(&format->sort_list, &list->sorts);
 }
 
+void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
+                                      struct perf_hpp_fmt *format)
+{
+       list_add(&format->sort_list, &list->sorts);
+}
+
 void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
 {
        list_del(&format->list);
@@ -560,6 +566,10 @@ void perf_hpp__setup_output_field(struct perf_hpp_list *list)
        perf_hpp_list__for_each_sort_list(list, fmt) {
                struct perf_hpp_fmt *pos;
 
+               /* skip sort-only fields ("sort_compute" in perf diff) */
+               if (!fmt->entry && !fmt->color)
+                       continue;
+
                perf_hpp_list__for_each_format(list, pos) {
                        if (fmt_equal(fmt, pos))
                                goto next;
index 42922512c1c62fe09eb12f301c281ceb57c2bf3c..8b610dd9e2f682046ba412b31ab39f85b3b7f670 100644 (file)
@@ -437,7 +437,7 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
                }
                call->ip = cursor_node->ip;
                call->ms.sym = cursor_node->sym;
-               call->ms.map = cursor_node->map;
+               call->ms.map = map__get(cursor_node->map);
 
                if (cursor_node->branch) {
                        call->branch_count = 1;
@@ -477,6 +477,7 @@ add_child(struct callchain_node *parent,
 
                list_for_each_entry_safe(call, tmp, &new->val, list) {
                        list_del(&call->list);
+                       map__zput(call->ms.map);
                        free(call);
                }
                free(new);
@@ -761,6 +762,7 @@ merge_chain_branch(struct callchain_cursor *cursor,
                                        list->ms.map, list->ms.sym,
                                        false, NULL, 0, 0);
                list_del(&list->list);
+               map__zput(list->ms.map);
                free(list);
        }
 
@@ -811,7 +813,8 @@ int callchain_cursor_append(struct callchain_cursor *cursor,
        }
 
        node->ip = ip;
-       node->map = map;
+       map__zput(node->map);
+       node->map = map__get(map);
        node->sym = sym;
        node->branch = branch;
        node->nr_loop_iter = nr_loop_iter;
@@ -1142,11 +1145,13 @@ static void free_callchain_node(struct callchain_node *node)
 
        list_for_each_entry_safe(list, tmp, &node->parent_val, list) {
                list_del(&list->list);
+               map__zput(list->ms.map);
                free(list);
        }
 
        list_for_each_entry_safe(list, tmp, &node->val, list) {
                list_del(&list->list);
+               map__zput(list->ms.map);
                free(list);
        }
 
@@ -1210,6 +1215,7 @@ int callchain_node__make_parent_list(struct callchain_node *node)
                                goto out;
                        *new = *chain;
                        new->has_children = false;
+                       map__get(new->ms.map);
                        list_add_tail(&new->list, &head);
                }
                parent = parent->parent;
@@ -1230,6 +1236,7 @@ int callchain_node__make_parent_list(struct callchain_node *node)
 out:
        list_for_each_entry_safe(chain, new, &head, list) {
                list_del(&chain->list);
+               map__zput(chain->ms.map);
                free(chain);
        }
        return -ENOMEM;
index 35c8e379530f2f1e02d050a25a2ef786ee10a819..4f4b60f1558a827cbe34d9d387e6a91a917b58dd 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/list.h>
 #include <linux/rbtree.h>
 #include "event.h"
+#include "map.h"
 #include "symbol.h"
 
 #define HELP_PAD "\t\t\t\t"
@@ -184,8 +185,13 @@ int callchain_merge(struct callchain_cursor *cursor,
  */
 static inline void callchain_cursor_reset(struct callchain_cursor *cursor)
 {
+       struct callchain_cursor_node *node;
+
        cursor->nr = 0;
        cursor->last = &cursor->first;
+
+       for (node = cursor->first; node != NULL; node = node->next)
+               map__zput(node->map);
 }
 
 int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip,
index 6770a964560954e0b7d028698a50b8bcab0cf891..7d1b7d33e644bd7dd97fef11f5b473770ec8a236 100644 (file)
@@ -1,6 +1,7 @@
 #include "util.h"
 #include "build-id.h"
 #include "hist.h"
+#include "map.h"
 #include "session.h"
 #include "sort.h"
 #include "evlist.h"
@@ -1019,6 +1020,10 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
                         int max_stack_depth, void *arg)
 {
        int err, err2;
+       struct map *alm = NULL;
+
+       if (al && al->map)
+               alm = map__get(al->map);
 
        err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
                                        iter->evsel, al, max_stack_depth);
@@ -1058,6 +1063,8 @@ out:
        if (!err)
                err = err2;
 
+       map__put(alm);
+
        return err;
 }
 
index d4b6514eeef5dae417cfc76f1fc2de9cc869575f..28c216e3d5b72557825a8918ab2aa6d6369c40f8 100644 (file)
@@ -283,6 +283,8 @@ void perf_hpp_list__column_register(struct perf_hpp_list *list,
                                    struct perf_hpp_fmt *format);
 void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
                                        struct perf_hpp_fmt *format);
+void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
+                                      struct perf_hpp_fmt *format);
 
 static inline void perf_hpp__column_register(struct perf_hpp_fmt *format)
 {
@@ -294,6 +296,11 @@ static inline void perf_hpp__register_sort_field(struct perf_hpp_fmt *format)
        perf_hpp_list__register_sort_field(&perf_hpp_list, format);
 }
 
+static inline void perf_hpp__prepend_sort_field(struct perf_hpp_fmt *format)
+{
+       perf_hpp_list__prepend_sort_field(&perf_hpp_list, format);
+}
+
 #define perf_hpp_list__for_each_format(_list, format) \
        list_for_each_entry(format, &(_list)->fields, list)