Merge branch 'libnvdimm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdim...
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 3 Mar 2018 22:32:00 +0000 (14:32 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 3 Mar 2018 22:32:00 +0000 (14:32 -0800)
Pull libnvdimm fixes from Dan Williams:
 "A 4.16 regression fix, three fixes for -stable, and a cleanup fix:

   - During the merge window, support for the new ACPI NVDIMM Platform
     Capabilities structure disabled support for "deep flush", a
     force-unit-access-like mechanism for persistent memory. Restore
     that mechanism.

   - VFIO, like RDMA, is yet one more memory registration / pinning
     interface that is incompatible with Filesystem-DAX. Disable
     long-term pins of Filesystem-DAX mappings via VFIO.

   - The Filesystem-DAX detection to prevent long-term pins mistakenly
     also disabled Device-DAX pins, which are not subject to the same
     block-map collision concerns.

   - Similar to the setup path, softlockup warnings can trigger in the
     shutdown path for large persistent memory namespaces. Teach
     for_each_device_pfn() to perform cond_resched() in all cases.

   - Boaz noticed that the might_sleep() in dax_direct_access() is stale
     as of the v4.15 kernel.

  These have received a build success notification from the 0day robot,
  and the longterm pin fixes have appeared in -next. However, I recently
  rebased the tree to remove some other fixes that need to be reworked
  after review feedback."

* 'libnvdimm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  memremap: fix softlockup reports at teardown
  libnvdimm: re-enable deep flush for pmem devices via fsync()
  vfio: disable filesystem-dax page pinning
  dax: fix vma_is_fsdax() helper
  dax: ->direct_access does not sleep anymore
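
The shutdown-path fix above is easiest to picture as a pfn iterator that
yields to the scheduler every so often. The sketch below is illustrative
only -- the helpers pfn_first()/pfn_end()/pfn_next() and the 1024-pfn
stride are assumptions for illustration, not necessarily the code that
was merged -- but it shows the cond_resched() pattern the summary
describes:

    /*
     * Illustrative sketch: walking every page frame of a multi-terabyte
     * pmem namespace at teardown can hog the CPU long enough to trip the
     * softlockup watchdog, so let the scheduler run periodically.
     */
    static unsigned long pfn_next(unsigned long pfn)
    {
            if (pfn % 1024 == 0)
                    cond_resched();   /* assumed stride, not taken from the patch */
            return pfn + 1;
    }

    #define for_each_device_pfn(pfn, map) \
            for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))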

717 files changed:
.gitignore
Documentation/PCI/pci.txt
Documentation/devicetree/bindings/auxdisplay/arm-charlcd.txt [moved from Documentation/devicetree/bindings/misc/arm-charlcd.txt with 100% similarity]
Documentation/devicetree/bindings/power/wakeup-source.txt
Documentation/devicetree/bindings/thermal/imx-thermal.txt
Documentation/gpu/tve200.rst
Documentation/i2c/busses/i2c-i801
Documentation/media/dmx.h.rst.exceptions
Documentation/media/uapi/dvb/dmx-qbuf.rst
Documentation/networking/segmentation-offloads.txt
Documentation/virtual/kvm/api.txt
Documentation/virtual/kvm/cpuid.txt
Documentation/virtual/kvm/msr.txt
Documentation/x86/topology.txt
MAINTAINERS
Makefile
arch/alpha/include/asm/cmpxchg.h
arch/alpha/include/asm/xchg.h
arch/arc/Kconfig
arch/arc/boot/dts/axs101.dts
arch/arc/boot/dts/axs10x_mb.dtsi
arch/arc/boot/dts/haps_hs_idu.dts
arch/arc/boot/dts/nsim_700.dts
arch/arc/boot/dts/nsim_hs.dts
arch/arc/boot/dts/nsim_hs_idu.dts
arch/arc/boot/dts/nsimosci.dts
arch/arc/boot/dts/nsimosci_hs.dts
arch/arc/boot/dts/nsimosci_hs_idu.dts
arch/arc/include/asm/bug.h
arch/arc/include/asm/entry-arcv2.h
arch/arc/kernel/mcip.c
arch/arc/kernel/setup.c
arch/arc/kernel/smp.c
arch/arc/kernel/unwind.c
arch/arc/mm/cache.c
arch/arm/boot/dts/bcm11351.dtsi
arch/arm/boot/dts/bcm21664.dtsi
arch/arm/boot/dts/bcm2835.dtsi
arch/arm/boot/dts/bcm2836.dtsi
arch/arm/boot/dts/bcm2837.dtsi
arch/arm/boot/dts/bcm283x.dtsi
arch/arm/boot/dts/bcm958625hr.dts
arch/arm/boot/dts/gemini-dlink-dns-313.dts
arch/arm/boot/dts/imx6dl-icore-rqs.dts
arch/arm/boot/dts/logicpd-som-lv.dtsi
arch/arm/boot/dts/logicpd-torpedo-som.dtsi
arch/arm/boot/dts/omap5-uevm.dts
arch/arm/boot/dts/rk3036.dtsi
arch/arm/boot/dts/rk322x.dtsi
arch/arm/boot/dts/rk3288-phycore-som.dtsi
arch/arm/boot/dts/zx296702.dtsi
arch/arm/configs/omap2plus_defconfig
arch/arm/kernel/time.c
arch/arm/kvm/hyp/Makefile
arch/arm/kvm/hyp/banked-sr.c
arch/arm/mach-clps711x/board-dt.c
arch/arm/mach-davinci/board-dm355-evm.c
arch/arm/mach-davinci/board-dm355-leopard.c
arch/arm/mach-davinci/board-dm365-evm.c
arch/arm/mach-mvebu/Kconfig
arch/arm/mach-omap1/clock.c
arch/arm/mach-omap2/omap-wakeupgen.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/pm.c
arch/arm/mach-omap2/timer.c
arch/arm/mach-ux500/cpu-db8500.c
arch/arm/plat-orion/common.c
arch/arm64/boot/dts/amlogic/meson-axg.dtsi
arch/arm64/boot/dts/amlogic/meson-gx.dtsi
arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi
arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
arch/arm64/boot/dts/mediatek/mt8173.dtsi
arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
arch/arm64/boot/dts/qcom/msm8996.dtsi
arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
arch/arm64/boot/dts/rockchip/rk3328.dtsi
arch/arm64/boot/dts/rockchip/rk3368.dtsi
arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
arch/arm64/boot/dts/rockchip/rk3399.dtsi
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/stacktrace.h
arch/arm64/include/asm/uaccess.h
arch/arm64/kernel/armv8_deprecated.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/perf_event.c
arch/arm64/kernel/process.c
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/stacktrace.c
arch/arm64/kernel/sys_compat.c
arch/arm64/kernel/time.c
arch/arm64/kernel/traps.c
arch/arm64/mm/mmu.c
arch/arm64/net/bpf_jit_comp.c
arch/cris/include/arch-v10/arch/bug.h
arch/ia64/include/asm/bug.h
arch/m68k/include/asm/bug.h
arch/mips/boot/Makefile
arch/mips/include/asm/compat.h
arch/parisc/include/asm/cacheflush.h
arch/parisc/include/asm/processor.h
arch/parisc/kernel/cache.c
arch/parisc/kernel/head.S
arch/parisc/kernel/pacache.S
arch/parisc/kernel/smp.c
arch/parisc/kernel/time.c
arch/parisc/mm/init.c
arch/powerpc/include/asm/firmware.h
arch/powerpc/kernel/eeh_driver.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kvm/book3s_xive.c
arch/powerpc/mm/drmem.c
arch/powerpc/net/bpf_jit_comp.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/powernv/setup.c
arch/powerpc/platforms/pseries/setup.c
arch/riscv/Kconfig
arch/riscv/include/asm/barrier.h
arch/riscv/kernel/entry.S
arch/riscv/kernel/head.S
arch/riscv/kernel/setup.c
arch/s390/kvm/intercept.c
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/kvm-s390.h
arch/s390/kvm/priv.c
arch/s390/kvm/vsie.c
arch/sh/boot/dts/Makefile
arch/sparc/include/asm/bug.h
arch/x86/Kconfig
arch/x86/Makefile
arch/x86/boot/compressed/eboot.c
arch/x86/entry/calling.h
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64.S
arch/x86/entry/entry_64_compat.S
arch/x86/include/asm/apm.h
arch/x86/include/asm/asm-prototypes.h
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/efi.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/microcode.h
arch/x86/include/asm/mmu_context.h
arch/x86/include/asm/nospec-branch.h
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/paravirt_types.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_types.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/refcount.h
arch/x86/include/asm/rmwcc.h
arch/x86/include/uapi/asm/hyperv.h
arch/x86/include/uapi/asm/kvm_para.h
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/apic/vector.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
arch/x86/kernel/cpu/microcode/amd.c
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/head_64.S
arch/x86/kernel/kvm.c
arch/x86/kernel/machine_kexec_64.c
arch/x86/kernel/module.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/unwind_orc.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/lib/Makefile
arch/x86/lib/retpoline.S
arch/x86/mm/fault.c
arch/x86/mm/mem_encrypt_boot.S
arch/x86/net/bpf_jit_comp.c
arch/x86/oprofile/nmi_int.c
arch/x86/realmode/rm/trampoline_64.S
arch/x86/tools/relocs.c
arch/x86/xen/enlighten_pv.c
arch/xtensa/kernel/pci-dma.c
arch/xtensa/mm/init.c
block/blk-cgroup.c
block/blk-core.c
block/blk-mq.c
block/genhd.c
block/ioctl.c
block/kyber-iosched.c
block/mq-deadline.c
block/partition-generic.c
block/sed-opal.c
certs/blacklist_nohashes.c
crypto/asymmetric_keys/pkcs7_trust.c
crypto/asymmetric_keys/pkcs7_verify.c
crypto/asymmetric_keys/public_key.c
crypto/asymmetric_keys/restrict.c
drivers/android/binder.c
drivers/block/amiflop.c
drivers/block/ataflop.c
drivers/block/brd.c
drivers/block/floppy.c
drivers/block/loop.c
drivers/block/nbd.c
drivers/block/pktcdvd.c
drivers/block/swim.c
drivers/block/z2ram.c
drivers/bus/ti-sysc.c
drivers/char/tpm/st33zp24/st33zp24.c
drivers/char/tpm/tpm-interface.c
drivers/char/tpm/tpm2-cmd.c
drivers/char/tpm/tpm_i2c_infineon.c
drivers/char/tpm/tpm_i2c_nuvoton.c
drivers/char/tpm/tpm_tis_core.c
drivers/clocksource/mips-gic-timer.c
drivers/clocksource/timer-sun5i.c
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/s3c24xx-cpufreq.c
drivers/cpufreq/scpi-cpufreq.c
drivers/crypto/ccp/psp-dev.c
drivers/crypto/s5p-sss.c
drivers/edac/sb_edac.c
drivers/extcon/extcon-axp288.c
drivers/extcon/extcon-intel-int3496.c
drivers/gpio/gpiolib-of.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
drivers/gpu/drm/amd/powerplay/amd_powerplay.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/cirrus/cirrus_mode.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_framebuffer.c
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/drm_probe_helper.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_rotator.h [deleted file]
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/exynos/regs-fimc.h
drivers/gpu/drm/exynos/regs-hdmi.h
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_request.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_audio.c
drivers/gpu/drm/meson/meson_crtc.c
drivers/gpu/drm/meson/meson_drv.h
drivers/gpu/drm/meson/meson_plane.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/scheduler/gpu_scheduler.c
drivers/gpu/drm/sun4i/sun4i_tcon.c
drivers/gpu/drm/virtio/virtgpu_ioctl.c
drivers/gpu/ipu-v3/ipu-common.c
drivers/gpu/ipu-v3/ipu-cpmem.c
drivers/gpu/ipu-v3/ipu-csi.c
drivers/gpu/ipu-v3/ipu-pre.c
drivers/gpu/ipu-v3/ipu-prg.c
drivers/hid/hid-ids.h
drivers/hid/hid-quirks.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-bcm2835.c
drivers/i2c/busses/i2c-designware-master.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-sirf.c
drivers/ide/ide-probe.c
drivers/iio/adc/aspeed_adc.c
drivers/iio/adc/stm32-adc.c
drivers/iio/imu/adis_trigger.c
drivers/iio/industrialio-buffer.c
drivers/iio/proximity/Kconfig
drivers/infiniband/core/core_priv.h
drivers/infiniband/core/rdma_core.c
drivers/infiniband/core/restrack.c
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/uverbs_ioctl.c
drivers/infiniband/core/uverbs_ioctl_merge.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/core/uverbs_std_types.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/bnxt_re/bnxt_re.h
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/ib_verbs.h
drivers/infiniband/hw/bnxt_re/main.c
drivers/infiniband/hw/bnxt_re/qplib_fp.c
drivers/infiniband/hw/bnxt_re/qplib_fp.h
drivers/infiniband/hw/bnxt_re/qplib_sp.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
drivers/infiniband/ulp/ipoib/ipoib_fs.c
drivers/iommu/intel-svm.c
drivers/md/bcache/request.c
drivers/md/bcache/super.c
drivers/md/md-multipath.c
drivers/md/md.c
drivers/md/md.h
drivers/md/raid1.c
drivers/md/raid1.h
drivers/md/raid10.c
drivers/md/raid10.h
drivers/md/raid5-log.h
drivers/md/raid5-ppl.c
drivers/md/raid5.c
drivers/md/raid5.h
drivers/media/Kconfig
drivers/media/common/videobuf2/Kconfig
drivers/media/common/videobuf2/Makefile
drivers/media/common/videobuf2/vb2-trace.c [moved from drivers/media/v4l2-core/vb2-trace.c with 100% similarity]
drivers/media/dvb-core/Makefile
drivers/media/dvb-core/dmxdev.c
drivers/media/dvb-core/dvb_demux.c
drivers/media/dvb-core/dvb_net.c
drivers/media/dvb-core/dvb_vb2.c
drivers/media/dvb-frontends/m88ds3103.c
drivers/media/i2c/tvp5150.c
drivers/media/pci/ttpci/av7110.c
drivers/media/pci/ttpci/av7110_av.c
drivers/media/usb/au0828/Kconfig
drivers/media/usb/ttusb-dec/ttusb_dec.c
drivers/media/v4l2-core/Kconfig
drivers/media/v4l2-core/Makefile
drivers/memory/brcmstb_dpfe.c
drivers/message/fusion/mptctl.c
drivers/misc/mei/bus.c
drivers/misc/mei/client.c
drivers/misc/mei/hw-me-regs.h
drivers/misc/mei/pci-me.c
drivers/misc/ocxl/file.c
drivers/mmc/core/mmc_ops.c
drivers/mmc/host/dw_mmc-exynos.c
drivers/mmc/host/dw_mmc-k3.c
drivers/mmc/host/dw_mmc-rockchip.c
drivers/mmc/host/dw_mmc-zx.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/dw_mmc.h
drivers/mmc/host/sdhci-pci-core.c
drivers/net/ethernet/amd/xgbe/xgbe-pci.c
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/broadcom/tg3.h
drivers/net/ethernet/cavium/common/cavium_ptp.c
drivers/net/ethernet/cavium/thunder/nicvf_main.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.h
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/marvell/mvpp2.c
drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/smsc/Kconfig
drivers/net/macvlan.c
drivers/net/phy/phy_device.c
drivers/net/thunderbolt.c
drivers/net/tun.c
drivers/net/usb/smsc75xx.c
drivers/net/virtio_net.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/xen-netfront.c
drivers/nvme/host/core.c
drivers/nvme/host/fabrics.c
drivers/nvme/host/multipath.c
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/core.c
drivers/nvme/target/loop.c
drivers/pci/quirks.c
drivers/pci/setup-res.c
drivers/perf/arm_pmu.c
drivers/perf/arm_pmu_acpi.c
drivers/perf/arm_pmu_platform.c
drivers/pinctrl/meson/pinctrl-meson-axg.c
drivers/platform/x86/intel-hid.c
drivers/platform/x86/intel-vbtn.c
drivers/platform/x86/wmi.c
drivers/scsi/Makefile
drivers/scsi/aacraid/linit.c
drivers/scsi/aic7xxx/aiclib.c [deleted file]
drivers/scsi/bnx2fc/bnx2fc_io.c
drivers/scsi/csiostor/csio_lnode.c
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/ibmvscsi/ibmvfc.h
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/qedi/qedi_main.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla4xxx/ql4_def.h
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/storvsc_drv.c
drivers/scsi/sym53c8xx_2/sym_hipd.c
drivers/scsi/ufs/ufshcd.c
drivers/soc/imx/gpc.c
drivers/staging/android/ashmem.c
drivers/staging/android/ion/ion_cma_heap.c
drivers/staging/fsl-mc/bus/Kconfig
drivers/staging/iio/adc/ad7192.c
drivers/staging/iio/impedance-analyzer/ad5933.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/quirks.c
drivers/usb/dwc2/gadget.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/core.h
drivers/usb/dwc3/dwc3-of-simple.c
drivers/usb/dwc3/dwc3-omap.c
drivers/usb/dwc3/ep0.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/f_uac2.c
drivers/usb/gadget/udc/Kconfig
drivers/usb/gadget/udc/bdc/bdc_pci.c
drivers/usb/gadget/udc/core.c
drivers/usb/gadget/udc/fsl_udc_core.c
drivers/usb/gadget/udc/renesas_usb3.c
drivers/usb/host/ehci-hub.c
drivers/usb/host/ehci-q.c
drivers/usb/host/ohci-hcd.c
drivers/usb/host/ohci-hub.c
drivers/usb/host/ohci-q.c
drivers/usb/host/pci-quirks.c
drivers/usb/host/pci-quirks.h
drivers/usb/host/xhci-debugfs.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/misc/ldusb.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_host.c
drivers/usb/phy/phy-mxs-usb.c
drivers/usb/renesas_usbhs/fifo.c
drivers/usb/serial/option.c
drivers/usb/usbip/stub_dev.c
drivers/usb/usbip/vhci_hcd.c
drivers/watchdog/Kconfig
drivers/xen/events/events_base.c
drivers/xen/pvcalls-back.c
drivers/xen/pvcalls-front.c
drivers/xen/tmem.c
fs/block_dev.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/super.c
fs/ceph/super.h
fs/direct-io.c
fs/efivarfs/file.c
fs/nfs/callback_proc.c
fs/nfs/nfs3proc.c
fs/nfs/nfs4client.c
fs/signalfd.c
fs/xfs/scrub/agheader.c
fs/xfs/xfs_refcount_item.c
fs/xfs/xfs_rmap_item.c
fs/xfs/xfs_super.c
include/asm-generic/bug.h
include/drm/drm_atomic.h
include/drm/drm_crtc_helper.h
include/drm/drm_drv.h
include/linux/bio.h
include/linux/compiler-clang.h
include/linux/compiler-gcc.h
include/linux/compiler.h
include/linux/genhd.h
include/linux/init.h
include/linux/jump_label.h
include/linux/kconfig.h
include/linux/kernel.h
include/linux/kvm_host.h
include/linux/memcontrol.h
include/linux/mutex.h
include/linux/nospec.h
include/linux/perf/arm_pmu.h
include/linux/ptr_ring.h
include/linux/sched/mm.h
include/linux/sched/user.h
include/linux/skbuff.h
include/linux/swap.h
include/linux/workqueue.h
include/media/demux.h
include/media/dmxdev.h
include/media/dvb_demux.h
include/media/dvb_vb2.h
include/net/mac80211.h
include/net/regulatory.h
include/net/udplite.h
include/rdma/restrack.h
include/rdma/uverbs_ioctl.h
include/soc/arc/mcip.h
include/uapi/drm/virtgpu_drm.h
include/uapi/linux/blktrace_api.h
include/uapi/linux/dvb/dmx.h
include/uapi/linux/if_ether.h
include/uapi/linux/kvm.h
include/uapi/linux/libc-compat.h
include/uapi/linux/psp-sev.h
include/uapi/linux/ptrace.h
include/uapi/rdma/rdma_user_ioctl.h
init/main.c
kernel/bpf/arraymap.c
kernel/bpf/core.c
kernel/bpf/cpumap.c
kernel/bpf/lpm_trie.c
kernel/bpf/sockmap.c
kernel/extable.c
kernel/fork.c
kernel/irq/matrix.c
kernel/jump_label.c
kernel/printk/printk.c
kernel/relay.c
kernel/seccomp.c
kernel/trace/bpf_trace.c
kernel/user.c
kernel/workqueue.c
lib/Kconfig.debug
lib/dma-debug.c
lib/idr.c
lib/radix-tree.c
lib/vsprintf.c
mm/mlock.c
mm/page_alloc.c
mm/swap.c
mm/vmalloc.c
mm/vmscan.c
mm/zpool.c
mm/zswap.c
net/bridge/br_sysfs_if.c
net/bridge/netfilter/ebt_among.c
net/bridge/netfilter/ebt_limit.c
net/ceph/ceph_common.c
net/core/dev.c
net/core/filter.c
net/core/gen_estimator.c
net/decnet/af_decnet.c
net/ipv4/fib_semantics.c
net/ipv4/ip_sockglue.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/netfilter/ipt_ECN.c
net/ipv4/netfilter/ipt_REJECT.c
net/ipv4/netfilter/ipt_rpfilter.c
net/ipv4/route.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv6/ip6_checksum.c
net/ipv6/ipv6_sockglue.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/ip6t_REJECT.c
net/ipv6/netfilter/ip6t_rpfilter.c
net/ipv6/netfilter/ip6t_srh.c
net/ipv6/sit.c
net/mac80211/agg-rx.c
net/mac80211/cfg.c
net/mac80211/ieee80211_i.h
net/mac80211/mesh.c
net/mac80211/spectmgmt.c
net/mac80211/sta_info.c
net/netfilter/nf_nat_proto_common.c
net/netfilter/x_tables.c
net/netfilter/xt_AUDIT.c
net/netfilter/xt_CHECKSUM.c
net/netfilter/xt_CONNSECMARK.c
net/netfilter/xt_CT.c
net/netfilter/xt_DSCP.c
net/netfilter/xt_HL.c
net/netfilter/xt_HMARK.c
net/netfilter/xt_IDLETIMER.c
net/netfilter/xt_LED.c
net/netfilter/xt_NFQUEUE.c
net/netfilter/xt_SECMARK.c
net/netfilter/xt_TCPMSS.c
net/netfilter/xt_TPROXY.c
net/netfilter/xt_addrtype.c
net/netfilter/xt_bpf.c
net/netfilter/xt_cgroup.c
net/netfilter/xt_cluster.c
net/netfilter/xt_connbytes.c
net/netfilter/xt_connlabel.c
net/netfilter/xt_connmark.c
net/netfilter/xt_conntrack.c
net/netfilter/xt_dscp.c
net/netfilter/xt_ecn.c
net/netfilter/xt_hashlimit.c
net/netfilter/xt_helper.c
net/netfilter/xt_ipcomp.c
net/netfilter/xt_ipvs.c
net/netfilter/xt_l2tp.c
net/netfilter/xt_limit.c
net/netfilter/xt_nat.c
net/netfilter/xt_nfacct.c
net/netfilter/xt_physdev.c
net/netfilter/xt_policy.c
net/netfilter/xt_recent.c
net/netfilter/xt_set.c
net/netfilter/xt_socket.c
net/netfilter/xt_state.c
net/netfilter/xt_time.c
net/netlink/af_netlink.c
net/nfc/llcp_commands.c
net/nfc/netlink.c
net/rds/connection.c
net/rxrpc/output.c
net/rxrpc/recvmsg.c
net/sched/cls_api.c
net/sched/cls_u32.c
net/sctp/debug.c
net/sctp/input.c
net/sctp/stream.c
net/sctp/stream_interleave.c
net/tipc/bearer.c
net/tipc/bearer.h
net/tipc/net.c
net/tipc/net.h
net/tipc/netlink_compat.c
net/tls/tls_main.c
net/unix/af_unix.c
net/wireless/mesh.c
net/wireless/sme.c
samples/seccomp/Makefile
scripts/Makefile.build
scripts/coccinelle/api/memdup.cocci
scripts/kallsyms.c
scripts/kconfig/confdata.c
scripts/kconfig/kxgettext.c
scripts/kconfig/lkc.h
scripts/kconfig/lxdialog/check-lxdialog.sh
scripts/kconfig/menu.c
scripts/kconfig/symbol.c
scripts/kconfig/util.c
scripts/kconfig/zconf.l
scripts/kconfig/zconf.y
scripts/link-vmlinux.sh
security/integrity/digsig.c
security/keys/big_key.c
sound/core/control.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/usb/quirks-table.h
sound/x86/intel_hdmi_audio.c
tools/bpf/bpftool/main.c
tools/bpf/bpftool/prog.c
tools/cgroup/Makefile
tools/gpio/Makefile
tools/hv/Makefile
tools/iio/Makefile
tools/kvm/kvm_stat/kvm_stat
tools/kvm/kvm_stat/kvm_stat.txt
tools/laptop/freefall/Makefile
tools/leds/Makefile
tools/lib/bpf/libbpf.c
tools/objtool/builtin-check.c
tools/objtool/builtin-orc.c
tools/objtool/builtin.h
tools/objtool/check.c
tools/objtool/check.h
tools/perf/Makefile.perf
tools/power/acpi/Makefile.config
tools/scripts/Makefile.include
tools/spi/Makefile
tools/testing/radix-tree/idr-test.c
tools/testing/radix-tree/linux.c
tools/testing/radix-tree/linux/compiler_types.h [new file with mode: 0644]
tools/testing/radix-tree/linux/gfp.h
tools/testing/radix-tree/linux/slab.h
tools/testing/selftests/android/Makefile
tools/testing/selftests/bpf/.gitignore
tools/testing/selftests/bpf/test_maps.c
tools/testing/selftests/bpf/test_tcpbpf_kern.c
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/futex/Makefile
tools/testing/selftests/memfd/Makefile
tools/testing/selftests/memfd/config [new file with mode: 0644]
tools/testing/selftests/memory-hotplug/Makefile
tools/testing/selftests/pstore/config
tools/testing/selftests/seccomp/seccomp_bpf.c
tools/testing/selftests/sync/Makefile
tools/testing/selftests/vDSO/Makefile
tools/testing/selftests/vm/.gitignore
tools/usb/Makefile
tools/vm/Makefile
tools/wmi/Makefile
virt/kvm/arm/arch_timer.c
virt/kvm/kvm_main.c

index 705e09913dc237cd4eb79e269bc1a0d83ea52d99..1be78fd8163bd10677163ae10b36b874a4c615a4 100644 (file)
@@ -127,3 +127,7 @@ all.config
 
 # Kdevelop4
 *.kdev4
+
+#Automatically generated by ASN.1 compiler
+net/ipv4/netfilter/nf_nat_snmp_basic-asn1.c
+net/ipv4/netfilter/nf_nat_snmp_basic-asn1.h
index 611a75e4366ed2404b807dc03f1159f65d0a819a..badb26ac33dc8eec08d38c3ee13c24380568aadc 100644 (file)
@@ -570,7 +570,9 @@ your driver if they're helpful, or just use plain hex constants.
 The device IDs are arbitrary hex numbers (vendor controlled) and normally used
 only in a single location, the pci_device_id table.
 
-Please DO submit new vendor/device IDs to http://pciids.sourceforge.net/.
+Please DO submit new vendor/device IDs to http://pci-ids.ucw.cz/.
+There are mirrors of the pci.ids file at http://pciids.sourceforge.net/
+and https://github.com/pciutils/pciids.
 
 
 
index 3c81f78b5c27d50bb809cf6bbf13766a779024f5..5d254ab13ebf384034b7e8629365a7b0070c2af3 100644 (file)
@@ -60,7 +60,7 @@ Examples
                #size-cells = <0>;
 
                button@1 {
-                       debounce_interval = <50>;
+                       debounce-interval = <50>;
                        wakeup-source;
                        linux,code = <116>;
                        label = "POWER";
index 28be51afdb6a2623a9eb9465a86e0ec3fa7a1d7a..379eb763073e68d4a63b540c6ca3384a3b09f642 100644 (file)
@@ -22,7 +22,32 @@ Optional properties:
 - clocks : thermal sensor's clock source.
 
 Example:
+ocotp: ocotp@21bc000 {
+       #address-cells = <1>;
+       #size-cells = <1>;
+       compatible = "fsl,imx6sx-ocotp", "syscon";
+       reg = <0x021bc000 0x4000>;
+       clocks = <&clks IMX6SX_CLK_OCOTP>;
 
+       tempmon_calib: calib@38 {
+               reg = <0x38 4>;
+       };
+
+       tempmon_temp_grade: temp-grade@20 {
+               reg = <0x20 4>;
+       };
+};
+
+tempmon: tempmon {
+       compatible = "fsl,imx6sx-tempmon", "fsl,imx6q-tempmon";
+       interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+       fsl,tempmon = <&anatop>;
+       nvmem-cells = <&tempmon_calib>, <&tempmon_temp_grade>;
+       nvmem-cell-names = "calib", "temp_grade";
+       clocks = <&clks IMX6SX_CLK_PLL3_USB_OTG>;
+};
+
+Legacy method (Deprecated):
 tempmon {
        compatible = "fsl,imx6q-tempmon";
        fsl,tempmon = <&anatop>;
index 69b17b324e1273bd5353cf9e19bbbee428bfe917..152ea9398f7e56f00860ebfb3f283c8ad67e3b45 100644 (file)
@@ -3,4 +3,4 @@
 ==================================
 
 .. kernel-doc:: drivers/gpu/drm/tve200/tve200_drv.c
-   :doc: Faraday TV Encoder 200
+   :doc: Faraday TV Encoder TVE200 DRM Driver
index d477024569269ff741720537ef609cafb60166f2..65514c25131877a77b5b0a85bbb8797129b87dca 100644 (file)
@@ -28,8 +28,10 @@ Supported adapters:
   * Intel Wildcat Point (PCH)
   * Intel Wildcat Point-LP (PCH)
   * Intel BayTrail (SOC)
+  * Intel Braswell (SOC)
   * Intel Sunrise Point-H (PCH)
   * Intel Sunrise Point-LP (PCH)
+  * Intel Kaby Lake-H (PCH)
   * Intel DNV (SOC)
   * Intel Broxton (SOC)
   * Intel Lewisburg (PCH)
index 63f55a9ae2b1ffa54d088eb60b7e79e36c050f6e..a8c4239ed95bab6c87d8adcdb4edfa52de981099 100644 (file)
@@ -50,9 +50,15 @@ replace typedef dmx_filter_t :c:type:`dmx_filter`
 replace typedef dmx_pes_type_t :c:type:`dmx_pes_type`
 replace typedef dmx_input_t :c:type:`dmx_input`
 
-ignore symbol DMX_OUT_DECODER
-ignore symbol DMX_OUT_TAP
-ignore symbol DMX_OUT_TS_TAP
-ignore symbol DMX_OUT_TSDEMUX_TAP
+replace symbol DMX_BUFFER_FLAG_HAD_CRC32_DISCARD :c:type:`dmx_buffer_flags`
+replace        symbol DMX_BUFFER_FLAG_TEI :c:type:`dmx_buffer_flags`
+replace        symbol DMX_BUFFER_PKT_COUNTER_MISMATCH :c:type:`dmx_buffer_flags`
+replace        symbol DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED :c:type:`dmx_buffer_flags`
+replace        symbol DMX_BUFFER_FLAG_DISCONTINUITY_INDICATOR :c:type:`dmx_buffer_flags`
+
+replace symbol DMX_OUT_DECODER :c:type:`dmx_output`
+replace symbol DMX_OUT_TAP :c:type:`dmx_output`
+replace symbol DMX_OUT_TS_TAP :c:type:`dmx_output`
+replace symbol DMX_OUT_TSDEMUX_TAP :c:type:`dmx_output`
 
 replace ioctl DMX_DQBUF dmx_qbuf
index b48c4931658eda71586ec7ea4ddb380df518170f..be5a4c6f19040c6e5bf1acbb34f9598e6eec0b2f 100644 (file)
@@ -51,9 +51,10 @@ out to disk. Buffers remain locked until dequeued, until the
 the device is closed.
 
 Applications call the ``DMX_DQBUF`` ioctl to dequeue a filled
-(capturing) buffer from the driver's outgoing queue. They just set the ``reserved`` field array to zero. When ``DMX_DQBUF`` is called with a
-pointer to this structure, the driver fills the remaining fields or
-returns an error code.
+(capturing) buffer from the driver's outgoing queue.
+They just set the ``index`` field withe the buffer ID to be queued.
+When ``DMX_DQBUF`` is called with a pointer to struct :c:type:`dmx_buffer`,
+the driver fills the remaining fields or returns an error code.
 
 By default ``DMX_DQBUF`` blocks when no buffer is in the outgoing
 queue. When the ``O_NONBLOCK`` flag was given to the
index 2f09455a993a13eb8e1774e1317f9ef31bd2dacf..d47480b61ac6d0611c0e1cbfe378c14941f1cfb5 100644 (file)
@@ -13,6 +13,7 @@ The following technologies are described:
  * Generic Segmentation Offload - GSO
  * Generic Receive Offload - GRO
  * Partial Generic Segmentation Offload - GSO_PARTIAL
+ * SCTP accelleration with GSO - GSO_BY_FRAGS
 
 TCP Segmentation Offload
 ========================
@@ -49,6 +50,10 @@ datagram into multiple IPv4 fragments.  Many of the requirements for UDP
 fragmentation offload are the same as TSO.  However the IPv4 ID for
 fragments should not increment as a single IPv4 datagram is fragmented.
 
+UFO is deprecated: modern kernels will no longer generate UFO skbs, but can
+still receive them from tuntap and similar devices. Offload of UDP-based
+tunnel protocols is still supported.
+
 IPIP, SIT, GRE, UDP Tunnel, and Remote Checksum Offloads
 ========================================================
 
@@ -83,10 +88,10 @@ SKB_GSO_UDP_TUNNEL_CSUM.  These two additional tunnel types reflect the
 fact that the outer header also requests to have a non-zero checksum
 included in the outer header.
 
-Finally there is SKB_GSO_REMCSUM which indicates that a given tunnel header
-has requested a remote checksum offload.  In this case the inner headers
-will be left with a partial checksum and only the outer header checksum
-will be computed.
+Finally there is SKB_GSO_TUNNEL_REMCSUM which indicates that a given tunnel
+header has requested a remote checksum offload.  In this case the inner
+headers will be left with a partial checksum and only the outer header
+checksum will be computed.
 
 Generic Segmentation Offload
 ============================
@@ -128,3 +133,28 @@ values for if the header was simply duplicated.  The one exception to this
 is the outer IPv4 ID field.  It is up to the device drivers to guarantee
 that the IPv4 ID field is incremented in the case that a given header does
 not have the DF bit set.
+
+SCTP accelleration with GSO
+===========================
+
+SCTP - despite the lack of hardware support - can still take advantage of
+GSO to pass one large packet through the network stack, rather than
+multiple small packets.
+
+This requires a different approach to other offloads, as SCTP packets
+cannot be just segmented to (P)MTU. Rather, the chunks must be contained in
+IP segments, padding respected. So unlike regular GSO, SCTP can't just
+generate a big skb, set gso_size to the fragmentation point and deliver it
+to IP layer.
+
+Instead, the SCTP protocol layer builds an skb with the segments correctly
+padded and stored as chained skbs, and skb_segment() splits based on those.
+To signal this, gso_size is set to the special value GSO_BY_FRAGS.
+
+Therefore, any code in the core networking stack must be aware of the
+possibility that gso_size will be GSO_BY_FRAGS and handle that case
+appropriately. (For size checks, the skb_gso_validate_*_len family of
+helpers do this automatically.)
+
+This also affects drivers with the NETIF_F_FRAGLIST & NETIF_F_GSO_SCTP bits
+set. Note also that NETIF_F_GSO_SCTP is included in NETIF_F_GSO_SOFTWARE.
index 792fa8717d133e1aa7d6c73a8b948d53150e6d78..d6b3ff51a14fd96600cc9f65f8ed11cd5c64b768 100644 (file)
@@ -123,14 +123,15 @@ memory layout to fit in user mode), check KVM_CAP_MIPS_VZ and use the
 flag KVM_VM_MIPS_VZ.
 
 
-4.3 KVM_GET_MSR_INDEX_LIST
+4.3 KVM_GET_MSR_INDEX_LIST, KVM_GET_MSR_FEATURE_INDEX_LIST
 
-Capability: basic
+Capability: basic, KVM_CAP_GET_MSR_FEATURES for KVM_GET_MSR_FEATURE_INDEX_LIST
 Architectures: x86
-Type: system
+Type: system ioctl
 Parameters: struct kvm_msr_list (in/out)
 Returns: 0 on success; -1 on error
 Errors:
+  EFAULT:    the msr index list cannot be read from or written to
   E2BIG:     the msr index list is to be to fit in the array specified by
              the user.
 
@@ -139,16 +140,23 @@ struct kvm_msr_list {
        __u32 indices[0];
 };
 
-This ioctl returns the guest msrs that are supported.  The list varies
-by kvm version and host processor, but does not change otherwise.  The
-user fills in the size of the indices array in nmsrs, and in return
-kvm adjusts nmsrs to reflect the actual number of msrs and fills in
-the indices array with their numbers.
+The user fills in the size of the indices array in nmsrs, and in return
+kvm adjusts nmsrs to reflect the actual number of msrs and fills in the
+indices array with their numbers.
+
+KVM_GET_MSR_INDEX_LIST returns the guest msrs that are supported.  The list
+varies by kvm version and host processor, but does not change otherwise.
 
 Note: if kvm indicates supports MCE (KVM_CAP_MCE), then the MCE bank MSRs are
 not returned in the MSR list, as different vcpus can have a different number
 of banks, as set via the KVM_X86_SETUP_MCE ioctl.
 
+KVM_GET_MSR_FEATURE_INDEX_LIST returns the list of MSRs that can be passed
+to the KVM_GET_MSRS system ioctl.  This lets userspace probe host capabilities
+and processor features that are exposed via MSRs (e.g., VMX capabilities).
+This list also varies by kvm version and host processor, but does not change
+otherwise.
+
 
 4.4 KVM_CHECK_EXTENSION
 
@@ -475,14 +483,22 @@ Support for this has been removed.  Use KVM_SET_GUEST_DEBUG instead.
 
 4.18 KVM_GET_MSRS
 
-Capability: basic
+Capability: basic (vcpu), KVM_CAP_GET_MSR_FEATURES (system)
 Architectures: x86
-Type: vcpu ioctl
+Type: system ioctl, vcpu ioctl
 Parameters: struct kvm_msrs (in/out)
-Returns: 0 on success, -1 on error
+Returns: number of msrs successfully returned;
+        -1 on error
+
+When used as a system ioctl:
+Reads the values of MSR-based features that are available for the VM.  This
+is similar to KVM_GET_SUPPORTED_CPUID, but it returns MSR indices and values.
+The list of msr-based features can be obtained using KVM_GET_MSR_FEATURE_INDEX_LIST
+in a system ioctl.
 
+When used as a vcpu ioctl:
 Reads model-specific registers from the vcpu.  Supported msr indices can
-be obtained using KVM_GET_MSR_INDEX_LIST.
+be obtained using KVM_GET_MSR_INDEX_LIST in a system ioctl.
 
 struct kvm_msrs {
        __u32 nmsrs; /* number of msrs in entries */
index dcab6dc11e3b08117456f10903ad3ac2fa29eb99..87a7506f31c2b7f9b03be131ba3b3ef1c85f360b 100644 (file)
@@ -58,6 +58,10 @@ KVM_FEATURE_PV_TLB_FLUSH           ||     9 || guest checks this feature bit
                                    ||       || before enabling paravirtualized
                                    ||       || tlb flush.
 ------------------------------------------------------------------------------
+KVM_FEATURE_ASYNC_PF_VMEXIT        ||    10 || paravirtualized async PF VM exit
+                                   ||       || can be enabled by setting bit 2
+                                   ||       || when writing to msr 0x4b564d02
+------------------------------------------------------------------------------
 KVM_FEATURE_CLOCKSOURCE_STABLE_BIT ||    24 || host will warn if no guest-side
                                    ||       || per-cpu warps are expected in
                                    ||       || kvmclock.
index 1ebecc115dc6efdbbfa76eb4ab329f5b1d8b765e..f3f0d57ced8e1827fe8e8e6dc49808b18c9253fb 100644 (file)
@@ -170,7 +170,8 @@ MSR_KVM_ASYNC_PF_EN: 0x4b564d02
        when asynchronous page faults are enabled on the vcpu 0 when
        disabled. Bit 1 is 1 if asynchronous page faults can be injected
        when vcpu is in cpl == 0. Bit 2 is 1 if asynchronous page faults
-       are delivered to L1 as #PF vmexits.
+       are delivered to L1 as #PF vmexits.  Bit 2 can be set only if
+       KVM_FEATURE_ASYNC_PF_VMEXIT is present in CPUID.
 
        First 4 byte of 64 byte memory location will be written to by
        the hypervisor at the time of asynchronous page fault (APF)
index f3e9d7e9ed6cbcbe4e731881ffb5203925b758db..2953e3ec9a0259f102ce40b9c28340f761c2df4e 100644 (file)
@@ -108,7 +108,7 @@ The topology of a system is described in the units of:
 
     The number of online threads is also printed in /proc/cpuinfo "siblings."
 
-  - topology_sibling_mask():
+  - topology_sibling_cpumask():
 
     The cpumask contains all online threads in the core to which a thread
     belongs.
index 9a7f76eadae9a51c5b49e5faadf8aa0aef19f15c..4623caf8d72d8591b87a300b35b5ad0aca10f6a5 100644 (file)
@@ -1238,7 +1238,7 @@ F:        drivers/clk/at91
 
 ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT
 M:     Nicolas Ferre <nicolas.ferre@microchip.com>
-M:     Alexandre Belloni <alexandre.belloni@free-electrons.com>
+M:     Alexandre Belloni <alexandre.belloni@bootlin.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.linux4sam.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/nferre/linux-at91.git
@@ -1590,7 +1590,7 @@ ARM/Marvell Dove/MV78xx0/Orion SOC support
 M:     Jason Cooper <jason@lakedaemon.net>
 M:     Andrew Lunn <andrew@lunn.ch>
 M:     Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
-M:     Gregory Clement <gregory.clement@free-electrons.com>
+M:     Gregory Clement <gregory.clement@bootlin.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     Documentation/devicetree/bindings/soc/dove/
@@ -1604,7 +1604,7 @@ F:        arch/arm/boot/dts/orion5x*
 ARM/Marvell Kirkwood and Armada 370, 375, 38x, 39x, XP, 3700, 7K/8K SOC support
 M:     Jason Cooper <jason@lakedaemon.net>
 M:     Andrew Lunn <andrew@lunn.ch>
-M:     Gregory Clement <gregory.clement@free-electrons.com>
+M:     Gregory Clement <gregory.clement@bootlin.com>
 M:     Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
@@ -1999,8 +1999,10 @@ M:       Maxime Coquelin <mcoquelin.stm32@gmail.com>
 M:     Alexandre Torgue <alexandre.torgue@st.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mcoquelin/stm32.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/atorgue/stm32.git stm32-next
 N:     stm32
+F:     arch/arm/boot/dts/stm32*
+F:     arch/arm/mach-stm32/
 F:     drivers/clocksource/armv7m_systick.c
 
 ARM/TANGO ARCHITECTURE
@@ -7600,8 +7602,10 @@ F:       mm/kasan/
 F:     scripts/Makefile.kasan
 
 KCONFIG
+M:     Masahiro Yamada <yamada.masahiro@socionext.com>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git kconfig
 L:     linux-kbuild@vger.kernel.org
-S:     Orphan
+S:     Maintained
 F:     Documentation/kbuild/kconfig-language.txt
 F:     scripts/kconfig/
 
@@ -7909,7 +7913,6 @@ S:        Maintained
 F:     scripts/leaking_addresses.pl
 
 LED SUBSYSTEM
-M:     Richard Purdie <rpurdie@rpsys.net>
 M:     Jacek Anaszewski <jacek.anaszewski@gmail.com>
 M:     Pavel Machek <pavel@ucw.cz>
 L:     linux-leds@vger.kernel.org
@@ -10927,6 +10930,17 @@ L:     linux-gpio@vger.kernel.org
 S:     Supported
 F:     drivers/pinctrl/pinctrl-at91-pio4.*
 
+PIN CONTROLLER - FREESCALE
+M:     Dong Aisheng <aisheng.dong@nxp.com>
+M:     Fabio Estevam <festevam@gmail.com>
+M:     Shawn Guo <shawnguo@kernel.org>
+M:     Stefan Agner <stefan@agner.ch>
+R:     Pengutronix Kernel Team <kernel@pengutronix.de>
+L:     linux-gpio@vger.kernel.org
+S:     Maintained
+F:     drivers/pinctrl/freescale/
+F:     Documentation/devicetree/bindings/pinctrl/fsl,*
+
 PIN CONTROLLER - INTEL
 M:     Mika Westerberg <mika.westerberg@linux.intel.com>
 M:     Heikki Krogerus <heikki.krogerus@linux.intel.com>
index d9cf3a40eda9d20ce03ceda2ebc921a95dc2aea7..541f0c5b71c34a004d989ba709fd5b1620491202 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -388,7 +388,7 @@ PYTHON              = python
 CHECK          = sparse
 
 CHECKFLAGS     := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
-                 -Wbitwise -Wno-return-void $(CF)
+                 -Wbitwise -Wno-return-void -Wno-unknown-attribute $(CF)
 NOSTDINC_FLAGS  =
 CFLAGS_MODULE   =
 AFLAGS_MODULE   =
@@ -489,6 +489,11 @@ KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
 KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
 endif
 
+RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
+RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
+RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
+export RETPOLINE_CFLAGS
+
 ifeq ($(config-targets),1)
 # ===========================================================================
 # *config targets only - make sure prerequisites are updated, and descend
@@ -579,10 +584,9 @@ ifeq ($(KBUILD_EXTMOD),)
 # To avoid any implicit rule to kick in, define an empty command
 $(KCONFIG_CONFIG) include/config/auto.conf.cmd: ;
 
-# If .config is newer than include/config/auto.conf, someone tinkered
-# with it and forgot to run make oldconfig.
-# if auto.conf.cmd is missing then we are probably in a cleaned tree so
-# we execute the config step to be sure to catch updated Kconfig files
+# The actual configuration files used during the build are stored in
+# include/generated/ and include/config/. Update them if .config is newer than
+# include/config/auto.conf (which mirrors .config).
 include/config/%.conf: $(KCONFIG_CONFIG) include/config/auto.conf.cmd
        $(Q)$(MAKE) -f $(srctree)/Makefile silentoldconfig
 else
@@ -857,8 +861,7 @@ KBUILD_AFLAGS   += $(ARCH_AFLAGS)   $(KAFLAGS)
 KBUILD_CFLAGS   += $(ARCH_CFLAGS)   $(KCFLAGS)
 
 # Use --build-id when available.
-LDFLAGS_BUILD_ID := $(patsubst -Wl$(comma)%,%,\
-                             $(call cc-ldoption, -Wl$(comma)--build-id,))
+LDFLAGS_BUILD_ID := $(call ld-option, --build-id)
 KBUILD_LDFLAGS_MODULE += $(LDFLAGS_BUILD_ID)
 LDFLAGS_vmlinux += $(LDFLAGS_BUILD_ID)
 
index 46ebf14aed4e55348b1b96aa82516348e00171c3..8a2b331e43febb724f2072498b1c4b31daa6f757 100644 (file)
@@ -6,7 +6,6 @@
  * Atomic exchange routines.
  */
 
-#define __ASM__MB
 #define ____xchg(type, args...)                __xchg ## type ## _local(args)
 #define ____cmpxchg(type, args...)     __cmpxchg ## type ## _local(args)
 #include <asm/xchg.h>
        cmpxchg_local((ptr), (o), (n));                                 \
 })
 
-#ifdef CONFIG_SMP
-#undef __ASM__MB
-#define __ASM__MB      "\tmb\n"
-#endif
 #undef ____xchg
 #undef ____cmpxchg
 #define ____xchg(type, args...)                __xchg ##type(args)
@@ -64,7 +59,6 @@
        cmpxchg((ptr), (o), (n));                                       \
 })
 
-#undef __ASM__MB
 #undef ____cmpxchg
 
 #endif /* _ALPHA_CMPXCHG_H */
index 68dfb3cb71454384dd187edfd0dded0fe4b65117..e2b59fac5257de10134c8d0c116d8a4827b76a09 100644 (file)
  * Atomic exchange.
  * Since it can be used to implement critical sections
  * it must clobber "memory" (also for interrupts in UP).
+ *
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
  */
 
 static inline unsigned long
@@ -19,6 +23,7 @@ ____xchg(_u8, volatile char *m, unsigned long val)
 {
        unsigned long ret, tmp, addr64;
 
+       smp_mb();
        __asm__ __volatile__(
        "       andnot  %4,7,%3\n"
        "       insbl   %1,%4,%1\n"
@@ -28,12 +33,12 @@ ____xchg(_u8, volatile char *m, unsigned long val)
        "       or      %1,%2,%2\n"
        "       stq_c   %2,0(%3)\n"
        "       beq     %2,2f\n"
-               __ASM__MB
        ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
        : "r" ((long)m), "1" (val) : "memory");
+       smp_mb();
 
        return ret;
 }
@@ -43,6 +48,7 @@ ____xchg(_u16, volatile short *m, unsigned long val)
 {
        unsigned long ret, tmp, addr64;
 
+       smp_mb();
        __asm__ __volatile__(
        "       andnot  %4,7,%3\n"
        "       inswl   %1,%4,%1\n"
@@ -52,12 +58,12 @@ ____xchg(_u16, volatile short *m, unsigned long val)
        "       or      %1,%2,%2\n"
        "       stq_c   %2,0(%3)\n"
        "       beq     %2,2f\n"
-               __ASM__MB
        ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
        : "r" ((long)m), "1" (val) : "memory");
+       smp_mb();
 
        return ret;
 }
@@ -67,17 +73,18 @@ ____xchg(_u32, volatile int *m, unsigned long val)
 {
        unsigned long dummy;
 
+       smp_mb();
        __asm__ __volatile__(
        "1:     ldl_l %0,%4\n"
        "       bis $31,%3,%1\n"
        "       stl_c %1,%2\n"
        "       beq %1,2f\n"
-               __ASM__MB
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        : "=&r" (val), "=&r" (dummy), "=m" (*m)
        : "rI" (val), "m" (*m) : "memory");
+       smp_mb();
 
        return val;
 }
@@ -87,17 +94,18 @@ ____xchg(_u64, volatile long *m, unsigned long val)
 {
        unsigned long dummy;
 
+       smp_mb();
        __asm__ __volatile__(
        "1:     ldq_l %0,%4\n"
        "       bis $31,%3,%1\n"
        "       stq_c %1,%2\n"
        "       beq %1,2f\n"
-               __ASM__MB
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        : "=&r" (val), "=&r" (dummy), "=m" (*m)
        : "rI" (val), "m" (*m) : "memory");
+       smp_mb();
 
        return val;
 }
@@ -128,10 +136,12 @@ ____xchg(, volatile void *ptr, unsigned long x, int size)
  * store NEW in MEM.  Return the initial value in MEM.  Success is
  * indicated by comparing RETURN with OLD.
  *
- * The memory barrier should be placed in SMP only when we actually
- * make the change. If we don't change anything (so if the returned
- * prev is equal to old) then we aren't acquiring anything new and
- * we don't need any memory barrier as far I can tell.
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
+ * The trailing memory barrier is placed in SMP unconditionally, in
+ * order to guarantee that dependency ordering is preserved when a
+ * dependency is headed by an unsuccessful operation.
  */
 
 static inline unsigned long
@@ -139,6 +149,7 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
 {
        unsigned long prev, tmp, cmp, addr64;
 
+       smp_mb();
        __asm__ __volatile__(
        "       andnot  %5,7,%4\n"
        "       insbl   %1,%5,%1\n"
@@ -150,13 +161,13 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
        "       or      %1,%2,%2\n"
        "       stq_c   %2,0(%4)\n"
        "       beq     %2,3f\n"
-               __ASM__MB
        "2:\n"
        ".subsection 2\n"
        "3:     br      1b\n"
        ".previous"
        : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
        : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+       smp_mb();
 
        return prev;
 }
@@ -166,6 +177,7 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
 {
        unsigned long prev, tmp, cmp, addr64;
 
+       smp_mb();
        __asm__ __volatile__(
        "       andnot  %5,7,%4\n"
        "       inswl   %1,%5,%1\n"
@@ -177,13 +189,13 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
        "       or      %1,%2,%2\n"
        "       stq_c   %2,0(%4)\n"
        "       beq     %2,3f\n"
-               __ASM__MB
        "2:\n"
        ".subsection 2\n"
        "3:     br      1b\n"
        ".previous"
        : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
        : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+       smp_mb();
 
        return prev;
 }
@@ -193,6 +205,7 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
 {
        unsigned long prev, cmp;
 
+       smp_mb();
        __asm__ __volatile__(
        "1:     ldl_l %0,%5\n"
        "       cmpeq %0,%3,%1\n"
@@ -200,13 +213,13 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
        "       mov %4,%1\n"
        "       stl_c %1,%2\n"
        "       beq %1,3f\n"
-               __ASM__MB
        "2:\n"
        ".subsection 2\n"
        "3:     br 1b\n"
        ".previous"
        : "=&r"(prev), "=&r"(cmp), "=m"(*m)
        : "r"((long) old), "r"(new), "m"(*m) : "memory");
+       smp_mb();
 
        return prev;
 }
@@ -216,6 +229,7 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
 {
        unsigned long prev, cmp;
 
+       smp_mb();
        __asm__ __volatile__(
        "1:     ldq_l %0,%5\n"
        "       cmpeq %0,%3,%1\n"
@@ -223,13 +237,13 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
        "       mov %4,%1\n"
        "       stq_c %1,%2\n"
        "       beq %1,3f\n"
-               __ASM__MB
        "2:\n"
        ".subsection 2\n"
        "3:     br 1b\n"
        ".previous"
        : "=&r"(prev), "=&r"(cmp), "=m"(*m)
        : "r"((long) old), "r"(new), "m"(*m) : "memory");
+       smp_mb();
 
        return prev;
 }
index f3a80cf164cc92a0860ddb77f5402dd7d8856ab0..d76bf4a8374016043963f1c04d42700e64ad605a 100644 (file)
@@ -484,7 +484,6 @@ config ARC_CURR_IN_REG
 
 config ARC_EMUL_UNALIGNED
        bool "Emulate unaligned memory access (userspace only)"
-       default N
        select SYSCTL_ARCH_UNALIGN_NO_WARN
        select SYSCTL_ARCH_UNALIGN_ALLOW
        depends on ISA_ARCOMPACT
index 70aec7d6ca600c2889d0a78bc3dac5b5fcb5b967..626b694c7be75ccf7868d7423aca79fb58eef586 100644 (file)
@@ -17,6 +17,6 @@
        compatible = "snps,axs101", "snps,arc-sdp";
 
        chosen {
-               bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=tty0 console=ttyS3,115200n8 consoleblank=0 video=1280x720@60";
+               bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=tty0 console=ttyS3,115200n8 consoleblank=0 video=1280x720@60 print-fatal-signals=1";
        };
 };
index 74d070cd3c13a723fef1a2b3cd91cd2919392762..47b74fbc403c21cc2f493f6f84d6216b7c5ef5c1 100644 (file)
                        };
 
                        eeprom@0x54{
-                               compatible = "24c01";
+                               compatible = "atmel,24c01";
                                reg = <0x54>;
                                pagesize = <0x8>;
                        };
 
                        eeprom@0x57{
-                               compatible = "24c04";
+                               compatible = "atmel,24c04";
                                reg = <0x57>;
                                pagesize = <0x8>;
                        };
index 215cddd0b63baa4d1f865828e1df50914526e64e..0c603308aeb360880bca3a3403a74044cb1c12bc 100644 (file)
@@ -22,7 +22,7 @@
        };
 
        chosen {
-               bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=ttyS0,115200n8 debug";
+               bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=ttyS0,115200n8 debug print-fatal-signals=1";
        };
 
        aliases {
index 5ee96b067c085ce1f061e0aac02732d63d4ecf7e..ff2f2c70c545645f7cb38a526914378edc303aa1 100644 (file)
@@ -17,7 +17,7 @@
        interrupt-parent = <&core_intc>;
 
        chosen {
-               bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8";
+               bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8 print-fatal-signals=1";
        };
 
        aliases {
index 8d787b251f73746191cfea3bd176bfb2869eccce..8e2489b16b0aeecb9bc83de255ccad3eeb33efd5 100644 (file)
@@ -24,7 +24,7 @@
        };
 
        chosen {
-               bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8";
+               bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8 print-fatal-signals=1";
        };
 
        aliases {
index 4f98ebf71fd836624b248aa0cd107fb892bb76b2..ed12f494721df91f4597b2ead8b1c7ecd5a4f5e2 100644 (file)
@@ -15,7 +15,7 @@
        interrupt-parent = <&core_intc>;
 
        chosen {
-               bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8";
+               bootargs = "earlycon=arc_uart,mmio32,0xc0fc1000,115200n8 console=ttyARC0,115200n8 print-fatal-signals=1";
        };
 
        aliases {
index 3c391ba565ed080cfad8b66f4c3395975eec90da..7842e5eb4ab5cbb73c1bac4d881d7b332979bdaa 100644 (file)
@@ -20,7 +20,7 @@
                /* this is for console on PGU */
                /* bootargs = "console=tty0 consoleblank=0"; */
                /* this is for console on serial */
-               bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24";
+               bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24 print-fatal-signals=1";
        };
 
        aliases {
index 14a727cbf4c98e12590f0ad74148f24971f772f4..b8838cf2b4ec72ab119e72f367c6ef6184a72bce 100644 (file)
@@ -20,7 +20,7 @@
                /* this is for console on PGU */
                /* bootargs = "console=tty0 consoleblank=0"; */
                /* this is for console on serial */
-               bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24";
+               bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24 print-fatal-signals=1";
        };
 
        aliases {
index 5052917d4a99490ea77d2e18cc03b97ce3722f3a..72a2c723f1f7af826b07c49a416bf3265ed39f89 100644 (file)
@@ -18,7 +18,7 @@
 
        chosen {
                /* this is for console on serial */
-               bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblan=0 debug video=640x480-24";
+               bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblan=0 debug video=640x480-24 print-fatal-signals=1";
        };
 
        aliases {
index ea022d47896cef2761bbb8a147844c506f358621..21ec82466d62c89922566ec3bf32551c2dce003b 100644 (file)
@@ -23,7 +23,8 @@ void die(const char *str, struct pt_regs *regs, unsigned long address);
 
 #define BUG()  do {                                                            \
        pr_warn("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
-       dump_stack();                                                           \
+       barrier_before_unreachable();                                           \
+       __builtin_trap();                                                       \
 } while (0)
 
 #define HAVE_ARCH_BUG
index 257a68f3c2feef3a369093f5bade883e814b7648..309f4e6721b3e22829847f88a4da884fdc9edf93 100644 (file)
 .macro FAKE_RET_FROM_EXCPN
        lr      r9, [status32]
        bic     r9, r9, (STATUS_U_MASK|STATUS_DE_MASK|STATUS_AE_MASK)
-       or      r9, r9, (STATUS_L_MASK|STATUS_IE_MASK)
+       or      r9, r9, STATUS_IE_MASK
        kflag   r9
 .endm
 
index f61a52b01625b106143b089570b327b585e5a254..5fe84e481654ebe76a483bf81c2a11ffe870322d 100644 (file)
@@ -22,10 +22,79 @@ static DEFINE_RAW_SPINLOCK(mcip_lock);
 
 static char smp_cpuinfo_buf[128];
 
+/*
+ * Set mask to halt GFRC if any online core in SMP cluster is halted.
+ * Only works for ARC HS v3.0+, on earlier versions has no effect.
+ */
+static void mcip_update_gfrc_halt_mask(int cpu)
+{
+       struct bcr_generic gfrc;
+       unsigned long flags;
+       u32 gfrc_halt_mask;
+
+       READ_BCR(ARC_REG_GFRC_BUILD, gfrc);
+
+       /*
+        * CMD_GFRC_SET_CORE and CMD_GFRC_READ_CORE commands were added in
+        * GFRC 0x3 version.
+        */
+       if (gfrc.ver < 0x3)
+               return;
+
+       raw_spin_lock_irqsave(&mcip_lock, flags);
+
+       __mcip_cmd(CMD_GFRC_READ_CORE, 0);
+       gfrc_halt_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
+       gfrc_halt_mask |= BIT(cpu);
+       __mcip_cmd_data(CMD_GFRC_SET_CORE, 0, gfrc_halt_mask);
+
+       raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
+static void mcip_update_debug_halt_mask(int cpu)
+{
+       u32 mcip_mask = 0;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&mcip_lock, flags);
+
+       /*
+        * mcip_mask is same for CMD_DEBUG_SET_SELECT and CMD_DEBUG_SET_MASK
+        * commands. So read it once instead of reading both CMD_DEBUG_READ_MASK
+        * and CMD_DEBUG_READ_SELECT.
+        */
+       __mcip_cmd(CMD_DEBUG_READ_SELECT, 0);
+       mcip_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
+
+       mcip_mask |= BIT(cpu);
+
+       __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, mcip_mask);
+       /*
+        * The command parameter specifies the halt cause:
+        * STATUS32[H]/actionpoint/breakpoint/self-halt
+        * We choose all of them (0xF).
+        */
+       __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xF, mcip_mask);
+
+       raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
 static void mcip_setup_per_cpu(int cpu)
 {
+       struct mcip_bcr mp;
+
+       READ_BCR(ARC_REG_MCIP_BCR, mp);
+
        smp_ipi_irq_setup(cpu, IPI_IRQ);
        smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
+
+       /* Update GFRC halt mask as new CPU came online */
+       if (mp.gfrc)
+               mcip_update_gfrc_halt_mask(cpu);
+
+       /* Update MCIP debug mask as new CPU came online */
+       if (mp.dbg)
+               mcip_update_debug_halt_mask(cpu);
 }
 
 static void mcip_ipi_send(int cpu)
@@ -101,11 +170,6 @@ static void mcip_probe_n_setup(void)
                IS_AVAIL1(mp.gfrc, "GFRC"));
 
        cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
-
-       if (mp.dbg) {
-               __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf);
-               __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf);
-       }
 }
 
 struct plat_smp_ops plat_smp_ops = {
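
Both new MCIP helpers above are read-modify-write updates of a cluster-wide mask with one bit per core, performed under mcip_lock so CPUs coming online concurrently cannot lose each other's bits. A rough user-space sketch of the bookkeeping follows; a plain variable stands in for the MCIP command/readback registers, and the function name is illustrative.

#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1U << (n))

/* stand-in for the state behind CMD_GFRC_READ_CORE / CMD_GFRC_SET_CORE */
static uint32_t gfrc_halt_mask;

static void update_gfrc_halt_mask(int cpu)
{
	uint32_t mask;

	/* in the kernel this read-modify-write runs under mcip_lock */
	mask = gfrc_halt_mask;		/* CMD_GFRC_READ_CORE */
	mask |= BIT(cpu);
	gfrc_halt_mask = mask;		/* CMD_GFRC_SET_CORE */
}

int main(void)
{
	for (int cpu = 0; cpu < 4; cpu++)
		update_gfrc_halt_mask(cpu);

	printf("halt mask after 4 CPUs online: 0x%x\n", gfrc_halt_mask);	/* 0xf */
	return 0;
}
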
index 9d27331fe69a0eb441b34e51324138ef375070b0..b2cae79a25d716165eaf65060cb8ed0be11f3b6c 100644 (file)
@@ -51,7 +51,7 @@ static const struct id_to_str arc_cpu_rel[] = {
        { 0x51, "R2.0" },
        { 0x52, "R2.1" },
        { 0x53, "R3.0" },
-       { 0x54, "R4.0" },
+       { 0x54, "R3.10a" },
 #endif
        { 0x00, NULL   }
 };
@@ -373,7 +373,7 @@ static void arc_chk_core_config(void)
 {
        struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
        int saved = 0, present = 0;
-       char *opt_nm = NULL;;
+       char *opt_nm = NULL;
 
        if (!cpu->extn.timer0)
                panic("Timer0 is not present!\n");
index efe8b4200a676529a9f3f0af52d50faca176a1e3..21d86c36692b4f9bbf2e6c255832d7c98377c545 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/reboot.h>
 #include <linux/irqdomain.h>
 #include <linux/export.h>
+#include <linux/of_fdt.h>
 
 #include <asm/processor.h>
 #include <asm/setup.h>
@@ -47,6 +48,42 @@ void __init smp_prepare_boot_cpu(void)
 {
 }
 
+static int __init arc_get_cpu_map(const char *name, struct cpumask *cpumask)
+{
+       unsigned long dt_root = of_get_flat_dt_root();
+       const char *buf;
+
+       buf = of_get_flat_dt_prop(dt_root, name, NULL);
+       if (!buf)
+               return -EINVAL;
+
+       if (cpulist_parse(buf, cpumask))
+               return -EINVAL;
+
+       return 0;
+}
+
+/*
+ * Read from DeviceTree and setup cpu possible mask. If there is no
+ * "possible-cpus" property in DeviceTree, pretend all [0..NR_CPUS-1] exist.
+ */
+static void __init arc_init_cpu_possible(void)
+{
+       struct cpumask cpumask;
+
+       if (arc_get_cpu_map("possible-cpus", &cpumask)) {
+               pr_warn("Failed to get possible-cpus from dtb, pretending all %u cpus exist\n",
+                       NR_CPUS);
+
+               cpumask_setall(&cpumask);
+       }
+
+       if (!cpumask_test_cpu(0, &cpumask))
+               panic("Master cpu (cpu[0]) is missed in cpu possible mask!");
+
+       init_cpu_possible(&cpumask);
+}
+
 /*
  * Called from setup_arch() before calling setup_processor()
  *
@@ -58,10 +95,7 @@ void __init smp_prepare_boot_cpu(void)
  */
 void __init smp_init_cpus(void)
 {
-       unsigned int i;
-
-       for (i = 0; i < NR_CPUS; i++)
-               set_cpu_possible(i, true);
+       arc_init_cpu_possible();
 
        if (plat_smp_ops.init_early_smp)
                plat_smp_ops.init_early_smp();
@@ -70,16 +104,12 @@ void __init smp_init_cpus(void)
 /* called from init ( ) =>  process 1 */
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-       int i;
-
        /*
         * if platform didn't set the present map already, do it now
         * boot cpu is set to present already by init/main.c
         */
-       if (num_present_cpus() <= 1) {
-               for (i = 0; i < max_cpus; i++)
-                       set_cpu_present(i, true);
-       }
+       if (num_present_cpus() <= 1)
+               init_cpu_present(cpu_possible_mask);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
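
With this change smp_init_cpus() builds cpu_possible_mask from an optional "possible-cpus" device-tree property (parsed with cpulist_parse()), falling back to all NR_CPUS when the property is absent, and insisting that the boot CPU stays possible. A hedged user-space sketch of that fallback logic, with a plain bitmask in place of struct cpumask and a stub standing in for the flat-DT lookup:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

/* stand-in for of_get_flat_dt_prop() + cpulist_parse(); pretend the
 * "possible-cpus" property is absent so the fallback path runs */
static int get_cpu_map_from_dt(const char *name, uint32_t *mask)
{
	(void)name;
	(void)mask;
	return -1;
}

int main(void)
{
	uint32_t possible;

	if (get_cpu_map_from_dt("possible-cpus", &possible)) {
		fprintf(stderr, "no possible-cpus in DT, assuming all %d cpus\n",
			NR_CPUS);
		possible = (1U << NR_CPUS) - 1;
	}

	if (!(possible & 1)) {
		fprintf(stderr, "boot cpu missing from possible mask\n");
		return 1;
	}

	printf("cpu_possible_mask: 0x%x\n", possible);
	return 0;
}
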
index 333daab7def028761478b4215bf7f50335d4f05d..183391d4d33a4138d04418da4b90d61efe38c6c4 100644 (file)
@@ -366,7 +366,7 @@ static void init_unwind_hdr(struct unwind_table *table,
        return;
 
 ret_err:
-       panic("Attention !!! Dwarf FDE parsing errors\n");;
+       panic("Attention !!! Dwarf FDE parsing errors\n");
 }
 
 #ifdef CONFIG_MODULES
index eee924dfffa6e1baf08221ee5e7e2cd23937d782..2072f3451e9c2127a076873113292a8ae2b5b9cb 100644 (file)
@@ -780,7 +780,10 @@ noinline static void slc_entire_op(const int op)
 
        write_aux_reg(r, ctrl);
 
-       write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
+       if (op & OP_INV)        /* Inv or flush-n-inv use same cmd reg */
+               write_aux_reg(ARC_REG_SLC_INVALIDATE, 0x1);
+       else
+               write_aux_reg(ARC_REG_SLC_FLUSH, 0x1);
 
        /* Make sure "busy" bit reports correct status, see STAR 9001165532 */
        read_aux_reg(r);
index 18045c38bcf1af1f01b4732ae200cf274e3b6eec..db7cded1b7ada534ef34f49c27cd0dce34d7cdcc 100644 (file)
@@ -55,7 +55,7 @@
                      <0x3ff00100 0x100>;
        };
 
-       smc@0x3404c000 {
+       smc@3404c000 {
                compatible = "brcm,bcm11351-smc", "brcm,kona-smc";
                reg = <0x3404c000 0x400>; /* 1 KiB in SRAM */
        };
index 6dde95f21cef6b53c995b7311ddade7988dd965a..266f2611dc22126705aab35cb6cb67601e9f3704 100644 (file)
@@ -55,7 +55,7 @@
                      <0x3ff00100 0x100>;
        };
 
-       smc@0x3404e000 {
+       smc@3404e000 {
                compatible = "brcm,bcm21664-smc", "brcm,kona-smc";
                reg = <0x3404e000 0x400>; /* 1 KiB in SRAM */
        };
index 0e3d2a5ff2081425bd5ccb0097736ebdef3f99a5..a5c3824c80563cf3222f77af9a6ffe39e22b1e0f 100644 (file)
        soc {
                ranges = <0x7e000000 0x20000000 0x02000000>;
                dma-ranges = <0x40000000 0x00000000 0x20000000>;
+       };
 
-               arm-pmu {
-                       compatible = "arm,arm1176-pmu";
-               };
+       arm-pmu {
+               compatible = "arm,arm1176-pmu";
        };
 };
 
index 1dfd7644277736fb58fa4ad8352091677a000d5e..c933e841388421045d908e0147012d0fca0ae670 100644 (file)
@@ -9,19 +9,19 @@
                         <0x40000000 0x40000000 0x00001000>;
                dma-ranges = <0xc0000000 0x00000000 0x3f000000>;
 
-               local_intc: local_intc {
+               local_intc: local_intc@40000000 {
                        compatible = "brcm,bcm2836-l1-intc";
                        reg = <0x40000000 0x100>;
                        interrupt-controller;
                        #interrupt-cells = <2>;
                        interrupt-parent = <&local_intc>;
                };
+       };
 
-               arm-pmu {
-                       compatible = "arm,cortex-a7-pmu";
-                       interrupt-parent = <&local_intc>;
-                       interrupts = <9 IRQ_TYPE_LEVEL_HIGH>;
-               };
+       arm-pmu {
+               compatible = "arm,cortex-a7-pmu";
+               interrupt-parent = <&local_intc>;
+               interrupts = <9 IRQ_TYPE_LEVEL_HIGH>;
        };
 
        timer {
index efa7d3387ab287fb72e66659644a36903c85e5fc..7704bb029605ed94f3ea8aad77dfe301e4b0beac 100644 (file)
@@ -8,7 +8,7 @@
                         <0x40000000 0x40000000 0x00001000>;
                dma-ranges = <0xc0000000 0x00000000 0x3f000000>;
 
-               local_intc: local_intc {
+               local_intc: local_intc@40000000 {
                        compatible = "brcm,bcm2836-l1-intc";
                        reg = <0x40000000 0x100>;
                        interrupt-controller;
index 18db25a5a66e0c1685457cd7959bdf239db5c76c..9d293decf8d353cdda502f5035212d967c364973 100644 (file)
                        status = "disabled";
                };
 
-               aux: aux@0x7e215000 {
+               aux: aux@7e215000 {
                        compatible = "brcm,bcm2835-aux";
                        #clock-cells = <1>;
                        reg = <0x7e215000 0x8>;
index 6a44b8021702176c63d09e55925ffd3d7e02994e..f0e2008f7490146a22ae06fbc310dbe5cb18c4d5 100644 (file)
@@ -49,7 +49,7 @@
 
        memory {
                device_type = "memory";
-               reg = <0x60000000 0x80000000>;
+               reg = <0x60000000 0x20000000>;
        };
 
        gpio-restart {
index 08568ce24d06fc9576b57c373ab96aedc80c23b7..da8bb9d60f99e6b5af6a3e7ff4c30215f6a91844 100644 (file)
 
                sata: sata@46000000 {
                        /* The ROM uses this muxmode */
-                       cortina,gemini-ata-muxmode = <3>;
+                       cortina,gemini-ata-muxmode = <0>;
                        cortina,gemini-enable-sata-bridge;
                        status = "okay";
                };
index cf42c2f5cdc7f9d13409efbd8db6d18d969f2511..1281bc39b7ab87a430b5edaaacf187b82526186d 100644 (file)
@@ -42,7 +42,7 @@
 
 /dts-v1/;
 
-#include "imx6q.dtsi"
+#include "imx6dl.dtsi"
 #include "imx6qdl-icore-rqs.dtsi"
 
 / {
index c1aa7a4518fbaca19e734795e25b4767a06a6005..a30ee9fcb3ae537dacabc5ff8e942ef26933bcfd 100644 (file)
@@ -71,6 +71,8 @@
 };
 
 &i2c1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&i2c1_pins>;
        clock-frequency = <2600000>;
 
        twl: twl@48 {
                >;
        };
 
-
+       i2c1_pins: pinmux_i2c1_pins {
+               pinctrl-single,pins = <
+                       OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0)        /* i2c1_scl.i2c1_scl */
+                       OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0)        /* i2c1_sda.i2c1_sda */
+               >;
+       };
 };
 
 &omap3_pmx_wkup {
index b50b796e15c778237926e91848e17802f584eb7c..47915447a82660c9a459b7041886be1cb6e52f1a 100644 (file)
@@ -66,6 +66,8 @@
 };
 
 &i2c1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&i2c1_pins>;
        clock-frequency = <2600000>;
 
        twl: twl@48 {
                        OMAP3_CORE1_IOPAD(0x21b8, PIN_INPUT | MUX_MODE0)        /* hsusb0_data7.hsusb0_data7 */
                >;
        };
+       i2c1_pins: pinmux_i2c1_pins {
+               pinctrl-single,pins = <
+                       OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0)        /* i2c1_scl.i2c1_scl */
+                       OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0)        /* i2c1_sda.i2c1_sda */
+               >;
+       };
 };
 
 &uart2 {
index ec2c8baef62ac00bf40b7ae1d28da2eab3a327d0..592e17fd4eeb7ccd1faa8f9d16635cf562092982 100644 (file)
@@ -47,7 +47,7 @@
                        gpios = <&gpio3 19 GPIO_ACTIVE_LOW>;    /* gpio3_83 */
                        wakeup-source;
                        autorepeat;
-                       debounce_interval = <50>;
+                       debounce-interval = <50>;
                };
        };
 
index 3b704cfed69ac1f7b39343925fb2037c9e78e10b..a97458112ff6e80ca198fe1377521372973e8ca4 100644 (file)
                max-frequency = <37500000>;
                clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>,
                         <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
-               clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+               clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                fifo-depth = <0x100>;
                interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
                resets = <&cru SRST_SDIO>;
                max-frequency = <37500000>;
                clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
                         <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
-               clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+               clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                default-sample-phase = <158>;
                disable-wp;
                dmas = <&pdma 12>;
index 780ec3a99b21f857b414d4d47b8e773cd28c7788..341deaf62ff621ad9122b7b9c2c9950edf5eaa71 100644 (file)
                interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
                         <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
-               clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+               clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                fifo-depth = <0x100>;
                pinctrl-names = "default";
                pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_bus4>;
                interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>,
                         <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
-               clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+               clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                fifo-depth = <0x100>;
                pinctrl-names = "default";
                pinctrl-0 = <&sdio_clk &sdio_cmd &sdio_bus4>;
                max-frequency = <37500000>;
                clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
                         <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
-               clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+               clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                bus-width = <8>;
                default-sample-phase = <158>;
                fifo-depth = <0x100>;
index 99cfae875e12e6e3ec9e333c7534a058bbe0a3e8..5eae4776ffdeece372e74cc2ba799103b99cb04f 100644 (file)
        };
 };
 
-&cpu0 {
-       cpu0-supply = <&vdd_cpu>;
-       operating-points = <
-               /* KHz    uV */
-               1800000 1400000
-               1608000 1350000
-               1512000 1300000
-               1416000 1200000
-               1200000 1100000
-               1008000 1050000
-                816000 1000000
-                696000  950000
-                600000  900000
-                408000  900000
-                312000  900000
-                216000  900000
-                126000  900000
-       >;
-};
-
 &emmc {
        status = "okay";
        bus-width = <8>;
index 8a74efdb636062e28e309e130f557e5bc582d65f..240e7a23d81ff3cc2eb2facad8a126970775a152 100644 (file)
@@ -56,7 +56,7 @@
                        clocks = <&topclk ZX296702_A9_PERIPHCLK>;
                };
 
-               l2cc: l2-cache-controller@0x00c00000 {
+               l2cc: l2-cache-controller@c00000 {
                        compatible = "arm,pl310-cache";
                        reg = <0x00c00000 0x1000>;
                        cache-unified;
                        arm,double-linefill-incr = <0>;
                };
 
-               pcu: pcu@0xa0008000 {
+               pcu: pcu@a0008000 {
                        compatible = "zte,zx296702-pcu";
                        reg = <0xa0008000 0x1000>;
                };
 
-               topclk: topclk@0x09800000 {
+               topclk: topclk@9800000 {
                        compatible = "zte,zx296702-topcrm-clk";
                        reg = <0x09800000 0x1000>;
                        #clock-cells = <1>;
                };
 
-               lsp1clk: lsp1clk@0x09400000 {
+               lsp1clk: lsp1clk@9400000 {
                        compatible = "zte,zx296702-lsp1crpm-clk";
                        reg = <0x09400000 0x1000>;
                        #clock-cells = <1>;
                };
 
-               lsp0clk: lsp0clk@0x0b000000 {
+               lsp0clk: lsp0clk@b000000 {
                        compatible = "zte,zx296702-lsp0crpm-clk";
                        reg = <0x0b000000 0x1000>;
                        #clock-cells = <1>;
                };
 
-               uart0: serial@0x09405000 {
+               uart0: serial@9405000 {
                        compatible = "zte,zx296702-uart";
                        reg = <0x09405000 0x1000>;
                        interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
@@ -98,7 +98,7 @@
                        status = "disabled";
                };
 
-               uart1: serial@0x09406000 {
+               uart1: serial@9406000 {
                        compatible = "zte,zx296702-uart";
                        reg = <0x09406000 0x1000>;
                        interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
                        status = "disabled";
                };
 
-               mmc0: mmc@0x09408000 {
+               mmc0: mmc@9408000 {
                        compatible = "snps,dw-mshc";
                        #address-cells = <1>;
                        #size-cells = <0>;
                        status = "disabled";
                };
 
-               mmc1: mmc@0x0b003000 {
+               mmc1: mmc@b003000 {
                        compatible = "snps,dw-mshc";
                        #address-cells = <1>;
                        #size-cells = <0>;
                        status = "disabled";
                };
 
-               sysctrl: sysctrl@0xa0007000 {
+               sysctrl: sysctrl@a0007000 {
                        compatible = "zte,sysctrl", "syscon";
                        reg = <0xa0007000 0x1000>;
                };
index 2f145c4af93a0fb387d86fea028d0c82d0041be8..92674f247a12a48ee603f992c075a8d6ac5c7d71 100644 (file)
@@ -319,7 +319,7 @@ CONFIG_MEDIA_CAMERA_SUPPORT=y
 CONFIG_RC_CORE=m
 CONFIG_MEDIA_CONTROLLER=y
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
-CONFIG_LIRC=m
+CONFIG_LIRC=y
 CONFIG_RC_DEVICES=y
 CONFIG_IR_RX51=m
 CONFIG_V4L_PLATFORM_DRIVERS=y
index 629f8e9981f1ee775afa792d5cd86ddbded76493..cf2701cb0de8c67b605a19bc2f21bc1aa34daad2 100644 (file)
@@ -83,7 +83,7 @@ static void dummy_clock_access(struct timespec64 *ts)
 }
 
 static clock_access_fn __read_persistent_clock = dummy_clock_access;
-static clock_access_fn __read_boot_clock = dummy_clock_access;;
+static clock_access_fn __read_boot_clock = dummy_clock_access;
 
 void read_persistent_clock64(struct timespec64 *ts)
 {
index 5638ce0c95241f7f3afd3e994aaa9993347c9be7..63d6b404d88e39bf581c0434aff5d6b0e951279f 100644 (file)
@@ -7,6 +7,8 @@ ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
 
 KVM=../../../../virt/kvm
 
+CFLAGS_ARMV7VE            :=$(call cc-option, -march=armv7ve)
+
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
@@ -15,7 +17,10 @@ obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
 obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += vfp.o
 obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o
+CFLAGS_banked-sr.o        += $(CFLAGS_ARMV7VE)
+
 obj-$(CONFIG_KVM_ARM_HOST) += entry.o
 obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
 obj-$(CONFIG_KVM_ARM_HOST) += switch.o
+CFLAGS_switch.o                   += $(CFLAGS_ARMV7VE)
 obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
index 111bda8cdebdc7e59789103838087920aedf0efe..be4b8b0a40ade5c8e412bbc75f2b311c389aa979 100644 (file)
 
 #include <asm/kvm_hyp.h>
 
+/*
+ * gcc before 4.9 doesn't understand -march=armv7ve, so we have to
+ * trick the assembler.
+ */
 __asm__(".arch_extension     virt");
 
 void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt)
index ee1f83b1a3324383b09106f01b830ea2802e210d..4c89a8e9a2e374cedfe92941661f38c2b6bc3b10 100644 (file)
@@ -69,7 +69,7 @@ static void clps711x_restart(enum reboot_mode mode, const char *cmd)
        soft_restart(0);
 }
 
-static const char *clps711x_compat[] __initconst = {
+static const char *const clps711x_compat[] __initconst = {
        "cirrus,ep7209",
        NULL
 };
index e457f299cd4430d359063788886d244a53d39f12..d6b11907380c83ddce9bc4998835a11e7a466ec7 100644 (file)
@@ -368,7 +368,7 @@ static struct spi_eeprom at25640a = {
        .flags          = EE_ADDR2,
 };
 
-static struct spi_board_info dm355_evm_spi_info[] __initconst = {
+static const struct spi_board_info dm355_evm_spi_info[] __initconst = {
        {
                .modalias       = "at25",
                .platform_data  = &at25640a,
index be997243447b949699fd116970ec0aacaffbf444..fad9a5611a5d276ce15f0fcfe9aedcc9a5d03802 100644 (file)
@@ -217,7 +217,7 @@ static struct spi_eeprom at25640a = {
        .flags          = EE_ADDR2,
 };
 
-static struct spi_board_info dm355_leopard_spi_info[] __initconst = {
+static const struct spi_board_info dm355_leopard_spi_info[] __initconst = {
        {
                .modalias       = "at25",
                .platform_data  = &at25640a,
index e75741fb2c1da095bd59ba4e0f411a6274ce6255..e3780986d2a3b40aff033f0a22759be80272cf39 100644 (file)
@@ -726,7 +726,7 @@ static struct spi_eeprom at25640 = {
        .flags          = EE_ADDR2,
 };
 
-static struct spi_board_info dm365_evm_spi_info[] __initconst = {
+static const struct spi_board_info dm365_evm_spi_info[] __initconst = {
        {
                .modalias       = "at25",
                .platform_data  = &at25640,
index 6b32dc527edcd58396dc94781992f3970cc8e793..2c20599cc3506326eddc2993afd8df2bd308e268 100644 (file)
@@ -41,7 +41,7 @@ config MACH_ARMADA_375
        depends on ARCH_MULTI_V7
        select ARMADA_370_XP_IRQ
        select ARM_ERRATA_720789
-       select ARM_ERRATA_753970
+       select PL310_ERRATA_753970
        select ARM_GIC
        select ARMADA_375_CLK
        select HAVE_ARM_SCU
@@ -57,7 +57,7 @@ config MACH_ARMADA_38X
        bool "Marvell Armada 380/385 boards"
        depends on ARCH_MULTI_V7
        select ARM_ERRATA_720789
-       select ARM_ERRATA_753970
+       select PL310_ERRATA_753970
        select ARM_GIC
        select ARM_GLOBAL_TIMER
        select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
index 43e3e188f521341884d0a6e280d5724ef606e13f..fa512413a47172212483ebec6811bc5547aa729b 100644 (file)
@@ -1011,17 +1011,17 @@ static int clk_debugfs_register_one(struct clk *c)
                return -ENOMEM;
        c->dent = d;
 
-       d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
+       d = debugfs_create_u8("usecount", S_IRUGO, c->dent, &c->usecount);
        if (!d) {
                err = -ENOMEM;
                goto err_out;
        }
-       d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
+       d = debugfs_create_ulong("rate", S_IRUGO, c->dent, &c->rate);
        if (!d) {
                err = -ENOMEM;
                goto err_out;
        }
-       d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
+       d = debugfs_create_x8("flags", S_IRUGO, c->dent, &c->flags);
        if (!d) {
                err = -ENOMEM;
                goto err_out;
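
The old omap1 debugfs code exported 8-bit and unsigned-long clock fields through helpers of the wrong width by casting the pointers; the fix pairs each field with the matching helper (debugfs_create_u8, debugfs_create_ulong, debugfs_create_x8). A small hedged demo of why the width mismatch matters, using memcpy to mimic what a too-wide read effectively returns; the struct layout is illustrative, not the real struct clk:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* layout loosely modelled on the fields exported above */
struct fake_clk {
	uint8_t usecount;
	uint8_t flags;
	unsigned long rate;
};

int main(void)
{
	struct fake_clk c = { .usecount = 1, .flags = 0x2, .rate = 48000000UL };
	uint32_t wide;

	/* what a 32-bit read of the 8-bit usecount field effectively returns:
	 * the neighbouring bytes (flags, padding) leak into the value */
	memcpy(&wide, &c.usecount, sizeof(wide));
	printf("read as u32: 0x%08x\n", wide);
	printf("read as u8 : 0x%02x\n", c.usecount);
	return 0;
}
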
index 4bb6751864a50e046e74c0952ad75571e1d979d0..fc5fb776a7101234bd64da673815d10a0b75f0f2 100644 (file)
@@ -299,8 +299,6 @@ static void irq_save_context(void)
        if (soc_is_dra7xx())
                return;
 
-       if (!sar_base)
-               sar_base = omap4_get_sar_ram_base();
        if (wakeupgen_ops && wakeupgen_ops->save_context)
                wakeupgen_ops->save_context();
 }
@@ -598,6 +596,8 @@ static int __init wakeupgen_init(struct device_node *node,
        irq_hotplug_init();
        irq_pm_init();
 
+       sar_base = omap4_get_sar_ram_base();
+
        return 0;
 }
 IRQCHIP_DECLARE(ti_wakeupgen, "ti,omap4-wugen-mpu", wakeupgen_init);
index 124f9af34a15a3145edecb30108ced8a3b13a2f5..34156eca8e234f45745e849fb460fd42381cfa76 100644 (file)
@@ -977,6 +977,9 @@ static int _enable_clocks(struct omap_hwmod *oh)
 
        pr_debug("omap_hwmod: %s: enabling clocks\n", oh->name);
 
+       if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
+               _enable_optional_clocks(oh);
+
        if (oh->_clk)
                clk_enable(oh->_clk);
 
@@ -985,9 +988,6 @@ static int _enable_clocks(struct omap_hwmod *oh)
                        clk_enable(os->_clk);
        }
 
-       if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
-               _enable_optional_clocks(oh);
-
        /* The opt clocks are controlled by the device driver. */
 
        return 0;
index 366158a54fcd8beae9ff50d712e1b5c63f87d456..6f68576e56956a635acae35af565d09bb2ff2d0f 100644 (file)
@@ -186,7 +186,7 @@ static void omap_pm_end(void)
        cpu_idle_poll_ctrl(false);
 }
 
-static void omap_pm_finish(void)
+static void omap_pm_wake(void)
 {
        if (soc_is_omap34xx())
                omap_prcm_irq_complete();
@@ -196,7 +196,7 @@ static const struct platform_suspend_ops omap_pm_ops = {
        .begin          = omap_pm_begin,
        .end            = omap_pm_end,
        .enter          = omap_pm_enter,
-       .finish         = omap_pm_finish,
+       .wake           = omap_pm_wake,
        .valid          = suspend_valid_only_mem,
 };
 
index ece09c9461f78d9b3908095615a688522b69e9b3..d61fbd7a2840a4980205c16d1c675a957c6292c8 100644 (file)
@@ -156,12 +156,6 @@ static struct clock_event_device clockevent_gpt = {
        .tick_resume            = omap2_gp_timer_shutdown,
 };
 
-static struct property device_disabled = {
-       .name = "status",
-       .length = sizeof("disabled"),
-       .value = "disabled",
-};
-
 static const struct of_device_id omap_timer_match[] __initconst = {
        { .compatible = "ti,omap2420-timer", },
        { .compatible = "ti,omap3430-timer", },
@@ -203,8 +197,17 @@ static struct device_node * __init omap_get_timer_dt(const struct of_device_id *
                                  of_get_property(np, "ti,timer-secure", NULL)))
                        continue;
 
-               if (!of_device_is_compatible(np, "ti,omap-counter32k"))
-                       of_add_property(np, &device_disabled);
+               if (!of_device_is_compatible(np, "ti,omap-counter32k")) {
+                       struct property *prop;
+
+                       prop = kzalloc(sizeof(*prop), GFP_KERNEL);
+                       if (!prop)
+                               return NULL;
+                       prop->name = "status";
+                       prop->value = "disabled";
+                       prop->length = strlen(prop->value);
+                       of_add_property(np, prop);
+               }
                return np;
        }
 
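
The static "status = disabled" property is replaced by a per-node kzalloc()'d one, presumably because of_add_property() links the property structure into the node's property list, so a single static instance cannot safely be attached to more than one timer node. A rough user-space sketch of the cross-linking hazard under that assumption; the list-append mirrors the kernel's behaviour but the names are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct property {
	const char *name;
	const char *value;
	struct property *next;
};

/* of_add_property()-style append onto a node's property list */
static void add_property(struct property **list, struct property *prop)
{
	prop->next = NULL;
	while (*list)
		list = &(*list)->next;
	*list = prop;
}

int main(void)
{
	struct property *node_a = NULL, *node_b = NULL;
	static struct property shared = { .name = "status", .value = "disabled" };

	/* one static instance attached to two nodes: both lists now end in the
	 * same element, so anything appended to node_a shows up on node_b too */
	add_property(&node_a, &shared);
	add_property(&node_b, &shared);

	struct property *extra = calloc(1, sizeof(*extra));
	extra->name = "ti,timer-alwon";
	add_property(&node_a, extra);

	printf("node_b's second property: %s\n",
	       node_b->next ? node_b->next->name : "(none)");	/* ti,timer-alwon */
	return 0;
}
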
index 57058ac46f49733887e439012afa3247f03e1737..7e5d7a083707074b639dc35ba18c40a14312a38c 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
-#include <linux/perf/arm_pmu.h>
 #include <linux/regulator/machine.h>
 
 #include <asm/outercache.h>
@@ -112,37 +111,6 @@ static void ux500_restart(enum reboot_mode mode, const char *cmd)
        prcmu_system_reset(0);
 }
 
-/*
- * The PMU IRQ lines of two cores are wired together into a single interrupt.
- * Bounce the interrupt to the other core if it's not ours.
- */
-static irqreturn_t db8500_pmu_handler(int irq, void *dev, irq_handler_t handler)
-{
-       irqreturn_t ret = handler(irq, dev);
-       int other = !smp_processor_id();
-
-       if (ret == IRQ_NONE && cpu_online(other))
-               irq_set_affinity(irq, cpumask_of(other));
-
-       /*
-        * We should be able to get away with the amount of IRQ_NONEs we give,
-        * while still having the spurious IRQ detection code kick in if the
-        * interrupt really starts hitting spuriously.
-        */
-       return ret;
-}
-
-static struct arm_pmu_platdata db8500_pmu_platdata = {
-       .handle_irq             = db8500_pmu_handler,
-       .irq_flags              = IRQF_NOBALANCING | IRQF_NO_THREAD,
-};
-
-static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
-       /* Requires call-back bindings. */
-       OF_DEV_AUXDATA("arm,cortex-a9-pmu", 0, "arm-pmu", &db8500_pmu_platdata),
-       {},
-};
-
 static struct of_dev_auxdata u8540_auxdata_lookup[] __initdata = {
        OF_DEV_AUXDATA("stericsson,db8500-prcmu", 0x80157000, "db8500-prcmu", NULL),
        {},
@@ -165,9 +133,6 @@ static void __init u8500_init_machine(void)
        if (of_machine_is_compatible("st-ericsson,u8540"))
                of_platform_populate(NULL, u8500_local_bus_nodes,
                                     u8540_auxdata_lookup, NULL);
-       else
-               of_platform_populate(NULL, u8500_local_bus_nodes,
-                                    u8500_auxdata_lookup, NULL);
 }
 
 static const char * stericsson_dt_platform_compat[] = {
index aff6994950ba6db7eb6579a90cc94e5b2bfc7329..a2399fd66e97cef3db011508dc73c718c9456bc9 100644 (file)
@@ -472,28 +472,27 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
 /*****************************************************************************
  * Ethernet switch
  ****************************************************************************/
-static __initconst const char *orion_ge00_mvmdio_bus_name = "orion-mii";
-static __initdata struct mdio_board_info
-                 orion_ge00_switch_board_info;
+static __initdata struct mdio_board_info orion_ge00_switch_board_info = {
+       .bus_id   = "orion-mii",
+       .modalias = "mv88e6085",
+};
 
 void __init orion_ge00_switch_init(struct dsa_chip_data *d)
 {
-       struct mdio_board_info *bd;
        unsigned int i;
 
        if (!IS_BUILTIN(CONFIG_PHYLIB))
                return;
 
-       for (i = 0; i < ARRAY_SIZE(d->port_names); i++)
-               if (!strcmp(d->port_names[i], "cpu"))
+       for (i = 0; i < ARRAY_SIZE(d->port_names); i++) {
+               if (!strcmp(d->port_names[i], "cpu")) {
+                       d->netdev[i] = &orion_ge00.dev;
                        break;
+               }
+       }
 
-       bd = &orion_ge00_switch_board_info;
-       bd->bus_id = orion_ge00_mvmdio_bus_name;
-       bd->mdio_addr = d->sw_addr;
-       d->netdev[i] = &orion_ge00.dev;
-       strcpy(bd->modalias, "mv88e6085");
-       bd->platform_data = d;
+       orion_ge00_switch_board_info.mdio_addr = d->sw_addr;
+       orion_ge00_switch_board_info.platform_data = d;
 
        mdiobus_register_board_info(&orion_ge00_switch_board_info, 1);
 }
index a80632641b39f8cb5dc8554aa2b2e85b6551e0ea..70c776ef7aa7321450def075b8f150ef1dbbbf9f 100644 (file)
 
                        uart_A: serial@24000 {
                                compatible = "amlogic,meson-gx-uart", "amlogic,meson-uart";
-                               reg = <0x0 0x24000 0x0 0x14>;
+                               reg = <0x0 0x24000 0x0 0x18>;
                                interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
 
                        uart_B: serial@23000 {
                                compatible = "amlogic,meson-gx-uart", "amlogic,meson-uart";
-                               reg = <0x0 0x23000 0x0 0x14>;
+                               reg = <0x0 0x23000 0x0 0x18>;
                                interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
index 6cb3c2a52bafe5f0db8a0018b35e88998ed90f01..4ee2e7951482f43122620d2668b244de1744e6b9 100644 (file)
 
                        uart_A: serial@84c0 {
                                compatible = "amlogic,meson-gx-uart";
-                               reg = <0x0 0x84c0 0x0 0x14>;
+                               reg = <0x0 0x84c0 0x0 0x18>;
                                interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
 
                        uart_B: serial@84dc {
                                compatible = "amlogic,meson-gx-uart";
-                               reg = <0x0 0x84dc 0x0 0x14>;
+                               reg = <0x0 0x84dc 0x0 0x18>;
                                interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
 
                        uart_C: serial@8700 {
                                compatible = "amlogic,meson-gx-uart";
-                               reg = <0x0 0x8700 0x0 0x14>;
+                               reg = <0x0 0x8700 0x0 0x18>;
                                interrupts = <GIC_SPI 93 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
 
                        uart_AO: serial@4c0 {
                                compatible = "amlogic,meson-gx-uart", "amlogic,meson-ao-uart";
-                               reg = <0x0 0x004c0 0x0 0x14>;
+                               reg = <0x0 0x004c0 0x0 0x18>;
                                interrupts = <GIC_SPI 193 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
 
                        uart_AO_B: serial@4e0 {
                                compatible = "amlogic,meson-gx-uart", "amlogic,meson-ao-uart";
-                               reg = <0x0 0x004e0 0x0 0x14>;
+                               reg = <0x0 0x004e0 0x0 0x18>;
                                interrupts = <GIC_SPI 197 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
index 4f355f17eed6bcc29dcc14fb4546fce6a083a57f..c8514110b9da2dc2f40988ad0ae733437e33420a 100644 (file)
 
                        internal_phy: ethernet-phy@8 {
                                compatible = "ethernet-phy-id0181.4400", "ethernet-phy-ieee802.3-c22";
+                               interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
                                reg = <8>;
                                max-speed = <100>;
                        };
index 4220fbdcb24a7f18c5e3ab66574ba22c8c92c873..ff5c4c47b22bfecfa36f0090dc8be5c85b171271 100644 (file)
@@ -98,7 +98,7 @@
                clock-output-names = "clk125mhz";
        };
 
-       pci {
+       pcie@30000000 {
                compatible = "pci-host-ecam-generic";
                device_type = "pci";
                #interrupt-cells = <1>;
                ranges =
                  <0x02000000    0 0x40000000    0 0x40000000    0 0x20000000
                   0x43000000 0x40 0x00000000 0x40 0x00000000 0x20 0x00000000>;
+               bus-range = <0 0xff>;
                interrupt-map-mask = <0 0 0 7>;
                interrupt-map =
                      /* addr  pin  ic   icaddr  icintr */
index e94fa1a531922ee6b160246c4399435574909641..047641fe294c64c9dbc04dcb477827814a809677 100644 (file)
@@ -51,7 +51,7 @@
                #size-cells = <2>;
                ranges;
 
-               ramoops@0x21f00000 {
+               ramoops@21f00000 {
                        compatible = "ramoops";
                        reg = <0x0 0x21f00000 0x0 0x00100000>;
                        record-size     = <0x00020000>;
index 9fbe4705ee88bfaf1eb12a7208de0c5899d7f9d3..94597e33c8065eb4b12ee805885988991d43e42c 100644 (file)
                        reg = <0 0x10005000 0 0x1000>;
                };
 
-               pio: pinctrl@0x10005000 {
+               pio: pinctrl@10005000 {
                        compatible = "mediatek,mt8173-pinctrl";
                        reg = <0 0x1000b000 0 0x1000>;
                        mediatek,pctl-regmap = <&syscfg_pctl_a>;
index 492a011f14f6cef933dc16ce9cf591d8cdc5c79e..1c8f1b86472de9c149b706502dcc552f19376ae5 100644 (file)
                };
 
                agnoc@0 {
-                       qcom,pcie@00600000 {
+                       qcom,pcie@600000 {
                                perst-gpio = <&msmgpio 35 GPIO_ACTIVE_LOW>;
                        };
 
-                       qcom,pcie@00608000 {
+                       qcom,pcie@608000 {
                                status = "okay";
                                perst-gpio = <&msmgpio 130 GPIO_ACTIVE_LOW>;
                        };
 
-                       qcom,pcie@00610000 {
+                       qcom,pcie@610000 {
                                status = "okay";
                                perst-gpio = <&msmgpio 114 GPIO_ACTIVE_LOW>;
                        };
index 4b2afcc4fdf4791da816c6bba3c6f2ef7741ad8d..0a6f7952bbb18d65847261957715e331756c46c4 100644 (file)
                        #size-cells = <1>;
                        ranges;
 
-                       pcie0: qcom,pcie@00600000 {
+                       pcie0: qcom,pcie@600000 {
                                compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
                                status = "disabled";
                                power-domains = <&gcc PCIE0_GDSC>;
 
                        };
 
-                       pcie1: qcom,pcie@00608000 {
+                       pcie1: qcom,pcie@608000 {
                                compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
                                power-domains = <&gcc PCIE1_GDSC>;
                                bus-range = <0x00 0xff>;
                                                "bus_slave";
                        };
 
-                       pcie2: qcom,pcie@00610000 {
+                       pcie2: qcom,pcie@610000 {
                                compatible = "qcom,pcie-msm8996", "snps,dw-pcie";
                                power-domains = <&gcc PCIE2_GDSC>;
                                bus-range = <0x00 0xff>;
index 3890468678ce1caa78a411aeb0a4a9cdedfc1302..28257724a56e74b79b83c69a76bea0da4e0fd9ed 100644 (file)
        assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>;
        assigned-clock-parents = <&gmac_clkin>, <&gmac_clkin>;
        clock_in_out = "input";
-       /* shows instability at 1GBit right now */
-       max-speed = <100>;
        phy-supply = <&vcc_io>;
        phy-mode = "rgmii";
        pinctrl-names = "default";
        pinctrl-0 = <&rgmiim1_pins>;
+       snps,force_thresh_dma_mode;
        snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>;
        snps,reset-active-low;
        snps,reset-delays-us = <0 10000 50000>;
-       tx_delay = <0x26>;
-       rx_delay = <0x11>;
+       tx_delay = <0x24>;
+       rx_delay = <0x18>;
        status = "okay";
 };
 
index a037ee56fead6db1b0bcedbbf740f6a54ca82977..cae3415544862dfddf06a034ecea62bd50739fd2 100644 (file)
                interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
                         <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
-               clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+               clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                fifo-depth = <0x100>;
                status = "disabled";
        };
                interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>,
                         <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
-               clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+               clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                fifo-depth = <0x100>;
                status = "disabled";
        };
                interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
                         <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
-               clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+               clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                fifo-depth = <0x100>;
                status = "disabled";
        };
index aa4d07046a7ba9644316cd7c57b43b64b5327766..03458ac44201c7c66d0e991881ab24e292bfdee2 100644 (file)
                max-frequency = <150000000>;
                clocks = <&cru HCLK_SDIO0>, <&cru SCLK_SDIO0>,
                         <&cru SCLK_SDIO0_DRV>, <&cru SCLK_SDIO0_SAMPLE>;
-               clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+               clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                fifo-depth = <0x100>;
                interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
                resets = <&cru SRST_SDIO0>;
index 0f873c897d0de5a75f9d4e4d90d7c658b7a173d3..ce592a4c0c4cdeb473a1c96106620583a8a4abef 100644 (file)
        assigned-clocks = <&cru SCLK_PCIEPHY_REF>;
        assigned-clock-parents = <&cru SCLK_PCIEPHY_REF100M>;
        assigned-clock-rates = <100000000>;
-       ep-gpios = <&gpio3 RK_PB5 GPIO_ACTIVE_HIGH>;
+       ep-gpios = <&gpio2 RK_PA4 GPIO_ACTIVE_HIGH>;
        num-lanes = <4>;
        pinctrl-names = "default";
        pinctrl-0 = <&pcie_clkreqn_cpm>;
index 7aa2144e0d47d1fb8e30a2389d4d0fbc5fc4030f..2605118d4b4ce74755ced75843d91ca22ad38ce8 100644 (file)
                compatible = "rockchip,rk3399-edp";
                reg = <0x0 0xff970000 0x0 0x8000>;
                interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH 0>;
-               clocks = <&cru PCLK_EDP>, <&cru PCLK_EDP_CTRL>;
-               clock-names = "dp", "pclk";
+               clocks = <&cru PCLK_EDP>, <&cru PCLK_EDP_CTRL>, <&cru PCLK_VIO_GRF>;
+               clock-names = "dp", "pclk", "grf";
                pinctrl-names = "default";
                pinctrl-0 = <&edp_hpd>;
                power-domains = <&power RK3399_PD_EDP>;
index eda8c5f629fc8553af2cdccaede5dfd209cecf94..350c76a1d15ba7b6d8935928305aa80f53d9649e 100644 (file)
@@ -20,7 +20,7 @@
 
 #define MPIDR_UP_BITMASK       (0x1 << 30)
 #define MPIDR_MT_BITMASK       (0x1 << 24)
-#define MPIDR_HWID_BITMASK     0xff00ffffffUL
+#define MPIDR_HWID_BITMASK     UL(0xff00ffffff)
 
 #define MPIDR_LEVEL_BITS_SHIFT 3
 #define MPIDR_LEVEL_BITS       (1 << MPIDR_LEVEL_BITS_SHIFT)
index 472ef944e93260be2faad4182431331e21bc3569..902f9edacbea94b96a85321ff79d45e8ac59fd73 100644 (file)
@@ -28,7 +28,7 @@ struct stackframe {
        unsigned long fp;
        unsigned long pc;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       unsigned int graph;
+       int graph;
 #endif
 };
 
index 543e11f0f657e932aa17916a0750f7d83cf5d7b8..e66b0fca99c2f9e500788db6fa2e24693ade11c3 100644 (file)
@@ -72,15 +72,15 @@ static inline void set_fs(mm_segment_t fs)
  * This is equivalent to the following test:
  * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
  */
-static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
+static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
 {
-       unsigned long limit = current_thread_info()->addr_limit;
+       unsigned long ret, limit = current_thread_info()->addr_limit;
 
        __chk_user_ptr(addr);
        asm volatile(
        // A + B <= C + 1 for all A,B,C, in four easy steps:
        // 1: X = A + B; X' = X % 2^64
-       "       adds    %0, %0, %2\n"
+       "       adds    %0, %3, %2\n"
        // 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
        "       csel    %1, xzr, %1, hi\n"
        // 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
@@ -92,9 +92,9 @@ static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
        //    testing X' - C == 0, subject to the previous adjustments.
        "       sbcs    xzr, %0, %1\n"
        "       cset    %0, ls\n"
-       : "+r" (addr), "+r" (limit) : "Ir" (size) : "cc");
+       : "=&r" (ret), "+r" (limit) : "Ir" (size), "0" (addr) : "cc");
 
-       return addr;
+       return ret;
 }
 
 /*
@@ -104,7 +104,7 @@ static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
  */
 #define untagged_addr(addr)            sign_extend64(addr, 55)
 
-#define access_ok(type, addr, size)    __range_ok((unsigned long)(addr), size)
+#define access_ok(type, addr, size)    __range_ok(addr, size)
 #define user_addr_max                  get_fs
 
 #define _ASM_EXTABLE(from, to)                                         \
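
The rewritten __range_ok() takes the __user pointer directly and returns the comparison result in a separate early-clobber output register instead of overwriting the address operand in place; the 65-bit comparison itself is unchanged. For reference, a hedged plain-C rendering of what that assembly computes (the kernel keeps it in asm to stay flag-based and branch-free):

#include <stdint.h>
#include <stdio.h>

/* true iff addr + size <= limit + 1, evaluated as if in 65-bit arithmetic;
 * assumes limit < UINT64_MAX, which holds for the kernel's addr_limit */
static int range_ok(uint64_t addr, uint64_t size, uint64_t limit)
{
	uint64_t end = addr + size;

	if (end < addr)		/* 64-bit wraparound: certainly past the limit */
		return 0;
	return end <= limit + 1;
}

int main(void)
{
	uint64_t limit = (1ULL << 52) - 1;	/* illustrative user address limit */

	printf("%d\n", range_ok(0x1000, 0x100, limit));	/* 1: in range */
	printf("%d\n", range_ok(limit, 2, limit));	/* 0: one byte too far */
	printf("%d\n", range_ok(~0ULL, 16, limit));	/* 0: wraps around */
	return 0;
}
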
index c33b5e4010ab7660140025f9c17d6be10b66706d..68450e954d47d7d241740d0de4ca3b9d2df53b75 100644 (file)
@@ -370,6 +370,7 @@ static unsigned int __kprobes aarch32_check_condition(u32 opcode, u32 psr)
 static int swp_handler(struct pt_regs *regs, u32 instr)
 {
        u32 destreg, data, type, address = 0;
+       const void __user *user_ptr;
        int rn, rt2, res = 0;
 
        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
@@ -401,7 +402,8 @@ static int swp_handler(struct pt_regs *regs, u32 instr)
                aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET), data);
 
        /* Check access in reasonable access range for both SWP and SWPB */
-       if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) {
+       user_ptr = (const void __user *)(unsigned long)(address & ~3);
+       if (!access_ok(VERIFY_WRITE, user_ptr, 4)) {
                pr_debug("SWP{B} emulation: access to 0x%08x not allowed!\n",
                        address);
                goto fault;
index 29b1f873e337fb6a423c3406c02d43af18bc4847..2985a067fc131c468b7213ed1a5b0ec6548fb1c8 100644 (file)
@@ -199,9 +199,11 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
 };
 
 static const struct arm64_ftr_bits ftr_ctr[] = {
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1),   /* RAO */
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1),           /* RES1 */
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 29, 1, 1),      /* DIC */
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1),      /* IDC */
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),     /* CWG */
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),      /* ERG */
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 20, 4, 0),     /* ERG */
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),      /* DminLine */
        /*
         * Linux can handle differing I-cache policies. Userspace JITs will
index 75b220ba73a3234b7815062537e9367d36e6a040..85a251b6dfa840dd41e852683cec4032b6a9f0d9 100644 (file)
@@ -908,9 +908,9 @@ static void __armv8pmu_probe_pmu(void *info)
        int pmuver;
 
        dfr0 = read_sysreg(id_aa64dfr0_el1);
-       pmuver = cpuid_feature_extract_signed_field(dfr0,
+       pmuver = cpuid_feature_extract_unsigned_field(dfr0,
                        ID_AA64DFR0_PMUVER_SHIFT);
-       if (pmuver < 1)
+       if (pmuver == 0xf || pmuver == 0)
                return;
 
        probe->present = true;
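
ID_AA64DFR0_EL1.PMUVer is an unsigned field where 0x0 means no PMU and 0xf means an implementation-defined PMU rather than PMUv3; extracting it as a signed value makes anything with the top bit set look negative, so future PMU versions (0x8 and up) would have been treated as absent by the old "pmuver < 1" test. A hedged sketch of the difference between the two extractions (field position taken from the diff, helper names illustrative):

#include <stdint.h>
#include <stdio.h>

#define PMUVER_SHIFT	8	/* ID_AA64DFR0_EL1.PMUVer lives in bits [11:8] */

static int extract_signed(uint64_t reg, int shift)
{
	/* move the 4-bit field to the top, then arithmetic-shift back down */
	return (int)((int64_t)(reg << (64 - shift - 4)) >> 60);
}

static unsigned int extract_unsigned(uint64_t reg, int shift)
{
	return (reg >> shift) & 0xf;
}

int main(void)
{
	/* a hypothetical future PMU version 0x8 */
	uint64_t dfr0 = (uint64_t)0x8 << PMUVER_SHIFT;

	printf("signed:   %d (old '< 1' check wrongly rejects it)\n",
	       extract_signed(dfr0, PMUVER_SHIFT));
	printf("unsigned: %u (new check rejects only 0x0 and 0xf)\n",
	       extract_unsigned(dfr0, PMUVER_SHIFT));
	return 0;
}
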
index ad8aeb098b31ed5887aa98a438622bb2304abe97..c0da6efe546558a0b4fc7c75c63a585ff37832ea 100644 (file)
@@ -220,8 +220,15 @@ void __show_regs(struct pt_regs *regs)
 
        show_regs_print_info(KERN_DEFAULT);
        print_pstate(regs);
-       printk("pc : %pS\n", (void *)regs->pc);
-       printk("lr : %pS\n", (void *)lr);
+
+       if (!user_mode(regs)) {
+               printk("pc : %pS\n", (void *)regs->pc);
+               printk("lr : %pS\n", (void *)lr);
+       } else {
+               printk("pc : %016llx\n", regs->pc);
+               printk("lr : %016llx\n", lr);
+       }
+
        printk("sp : %016llx\n", sp);
 
        i = top_reg;
index 6618036ae6d4697c78a93bd6d1659dee703be23c..9ae31f7e224365d054a9712551b9ac75a0b19e9e 100644 (file)
@@ -1419,7 +1419,7 @@ static int compat_ptrace_hbp_get(unsigned int note_type,
        u64 addr = 0;
        u32 ctrl = 0;
 
-       int err, idx = compat_ptrace_hbp_num_to_idx(num);;
+       int err, idx = compat_ptrace_hbp_num_to_idx(num);
 
        if (num & 1) {
                err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
index 76809ccd309ccaf330e45ac4b814922ae5949624..d5718a060672e1696618904c8844447daa042007 100644 (file)
@@ -59,6 +59,11 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (tsk->ret_stack &&
                        (frame->pc == (unsigned long)return_to_handler)) {
+               if (WARN_ON_ONCE(frame->graph == -1))
+                       return -EINVAL;
+               if (frame->graph < -1)
+                       frame->graph += FTRACE_NOTRACE_DEPTH;
+
                /*
                 * This is a case where function graph tracer has
                 * modified a return address (LR) in a stack frame
index 8b8bbd3eaa52cc8df71d66ab1063427232319692..a382b2a1b84e3204b5794fb5f367ac15a0bddf43 100644 (file)
@@ -57,7 +57,7 @@ do_compat_cache_op(unsigned long start, unsigned long end, int flags)
        if (end < start || flags)
                return -EINVAL;
 
-       if (!access_ok(VERIFY_READ, start, end - start))
+       if (!access_ok(VERIFY_READ, (const void __user *)start, end - start))
                return -EFAULT;
 
        return __do_compat_cache_op(start, end);
index a4391280fba9631d69cf8bda434c307956b92668..f258636273c9588dca80420341a3627cfdd61d2c 100644 (file)
@@ -52,7 +52,7 @@ unsigned long profile_pc(struct pt_regs *regs)
        frame.fp = regs->regs[29];
        frame.pc = regs->pc;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       frame.graph = -1; /* no task info */
+       frame.graph = current->curr_ret_stack;
 #endif
        do {
                int ret = unwind_frame(NULL, &frame);
index bbb0fde2780ef780cd3c2de7a071691556d5ce6d..eb2d15147e8d3bb2be8731400990f18979022fce 100644 (file)
@@ -57,7 +57,7 @@ static const char *handler[]= {
        "Error"
 };
 
-int show_unhandled_signals = 1;
+int show_unhandled_signals = 0;
 
 static void dump_backtrace_entry(unsigned long where)
 {
@@ -526,14 +526,6 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
        }
 #endif
 
-       if (show_unhandled_signals_ratelimited()) {
-               pr_info("%s[%d]: syscall %d\n", current->comm,
-                       task_pid_nr(current), regs->syscallno);
-               dump_instr("", regs);
-               if (user_mode(regs))
-                       __show_regs(regs);
-       }
-
        return sys_ni_syscall();
 }
 
index 3161b853f29e1d35a21b0da5a01f571c995616f3..84a019f5502293dbe0c28cc078465e4ddd6c2096 100644 (file)
@@ -933,6 +933,11 @@ int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
 {
        pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
                                        pgprot_val(mk_sect_prot(prot)));
+
+       /* ioremap_page_range doesn't honour BBM */
+       if (pud_present(READ_ONCE(*pudp)))
+               return 0;
+
        BUG_ON(phys & ~PUD_MASK);
        set_pud(pudp, pfn_pud(__phys_to_pfn(phys), sect_prot));
        return 1;
@@ -942,6 +947,11 @@ int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
 {
        pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
                                        pgprot_val(mk_sect_prot(prot)));
+
+       /* ioremap_page_range doesn't honour BBM */
+       if (pmd_present(READ_ONCE(*pmdp)))
+               return 0;
+
        BUG_ON(phys & ~PMD_MASK);
        set_pmd(pmdp, pfn_pmd(__phys_to_pfn(phys), sect_prot));
        return 1;
index 1d4f1da7c58f8d51371947523e91649915e4320d..a93350451e8ece754a2f565764d47505e48b85ef 100644 (file)
@@ -250,8 +250,9 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
        off = offsetof(struct bpf_array, map.max_entries);
        emit_a64_mov_i64(tmp, off, ctx);
        emit(A64_LDR32(tmp, r2, tmp), ctx);
+       emit(A64_MOV(0, r3, r3), ctx);
        emit(A64_CMP(0, r3, tmp), ctx);
-       emit(A64_B_(A64_COND_GE, jmp_offset), ctx);
+       emit(A64_B_(A64_COND_CS, jmp_offset), ctx);
 
        /* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
         *     goto out;
@@ -259,7 +260,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
         */
        emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
        emit(A64_CMP(1, tcc, tmp), ctx);
-       emit(A64_B_(A64_COND_GT, jmp_offset), ctx);
+       emit(A64_B_(A64_COND_HI, jmp_offset), ctx);
        emit(A64_ADD_I(1, tcc, tcc, 1), ctx);
 
        /* prog = array->ptrs[index];
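
The tail-call index is a 32-bit value held in a 64-bit register, and both bounds checks compare unsigned quantities, so the JIT now zero-extends r3 (the added 32-bit A64_MOV) and branches on the unsigned conditions CS and HI instead of the signed GE and GT. A hedged sketch of what the signed comparison would let through:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t max_entries = 16;
	uint64_t index = ~0ULL;		/* stale upper bits: only the low 32 are meaningful */

	/* signed 64-bit compare, as the old JIT emitted: -1 >= 16 is false,
	 * so the out-of-bounds index is not rejected */
	printf("signed  : %s\n",
	       ((int64_t)index >= (int64_t)max_entries) ? "reject" : "ACCEPT (wrong)");

	/* fix: truncate to 32 bits (zero-extend in the JIT), compare unsigned */
	uint32_t idx = (uint32_t)index;
	printf("unsigned: %s\n", (idx >= max_entries) ? "reject" : "accept");
	return 0;
}
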
index 905afeacfedf53de44b3ba08487b04b46141e564..06da9d49152a0fd78ef1bac764df62aa38e35c83 100644 (file)
@@ -44,18 +44,25 @@ struct bug_frame {
  * not be used like this with newer versions of gcc.
  */
 #define BUG()                                                          \
+do {                                                                   \
        __asm__ __volatile__ ("clear.d [" __stringify(BUG_MAGIC) "]\n\t"\
                              "movu.w " __stringify(__LINE__) ",$r0\n\t"\
                              "jump 0f\n\t"                             \
                              ".section .rodata\n"                      \
                              "0:\t.string \"" __FILE__ "\"\n\t"        \
-                             ".previous")
+                             ".previous");                             \
+       unreachable();                                                  \
+} while (0)
 #endif
 
 #else
 
 /* This just causes an oops. */
-#define BUG() (*(int *)0 = 0)
+#define BUG()                                                          \
+do {                                                                   \
+       barrier_before_unreachable();                                   \
+       __builtin_trap();                                               \
+} while (0)
 
 #endif
 
index bd3eeb8d1cfa379a56456a3fd27909dbeaf2b55b..66b37a5327654f87a40fc277fad53d927875c355 100644 (file)
@@ -4,7 +4,11 @@
 
 #ifdef CONFIG_BUG
 #define ia64_abort()   __builtin_trap()
-#define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); ia64_abort(); } while (0)
+#define BUG() do {                                             \
+       printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__);   \
+       barrier_before_unreachable();                           \
+       ia64_abort();                                           \
+} while (0)
 
 /* should this BUG be made generic? */
 #define HAVE_ARCH_BUG
index b7e2bf1ba4a60d1e139aedf50c7a82fc0b0235cd..275dca1435bf9f64202b0a9249b0a07138105c82 100644 (file)
@@ -8,16 +8,19 @@
 #ifndef CONFIG_SUN3
 #define BUG() do { \
        pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+       barrier_before_unreachable(); \
        __builtin_trap(); \
 } while (0)
 #else
 #define BUG() do { \
        pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+       barrier_before_unreachable(); \
        panic("BUG!"); \
 } while (0)
 #endif
 #else
 #define BUG() do { \
+       barrier_before_unreachable(); \
        __builtin_trap(); \
 } while (0)
 #endif
index 1bd5c4f00d19bd7ff4aee34789e0fbb371240a4d..c22da16d67b82f7752f6ba39291e2acd4e18fb47 100644 (file)
@@ -126,6 +126,7 @@ $(obj)/vmlinux.its.S: $(addprefix $(srctree)/arch/mips/$(PLATFORM)/,$(ITS_INPUTS
 
 quiet_cmd_cpp_its_S = ITS     $@
       cmd_cpp_its_S = $(CPP) $(cpp_flags) -P -C -o $@ $< \
+                       -D__ASSEMBLY__ \
                        -DKERNEL_NAME="\"Linux $(KERNELRELEASE)\"" \
                        -DVMLINUX_BINARY="\"$(3)\"" \
                        -DVMLINUX_COMPRESSION="\"$(2)\"" \
index 946681db8dc3a680e319617e43e209ba50b51e98..9a0fa66b81aca6c47dab0189cbba57b7c413a952 100644 (file)
@@ -86,7 +86,6 @@ struct compat_flock {
        compat_off_t    l_len;
        s32             l_sysid;
        compat_pid_t    l_pid;
-       short           __unused;
        s32             pad[4];
 };
 
index 3742508cc5345882510d907dc5f58b48a08ad9e7..bd5ce31936f5b196c3ce0482ee962eb93df46216 100644 (file)
@@ -26,6 +26,7 @@ void flush_user_icache_range_asm(unsigned long, unsigned long);
 void flush_kernel_icache_range_asm(unsigned long, unsigned long);
 void flush_user_dcache_range_asm(unsigned long, unsigned long);
 void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
+void purge_kernel_dcache_range_asm(unsigned long, unsigned long);
 void flush_kernel_dcache_page_asm(void *);
 void flush_kernel_icache_page(void *);
 
index 0e6ab6e4a4e9f87ba8ffc17f4d1c1e0041b5ab02..2dbe5580a1a4420ba693329a0f2622f68f759276 100644 (file)
@@ -316,6 +316,8 @@ extern int _parisc_requires_coherency;
 #define parisc_requires_coherency()    (0)
 #endif
 
+extern int running_on_qemu;
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ASM_PARISC_PROCESSOR_H */
index 19c0c141bc3f9f0edd509708f978a2d7ca16c230..79089778725b317888d0ecdab4feab490207e4d3 100644 (file)
@@ -465,10 +465,10 @@ EXPORT_SYMBOL(copy_user_page);
 int __flush_tlb_range(unsigned long sid, unsigned long start,
                      unsigned long end)
 {
-       unsigned long flags, size;
+       unsigned long flags;
 
-       size = (end - start);
-       if (size >= parisc_tlb_flush_threshold) {
+       if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+           end - start >= parisc_tlb_flush_threshold) {
                flush_tlb_all();
                return 1;
        }
@@ -539,13 +539,11 @@ void flush_cache_mm(struct mm_struct *mm)
        struct vm_area_struct *vma;
        pgd_t *pgd;
 
-       /* Flush the TLB to avoid speculation if coherency is required. */
-       if (parisc_requires_coherency())
-               flush_tlb_all();
-
        /* Flushing the whole cache on each cpu takes forever on
           rp3440, etc.  So, avoid it if the mm isn't too big.  */
-       if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
+       if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+           mm_total_size(mm) >= parisc_cache_flush_threshold) {
+               flush_tlb_all();
                flush_cache_all();
                return;
        }
@@ -553,9 +551,9 @@ void flush_cache_mm(struct mm_struct *mm)
        if (mm->context == mfsp(3)) {
                for (vma = mm->mmap; vma; vma = vma->vm_next) {
                        flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
-                       if ((vma->vm_flags & VM_EXEC) == 0)
-                               continue;
-                       flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
+                       if (vma->vm_flags & VM_EXEC)
+                               flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
+                       flush_tlb_range(vma, vma->vm_start, vma->vm_end);
                }
                return;
        }
@@ -581,14 +579,9 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end)
 {
-       BUG_ON(!vma->vm_mm->context);
-
-       /* Flush the TLB to avoid speculation if coherency is required. */
-       if (parisc_requires_coherency())
+       if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+           end - start >= parisc_cache_flush_threshold) {
                flush_tlb_range(vma, start, end);
-
-       if ((end - start) >= parisc_cache_flush_threshold
-           || vma->vm_mm->context != mfsp(3)) {
                flush_cache_all();
                return;
        }
@@ -596,6 +589,7 @@ void flush_cache_range(struct vm_area_struct *vma,
        flush_user_dcache_range_asm(start, end);
        if (vma->vm_flags & VM_EXEC)
                flush_user_icache_range_asm(start, end);
+       flush_tlb_range(vma, start, end);
 }
 
 void
@@ -604,8 +598,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
        BUG_ON(!vma->vm_mm->context);
 
        if (pfn_valid(pfn)) {
-               if (parisc_requires_coherency())
-                       flush_tlb_page(vma, vmaddr);
+               flush_tlb_page(vma, vmaddr);
                __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
        }
 }
@@ -613,21 +606,33 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 void flush_kernel_vmap_range(void *vaddr, int size)
 {
        unsigned long start = (unsigned long)vaddr;
+       unsigned long end = start + size;
 
-       if ((unsigned long)size > parisc_cache_flush_threshold)
+       if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+           (unsigned long)size >= parisc_cache_flush_threshold) {
+               flush_tlb_kernel_range(start, end);
                flush_data_cache();
-       else
-               flush_kernel_dcache_range_asm(start, start + size);
+               return;
+       }
+
+       flush_kernel_dcache_range_asm(start, end);
+       flush_tlb_kernel_range(start, end);
 }
 EXPORT_SYMBOL(flush_kernel_vmap_range);
 
 void invalidate_kernel_vmap_range(void *vaddr, int size)
 {
        unsigned long start = (unsigned long)vaddr;
+       unsigned long end = start + size;
 
-       if ((unsigned long)size > parisc_cache_flush_threshold)
+       if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+           (unsigned long)size >= parisc_cache_flush_threshold) {
+               flush_tlb_kernel_range(start, end);
                flush_data_cache();
-       else
-               flush_kernel_dcache_range_asm(start, start + size);
+               return;
+       }
+
+       purge_kernel_dcache_range_asm(start, end);
+       flush_tlb_kernel_range(start, end);
 }
 EXPORT_SYMBOL(invalidate_kernel_vmap_range);
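
The cache.c hunks above all apply one pattern: fall back to a whole-cache and whole-TLB flush only when the range is at least parisc_cache_flush_threshold and it is safe to do so (on UP, or on SMP with interrupts still enabled); otherwise flush just the requested range and its TLB entries. Condensed into one illustrative helper that mirrors flush_kernel_vmap_range() (the functions called are the ones used in the patch, the wrapper itself is not):

    static void flush_kernel_range_sketch(unsigned long start, unsigned long end)
    {
            /* large range and safe to flush everything? */
            if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
                end - start >= parisc_cache_flush_threshold) {
                    flush_tlb_kernel_range(start, end);
                    flush_data_cache();
                    return;
            }

            /* otherwise stay local to the range */
            flush_kernel_dcache_range_asm(start, end);
            flush_tlb_kernel_range(start, end);
    }
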
index bbbe360b458f511c068620db2dd670a770ea8362..fbb4e43fda05332de57bdc135ec3711d4bd5ee77 100644 (file)
@@ -138,6 +138,16 @@ $pgt_fill_loop:
        std             %dp,0x18(%r10)
 #endif
 
+#ifdef CONFIG_64BIT
+       /* Get PDCE_PROC for monarch CPU. */
+#define MEM_PDC_LO 0x388
+#define MEM_PDC_HI 0x35C
+       ldw             MEM_PDC_LO(%r0),%r3
+       ldw             MEM_PDC_HI(%r0),%r10
+       depd            %r10, 31, 32, %r3        /* move to upper word */
+#endif
+
+
 #ifdef CONFIG_SMP
        /* Set the smp rendezvous address into page zero.
        ** It would be safer to do this in init_smp_config() but
@@ -196,12 +206,6 @@ common_stext:
         ** Someday, palo might not do this for the Monarch either.
         */
 2:
-#define MEM_PDC_LO 0x388
-#define MEM_PDC_HI 0x35C
-       ldw             MEM_PDC_LO(%r0),%r3
-       ldw             MEM_PDC_HI(%r0),%r6
-       depd            %r6, 31, 32, %r3        /* move to upper word */
-
        mfctl           %cr30,%r6               /* PCX-W2 firmware bug */
 
        ldo             PDC_PSW(%r0),%arg0              /* 21 */
@@ -268,6 +272,8 @@ $install_iva:
 aligned_rfi:
        pcxt_ssm_bug
 
+       copy            %r3, %arg0      /* PDCE_PROC for smp_callin() */
+
        rsm             PSW_SM_QUIET,%r0        /* off troublesome PSW bits */
        /* Don't need NOPs, have 8 compliant insn before rfi */
 
index 2d40c4ff3f6918ae9b2e2c6af71e20658a9850e1..67b0f7532e835f4db1214c6ccecf62183eb84e50 100644 (file)
@@ -1110,6 +1110,28 @@ ENTRY_CFI(flush_kernel_dcache_range_asm)
        .procend
 ENDPROC_CFI(flush_kernel_dcache_range_asm)
 
+ENTRY_CFI(purge_kernel_dcache_range_asm)
+       .proc
+       .callinfo NO_CALLS
+       .entry
+
+       ldil            L%dcache_stride, %r1
+       ldw             R%dcache_stride(%r1), %r23
+       ldo             -1(%r23), %r21
+       ANDCM           %r26, %r21, %r26
+
+1:      cmpb,COND(<<),n        %r26, %r25,1b
+       pdc,m           %r23(%r26)
+
+       sync
+       syncdma
+       bv              %r0(%r2)
+       nop
+       .exit
+
+       .procend
+ENDPROC_CFI(purge_kernel_dcache_range_asm)
+
 ENTRY_CFI(flush_user_icache_range_asm)
        .proc
        .callinfo NO_CALLS
index 30c28ab145409b5966f7237ec2b6ca07121adc10..4065b5e48c9d68e70b38da3743e219e02934fe5b 100644 (file)
@@ -292,10 +292,15 @@ smp_cpu_init(int cpunum)
  * Slaves start using C here. Indirectly called from smp_slave_stext.
  * Do what start_kernel() and main() do for boot strap processor (aka monarch)
  */
-void __init smp_callin(void)
+void __init smp_callin(unsigned long pdce_proc)
 {
        int slave_id = cpu_now_booting;
 
+#ifdef CONFIG_64BIT
+       WARN_ON(((unsigned long)(PAGE0->mem_pdc_hi) << 32
+                       | PAGE0->mem_pdc) != pdce_proc);
+#endif
+
        smp_cpu_init(slave_id);
        preempt_disable();
 
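
The head.S hunk above fetches PDCE_PROC from page zero early, as two 32-bit words combined into one 64-bit address, and hands it to smp_callin(); the 64-bit-only WARN_ON() added here cross-checks that value against PAGE0. The composition it performs is simply:

    /* Illustrative helper mirroring the WARN_ON() comparison above. */
    static inline unsigned long page0_pdce_proc(void)
    {
            return ((unsigned long)(PAGE0->mem_pdc_hi) << 32) | PAGE0->mem_pdc;
    }

    /* ... WARN_ON(page0_pdce_proc() != pdce_proc); */
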
index 4b8fd6dc22dabebcf1da3b2e32381d2df3197eda..f7e684560186f9c3d5db133b8e66281c0f3c0e12 100644 (file)
@@ -76,10 +76,10 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
        next_tick = cpuinfo->it_value;
 
        /* Calculate how many ticks have elapsed. */
+       now = mfctl(16);
        do {
                ++ticks_elapsed;
                next_tick += cpt;
-               now = mfctl(16);
        } while (next_tick - now > cpt);
 
        /* Store (in CR16 cycles) up to when we are accounting right now. */
@@ -103,16 +103,17 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
         * if one or the other wrapped. If "now" is "bigger" we'll end up
         * with a very large unsigned number.
         */
-       while (next_tick - mfctl(16) > cpt)
+       now = mfctl(16);
+       while (next_tick - now > cpt)
                next_tick += cpt;
 
        /* Program the IT when to deliver the next interrupt.
         * Only bottom 32-bits of next_tick are writable in CR16!
         * Timer interrupt will be delivered at least a few hundred cycles
-        * after the IT fires, so if we are too close (<= 500 cycles) to the
+        * after the IT fires, so if we are too close (<= 8000 cycles) to the
         * next cycle, simply skip it.
         */
-       if (next_tick - mfctl(16) <= 500)
+       if (next_tick - now <= 8000)
                next_tick += cpt;
        mtctl(next_tick, 16);
 
@@ -248,7 +249,7 @@ static int __init init_cr16_clocksource(void)
         * different sockets, so mark them unstable and lower rating on
         * multi-socket SMP systems.
         */
-       if (num_online_cpus() > 1) {
+       if (num_online_cpus() > 1 && !running_on_qemu) {
                int cpu;
                unsigned long cpu0_loc;
                cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
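
Two things change in timer_interrupt() above: CR16 is now sampled once into "now" and reused, instead of being re-read in every comparison, and the "too close" threshold for reprogramming the interval timer grows from 500 to 8000 cycles. The reprogramming step, reduced to a sketch:

    /* Simplified restatement of the next_tick logic above. */
    static unsigned long pick_next_tick(unsigned long next_tick,
                                        unsigned long now, unsigned long cpt)
    {
            while (next_tick - now > cpt)   /* catch up whole elapsed periods */
                    next_tick += cpt;
            if (next_tick - now <= 8000)    /* too close to fire reliably, skip one */
                    next_tick += cpt;
            return next_tick;               /* value written back to CR16 */
    }

The cr16 clocksource hunk is separate: with running_on_qemu set, the cross-CPU stability check is skipped, presumably because the emulated CR16 is consistent across virtual CPUs.
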
index 48f41399fc0b8b63acd84d774a09ad2d0aba5086..cab32ee824d2ac4b7fe9adf4f3bb25533cc043c6 100644 (file)
@@ -629,7 +629,12 @@ void __init mem_init(void)
 #endif
 
        mem_init_print_info(NULL);
-#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
+
+#if 0
+       /*
+        * Do not expose the virtual kernel memory layout to userspace.
+        * But keep code for debugging purposes.
+        */
        printk("virtual kernel memory layout:\n"
               "    vmalloc : 0x%px - 0x%px   (%4ld MB)\n"
               "    memory  : 0x%px - 0x%px   (%4ld MB)\n"
index 511acfd7ab0d3e20066dcd60e22bd3048ad34119..535add3f779133fefcb1422616d0fc3fef23588b 100644 (file)
@@ -52,7 +52,7 @@
 #define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000)
 #define FW_FEATURE_PRRN                ASM_CONST(0x0000000200000000)
 #define FW_FEATURE_DRMEM_V2    ASM_CONST(0x0000000400000000)
-#define FW_FEATURE_DRC_INFO    ASM_CONST(0x0000000400000000)
+#define FW_FEATURE_DRC_INFO    ASM_CONST(0x0000000800000000)
 
 #ifndef __ASSEMBLY__
 
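
The one-line change above gives FW_FEATURE_DRC_INFO its own bit; it previously reused the value already taken by FW_FEATURE_DRMEM_V2 two lines up. A build-time check of the kind that would have caught the overlap could look like this (the helper is hypothetical, BUILD_BUG_ON and the two constants are real):

    static inline void fw_feature_bits_distinct(void)
    {
            /* hypothetical sanity check, not part of the patch */
            BUILD_BUG_ON(FW_FEATURE_DRMEM_V2 & FW_FEATURE_DRC_INFO);
    }
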
index beea2182d754bcd2d1f79184bcef1d390525cbb3..0c0b66fc5bfb32f6c61e8fa454fbfe1a467db89b 100644 (file)
@@ -384,7 +384,8 @@ static void *eeh_report_resume(void *data, void *userdata)
        eeh_pcid_put(dev);
        pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);
 #ifdef CONFIG_PCI_IOV
-       eeh_ops->notify_resume(eeh_dev_to_pdn(edev));
+       if (eeh_ops->notify_resume && eeh_dev_to_pdn(edev))
+               eeh_ops->notify_resume(eeh_dev_to_pdn(edev));
 #endif
        return NULL;
 }
index adf044daafd763a544685b92f60dbe434cc64f90..d22c41c26bb309f7e5319994dab91c60eff3e14f 100644 (file)
@@ -874,7 +874,7 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
                .mmu = 0,
                .hash_ext = 0,
                .radix_ext = 0,
-               .byte22 = OV5_FEAT(OV5_DRC_INFO),
+               .byte22 = 0,
        },
 
        /* option vector 6: IBM PAPR hints */
index f0f5cd4d2fe7cf796336cd56cc3d8c9011be8cab..f9818d7d3381d8c072605534202fb6c91b7d58e4 100644 (file)
@@ -188,7 +188,7 @@ static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
        if (!qpage) {
                pr_err("Failed to allocate queue %d for VCPU %d\n",
                       prio, xc->server_num);
-               return -ENOMEM;;
+               return -ENOMEM;
        }
        memset(qpage, 0, 1 << xive->q_order);
 
index 916844f99c64e59655d3372ac4e69c731f0751e9..3f1803672c9bb1062da0238999a82338f7c3c387 100644 (file)
@@ -98,7 +98,7 @@ static void init_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
        dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
        dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
        dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
-       dr_cell->flags = cpu_to_be32(lmb->flags);
+       dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));
 }
 
 static int drmem_update_dt_v2(struct device_node *memory,
@@ -121,7 +121,7 @@ static int drmem_update_dt_v2(struct device_node *memory,
                }
 
                if (prev_lmb->aa_index != lmb->aa_index ||
-                   prev_lmb->flags != lmb->flags)
+                   drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb))
                        lmb_sets++;
 
                prev_lmb = lmb;
@@ -150,7 +150,7 @@ static int drmem_update_dt_v2(struct device_node *memory,
                }
 
                if (prev_lmb->aa_index != lmb->aa_index ||
-                   prev_lmb->flags != lmb->flags) {
+                   drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb)) {
                        /* end of one set, start of another */
                        dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
                        dr_cell++;
index 872d1f6dd11e179793c4394e48313649028820e1..a9636d8cba153a1fb43469c1b8070b59ae4ba210 100644 (file)
@@ -327,6 +327,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
                        PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
                        break;
+               case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */
+                       PPC_LWZ_OFFS(r_A, r_skb, K);
+                       break;
                case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
                        PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
                        break;
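
The new JIT case above implements the classic-BPF load "A = *((u32 *)(seccomp_data + K))", i.e. an absolute 32-bit load that seccomp filters use to read fields of struct seccomp_data. For orientation, a minimal user-space filter built around such a load might look as follows (purely illustrative; it loads the arch field and then allows everything, and no claim is made that this exact program exercises the ppc32 JIT path):

    #include <stddef.h>
    #include <linux/filter.h>
    #include <linux/seccomp.h>

    static struct sock_filter allow_all[] = {
            /* absolute 32-bit load from struct seccomp_data */
            BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
                     offsetof(struct seccomp_data, arch)),
            BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
    };
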
index 496e47696ed0c57c413b6ce4c920c7c3beda745f..a6c92c78c9b20b9cc2507cfb5c3e749e05a6e19d 100644 (file)
@@ -1854,7 +1854,7 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
        s64 rc;
 
        if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
-               return -ENODEV;;
+               return -ENODEV;
 
        pe = &phb->ioda.pe_array[pdn->pe_number];
        if (pe->tce_bypass_enabled) {
index 4fb21e17504aad72295a9e1cdffe54c5547379bd..092715b9674bb93309b3793ed4ccd2719407e5db 100644 (file)
@@ -80,6 +80,10 @@ static void pnv_setup_rfi_flush(void)
                if (np && of_property_read_bool(np, "disabled"))
                        enable--;
 
+               np = of_get_child_by_name(fw_features, "speculation-policy-favor-security");
+               if (np && of_property_read_bool(np, "disabled"))
+                       enable = 0;
+
                of_node_put(np);
                of_node_put(fw_features);
        }
index 372d7ada1a0c115aa9078cb1ba0d01d69a80a73d..1a527625acf78dea27f7bde430e23f02f12f5baf 100644 (file)
@@ -482,7 +482,8 @@ static void pseries_setup_rfi_flush(void)
                if (types == L1D_FLUSH_NONE)
                        types = L1D_FLUSH_FALLBACK;
 
-               if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
+               if ((!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) ||
+                   (!(result.behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)))
                        enable = false;
        } else {
                /* Default to fallback if case hcall is not available */
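
Both RFI-flush hunks above tighten the enable condition: on powernv the flush is turned off when firmware marks speculation-policy-favor-security as disabled, and on pseries it now requires H_CPU_BEHAV_FAVOUR_SECURITY in addition to H_CPU_BEHAV_L1D_FLUSH_PR. The pseries condition, restated as a predicate (illustrative helper, real flag names):

    static bool l1d_flush_wanted(u64 behaviour)
    {
            return (behaviour & H_CPU_BEHAV_L1D_FLUSH_PR) &&
                   (behaviour & H_CPU_BEHAV_FAVOUR_SECURITY);
    }
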
index b6722c246d9c80b81e26fd30ef3f60b2281d1f20..04807c7f64cc58f7b2e4be1ec1d10b4cf1b018a8 100644 (file)
@@ -8,7 +8,6 @@ config RISCV
        select OF
        select OF_EARLY_FLATTREE
        select OF_IRQ
-       select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
        select ARCH_WANT_FRAME_POINTERS
        select CLONE_BACKWARDS
        select COMMON_CLK
@@ -20,7 +19,6 @@ config RISCV
        select GENERIC_STRNLEN_USER
        select GENERIC_SMP_IDLE_THREAD
        select GENERIC_ATOMIC64 if !64BIT || !RISCV_ISA_A
-       select ARCH_WANT_OPTIONAL_GPIOLIB
        select HAVE_MEMBLOCK
        select HAVE_MEMBLOCK_NODE_MAP
        select HAVE_DMA_API_DEBUG
@@ -34,7 +32,6 @@ config RISCV
        select HAVE_ARCH_TRACEHOOK
        select MODULES_USE_ELF_RELA if MODULES
        select THREAD_INFO_IN_TASK
-       select RISCV_IRQ_INTC
        select RISCV_TIMER
 
 config MMU
index c0319cbf1eec58d7ea8960259838b865c23f49a1..5510366d169aea821e9f09ffe613363a2009db93 100644 (file)
@@ -34,9 +34,9 @@
 #define wmb()          RISCV_FENCE(ow,ow)
 
 /* These barriers do not need to enforce ordering on devices, just memory. */
-#define smp_mb()       RISCV_FENCE(rw,rw)
-#define smp_rmb()      RISCV_FENCE(r,r)
-#define smp_wmb()      RISCV_FENCE(w,w)
+#define __smp_mb()     RISCV_FENCE(rw,rw)
+#define __smp_rmb()    RISCV_FENCE(r,r)
+#define __smp_wmb()    RISCV_FENCE(w,w)
 
 /*
  * This is a very specific barrier: it's currently only used in two places in
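
Renaming smp_mb()/smp_rmb()/smp_wmb() to their double-underscore forms lets asm-generic/barrier.h provide the public names: on SMP builds it maps them onto the architecture's __smp_*() fences, on !SMP builds it degrades them to plain compiler barriers. Roughly (a simplified sketch of the generic header, not RISC-V code):

    #ifdef CONFIG_SMP
    #ifndef smp_mb
    #define smp_mb()        __smp_mb()
    #endif
    #else   /* !CONFIG_SMP */
    #ifndef smp_mb
    #define smp_mb()        barrier()
    #endif
    #endif
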
index 87fc045be51fa8078f5176de0f384c79a4a687c5..56fa592cfa349b9cbf1e2e9e580bbd4c27bb9d51 100644 (file)
@@ -172,6 +172,9 @@ ENTRY(handle_exception)
        move a1, sp /* pt_regs */
        tail do_IRQ
 1:
+       /* Exceptions run with interrupts enabled */
+       csrs sstatus, SR_SIE
+
        /* Handle syscalls */
        li t0, EXC_SYSCALL
        beq s4, t0, handle_syscall
@@ -198,8 +201,6 @@ handle_syscall:
         */
        addi s2, s2, 0x4
        REG_S s2, PT_SEPC(sp)
-       /* System calls run with interrupts enabled */
-       csrs sstatus, SR_SIE
        /* Trace syscalls, but only if requested by the user. */
        REG_L t0, TASK_TI_FLAGS(tp)
        andi t0, t0, _TIF_SYSCALL_TRACE
index 226eeb190f908daef2a714944599e69c35271a4d..6e07ed37bbff794bb52664670b42c10c59810e01 100644 (file)
@@ -64,7 +64,7 @@ ENTRY(_start)
        /* Start the kernel */
        mv a0, s0
        mv a1, s1
-       call sbi_save
+       call parse_dtb
        tail start_kernel
 
 relocate:
index 09f7064e898cc7616628feee3e46bd48fcaef5e6..c11f40c1b2a881f8cf779988f4838a806104d042 100644 (file)
@@ -144,7 +144,7 @@ asmlinkage void __init setup_vm(void)
 #endif
 }
 
-void __init sbi_save(unsigned int hartid, void *dtb)
+void __init parse_dtb(unsigned int hartid, void *dtb)
 {
        early_init_dt_scan(__va(dtb));
 }
index 9c7d707158622e7f0743570db60599fa95a9de3b..07c6e81163bf5e248c1b744f69b75548aeae5d44 100644 (file)
 #include "trace.h"
 #include "trace-s390.h"
 
-
-static const intercept_handler_t instruction_handlers[256] = {
-       [0x01] = kvm_s390_handle_01,
-       [0x82] = kvm_s390_handle_lpsw,
-       [0x83] = kvm_s390_handle_diag,
-       [0xaa] = kvm_s390_handle_aa,
-       [0xae] = kvm_s390_handle_sigp,
-       [0xb2] = kvm_s390_handle_b2,
-       [0xb6] = kvm_s390_handle_stctl,
-       [0xb7] = kvm_s390_handle_lctl,
-       [0xb9] = kvm_s390_handle_b9,
-       [0xe3] = kvm_s390_handle_e3,
-       [0xe5] = kvm_s390_handle_e5,
-       [0xeb] = kvm_s390_handle_eb,
-};
-
 u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
 {
        struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
@@ -129,16 +113,39 @@ static int handle_validity(struct kvm_vcpu *vcpu)
 
 static int handle_instruction(struct kvm_vcpu *vcpu)
 {
-       intercept_handler_t handler;
-
        vcpu->stat.exit_instruction++;
        trace_kvm_s390_intercept_instruction(vcpu,
                                             vcpu->arch.sie_block->ipa,
                                             vcpu->arch.sie_block->ipb);
-       handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
-       if (handler)
-               return handler(vcpu);
-       return -EOPNOTSUPP;
+
+       switch (vcpu->arch.sie_block->ipa >> 8) {
+       case 0x01:
+               return kvm_s390_handle_01(vcpu);
+       case 0x82:
+               return kvm_s390_handle_lpsw(vcpu);
+       case 0x83:
+               return kvm_s390_handle_diag(vcpu);
+       case 0xaa:
+               return kvm_s390_handle_aa(vcpu);
+       case 0xae:
+               return kvm_s390_handle_sigp(vcpu);
+       case 0xb2:
+               return kvm_s390_handle_b2(vcpu);
+       case 0xb6:
+               return kvm_s390_handle_stctl(vcpu);
+       case 0xb7:
+               return kvm_s390_handle_lctl(vcpu);
+       case 0xb9:
+               return kvm_s390_handle_b9(vcpu);
+       case 0xe3:
+               return kvm_s390_handle_e3(vcpu);
+       case 0xe5:
+               return kvm_s390_handle_e5(vcpu);
+       case 0xeb:
+               return kvm_s390_handle_eb(vcpu);
+       default:
+               return -EOPNOTSUPP;
+       }
 }
 
 static int inject_prog_on_prog_intercept(struct kvm_vcpu *vcpu)
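
This hunk, like the later interrupt.c and priv.c hunks, replaces a function-pointer table with an explicit switch. Behaviour is unchanged; the dispatch just no longer goes through an indirect call, which is comparatively expensive once retpoline-style mitigations are in effect (presumably the motivation here). In miniature, with illustrative wrappers around names taken from the patch:

    typedef int (*handler_t)(struct kvm_vcpu *vcpu);

    /* old style: one indirect call through a 256-entry table */
    static int dispatch_table(struct kvm_vcpu *vcpu, u8 op,
                              const handler_t handlers[256])
    {
            return handlers[op] ? handlers[op](vcpu) : -EOPNOTSUPP;
    }

    /* new style: direct calls selected by a switch */
    static int dispatch_switch(struct kvm_vcpu *vcpu, u8 op)
    {
            switch (op) {
            case 0x83:
                    return kvm_s390_handle_diag(vcpu);
            /* ... one case per handled opcode ... */
            default:
                    return -EOPNOTSUPP;
            }
    }
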
index aabf46f5f883d44d71cddc88b5ef28ec677ff200..b04616b57a94713a24ed19b142adb94b8c96e748 100644 (file)
@@ -169,8 +169,15 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
 
 static int ckc_irq_pending(struct kvm_vcpu *vcpu)
 {
-       if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))
+       const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
+       const u64 ckc = vcpu->arch.sie_block->ckc;
+
+       if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
+               if ((s64)ckc >= (s64)now)
+                       return 0;
+       } else if (ckc >= now) {
                return 0;
+       }
        return ckc_interrupts_enabled(vcpu);
 }
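
ckc_irq_pending() above (and __calculate_sltime() further down) now honour a control bit in guest control register 0: with the bit set, the TOD clock and the clock comparator are compared as signed 64-bit values, otherwise as unsigned. The comparison rule, isolated:

    /* Illustrative restatement of the check above. */
    static bool ckc_has_fired(u64 now, u64 ckc, bool signed_cmp)
    {
            if (signed_cmp)
                    return (s64)ckc < (s64)now;
            return ckc < now;
    }
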
 
@@ -187,12 +194,6 @@ static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
        return kvm_s390_get_cpu_timer(vcpu) >> 63;
 }
 
-static inline int is_ioirq(unsigned long irq_type)
-{
-       return ((irq_type >= IRQ_PEND_IO_ISC_7) &&
-               (irq_type <= IRQ_PEND_IO_ISC_0));
-}
-
 static uint64_t isc_to_isc_bits(int isc)
 {
        return (0x80 >> isc) << 24;
@@ -236,10 +237,15 @@ static inline int kvm_s390_gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gis
        return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
 }
 
-static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
+static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu)
 {
        return vcpu->kvm->arch.float_int.pending_irqs |
-               vcpu->arch.local_int.pending_irqs |
+               vcpu->arch.local_int.pending_irqs;
+}
+
+static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
+{
+       return pending_irqs_no_gisa(vcpu) |
                kvm_s390_gisa_get_ipm(vcpu->kvm->arch.gisa) << IRQ_PEND_IO_ISC_7;
 }
 
@@ -337,7 +343,7 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 
 static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
 {
-       if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK))
+       if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_IO_MASK))
                return;
        else if (psw_ioint_disabled(vcpu))
                kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT);
@@ -1011,24 +1017,6 @@ out:
        return rc;
 }
 
-typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);
-
-static const deliver_irq_t deliver_irq_funcs[] = {
-       [IRQ_PEND_MCHK_EX]        = __deliver_machine_check,
-       [IRQ_PEND_MCHK_REP]       = __deliver_machine_check,
-       [IRQ_PEND_PROG]           = __deliver_prog,
-       [IRQ_PEND_EXT_EMERGENCY]  = __deliver_emergency_signal,
-       [IRQ_PEND_EXT_EXTERNAL]   = __deliver_external_call,
-       [IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
-       [IRQ_PEND_EXT_CPU_TIMER]  = __deliver_cpu_timer,
-       [IRQ_PEND_RESTART]        = __deliver_restart,
-       [IRQ_PEND_SET_PREFIX]     = __deliver_set_prefix,
-       [IRQ_PEND_PFAULT_INIT]    = __deliver_pfault_init,
-       [IRQ_PEND_EXT_SERVICE]    = __deliver_service,
-       [IRQ_PEND_PFAULT_DONE]    = __deliver_pfault_done,
-       [IRQ_PEND_VIRTIO]         = __deliver_virtio,
-};
-
 /* Check whether an external call is pending (deliverable or not) */
 int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
 {
@@ -1066,13 +1054,19 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 
 static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
 {
-       u64 now, cputm, sltime = 0;
+       const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
+       const u64 ckc = vcpu->arch.sie_block->ckc;
+       u64 cputm, sltime = 0;
 
        if (ckc_interrupts_enabled(vcpu)) {
-               now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
-               sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
-               /* already expired or overflow? */
-               if (!sltime || vcpu->arch.sie_block->ckc <= now)
+               if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
+                       if ((s64)now < (s64)ckc)
+                               sltime = tod_to_ns((s64)ckc - (s64)now);
+               } else if (now < ckc) {
+                       sltime = tod_to_ns(ckc - now);
+               }
+               /* already expired */
+               if (!sltime)
                        return 0;
                if (cpu_timer_interrupts_enabled(vcpu)) {
                        cputm = kvm_s390_get_cpu_timer(vcpu);
@@ -1192,7 +1186,6 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
 int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 {
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
-       deliver_irq_t func;
        int rc = 0;
        unsigned long irq_type;
        unsigned long irqs;
@@ -1212,16 +1205,57 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
        while ((irqs = deliverable_irqs(vcpu)) && !rc) {
                /* bits are in the reverse order of interrupt priority */
                irq_type = find_last_bit(&irqs, IRQ_PEND_COUNT);
-               if (is_ioirq(irq_type)) {
+               switch (irq_type) {
+               case IRQ_PEND_IO_ISC_0:
+               case IRQ_PEND_IO_ISC_1:
+               case IRQ_PEND_IO_ISC_2:
+               case IRQ_PEND_IO_ISC_3:
+               case IRQ_PEND_IO_ISC_4:
+               case IRQ_PEND_IO_ISC_5:
+               case IRQ_PEND_IO_ISC_6:
+               case IRQ_PEND_IO_ISC_7:
                        rc = __deliver_io(vcpu, irq_type);
-               } else {
-                       func = deliver_irq_funcs[irq_type];
-                       if (!func) {
-                               WARN_ON_ONCE(func == NULL);
-                               clear_bit(irq_type, &li->pending_irqs);
-                               continue;
-                       }
-                       rc = func(vcpu);
+                       break;
+               case IRQ_PEND_MCHK_EX:
+               case IRQ_PEND_MCHK_REP:
+                       rc = __deliver_machine_check(vcpu);
+                       break;
+               case IRQ_PEND_PROG:
+                       rc = __deliver_prog(vcpu);
+                       break;
+               case IRQ_PEND_EXT_EMERGENCY:
+                       rc = __deliver_emergency_signal(vcpu);
+                       break;
+               case IRQ_PEND_EXT_EXTERNAL:
+                       rc = __deliver_external_call(vcpu);
+                       break;
+               case IRQ_PEND_EXT_CLOCK_COMP:
+                       rc = __deliver_ckc(vcpu);
+                       break;
+               case IRQ_PEND_EXT_CPU_TIMER:
+                       rc = __deliver_cpu_timer(vcpu);
+                       break;
+               case IRQ_PEND_RESTART:
+                       rc = __deliver_restart(vcpu);
+                       break;
+               case IRQ_PEND_SET_PREFIX:
+                       rc = __deliver_set_prefix(vcpu);
+                       break;
+               case IRQ_PEND_PFAULT_INIT:
+                       rc = __deliver_pfault_init(vcpu);
+                       break;
+               case IRQ_PEND_EXT_SERVICE:
+                       rc = __deliver_service(vcpu);
+                       break;
+               case IRQ_PEND_PFAULT_DONE:
+                       rc = __deliver_pfault_done(vcpu);
+                       break;
+               case IRQ_PEND_VIRTIO:
+                       rc = __deliver_virtio(vcpu);
+                       break;
+               default:
+                       WARN_ONCE(1, "Unknown pending irq type %ld", irq_type);
+                       clear_bit(irq_type, &li->pending_irqs);
                }
        }
 
@@ -1701,7 +1735,8 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
                kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT);
                break;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-               kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
+               if (!(type & KVM_S390_INT_IO_AI_MASK && kvm->arch.gisa))
+                       kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
                break;
        default:
                kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_EXT_INT);
index ba4c7092335ad254fe0d385b99aa231dbecd2d53..77d7818130db49cb0108acf68a3d737e064cb918 100644 (file)
@@ -179,6 +179,28 @@ int kvm_arch_hardware_enable(void)
 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
                              unsigned long end);
 
+static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
+{
+       u8 delta_idx = 0;
+
+       /*
+        * The TOD jumps by delta, we have to compensate this by adding
+        * -delta to the epoch.
+        */
+       delta = -delta;
+
+       /* sign-extension - we're adding to signed values below */
+       if ((s64)delta < 0)
+               delta_idx = -1;
+
+       scb->epoch += delta;
+       if (scb->ecd & ECD_MEF) {
+               scb->epdx += delta_idx;
+               if (scb->epoch < delta)
+                       scb->epdx += 1;
+       }
+}
+
 /*
  * This callback is executed during stop_machine(). All CPUs are therefore
  * temporarily stopped. In order not to change guest behavior, we have to
@@ -194,13 +216,17 @@ static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
        unsigned long long *delta = v;
 
        list_for_each_entry(kvm, &vm_list, vm_list) {
-               kvm->arch.epoch -= *delta;
                kvm_for_each_vcpu(i, vcpu, kvm) {
-                       vcpu->arch.sie_block->epoch -= *delta;
+                       kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
+                       if (i == 0) {
+                               kvm->arch.epoch = vcpu->arch.sie_block->epoch;
+                               kvm->arch.epdx = vcpu->arch.sie_block->epdx;
+                       }
                        if (vcpu->arch.cputm_enabled)
                                vcpu->arch.cputm_start += *delta;
                        if (vcpu->arch.vsie_block)
-                               vcpu->arch.vsie_block->epoch -= *delta;
+                               kvm_clock_sync_scb(vcpu->arch.vsie_block,
+                                                  *delta);
                }
        }
        return NOTIFY_OK;
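
kvm_clock_sync_scb() above adjusts a 128-bit guest epoch, kept as epdx (high part) and epoch (low part), by the TOD delta. Stripping the initial negation and the multiple-epoch facility check, the arithmetic is a 64-bit add whose carry, plus the sign extension of the delta, is folded into the high part:

    /* Simplified restatement of the epoch update in kvm_clock_sync_scb(). */
    static void epoch_add(u64 *epoch, u8 *epdx, u64 delta)
    {
            u8 delta_idx = ((s64)delta < 0) ? (u8)-1 : 0;   /* sign extension */

            *epoch += delta;
            *epdx += delta_idx;
            if (*epoch < delta)     /* carry out of the low 64 bits */
                    *epdx += 1;
    }
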
@@ -902,12 +928,9 @@ static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
        if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
                return -EFAULT;
 
-       if (test_kvm_facility(kvm, 139))
-               kvm_s390_set_tod_clock_ext(kvm, &gtod);
-       else if (gtod.epoch_idx == 0)
-               kvm_s390_set_tod_clock(kvm, gtod.tod);
-       else
+       if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
                return -EINVAL;
+       kvm_s390_set_tod_clock(kvm, &gtod);
 
        VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
                gtod.epoch_idx, gtod.tod);
@@ -932,13 +955,14 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 
 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 {
-       u64 gtod;
+       struct kvm_s390_vm_tod_clock gtod = { 0 };
 
-       if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
+       if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
+                          sizeof(gtod.tod)))
                return -EFAULT;
 
-       kvm_s390_set_tod_clock(kvm, gtod);
-       VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
+       kvm_s390_set_tod_clock(kvm, &gtod);
+       VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
        return 0;
 }
 
@@ -2389,6 +2413,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
        mutex_lock(&vcpu->kvm->lock);
        preempt_disable();
        vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
+       vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
        preempt_enable();
        mutex_unlock(&vcpu->kvm->lock);
        if (!kvm_is_ucontrol(vcpu->kvm)) {
@@ -3021,8 +3046,8 @@ retry:
        return 0;
 }
 
-void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
-                                const struct kvm_s390_vm_tod_clock *gtod)
+void kvm_s390_set_tod_clock(struct kvm *kvm,
+                           const struct kvm_s390_vm_tod_clock *gtod)
 {
        struct kvm_vcpu *vcpu;
        struct kvm_s390_tod_clock_ext htod;
@@ -3034,10 +3059,12 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
        get_tod_clock_ext((char *)&htod);
 
        kvm->arch.epoch = gtod->tod - htod.tod;
-       kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
-
-       if (kvm->arch.epoch > gtod->tod)
-               kvm->arch.epdx -= 1;
+       kvm->arch.epdx = 0;
+       if (test_kvm_facility(kvm, 139)) {
+               kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
+               if (kvm->arch.epoch > gtod->tod)
+                       kvm->arch.epdx -= 1;
+       }
 
        kvm_s390_vcpu_block_all(kvm);
        kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -3050,22 +3077,6 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
        mutex_unlock(&kvm->lock);
 }
 
-void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
-{
-       struct kvm_vcpu *vcpu;
-       int i;
-
-       mutex_lock(&kvm->lock);
-       preempt_disable();
-       kvm->arch.epoch = tod - get_tod_clock();
-       kvm_s390_vcpu_block_all(kvm);
-       kvm_for_each_vcpu(i, vcpu, kvm)
-               vcpu->arch.sie_block->epoch = kvm->arch.epoch;
-       kvm_s390_vcpu_unblock_all(kvm);
-       preempt_enable();
-       mutex_unlock(&kvm->lock);
-}
-
 /**
  * kvm_arch_fault_in_page - fault-in guest page if necessary
  * @vcpu: The corresponding virtual cpu
index bd31b37b0e6f83905e7204b2eb439050aaeb1187..f55ac0ef99ea70bf3fb1d8f7eb7e303eb405b2b4 100644 (file)
@@ -19,8 +19,6 @@
 #include <asm/processor.h>
 #include <asm/sclp.h>
 
-typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
-
 /* Transactional Memory Execution related macros */
 #define IS_TE_ENABLED(vcpu)    ((vcpu->arch.sie_block->ecb & ECB_TE))
 #define TDB_FORMAT1            1
@@ -283,9 +281,8 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
 
 /* implemented in kvm-s390.c */
-void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
-                                const struct kvm_s390_vm_tod_clock *gtod);
-void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod);
+void kvm_s390_set_tod_clock(struct kvm *kvm,
+                           const struct kvm_s390_vm_tod_clock *gtod);
 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
index c4c4e157c03631a1d745ccafdfec111a6a08a49e..f0b4185158afcb229cac5b06f23f5664afffebec 100644 (file)
@@ -85,9 +85,10 @@ int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
 /* Handle SCK (SET CLOCK) interception */
 static int handle_set_clock(struct kvm_vcpu *vcpu)
 {
+       struct kvm_s390_vm_tod_clock gtod = { 0 };
        int rc;
        u8 ar;
-       u64 op2, val;
+       u64 op2;
 
        vcpu->stat.instruction_sck++;
 
@@ -97,12 +98,12 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
        op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
        if (op2 & 7)    /* Operand must be on a doubleword boundary */
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-       rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
+       rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
 
-       VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
-       kvm_s390_set_tod_clock(vcpu->kvm, val);
+       VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
+       kvm_s390_set_tod_clock(vcpu->kvm, &gtod);
 
        kvm_s390_set_psw_cc(vcpu, 0);
        return 0;
@@ -795,55 +796,60 @@ out:
        return rc;
 }
 
-static const intercept_handler_t b2_handlers[256] = {
-       [0x02] = handle_stidp,
-       [0x04] = handle_set_clock,
-       [0x10] = handle_set_prefix,
-       [0x11] = handle_store_prefix,
-       [0x12] = handle_store_cpu_address,
-       [0x14] = kvm_s390_handle_vsie,
-       [0x21] = handle_ipte_interlock,
-       [0x29] = handle_iske,
-       [0x2a] = handle_rrbe,
-       [0x2b] = handle_sske,
-       [0x2c] = handle_test_block,
-       [0x30] = handle_io_inst,
-       [0x31] = handle_io_inst,
-       [0x32] = handle_io_inst,
-       [0x33] = handle_io_inst,
-       [0x34] = handle_io_inst,
-       [0x35] = handle_io_inst,
-       [0x36] = handle_io_inst,
-       [0x37] = handle_io_inst,
-       [0x38] = handle_io_inst,
-       [0x39] = handle_io_inst,
-       [0x3a] = handle_io_inst,
-       [0x3b] = handle_io_inst,
-       [0x3c] = handle_io_inst,
-       [0x50] = handle_ipte_interlock,
-       [0x56] = handle_sthyi,
-       [0x5f] = handle_io_inst,
-       [0x74] = handle_io_inst,
-       [0x76] = handle_io_inst,
-       [0x7d] = handle_stsi,
-       [0xb1] = handle_stfl,
-       [0xb2] = handle_lpswe,
-};
-
 int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
 {
-       intercept_handler_t handler;
-
-       /*
-        * A lot of B2 instructions are priviledged. Here we check for
-        * the privileged ones, that we can handle in the kernel.
-        * Anything else goes to userspace.
-        */
-       handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
-       if (handler)
-               return handler(vcpu);
-
-       return -EOPNOTSUPP;
+       switch (vcpu->arch.sie_block->ipa & 0x00ff) {
+       case 0x02:
+               return handle_stidp(vcpu);
+       case 0x04:
+               return handle_set_clock(vcpu);
+       case 0x10:
+               return handle_set_prefix(vcpu);
+       case 0x11:
+               return handle_store_prefix(vcpu);
+       case 0x12:
+               return handle_store_cpu_address(vcpu);
+       case 0x14:
+               return kvm_s390_handle_vsie(vcpu);
+       case 0x21:
+       case 0x50:
+               return handle_ipte_interlock(vcpu);
+       case 0x29:
+               return handle_iske(vcpu);
+       case 0x2a:
+               return handle_rrbe(vcpu);
+       case 0x2b:
+               return handle_sske(vcpu);
+       case 0x2c:
+               return handle_test_block(vcpu);
+       case 0x30:
+       case 0x31:
+       case 0x32:
+       case 0x33:
+       case 0x34:
+       case 0x35:
+       case 0x36:
+       case 0x37:
+       case 0x38:
+       case 0x39:
+       case 0x3a:
+       case 0x3b:
+       case 0x3c:
+       case 0x5f:
+       case 0x74:
+       case 0x76:
+               return handle_io_inst(vcpu);
+       case 0x56:
+               return handle_sthyi(vcpu);
+       case 0x7d:
+               return handle_stsi(vcpu);
+       case 0xb1:
+               return handle_stfl(vcpu);
+       case 0xb2:
+               return handle_lpswe(vcpu);
+       default:
+               return -EOPNOTSUPP;
+       }
 }
 
 static int handle_epsw(struct kvm_vcpu *vcpu)
@@ -1105,25 +1111,22 @@ static int handle_essa(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-static const intercept_handler_t b9_handlers[256] = {
-       [0x8a] = handle_ipte_interlock,
-       [0x8d] = handle_epsw,
-       [0x8e] = handle_ipte_interlock,
-       [0x8f] = handle_ipte_interlock,
-       [0xab] = handle_essa,
-       [0xaf] = handle_pfmf,
-};
-
 int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
 {
-       intercept_handler_t handler;
-
-       /* This is handled just as for the B2 instructions. */
-       handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
-       if (handler)
-               return handler(vcpu);
-
-       return -EOPNOTSUPP;
+       switch (vcpu->arch.sie_block->ipa & 0x00ff) {
+       case 0x8a:
+       case 0x8e:
+       case 0x8f:
+               return handle_ipte_interlock(vcpu);
+       case 0x8d:
+               return handle_epsw(vcpu);
+       case 0xab:
+               return handle_essa(vcpu);
+       case 0xaf:
+               return handle_pfmf(vcpu);
+       default:
+               return -EOPNOTSUPP;
+       }
 }
 
 int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
@@ -1271,22 +1274,20 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
        return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
 }
 
-static const intercept_handler_t eb_handlers[256] = {
-       [0x2f] = handle_lctlg,
-       [0x25] = handle_stctg,
-       [0x60] = handle_ri,
-       [0x61] = handle_ri,
-       [0x62] = handle_ri,
-};
-
 int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
 {
-       intercept_handler_t handler;
-
-       handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
-       if (handler)
-               return handler(vcpu);
-       return -EOPNOTSUPP;
+       switch (vcpu->arch.sie_block->ipb & 0x000000ff) {
+       case 0x25:
+               return handle_stctg(vcpu);
+       case 0x2f:
+               return handle_lctlg(vcpu);
+       case 0x60:
+       case 0x61:
+       case 0x62:
+               return handle_ri(vcpu);
+       default:
+               return -EOPNOTSUPP;
+       }
 }
 
 static int handle_tprot(struct kvm_vcpu *vcpu)
@@ -1346,10 +1347,12 @@ out_unlock:
 
 int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
 {
-       /* For e5xx... instructions we only handle TPROT */
-       if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
+       switch (vcpu->arch.sie_block->ipa & 0x00ff) {
+       case 0x01:
                return handle_tprot(vcpu);
-       return -EOPNOTSUPP;
+       default:
+               return -EOPNOTSUPP;
+       }
 }
 
 static int handle_sckpf(struct kvm_vcpu *vcpu)
@@ -1380,17 +1383,14 @@ static int handle_ptff(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-static const intercept_handler_t x01_handlers[256] = {
-       [0x04] = handle_ptff,
-       [0x07] = handle_sckpf,
-};
-
 int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
 {
-       intercept_handler_t handler;
-
-       handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
-       if (handler)
-               return handler(vcpu);
-       return -EOPNOTSUPP;
+       switch (vcpu->arch.sie_block->ipa & 0x00ff) {
+       case 0x04:
+               return handle_ptff(vcpu);
+       case 0x07:
+               return handle_sckpf(vcpu);
+       default:
+               return -EOPNOTSUPP;
+       }
 }
index ec772700ff9659350543534272a92eb531bbd181..8961e3970901d4b06c87b20f147115b683ad5170 100644 (file)
@@ -821,6 +821,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 {
        struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
        struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
+       int guest_bp_isolation;
        int rc;
 
        handle_last_fault(vcpu, vsie_page);
@@ -831,6 +832,20 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
                s390_handle_mcck();
 
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+
+       /* save current guest state of bp isolation override */
+       guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);
+
+       /*
+        * The guest is running with BPBC, so we have to force it on for our
+        * nested guest. This is done by enabling BPBC globally, so the BPBC
+        * control in the SCB (which the nested guest can modify) is simply
+        * ignored.
+        */
+       if (test_kvm_facility(vcpu->kvm, 82) &&
+           vcpu->arch.sie_block->fpf & FPF_BPBC)
+               set_thread_flag(TIF_ISOLATE_BP_GUEST);
+
        local_irq_disable();
        guest_enter_irqoff();
        local_irq_enable();
@@ -840,6 +855,11 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
        local_irq_disable();
        guest_exit_irqoff();
        local_irq_enable();
+
+       /* restore guest state for bp isolation override */
+       if (!guest_bp_isolation)
+               clear_thread_flag(TIF_ISOLATE_BP_GUEST);
+
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 
        if (rc == -EINTR) {
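
The vsie changes above bracket the nested-guest SIE entry with a save/force/restore of the per-thread TIF_ISOLATE_BP_GUEST flag: if the first-level guest runs with BPBC, branch-prediction isolation is forced on globally for the nested guest, so the guest-writable BPBC control in the shadow SCB cannot switch it off. The flag handling on its own (sketch; the actual SIE entry and error handling are elided):

    static int run_vsie_sketch(struct kvm_vcpu *vcpu)
    {
            int was_set = test_thread_flag(TIF_ISOLATE_BP_GUEST);
            int rc = 0;

            if (test_kvm_facility(vcpu->kvm, 82) &&
                vcpu->arch.sie_block->fpf & FPF_BPBC)
                    set_thread_flag(TIF_ISOLATE_BP_GUEST);

            /* ... run the nested guest here ... */

            if (!was_set)
                    clear_thread_flag(TIF_ISOLATE_BP_GUEST);
            return rc;
    }
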
index 715def00a436c21a1822e18251b83f6afe86941a..01d0f7fb14cce7b8e0afc17798d6cc8a1d2938d1 100644 (file)
@@ -1 +1,3 @@
-obj-$(CONFIG_USE_BUILTIN_DTB) += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o
+ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"")
+obj-y += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o
+endif
index 6f17528356b2f71c8a71764a0ac2715a3398a33f..ea53e418f6c045763ff030a1dc2fa73d524c38b8 100644 (file)
@@ -9,10 +9,14 @@
 void do_BUG(const char *file, int line);
 #define BUG() do {                                     \
        do_BUG(__FILE__, __LINE__);                     \
+       barrier_before_unreachable();                   \
        __builtin_trap();                               \
 } while (0)
 #else
-#define BUG()          __builtin_trap()
+#define BUG() do {                                     \
+       barrier_before_unreachable();                   \
+       __builtin_trap();                               \
+} while (0)
 #endif
 
 #define HAVE_ARCH_BUG
index c1236b187824e222a2c7fddd417272369767b06b..eb7f43f235211257bca63e83ad0e33d9fa4d7ba3 100644 (file)
@@ -430,6 +430,7 @@ config GOLDFISH
 config RETPOLINE
        bool "Avoid speculative indirect branches in kernel"
        default y
+       select STACK_VALIDATION if HAVE_STACK_VALIDATION
        help
          Compile kernel with the retpoline compiler options to guard against
          kernel-to-user data leaks by avoiding speculative indirect
index fad55160dcb94a28e60d537d3d69d471a1e10e2e..498c1b8123006add6ad685a1fea6239208509810 100644 (file)
@@ -232,10 +232,9 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 
 # Avoid indirect branches in kernel to deal with Spectre
 ifdef CONFIG_RETPOLINE
-    RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
-    ifneq ($(RETPOLINE_CFLAGS),)
-        KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
-    endif
+ifneq ($(RETPOLINE_CFLAGS),)
+  KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
+endif
 endif
 
 archscripts: scripts_basic
index 353e20c3f114f3132ef18dbc03e7a8110f4e22df..886a9115af6229d058cab4f0600ebd666fc9e6d3 100644 (file)
@@ -439,7 +439,7 @@ setup_uga32(void **uga_handle, unsigned long size, u32 *width, u32 *height)
        struct efi_uga_draw_protocol *uga = NULL, *first_uga;
        efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
        unsigned long nr_ugas;
-       u32 *handles = (u32 *)uga_handle;;
+       u32 *handles = (u32 *)uga_handle;
        efi_status_t status = EFI_INVALID_PARAMETER;
        int i;
 
@@ -484,7 +484,7 @@ setup_uga64(void **uga_handle, unsigned long size, u32 *width, u32 *height)
        struct efi_uga_draw_protocol *uga = NULL, *first_uga;
        efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
        unsigned long nr_ugas;
-       u64 *handles = (u64 *)uga_handle;;
+       u64 *handles = (u64 *)uga_handle;
        efi_status_t status = EFI_INVALID_PARAMETER;
        int i;
 
index dce7092ab24a247c1165f80b17c687d255023a05..be63330c551129cb6bf0fcf07d8bf6d4600cb098 100644 (file)
@@ -97,7 +97,7 @@ For 32-bit we have the following conventions - kernel is built with
 
 #define SIZEOF_PTREGS  21*8
 
-.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax
+.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
        /*
         * Push registers and sanitize registers of values that a
         * speculation attack might otherwise want to exploit. The
@@ -105,32 +105,41 @@ For 32-bit we have the following conventions - kernel is built with
         * could be put to use in a speculative execution gadget.
         * Interleave XOR with PUSH for better uop scheduling:
         */
+       .if \save_ret
+       pushq   %rsi            /* pt_regs->si */
+       movq    8(%rsp), %rsi   /* temporarily store the return address in %rsi */
+       movq    %rdi, 8(%rsp)   /* pt_regs->di (overwriting original return address) */
+       .else
        pushq   %rdi            /* pt_regs->di */
        pushq   %rsi            /* pt_regs->si */
+       .endif
        pushq   \rdx            /* pt_regs->dx */
        pushq   %rcx            /* pt_regs->cx */
        pushq   \rax            /* pt_regs->ax */
        pushq   %r8             /* pt_regs->r8 */
-       xorq    %r8, %r8        /* nospec   r8 */
+       xorl    %r8d, %r8d      /* nospec   r8 */
        pushq   %r9             /* pt_regs->r9 */
-       xorq    %r9, %r9        /* nospec   r9 */
+       xorl    %r9d, %r9d      /* nospec   r9 */
        pushq   %r10            /* pt_regs->r10 */
-       xorq    %r10, %r10      /* nospec   r10 */
+       xorl    %r10d, %r10d    /* nospec   r10 */
        pushq   %r11            /* pt_regs->r11 */
-       xorq    %r11, %r11      /* nospec   r11*/
+       xorl    %r11d, %r11d    /* nospec   r11*/
        pushq   %rbx            /* pt_regs->rbx */
        xorl    %ebx, %ebx      /* nospec   rbx*/
        pushq   %rbp            /* pt_regs->rbp */
        xorl    %ebp, %ebp      /* nospec   rbp*/
        pushq   %r12            /* pt_regs->r12 */
-       xorq    %r12, %r12      /* nospec   r12*/
+       xorl    %r12d, %r12d    /* nospec   r12*/
        pushq   %r13            /* pt_regs->r13 */
-       xorq    %r13, %r13      /* nospec   r13*/
+       xorl    %r13d, %r13d    /* nospec   r13*/
        pushq   %r14            /* pt_regs->r14 */
-       xorq    %r14, %r14      /* nospec   r14*/
+       xorl    %r14d, %r14d    /* nospec   r14*/
        pushq   %r15            /* pt_regs->r15 */
-       xorq    %r15, %r15      /* nospec   r15*/
+       xorl    %r15d, %r15d    /* nospec   r15*/
        UNWIND_HINT_REGS
+       .if \save_ret
+       pushq   %rsi            /* return address on top of stack */
+       .endif
 .endm
 
 .macro POP_REGS pop_rdi=1 skip_r11rcx=0
@@ -172,12 +181,7 @@ For 32-bit we have the following conventions - kernel is built with
  */
 .macro ENCODE_FRAME_POINTER ptregs_offset=0
 #ifdef CONFIG_FRAME_POINTER
-       .if \ptregs_offset
-               leaq \ptregs_offset(%rsp), %rbp
-       .else
-               mov %rsp, %rbp
-       .endif
-       orq     $0x1, %rbp
+       leaq 1+\ptregs_offset(%rsp), %rbp
 #endif
 .endm
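
The ENCODE_FRAME_POINTER change above folds the old mov + or pair into a single LEA: the frame pointer ends up pointing at the pt_regs area with bit 0 set, the marker the frame-pointer unwinder uses to recognise an encoded pointer. In C terms the encoding is just:

    /* Illustrative: the value the macro leaves in %rbp. */
    static inline unsigned long encode_frame_pointer(struct pt_regs *regs)
    {
            return (unsigned long)regs | 1UL;   /* low bit marks "encoded" */
    }
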
 
index 16c2c022540d42b9fc51ee7489383775bd319ac1..6ad064c8cf35e6fdfc9c384212a6f76c780ffd69 100644 (file)
@@ -252,8 +252,7 @@ ENTRY(__switch_to_asm)
         * exist, overwrite the RSB with entries which capture
         * speculative execution to prevent attack.
         */
-       /* Clobbers %ebx */
-       FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+       FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 #endif
 
        /* restore callee-saved registers */
index 8971bd64d515c5bb4a9b95108fd802b8418764f2..d5c7f18f79aceabf2f7c43769255cd38b3142bc3 100644 (file)
@@ -364,8 +364,7 @@ ENTRY(__switch_to_asm)
         * exist, overwrite the RSB with entries which capture
         * speculative execution to prevent attack.
         */
-       /* Clobbers %rbx */
-       FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+       FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 #endif
 
        /* restore callee-saved registers */
@@ -449,9 +448,19 @@ END(irq_entries_start)
  *
  * The invariant is that, if irq_count != -1, then the IRQ stack is in use.
  */
-.macro ENTER_IRQ_STACK regs=1 old_rsp
+.macro ENTER_IRQ_STACK regs=1 old_rsp save_ret=0
        DEBUG_ENTRY_ASSERT_IRQS_OFF
+
+       .if \save_ret
+       /*
+        * If save_ret is set, the original stack contains one additional
+        * entry -- the return address. Therefore, move the address one
+        * entry below %rsp to \old_rsp.
+        */
+       leaq    8(%rsp), \old_rsp
+       .else
        movq    %rsp, \old_rsp
+       .endif
 
        .if \regs
        UNWIND_HINT_REGS base=\old_rsp
@@ -497,6 +506,15 @@ END(irq_entries_start)
        .if \regs
        UNWIND_HINT_REGS indirect=1
        .endif
+
+       .if \save_ret
+       /*
+        * Push the return address to the stack. This return address can
+        * be found at the "real" original RSP, which was offset by 8 at
+        * the beginning of this macro.
+        */
+       pushq   -8(\old_rsp)
+       .endif
 .endm
 
 /*
@@ -520,27 +538,65 @@ END(irq_entries_start)
 .endm
 
 /*
- * Interrupt entry/exit.
- *
- * Interrupt entry points save only callee clobbered registers in fast path.
+ * Interrupt entry helper function.
  *
- * Entry runs with interrupts off.
+ * Entry runs with interrupts off. Stack layout at entry:
+ * +----------------------------------------------------+
+ * | regs->ss                                          |
+ * | regs->rsp                                         |
+ * | regs->eflags                                      |
+ * | regs->cs                                          |
+ * | regs->ip                                          |
+ * +----------------------------------------------------+
+ * | regs->orig_ax = ~(interrupt number)               |
+ * +----------------------------------------------------+
+ * | return address                                    |
+ * +----------------------------------------------------+
  */
-
-/* 0(%rsp): ~(interrupt number) */
-       .macro interrupt func
+ENTRY(interrupt_entry)
+       UNWIND_HINT_FUNC
+       ASM_CLAC
        cld
 
-       testb   $3, CS-ORIG_RAX(%rsp)
+       testb   $3, CS-ORIG_RAX+8(%rsp)
        jz      1f
        SWAPGS
-       call    switch_to_thread_stack
+
+       /*
+        * Switch to the thread stack. The IRET frame and orig_ax are
+        * on the stack, as well as the return address. RDI..R12 are
+        * not (yet) on the stack and space has not (yet) been
+        * allocated for them.
+        */
+       pushq   %rdi
+
+       /* Need to switch before accessing the thread stack. */
+       SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
+       movq    %rsp, %rdi
+       movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+
+        /*
+         * We have RDI, return address, and orig_ax on the stack on
+         * top of the IRET frame. That means offset=24
+         */
+       UNWIND_HINT_IRET_REGS base=%rdi offset=24
+
+       pushq   7*8(%rdi)               /* regs->ss */
+       pushq   6*8(%rdi)               /* regs->rsp */
+       pushq   5*8(%rdi)               /* regs->eflags */
+       pushq   4*8(%rdi)               /* regs->cs */
+       pushq   3*8(%rdi)               /* regs->ip */
+       pushq   2*8(%rdi)               /* regs->orig_ax */
+       pushq   8(%rdi)                 /* return address */
+       UNWIND_HINT_FUNC
+
+       movq    (%rdi), %rdi
 1:
 
-       PUSH_AND_CLEAR_REGS
-       ENCODE_FRAME_POINTER
+       PUSH_AND_CLEAR_REGS save_ret=1
+       ENCODE_FRAME_POINTER 8
 
-       testb   $3, CS(%rsp)
+       testb   $3, CS+8(%rsp)
        jz      1f
 
        /*
@@ -548,7 +604,7 @@ END(irq_entries_start)
         *
         * We need to tell lockdep that IRQs are off.  We can't do this until
         * we fix gsbase, and we should do it before enter_from_user_mode
-        * (which can take locks).  Since TRACE_IRQS_OFF idempotent,
+        * (which can take locks).  Since TRACE_IRQS_OFF is idempotent,
         * the simplest way to handle it is to just call it twice if
         * we enter from user mode.  There's no reason to optimize this since
         * TRACE_IRQS_OFF is a no-op if lockdep is off.
@@ -558,12 +614,15 @@ END(irq_entries_start)
        CALL_enter_from_user_mode
 
 1:
-       ENTER_IRQ_STACK old_rsp=%rdi
+       ENTER_IRQ_STACK old_rsp=%rdi save_ret=1
        /* We entered an interrupt context - irqs are off: */
        TRACE_IRQS_OFF
 
-       call    \func   /* rdi points to pt_regs */
-       .endm
+       ret
+END(interrupt_entry)
+
+
+/* Interrupt entry/exit. */
 
        /*
         * The interrupt stubs push (~vector+0x80) onto the stack and
@@ -571,9 +630,10 @@ END(irq_entries_start)
         */
        .p2align CONFIG_X86_L1_CACHE_SHIFT
 common_interrupt:
-       ASM_CLAC
        addq    $-0x80, (%rsp)                  /* Adjust vector to [-256, -1] range */
-       interrupt do_IRQ
+       call    interrupt_entry
+       UNWIND_HINT_REGS indirect=1
+       call    do_IRQ  /* rdi points to pt_regs */
        /* 0(%rsp): old RSP */
 ret_from_intr:
        DISABLE_INTERRUPTS(CLBR_ANY)
@@ -766,10 +826,11 @@ END(common_interrupt)
 .macro apicinterrupt3 num sym do_sym
 ENTRY(\sym)
        UNWIND_HINT_IRET_REGS
-       ASM_CLAC
        pushq   $~(\num)
 .Lcommon_\sym:
-       interrupt \do_sym
+       call    interrupt_entry
+       UNWIND_HINT_REGS indirect=1
+       call    \do_sym /* rdi points to pt_regs */
        jmp     ret_from_intr
 END(\sym)
 .endm
@@ -832,34 +893,6 @@ apicinterrupt IRQ_WORK_VECTOR                      irq_work_interrupt              smp_irq_work_interrupt
  */
 #define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8)
 
-/*
- * Switch to the thread stack.  This is called with the IRET frame and
- * orig_ax on the stack.  (That is, RDI..R12 are not on the stack and
- * space has not been allocated for them.)
- */
-ENTRY(switch_to_thread_stack)
-       UNWIND_HINT_FUNC
-
-       pushq   %rdi
-       /* Need to switch before accessing the thread stack. */
-       SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
-       movq    %rsp, %rdi
-       movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
-       UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI
-
-       pushq   7*8(%rdi)               /* regs->ss */
-       pushq   6*8(%rdi)               /* regs->rsp */
-       pushq   5*8(%rdi)               /* regs->eflags */
-       pushq   4*8(%rdi)               /* regs->cs */
-       pushq   3*8(%rdi)               /* regs->ip */
-       pushq   2*8(%rdi)               /* regs->orig_ax */
-       pushq   8(%rdi)                 /* return address */
-       UNWIND_HINT_FUNC
-
-       movq    (%rdi), %rdi
-       ret
-END(switch_to_thread_stack)
-
 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
 ENTRY(\sym)
        UNWIND_HINT_IRET_REGS offset=\has_error_code*8
@@ -875,12 +908,8 @@ ENTRY(\sym)
        pushq   $-1                             /* ORIG_RAX: no syscall to restart */
        .endif
 
-       /* Save all registers in pt_regs */
-       PUSH_AND_CLEAR_REGS
-       ENCODE_FRAME_POINTER
-
        .if \paranoid < 2
-       testb   $3, CS(%rsp)                    /* If coming from userspace, switch stacks */
+       testb   $3, CS-ORIG_RAX(%rsp)           /* If coming from userspace, switch stacks */
        jnz     .Lfrom_usermode_switch_stack_\@
        .endif
 
@@ -1130,13 +1159,15 @@ idtentry machine_check          do_mce                  has_error_code=0        paranoid=1
 #endif
 
 /*
- * Switch gs if needed.
+ * Save all registers in pt_regs, and switch gs if needed.
  * Use slow, but surefire "are we in kernel?" check.
  * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
  */
 ENTRY(paranoid_entry)
        UNWIND_HINT_FUNC
        cld
+       PUSH_AND_CLEAR_REGS save_ret=1
+       ENCODE_FRAME_POINTER 8
        movl    $1, %ebx
        movl    $MSR_GS_BASE, %ecx
        rdmsr
@@ -1181,12 +1212,14 @@ ENTRY(paranoid_exit)
 END(paranoid_exit)
 
 /*
- * Switch gs if needed.
+ * Save all registers in pt_regs, and switch GS if needed.
  * Return: EBX=0: came from user mode; EBX=1: otherwise
  */
 ENTRY(error_entry)
-       UNWIND_HINT_REGS offset=8
+       UNWIND_HINT_FUNC
        cld
+       PUSH_AND_CLEAR_REGS save_ret=1
+       ENCODE_FRAME_POINTER 8
        testb   $3, CS+8(%rsp)
        jz      .Lerror_kernelspace
 
@@ -1577,8 +1610,6 @@ end_repeat_nmi:
         * frame to point back to repeat_nmi.
         */
        pushq   $-1                             /* ORIG_RAX: no syscall to restart */
-       PUSH_AND_CLEAR_REGS
-       ENCODE_FRAME_POINTER
 
        /*
         * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
index fd65e016e4133f5634545062d098fc25fb5c0b1d..e811dd9c5e99e1a1e61d20cce0fb9ba3cf23f535 100644 (file)
@@ -85,25 +85,25 @@ ENTRY(entry_SYSENTER_compat)
        pushq   %rcx                    /* pt_regs->cx */
        pushq   $-ENOSYS                /* pt_regs->ax */
        pushq   $0                      /* pt_regs->r8  = 0 */
-       xorq    %r8, %r8                /* nospec   r8 */
+       xorl    %r8d, %r8d              /* nospec   r8 */
        pushq   $0                      /* pt_regs->r9  = 0 */
-       xorq    %r9, %r9                /* nospec   r9 */
+       xorl    %r9d, %r9d              /* nospec   r9 */
        pushq   $0                      /* pt_regs->r10 = 0 */
-       xorq    %r10, %r10              /* nospec   r10 */
+       xorl    %r10d, %r10d            /* nospec   r10 */
        pushq   $0                      /* pt_regs->r11 = 0 */
-       xorq    %r11, %r11              /* nospec   r11 */
+       xorl    %r11d, %r11d            /* nospec   r11 */
        pushq   %rbx                    /* pt_regs->rbx */
        xorl    %ebx, %ebx              /* nospec   rbx */
        pushq   %rbp                    /* pt_regs->rbp (will be overwritten) */
        xorl    %ebp, %ebp              /* nospec   rbp */
        pushq   $0                      /* pt_regs->r12 = 0 */
-       xorq    %r12, %r12              /* nospec   r12 */
+       xorl    %r12d, %r12d            /* nospec   r12 */
        pushq   $0                      /* pt_regs->r13 = 0 */
-       xorq    %r13, %r13              /* nospec   r13 */
+       xorl    %r13d, %r13d            /* nospec   r13 */
        pushq   $0                      /* pt_regs->r14 = 0 */
-       xorq    %r14, %r14              /* nospec   r14 */
+       xorl    %r14d, %r14d            /* nospec   r14 */
        pushq   $0                      /* pt_regs->r15 = 0 */
-       xorq    %r15, %r15              /* nospec   r15 */
+       xorl    %r15d, %r15d            /* nospec   r15 */
        cld
 
        /*
@@ -224,25 +224,25 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
        pushq   %rbp                    /* pt_regs->cx (stashed in bp) */
        pushq   $-ENOSYS                /* pt_regs->ax */
        pushq   $0                      /* pt_regs->r8  = 0 */
-       xorq    %r8, %r8                /* nospec   r8 */
+       xorl    %r8d, %r8d              /* nospec   r8 */
        pushq   $0                      /* pt_regs->r9  = 0 */
-       xorq    %r9, %r9                /* nospec   r9 */
+       xorl    %r9d, %r9d              /* nospec   r9 */
        pushq   $0                      /* pt_regs->r10 = 0 */
-       xorq    %r10, %r10              /* nospec   r10 */
+       xorl    %r10d, %r10d            /* nospec   r10 */
        pushq   $0                      /* pt_regs->r11 = 0 */
-       xorq    %r11, %r11              /* nospec   r11 */
+       xorl    %r11d, %r11d            /* nospec   r11 */
        pushq   %rbx                    /* pt_regs->rbx */
        xorl    %ebx, %ebx              /* nospec   rbx */
        pushq   %rbp                    /* pt_regs->rbp (will be overwritten) */
        xorl    %ebp, %ebp              /* nospec   rbp */
        pushq   $0                      /* pt_regs->r12 = 0 */
-       xorq    %r12, %r12              /* nospec   r12 */
+       xorl    %r12d, %r12d            /* nospec   r12 */
        pushq   $0                      /* pt_regs->r13 = 0 */
-       xorq    %r13, %r13              /* nospec   r13 */
+       xorl    %r13d, %r13d            /* nospec   r13 */
        pushq   $0                      /* pt_regs->r14 = 0 */
-       xorq    %r14, %r14              /* nospec   r14 */
+       xorl    %r14d, %r14d            /* nospec   r14 */
        pushq   $0                      /* pt_regs->r15 = 0 */
-       xorq    %r15, %r15              /* nospec   r15 */
+       xorl    %r15d, %r15d            /* nospec   r15 */
 
        /*
         * User mode is traced as though IRQs are on, and SYSENTER
@@ -298,9 +298,9 @@ sysret32_from_system_call:
         */
        SWITCH_TO_USER_CR3_NOSTACK scratch_reg=%r8 scratch_reg2=%r9
 
-       xorq    %r8, %r8
-       xorq    %r9, %r9
-       xorq    %r10, %r10
+       xorl    %r8d, %r8d
+       xorl    %r9d, %r9d
+       xorl    %r10d, %r10d
        swapgs
        sysretl
 END(entry_SYSCALL_compat)
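The xorq to xorl conversions in these compat-entry hunks rely on a general x86-64 rule: any write to a 32-bit register zero-extends into the full 64-bit register, so the 32-bit xor clears the whole register while staying the architecture's recognized zeroing idiom. A tiny sketch of that behaviour, assuming GCC-style inline assembly (not code from this patch):

static inline unsigned long xorl_zero_extends(void)
{
	unsigned long v = 0xdeadbeefcafef00dUL;

	/* xor of the 32-bit alias (%k0) clears all 64 bits of v. */
	asm ("xorl %k0, %k0" : "+r" (v));

	return v;		/* always 0 */
}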
@@ -347,10 +347,23 @@ ENTRY(entry_INT80_compat)
         */
        movl    %eax, %eax
 
+       /* switch to thread stack expects orig_ax and rdi to be pushed */
        pushq   %rax                    /* pt_regs->orig_ax */
+       pushq   %rdi                    /* pt_regs->di */
+
+       /* Need to switch before accessing the thread stack. */
+       SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
+       movq    %rsp, %rdi
+       movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+
+       pushq   6*8(%rdi)               /* regs->ss */
+       pushq   5*8(%rdi)               /* regs->rsp */
+       pushq   4*8(%rdi)               /* regs->eflags */
+       pushq   3*8(%rdi)               /* regs->cs */
+       pushq   2*8(%rdi)               /* regs->ip */
+       pushq   1*8(%rdi)               /* regs->orig_ax */
 
-       /* switch to thread stack expects orig_ax to be pushed */
-       call    switch_to_thread_stack
+       movq    (%rdi), %rdi            /* restore %rdi */
 
        pushq   %rdi                    /* pt_regs->di */
        pushq   %rsi                    /* pt_regs->si */
@@ -358,25 +371,25 @@ ENTRY(entry_INT80_compat)
        pushq   %rcx                    /* pt_regs->cx */
        pushq   $-ENOSYS                /* pt_regs->ax */
        pushq   $0                      /* pt_regs->r8  = 0 */
-       xorq    %r8, %r8                /* nospec   r8 */
+       xorl    %r8d, %r8d              /* nospec   r8 */
        pushq   $0                      /* pt_regs->r9  = 0 */
-       xorq    %r9, %r9                /* nospec   r9 */
+       xorl    %r9d, %r9d              /* nospec   r9 */
        pushq   $0                      /* pt_regs->r10 = 0 */
-       xorq    %r10, %r10              /* nospec   r10 */
+       xorl    %r10d, %r10d            /* nospec   r10 */
        pushq   $0                      /* pt_regs->r11 = 0 */
-       xorq    %r11, %r11              /* nospec   r11 */
+       xorl    %r11d, %r11d            /* nospec   r11 */
        pushq   %rbx                    /* pt_regs->rbx */
        xorl    %ebx, %ebx              /* nospec   rbx */
        pushq   %rbp                    /* pt_regs->rbp */
        xorl    %ebp, %ebp              /* nospec   rbp */
        pushq   %r12                    /* pt_regs->r12 */
-       xorq    %r12, %r12              /* nospec   r12 */
+       xorl    %r12d, %r12d            /* nospec   r12 */
        pushq   %r13                    /* pt_regs->r13 */
-       xorq    %r13, %r13              /* nospec   r13 */
+       xorl    %r13d, %r13d            /* nospec   r13 */
        pushq   %r14                    /* pt_regs->r14 */
-       xorq    %r14, %r14              /* nospec   r14 */
+       xorl    %r14d, %r14d            /* nospec   r14 */
        pushq   %r15                    /* pt_regs->r15 */
-       xorq    %r15, %r15              /* nospec   r15 */
+       xorl    %r15d, %r15d            /* nospec   r15 */
        cld
 
        /*
index 4d4015ddcf2633e9e8388216f9e9c8639e2eced8..c356098b6fb92b8ff7d42b2fd813c2a8551d3db1 100644 (file)
@@ -7,6 +7,8 @@
 #ifndef _ASM_X86_MACH_DEFAULT_APM_H
 #define _ASM_X86_MACH_DEFAULT_APM_H
 
+#include <asm/nospec-branch.h>
+
 #ifdef APM_ZERO_SEGS
 #      define APM_DO_ZERO_SEGS \
                "pushl %%ds\n\t" \
@@ -32,6 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
         * N.B. We do NOT need a cld after the BIOS call
         * because we always save and restore the flags.
         */
+       firmware_restrict_branch_speculation_start();
        __asm__ __volatile__(APM_DO_ZERO_SEGS
                "pushl %%edi\n\t"
                "pushl %%ebp\n\t"
@@ -44,6 +47,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
                  "=S" (*esi)
                : "a" (func), "b" (ebx_in), "c" (ecx_in)
                : "memory", "cc");
+       firmware_restrict_branch_speculation_end();
 }
 
 static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
@@ -56,6 +60,7 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
         * N.B. We do NOT need a cld after the BIOS call
         * because we always save and restore the flags.
         */
+       firmware_restrict_branch_speculation_start();
        __asm__ __volatile__(APM_DO_ZERO_SEGS
                "pushl %%edi\n\t"
                "pushl %%ebp\n\t"
@@ -68,6 +73,7 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
                  "=S" (si)
                : "a" (func), "b" (ebx_in), "c" (ecx_in)
                : "memory", "cc");
+       firmware_restrict_branch_speculation_end();
        return error;
 }
 
index 4d111616524b2ee3c8d929ec98dd329b332811aa..1908214b91257f1d2442e2e6fc08b2f4c4f5bf65 100644 (file)
@@ -38,7 +38,4 @@ INDIRECT_THUNK(dx)
 INDIRECT_THUNK(si)
 INDIRECT_THUNK(di)
 INDIRECT_THUNK(bp)
-asmlinkage void __fill_rsb(void);
-asmlinkage void __clear_rsb(void);
-
 #endif /* CONFIG_RETPOLINE */
index 0dfe4d3f74e24d6655fc40f0460b9e489fb9ef69..f41079da38c55f8a2e891b044ac9ffb73919c37e 100644 (file)
 #define X86_FEATURE_SEV                        ( 7*32+20) /* AMD Secure Encrypted Virtualization */
 
 #define X86_FEATURE_USE_IBPB           ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+#define X86_FEATURE_USE_IBRS_FW                ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW         ( 8*32+ 0) /* Intel TPR Shadow */
index 85f6ccb80b91771029f347f1875f7a6d923ef184..a399c1ebf6f0e6d974da0b773248b9c0cfe811a9 100644 (file)
@@ -6,6 +6,7 @@
 #include <asm/pgtable.h>
 #include <asm/processor-flags.h>
 #include <asm/tlb.h>
+#include <asm/nospec-branch.h>
 
 /*
  * We map the EFI regions needed for runtime services non-contiguously,
 
 extern asmlinkage unsigned long efi_call_phys(void *, ...);
 
-#define arch_efi_call_virt_setup()     kernel_fpu_begin()
-#define arch_efi_call_virt_teardown()  kernel_fpu_end()
+#define arch_efi_call_virt_setup()                                     \
+({                                                                     \
+       kernel_fpu_begin();                                             \
+       firmware_restrict_branch_speculation_start();                   \
+})
+
+#define arch_efi_call_virt_teardown()                                  \
+({                                                                     \
+       firmware_restrict_branch_speculation_end();                     \
+       kernel_fpu_end();                                               \
+})
+
 
 /*
  * Wrap all the virtual calls in a way that forces the parameters on the stack.
@@ -73,6 +84,7 @@ struct efi_scratch {
        efi_sync_low_kernel_mappings();                                 \
        preempt_disable();                                              \
        __kernel_fpu_begin();                                           \
+       firmware_restrict_branch_speculation_start();                   \
                                                                        \
        if (efi_scratch.use_pgd) {                                      \
                efi_scratch.prev_cr3 = __read_cr3();                    \
@@ -91,6 +103,7 @@ struct efi_scratch {
                __flush_tlb_all();                                      \
        }                                                               \
                                                                        \
+       firmware_restrict_branch_speculation_end();                     \
        __kernel_fpu_end();                                             \
        preempt_enable();                                               \
 })
index dd6f57a54a2626c080c8505cd670ef8c54c7bf56..b605a5b6a30c38f1bdcf647be241516b5e1310b8 100644 (file)
@@ -507,6 +507,7 @@ struct kvm_vcpu_arch {
        u64 smi_count;
        bool tpr_access_reporting;
        u64 ia32_xss;
+       u64 microcode_version;
 
        /*
         * Paging state of the vcpu
@@ -1095,6 +1096,8 @@ struct kvm_x86_ops {
        int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
        int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
        int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
+
+       int (*get_msr_feature)(struct kvm_msr_entry *entry);
 };
 
 struct kvm_arch_async_pf {
@@ -1464,7 +1467,4 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
 #define put_smstate(type, buf, offset, val)                      \
        *(type *)((buf) + (offset) - 0x7e00) = val
 
-void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
-               unsigned long start, unsigned long end);
-
 #endif /* _ASM_X86_KVM_HOST_H */
index 55520cec8b27d69727092e6fd81d3a0c0f4db252..7fb1047d61c7b5175906eddc903d23866e8a7bef 100644 (file)
@@ -37,7 +37,12 @@ struct cpu_signature {
 
 struct device;
 
-enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
+enum ucode_state {
+       UCODE_OK        = 0,
+       UCODE_UPDATED,
+       UCODE_NFOUND,
+       UCODE_ERROR,
+};
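The explicit values give the enum a deliberate ordering: the success states (UCODE_OK, UCODE_UPDATED) come before UCODE_NFOUND and the real failure after it, which is what the reload path later in this series relies on when it checks tmp_ret > UCODE_NFOUND. A sketch of that idiom (the helper name is hypothetical and not part of the patch):

static inline bool ucode_reload_failed(enum ucode_state ret)	/* hypothetical helper */
{
	/* Only UCODE_ERROR (and any later-added failure states) sort above UCODE_NFOUND. */
	return ret > UCODE_NFOUND;
}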
 
 struct microcode_ops {
        enum ucode_state (*request_microcode_user) (int cpu,
@@ -54,7 +59,7 @@ struct microcode_ops {
         * are being called.
         * See also the "Synchronization" section in microcode_core.c.
         */
-       int (*apply_microcode) (int cpu);
+       enum ucode_state (*apply_microcode) (int cpu);
        int (*collect_cpu_info) (int cpu, struct cpu_signature *csig);
 };
 
index c931b88982a0ff59e3b67947cc606e452f327dc0..1de72ce514cd5561dbafea43996d909f449f8766 100644 (file)
@@ -74,6 +74,7 @@ static inline void *ldt_slot_va(int slot)
        return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
 #else
        BUG();
+       return (void *)fix_to_virt(FIX_HOLE);
 #endif
 }
 
index 76b058533e473b10d99e7a1ee4b661b5b2b2b14d..d0dabeae05059883844ae2228af62c27885cdbfa 100644 (file)
@@ -8,6 +8,50 @@
 #include <asm/cpufeatures.h>
 #include <asm/msr-index.h>
 
+/*
+ * Fill the CPU return stack buffer.
+ *
+ * Each entry in the RSB, if used for a speculative 'ret', contains an
+ * infinite 'pause; lfence; jmp' loop to capture speculative execution.
+ *
+ * This is required in various cases for retpoline and IBRS-based
+ * mitigations for the Spectre variant 2 vulnerability. Sometimes to
+ * eliminate potentially bogus entries from the RSB, and sometimes
+ * purely to ensure that it doesn't get empty, which on some CPUs would
+ * allow predictions from other (unwanted!) sources to be used.
+ *
+ * We define a CPP macro such that it can be used from both .S files and
+ * inline assembly. It's possible to do a .macro and then include that
+ * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
+ */
+
+#define RSB_CLEAR_LOOPS                32      /* To forcibly overwrite all entries */
+#define RSB_FILL_LOOPS         16      /* To avoid underflow */
+
+/*
+ * Google experimented with loop-unrolling and this turned out to be
+ * the optimal version: two calls, each with their own speculation

+ * trap should their return address end up getting used, in a loop.
+ */
+#define __FILL_RETURN_BUFFER(reg, nr, sp)      \
+       mov     $(nr/2), reg;                   \
+771:                                           \
+       call    772f;                           \
+773:   /* speculation trap */                  \
+       pause;                                  \
+       lfence;                                 \
+       jmp     773b;                           \
+772:                                           \
+       call    774f;                           \
+775:   /* speculation trap */                  \
+       pause;                                  \
+       lfence;                                 \
+       jmp     775b;                           \
+774:                                           \
+       dec     reg;                            \
+       jnz     771b;                           \
+       add     $(BITS_PER_LONG/8) * nr, sp;
+
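As a worked check of the macro above on x86-64 (BITS_PER_LONG == 64): the loop runs nr/2 times with two calls per iteration, so nr return addresses are stuffed into the RSB, and the trailing add unwinds the nr * 8 bytes of stack those calls consumed. Restated as constants (illustrative only):

/* RSB_CLEAR_LOOPS = 32 case; RSB_FILL_LOOPS = 16 halves every number. */
enum {
	rsb_clear_iterations	= 32 / 2,			/* 16 passes through 771:            */
	rsb_clear_entries	= rsb_clear_iterations * 2,	/* 32 return addresses stuffed       */
	rsb_clear_stack_bytes	= (64 / 8) * 32,		/* 256 bytes undone by the final add */
};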
 #ifdef __ASSEMBLY__
 
 /*
        .popsection
 .endm
 
+/*
+ * This should be used immediately before an indirect jump/call. It tells
+ * objtool the subsequent indirect jump/call is vouched safe for retpoline
+ * builds.
+ */
+.macro ANNOTATE_RETPOLINE_SAFE
+       .Lannotate_\@:
+       .pushsection .discard.retpoline_safe
+       _ASM_PTR .Lannotate_\@
+       .popsection
+.endm
+
 /*
  * These are the bare retpoline primitives for indirect jmp and call.
  * Do not use these directly; they only exist to make the ALTERNATIVE
 .macro JMP_NOSPEC reg:req
 #ifdef CONFIG_RETPOLINE
        ANNOTATE_NOSPEC_ALTERNATIVE
-       ALTERNATIVE_2 __stringify(jmp *\reg),                           \
+       ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *\reg),  \
                __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \
-               __stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
+               __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
 #else
        jmp     *\reg
 #endif
 .macro CALL_NOSPEC reg:req
 #ifdef CONFIG_RETPOLINE
        ANNOTATE_NOSPEC_ALTERNATIVE
-       ALTERNATIVE_2 __stringify(call *\reg),                          \
+       ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *\reg), \
                __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
-               __stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD
+               __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_AMD
 #else
        call    *\reg
 #endif
 .endm
 
-/* This clobbers the BX register */
-.macro FILL_RETURN_BUFFER nr:req ftr:req
+ /*
+  * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
+  * monstrosity above, manually.
+  */
+.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
 #ifdef CONFIG_RETPOLINE
-       ALTERNATIVE "", "call __clear_rsb", \ftr
+       ANNOTATE_NOSPEC_ALTERNATIVE
+       ALTERNATIVE "jmp .Lskip_rsb_\@",                                \
+               __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP))    \
+               \ftr
+.Lskip_rsb_\@:
 #endif
 .endm
 
        ".long 999b - .\n\t"                                    \
        ".popsection\n\t"
 
+#define ANNOTATE_RETPOLINE_SAFE                                        \
+       "999:\n\t"                                              \
+       ".pushsection .discard.retpoline_safe\n\t"              \
+       _ASM_PTR " 999b\n\t"                                    \
+       ".popsection\n\t"
+
 #if defined(CONFIG_X86_64) && defined(RETPOLINE)
 
 /*
 # define CALL_NOSPEC                                           \
        ANNOTATE_NOSPEC_ALTERNATIVE                             \
        ALTERNATIVE(                                            \
+       ANNOTATE_RETPOLINE_SAFE                                 \
        "call *%[thunk_target]\n",                              \
        "call __x86_indirect_thunk_%V[thunk_target]\n",         \
        X86_FEATURE_RETPOLINE)
@@ -156,25 +226,90 @@ extern char __indirect_thunk_end[];
 static inline void vmexit_fill_RSB(void)
 {
 #ifdef CONFIG_RETPOLINE
-       alternative_input("",
-                         "call __fill_rsb",
-                         X86_FEATURE_RETPOLINE,
-                         ASM_NO_INPUT_CLOBBER(_ASM_BX, "memory"));
+       unsigned long loops;
+
+       asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
+                     ALTERNATIVE("jmp 910f",
+                                 __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
+                                 X86_FEATURE_RETPOLINE)
+                     "910:"
+                     : "=r" (loops), ASM_CALL_CONSTRAINT
+                     : : "memory" );
 #endif
 }
 
+#define alternative_msr_write(_msr, _val, _feature)            \
+       asm volatile(ALTERNATIVE("",                            \
+                                "movl %[msr], %%ecx\n\t"       \
+                                "movl %[val], %%eax\n\t"       \
+                                "movl $0, %%edx\n\t"           \
+                                "wrmsr",                       \
+                                _feature)                      \
+                    : : [msr] "i" (_msr), [val] "i" (_val)     \
+                    : "eax", "ecx", "edx", "memory")
+
 static inline void indirect_branch_prediction_barrier(void)
 {
-       asm volatile(ALTERNATIVE("",
-                                "movl %[msr], %%ecx\n\t"
-                                "movl %[val], %%eax\n\t"
-                                "movl $0, %%edx\n\t"
-                                "wrmsr",
-                                X86_FEATURE_USE_IBPB)
-                    : : [msr] "i" (MSR_IA32_PRED_CMD),
-                        [val] "i" (PRED_CMD_IBPB)
-                    : "eax", "ecx", "edx", "memory");
+       alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
+                             X86_FEATURE_USE_IBPB);
 }
 
+/*
+ * With retpoline, we must use IBRS to restrict branch prediction
+ * before calling into firmware.
+ *
+ * (Implemented as CPP macros due to header hell.)
+ */
+#define firmware_restrict_branch_speculation_start()                   \
+do {                                                                   \
+       preempt_disable();                                              \
+       alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS,       \
+                             X86_FEATURE_USE_IBRS_FW);                 \
+} while (0)
+
+#define firmware_restrict_branch_speculation_end()                     \
+do {                                                                   \
+       alternative_msr_write(MSR_IA32_SPEC_CTRL, 0,                    \
+                             X86_FEATURE_USE_IBRS_FW);                 \
+       preempt_enable();                                               \
+} while (0)
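A minimal usage sketch of the two macros above, with hypothetical function names that are not part of this patch: the firmware call is bracketed so IBRS is raised only for its duration, and preemption stays disabled across it (start() disables preemption, end() re-enables it), mirroring what the APM and EFI hunks do.

extern unsigned long sample_firmware_service(unsigned long arg);	/* hypothetical firmware entry point */

static unsigned long call_firmware_service(unsigned long arg)		/* hypothetical wrapper */
{
	unsigned long ret;

	firmware_restrict_branch_speculation_start();	/* preempt_disable() + SPEC_CTRL_IBRS if X86_FEATURE_USE_IBRS_FW */
	ret = sample_firmware_service(arg);
	firmware_restrict_branch_speculation_end();	/* clear SPEC_CTRL + preempt_enable() */

	return ret;
}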
+
 #endif /* __ASSEMBLY__ */
+
+/*
+ * Below is used in the eBPF JIT compiler and emits the byte sequence
+ * for the following assembly:
+ *
+ * With retpolines configured:
+ *
+ *    callq do_rop
+ *  spec_trap:
+ *    pause
+ *    lfence
+ *    jmp spec_trap
+ *  do_rop:
+ *    mov %rax,(%rsp)
+ *    retq
+ *
+ * Without retpolines configured:
+ *
+ *    jmp *%rax
+ */
+#ifdef CONFIG_RETPOLINE
+# define RETPOLINE_RAX_BPF_JIT_SIZE    17
+# define RETPOLINE_RAX_BPF_JIT()                               \
+       EMIT1_off32(0xE8, 7);    /* callq do_rop */             \
+       /* spec_trap: */                                        \
+       EMIT2(0xF3, 0x90);       /* pause */                    \
+       EMIT3(0x0F, 0xAE, 0xE8); /* lfence */                   \
+       EMIT2(0xEB, 0xF9);       /* jmp spec_trap */            \
+       /* do_rop: */                                           \
+       EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */    \
+       EMIT1(0xC3);             /* retq */
+#else
+# define RETPOLINE_RAX_BPF_JIT_SIZE    2
+# define RETPOLINE_RAX_BPF_JIT()                               \
+       EMIT2(0xFF, 0xE0);       /* jmp *%rax */
+#endif
+
 #endif /* _ASM_X86_NOSPEC_BRANCH_H_ */
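As a worked check of the JIT sequence described above, and assuming the JIT's EMITn helpers emit n opcode bytes (with EMIT1_off32 emitting one opcode byte plus a 4-byte displacement), the emitted lengths add up to the advertised RETPOLINE_RAX_BPF_JIT_SIZE values. The enum below only restates that arithmetic and is not part of the patch.

enum {
	retpoline_rax_bytes = 5		/* callq rel32      (e8 + imm32)   */
			    + 2		/* pause            (f3 90)        */
			    + 3		/* lfence           (0f ae e8)     */
			    + 2		/* jmp rel8         (eb f9)        */
			    + 4		/* mov %rax,(%rsp)  (48 89 04 24)  */
			    + 1,	/* retq             (c3)  => 17    */
	plain_jmp_rax_bytes = 2,	/* jmp *%rax        (ff e0)        */
};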
index 554841fab717aef09d2b5cc57410a6eed8c2df0c..c83a2f418cea097bb2c5a9545409c5e6efcbbc50 100644 (file)
@@ -7,6 +7,7 @@
 #ifdef CONFIG_PARAVIRT
 #include <asm/pgtable_types.h>
 #include <asm/asm.h>
+#include <asm/nospec-branch.h>
 
 #include <asm/paravirt_types.h>
 
@@ -879,23 +880,27 @@ extern void default_banner(void);
 
 #define INTERRUPT_RETURN                                               \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,       \
-                 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))
+                 ANNOTATE_RETPOLINE_SAFE;                                      \
+                 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret);)
 
 #define DISABLE_INTERRUPTS(clobbers)                                   \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
+                 ANNOTATE_RETPOLINE_SAFE;                                      \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);    \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
 
 #define ENABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
+                 ANNOTATE_RETPOLINE_SAFE;                                      \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);     \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
 
 #ifdef CONFIG_X86_32
 #define GET_CR0_INTO_EAX                               \
        push %ecx; push %edx;                           \
+       ANNOTATE_RETPOLINE_SAFE;                                \
        call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
        pop %edx; pop %ecx
 #else  /* !CONFIG_X86_32 */
@@ -917,21 +922,25 @@ extern void default_banner(void);
  */
 #define SWAPGS                                                         \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
-                 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)          \
+                 ANNOTATE_RETPOLINE_SAFE;                                      \
+                 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs);         \
                 )
 
 #define GET_CR2_INTO_RAX                               \
-       call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)
+       ANNOTATE_RETPOLINE_SAFE;                                \
+       call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2);
 
 #define USERGS_SYSRET64                                                        \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),       \
                  CLBR_NONE,                                            \
-                 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
+                 ANNOTATE_RETPOLINE_SAFE;                                      \
+                 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64);)
 
 #ifdef CONFIG_DEBUG_ENTRY
 #define SAVE_FLAGS(clobbers)                                        \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_save_fl), clobbers, \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);        \
+                 ANNOTATE_RETPOLINE_SAFE;                                  \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_save_fl);    \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
 #endif
index f624f1f10316c248911585f757ea5bd257e98434..180bc0bff0fbd98195b8e81ce9aa7232dde06ee2 100644 (file)
@@ -43,6 +43,7 @@
 #include <asm/desc_defs.h>
 #include <asm/kmap_types.h>
 #include <asm/pgtable_types.h>
+#include <asm/nospec-branch.h>
 
 struct page;
 struct thread_struct;
@@ -392,7 +393,9 @@ int paravirt_disable_iospace(void);
  * offset into the paravirt_patch_template structure, and can therefore be
  * freely converted back into a structure offset.
  */
-#define PARAVIRT_CALL  "call *%c[paravirt_opptr];"
+#define PARAVIRT_CALL                                  \
+       ANNOTATE_RETPOLINE_SAFE                         \
+       "call *%c[paravirt_opptr];"
 
 /*
  * These macros are intended to wrap calls through one of the paravirt
index 63c2552b6b6547b71bd7aa0934bc3c8c2cb54dc1..b444d83cfc952fc121d77599a938bbcc6c0d864b 100644 (file)
@@ -350,14 +350,14 @@ static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
 {
        pmdval_t v = native_pmd_val(pmd);
 
-       return __pmd(v | set);
+       return native_make_pmd(v | set);
 }
 
 static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
 {
        pmdval_t v = native_pmd_val(pmd);
 
-       return __pmd(v & ~clear);
+       return native_make_pmd(v & ~clear);
 }
 
 static inline pmd_t pmd_mkold(pmd_t pmd)
@@ -409,14 +409,14 @@ static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
 {
        pudval_t v = native_pud_val(pud);
 
-       return __pud(v | set);
+       return native_make_pud(v | set);
 }
 
 static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
 {
        pudval_t v = native_pud_val(pud);
 
-       return __pud(v & ~clear);
+       return native_make_pud(v & ~clear);
 }
 
 static inline pud_t pud_mkold(pud_t pud)
index 3696398a9475fe78500c8c0a62922620d975453c..246f15b4e64ced7b9f70fd789d1a2270214fb7a2 100644 (file)
@@ -323,6 +323,11 @@ static inline pudval_t native_pud_val(pud_t pud)
 #else
 #include <asm-generic/pgtable-nopud.h>
 
+static inline pud_t native_make_pud(pudval_t val)
+{
+       return (pud_t) { .p4d.pgd = native_make_pgd(val) };
+}
+
 static inline pudval_t native_pud_val(pud_t pud)
 {
        return native_pgd_val(pud.p4d.pgd);
@@ -344,6 +349,11 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
 #else
 #include <asm-generic/pgtable-nopmd.h>
 
+static inline pmd_t native_make_pmd(pmdval_t val)
+{
+       return (pmd_t) { .pud.p4d.pgd = native_make_pgd(val) };
+}
+
 static inline pmdval_t native_pmd_val(pmd_t pmd)
 {
        return native_pgd_val(pmd.pud.p4d.pgd);
index 1bd9ed87606f45f5a22f2510bde9ba34029a0551..b0ccd4847a58ab671476f412905a1df912029f1a 100644 (file)
@@ -977,4 +977,5 @@ bool xen_set_default_idle(void);
 
 void stop_this_cpu(void *dummy);
 void df_debug(struct pt_regs *regs, long error_code);
+void microcode_check(void);
 #endif /* _ASM_X86_PROCESSOR_H */
index 4e44250e7d0d75c6db385dbf151583b31d3e8c19..d65171120e909341516bbde5a5f9f99877c18141 100644 (file)
@@ -67,13 +67,13 @@ static __always_inline __must_check
 bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 {
        GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", REFCOUNT_CHECK_LT_ZERO,
-                                 r->refs.counter, "er", i, "%0", e);
+                                 r->refs.counter, "er", i, "%0", e, "cx");
 }
 
 static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r)
 {
        GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", REFCOUNT_CHECK_LT_ZERO,
-                                r->refs.counter, "%0", e);
+                                r->refs.counter, "%0", e, "cx");
 }
 
 static __always_inline __must_check
index f91c365e57c36d2454806ff21a2d336dae5c6863..4914a3e7c8035538a167c0dc23a2a33afd4a1ca2 100644 (file)
@@ -2,8 +2,7 @@
 #ifndef _ASM_X86_RMWcc
 #define _ASM_X86_RMWcc
 
-#define __CLOBBERS_MEM         "memory"
-#define __CLOBBERS_MEM_CC_CX   "memory", "cc", "cx"
+#define __CLOBBERS_MEM(clb...) "memory", ## clb
 
 #if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CC_HAVE_ASM_GOTO)
 
@@ -40,18 +39,19 @@ do {                                                                        \
 #endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
 
 #define GEN_UNARY_RMWcc(op, var, arg0, cc)                             \
-       __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM)
+       __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM())
 
-#define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, arg0, cc)            \
+#define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, arg0, cc, clobbers...)\
        __GEN_RMWcc(op " " arg0 "\n\t" suffix, var, cc,                 \
-                   __CLOBBERS_MEM_CC_CX)
+                   __CLOBBERS_MEM(clobbers))
 
 #define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)                 \
        __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0, var, cc,                \
-                   __CLOBBERS_MEM, vcon (val))
+                   __CLOBBERS_MEM(), vcon (val))
 
-#define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, vcon, val, arg0, cc)        \
+#define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, vcon, val, arg0, cc,        \
+                                 clobbers...)                          \
        __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0 "\n\t" suffix, var, cc,  \
-                   __CLOBBERS_MEM_CC_CX, vcon (val))
+                   __CLOBBERS_MEM(clobbers), vcon (val))
 
 #endif /* _ASM_X86_RMWcc */
index 197c2e6c73765c364e519d8c70a1fa953d7d5053..099414345865d588e6c55b63078f8b168fc8e907 100644 (file)
 #define HV_X64_MSR_REENLIGHTENMENT_CONTROL     0x40000106
 
 struct hv_reenlightenment_control {
-       u64 vector:8;
-       u64 reserved1:8;
-       u64 enabled:1;
-       u64 reserved2:15;
-       u64 target_vp:32;
+       __u64 vector:8;
+       __u64 reserved1:8;
+       __u64 enabled:1;
+       __u64 reserved2:15;
+       __u64 target_vp:32;
 };
 
 #define HV_X64_MSR_TSC_EMULATION_CONTROL       0x40000107
 #define HV_X64_MSR_TSC_EMULATION_STATUS                0x40000108
 
 struct hv_tsc_emulation_control {
-       u64 enabled:1;
-       u64 reserved:63;
+       __u64 enabled:1;
+       __u64 reserved:63;
 };
 
 struct hv_tsc_emulation_status {
-       u64 inprogress:1;
-       u64 reserved:63;
+       __u64 inprogress:1;
+       __u64 reserved:63;
 };
 
 #define HV_X64_MSR_HYPERCALL_ENABLE            0x00000001
index 7a2ade4aa235380a8c28af6934d30566bb24de73..6cfa9c8cb7d650bef22010de0eca622a86364a73 100644 (file)
@@ -26,6 +26,7 @@
 #define KVM_FEATURE_PV_EOI             6
 #define KVM_FEATURE_PV_UNHALT          7
 #define KVM_FEATURE_PV_TLB_FLUSH       9
+#define KVM_FEATURE_ASYNC_PF_VMEXIT    10
 
 /* The last 8 bits are used to indicate how to interpret the flags field
  * in pvclock structure. If no bits are set, all flags are ignored.
index 8ad2e410974f2d4b71a44f033e8c046671b89622..7c5538769f7e43f7bac9c8ff9a30b6a26ac89433 100644 (file)
@@ -1603,7 +1603,7 @@ static void __init delay_with_tsc(void)
        do {
                rep_nop();
                now = rdtsc();
-       } while ((now - start) < 40000000000UL / HZ &&
+       } while ((now - start) < 40000000000ULL / HZ &&
                time_before_eq(jiffies, end));
 }
 
index 3cc471beb50b499d89148bbdee37c697c596f743..bb6f7a2148d7781f64836a14f6f9884bd7955740 100644 (file)
@@ -134,21 +134,40 @@ static void apic_update_vector(struct irq_data *irqd, unsigned int newvec,
 {
        struct apic_chip_data *apicd = apic_chip_data(irqd);
        struct irq_desc *desc = irq_data_to_desc(irqd);
+       bool managed = irqd_affinity_is_managed(irqd);
 
        lockdep_assert_held(&vector_lock);
 
        trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector,
                            apicd->cpu);
 
-       /* Setup the vector move, if required  */
-       if (apicd->vector && cpu_online(apicd->cpu)) {
+       /*
+        * If there is no vector associated or if the associated vector is
+        * the shutdown vector, which is associated to make PCI/MSI
+        * shutdown mode work, then there is nothing to release. Clear out
+        * prev_vector for this and the offlined target case.
+        */
+       apicd->prev_vector = 0;
+       if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR)
+               goto setnew;
+       /*
+        * If the target CPU of the previous vector is online, then mark
+        * the vector as move in progress and store it for cleanup when the
+        * first interrupt on the new vector arrives. If the target CPU is
+        * offline then the regular release mechanism via the cleanup
+        * vector is not possible and the vector can be immediately freed
+        * in the underlying matrix allocator.
+        */
+       if (cpu_online(apicd->cpu)) {
                apicd->move_in_progress = true;
                apicd->prev_vector = apicd->vector;
                apicd->prev_cpu = apicd->cpu;
        } else {
-               apicd->prev_vector = 0;
+               irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector,
+                               managed);
        }
 
+setnew:
        apicd->vector = newvec;
        apicd->cpu = newcpu;
        BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec]));
index d71c8b54b696d4593ffb15ff894468ad3e524a50..bfca937bdcc36ce8d9523f03dcc92e93d3c39d5c 100644 (file)
@@ -300,6 +300,15 @@ retpoline_auto:
                setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
                pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
        }
+
+       /*
+        * Retpoline means the kernel is safe because it has no indirect
+        * branches. But firmware isn't, so use IBRS to protect that.
+        */
+       if (boot_cpu_has(X86_FEATURE_IBRS)) {
+               setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
+               pr_info("Enabling Restricted Speculation for firmware calls\n");
+       }
 }
 
 #undef pr_fmt
@@ -326,8 +335,9 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                return sprintf(buf, "Not affected\n");
 
-       return sprintf(buf, "%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+       return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
                       boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+                      boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
                       spectre_v2_module_string());
 }
 #endif
index 824aee0117bb5402d52fb5c8958e98b0bebfdf0c..348cf48212405077bd731e25cf7a41de585ba61d 100644 (file)
@@ -1749,3 +1749,33 @@ static int __init init_cpu_syscore(void)
        return 0;
 }
 core_initcall(init_cpu_syscore);
+
+/*
+ * The microcode loader calls this upon late microcode load to recheck features,
+ * only when microcode has been updated. Caller holds microcode_mutex and CPU
+ * hotplug lock.
+ */
+void microcode_check(void)
+{
+       struct cpuinfo_x86 info;
+
+       perf_check_microcode();
+
+       /* Reload CPUID max function as it might've changed. */
+       info.cpuid_level = cpuid_eax(0);
+
+       /*
+        * Copy all capability leafs to pick up the synthetic ones so that
+        * memcmp() below doesn't fail on that. The ones coming from CPUID will
+        * get overwritten in get_cpu_cap().
+        */
+       memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability));
+
+       get_cpu_cap(&info);
+
+       if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)))
+               return;
+
+       pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
+       pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
+}
index bdab7d2f51af4c2a32d18d891f54b5bc60f60bd4..fca759d272a1783e76d86dfa38e213d167f39ade 100644 (file)
@@ -1804,6 +1804,7 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
                goto out_common_fail;
        }
        closid = ret;
+       ret = 0;
 
        rdtgrp->closid = closid;
        list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);
index 330b8462d426faad0dccdc480f34eec34cd8b92f..a998e1a7d46fdc7f2bdb4b58b11b49ab28ddc638 100644 (file)
@@ -498,7 +498,7 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
        return patch_size;
 }
 
-static int apply_microcode_amd(int cpu)
+static enum ucode_state apply_microcode_amd(int cpu)
 {
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        struct microcode_amd *mc_amd;
@@ -512,7 +512,7 @@ static int apply_microcode_amd(int cpu)
 
        p = find_patch(cpu);
        if (!p)
-               return 0;
+               return UCODE_NFOUND;
 
        mc_amd  = p->data;
        uci->mc = p->data;
@@ -523,13 +523,13 @@ static int apply_microcode_amd(int cpu)
        if (rev >= mc_amd->hdr.patch_id) {
                c->microcode = rev;
                uci->cpu_sig.rev = rev;
-               return 0;
+               return UCODE_OK;
        }
 
        if (__apply_microcode_amd(mc_amd)) {
                pr_err("CPU%d: update failed for patch_level=0x%08x\n",
                        cpu, mc_amd->hdr.patch_id);
-               return -1;
+               return UCODE_ERROR;
        }
        pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
                mc_amd->hdr.patch_id);
@@ -537,7 +537,7 @@ static int apply_microcode_amd(int cpu)
        uci->cpu_sig.rev = mc_amd->hdr.patch_id;
        c->microcode = mc_amd->hdr.patch_id;
 
-       return 0;
+       return UCODE_UPDATED;
 }
 
 static int install_equiv_cpu_table(const u8 *buf)
index 319dd65f98a25530d3a55cb6ea1cf61f6c2004bc..aa1b9a422f2be0daf18a7084837165172ef0fafb 100644 (file)
@@ -374,7 +374,7 @@ static int collect_cpu_info(int cpu)
 }
 
 struct apply_microcode_ctx {
-       int err;
+       enum ucode_state err;
 };
 
 static void apply_microcode_local(void *arg)
@@ -489,31 +489,30 @@ static void __exit microcode_dev_exit(void)
 /* fake device for request_firmware */
 static struct platform_device  *microcode_pdev;
 
-static int reload_for_cpu(int cpu)
+static enum ucode_state reload_for_cpu(int cpu)
 {
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        enum ucode_state ustate;
-       int err = 0;
 
        if (!uci->valid)
-               return err;
+               return UCODE_OK;
 
        ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
-       if (ustate == UCODE_OK)
-               apply_microcode_on_target(cpu);
-       else
-               if (ustate == UCODE_ERROR)
-                       err = -EINVAL;
-       return err;
+       if (ustate != UCODE_OK)
+               return ustate;
+
+       return apply_microcode_on_target(cpu);
 }
 
 static ssize_t reload_store(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf, size_t size)
 {
+       enum ucode_state tmp_ret = UCODE_OK;
+       bool do_callback = false;
        unsigned long val;
+       ssize_t ret = 0;
        int cpu;
-       ssize_t ret = 0, tmp_ret;
 
        ret = kstrtoul(buf, 0, &val);
        if (ret)
@@ -526,15 +525,21 @@ static ssize_t reload_store(struct device *dev,
        mutex_lock(&microcode_mutex);
        for_each_online_cpu(cpu) {
                tmp_ret = reload_for_cpu(cpu);
-               if (tmp_ret != 0)
+               if (tmp_ret > UCODE_NFOUND) {
                        pr_warn("Error reloading microcode on CPU %d\n", cpu);
 
-               /* save retval of the first encountered reload error */
-               if (!ret)
-                       ret = tmp_ret;
+                       /* set retval for the first encountered reload error */
+                       if (!ret)
+                               ret = -EINVAL;
+               }
+
+               if (tmp_ret == UCODE_UPDATED)
+                       do_callback = true;
        }
-       if (!ret)
-               perf_check_microcode();
+
+       if (!ret && do_callback)
+               microcode_check();
+
        mutex_unlock(&microcode_mutex);
        put_online_cpus();
 
index a15db2b4e0d66a8b5c4d2468359eeafb85401151..923054a6b76013a1474372b7810749f577b0c811 100644 (file)
@@ -772,7 +772,7 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
        return 0;
 }
 
-static int apply_microcode_intel(int cpu)
+static enum ucode_state apply_microcode_intel(int cpu)
 {
        struct microcode_intel *mc;
        struct ucode_cpu_info *uci;
@@ -782,7 +782,7 @@ static int apply_microcode_intel(int cpu)
 
        /* We should bind the task to the CPU */
        if (WARN_ON(raw_smp_processor_id() != cpu))
-               return -1;
+               return UCODE_ERROR;
 
        uci = ucode_cpu_info + cpu;
        mc = uci->mc;
@@ -790,7 +790,7 @@ static int apply_microcode_intel(int cpu)
                /* Look for a newer patch in our cache: */
                mc = find_patch(uci);
                if (!mc)
-                       return 0;
+                       return UCODE_NFOUND;
        }
 
        /* write microcode via MSR 0x79 */
@@ -801,7 +801,7 @@ static int apply_microcode_intel(int cpu)
        if (rev != mc->hdr.rev) {
                pr_err("CPU%d update to revision 0x%x failed\n",
                       cpu, mc->hdr.rev);
-               return -1;
+               return UCODE_ERROR;
        }
 
        if (rev != prev_rev) {
@@ -818,7 +818,7 @@ static int apply_microcode_intel(int cpu)
        uci->cpu_sig.rev = rev;
        c->microcode = rev;
 
-       return 0;
+       return UCODE_UPDATED;
 }
 
 static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
index 04a625f0fcda322dab7c9d8459a19ee56b71c936..0f545b3cf926787bd986d763c843498540ef530d 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/nops.h>
 #include "../entry/calling.h"
 #include <asm/export.h>
+#include <asm/nospec-branch.h>
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/asm-offsets.h>
@@ -134,6 +135,7 @@ ENTRY(secondary_startup_64)
 
        /* Ensure I am executing from virtual addresses */
        movq    $1f, %rax
+       ANNOTATE_RETPOLINE_SAFE
        jmp     *%rax
 1:
        UNWIND_HINT_EMPTY
index 4e37d1a851a62df3f9f841f3bbd66827af0c1920..bc1a27280c4bf77899afad4b85bf53212d385cab 100644 (file)
@@ -49,7 +49,7 @@
 
 static int kvmapf = 1;
 
-static int parse_no_kvmapf(char *arg)
+static int __init parse_no_kvmapf(char *arg)
 {
         kvmapf = 0;
         return 0;
@@ -58,7 +58,7 @@ static int parse_no_kvmapf(char *arg)
 early_param("no-kvmapf", parse_no_kvmapf);
 
 static int steal_acc = 1;
-static int parse_no_stealacc(char *arg)
+static int __init parse_no_stealacc(char *arg)
 {
         steal_acc = 0;
         return 0;
@@ -67,7 +67,7 @@ static int parse_no_stealacc(char *arg)
 early_param("no-steal-acc", parse_no_stealacc);
 
 static int kvmclock_vsyscall = 1;
-static int parse_no_kvmclock_vsyscall(char *arg)
+static int __init parse_no_kvmclock_vsyscall(char *arg)
 {
         kvmclock_vsyscall = 0;
         return 0;
@@ -341,10 +341,10 @@ static void kvm_guest_cpu_init(void)
 #endif
                pa |= KVM_ASYNC_PF_ENABLED;
 
-               /* Async page fault support for L1 hypervisor is optional */
-               if (wrmsr_safe(MSR_KVM_ASYNC_PF_EN,
-                       (pa | KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT) & 0xffffffff, pa >> 32) < 0)
-                       wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
+               if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
+                       pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
+
+               wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
                __this_cpu_write(apf_reason.enabled, 1);
                printk(KERN_INFO"KVM setup async PF for cpu %d\n",
                       smp_processor_id());
@@ -545,7 +545,8 @@ static void __init kvm_guest_init(void)
                pv_time_ops.steal_clock = kvm_steal_clock;
        }
 
-       if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH))
+       if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
+           !kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
                pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;
 
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
@@ -633,7 +634,8 @@ static __init int kvm_setup_pv_tlb_flush(void)
 {
        int cpu;
 
-       if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH)) {
+       if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
+           !kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                for_each_possible_cpu(cpu) {
                        zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
                                GFP_KERNEL, cpu_to_node(cpu));
index 1f790cf9d38fe0e10e46eaf9b5bef945d25a9370..3b7427aa7d850675905aaf636590b34107132874 100644 (file)
@@ -542,6 +542,7 @@ int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr,
                                goto overflow;
                        break;
                case R_X86_64_PC32:
+               case R_X86_64_PLT32:
                        value -= (u64)address;
                        *(u32 *)location = value;
                        break;
index da0c160e558905c2164bf1d3879bd1a1cf368f34..f58336af095c9d3050e85c5dea79a164b275e420 100644 (file)
@@ -191,6 +191,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                                goto overflow;
                        break;
                case R_X86_64_PC32:
+               case R_X86_64_PLT32:
                        if (*(u32 *)loc != 0)
                                goto invalid_relocation;
                        val -= (u64)loc;
index 9eee25d07586c6a310b16e3471efc02e501f93c7..ff99e2b6fc541a0faf8afaa17328679533b7838a 100644 (file)
@@ -1437,6 +1437,7 @@ static void remove_siblinginfo(int cpu)
        cpumask_clear(topology_sibling_cpumask(cpu));
        cpumask_clear(topology_core_cpumask(cpu));
        c->cpu_core_id = 0;
+       c->booted_cores = 0;
        cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
        recompute_smt_state();
 }
index 1f9188f5357cb38e45295c07ba5f2e902563257e..feb28fee6cea7f9fbad1a4b06ee9932ac8690d75 100644 (file)
@@ -5,7 +5,6 @@
 #include <asm/unwind.h>
 #include <asm/orc_types.h>
 #include <asm/orc_lookup.h>
-#include <asm/sections.h>
 
 #define orc_warn(fmt, ...) \
        printk_deferred_once(KERN_WARNING pr_fmt("WARNING: " fmt), ##__VA_ARGS__)
@@ -148,7 +147,7 @@ static struct orc_entry *orc_find(unsigned long ip)
        }
 
        /* vmlinux .init slow lookup: */
-       if (ip >= (unsigned long)_sinittext && ip < (unsigned long)_einittext)
+       if (init_kernel_text(ip))
                return __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
                                  __stop_orc_unwind_ip - __start_orc_unwind_ip, ip);
 
index a0c5a69bc7c4a324078db14ad27753443d65aa85..b671fc2d0422717f06ca6291b2a76742ee45537a 100644 (file)
@@ -607,7 +607,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                             (1 << KVM_FEATURE_PV_EOI) |
                             (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
                             (1 << KVM_FEATURE_PV_UNHALT) |
-                            (1 << KVM_FEATURE_PV_TLB_FLUSH);
+                            (1 << KVM_FEATURE_PV_TLB_FLUSH) |
+                            (1 << KVM_FEATURE_ASYNC_PF_VMEXIT);
 
                if (sched_info_on())
                        entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
index 924ac8ce9d5004f9db4126a81f178c2bf0e6ff40..391dda8d43b7a44d3cedd0b41edbb51ba7171bf4 100644 (file)
@@ -2002,14 +2002,13 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
 
 void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 {
-       struct kvm_lapic *apic;
+       struct kvm_lapic *apic = vcpu->arch.apic;
        int i;
 
-       apic_debug("%s\n", __func__);
+       if (!apic)
+               return;
 
-       ASSERT(vcpu);
-       apic = vcpu->arch.apic;
-       ASSERT(apic != NULL);
+       apic_debug("%s\n", __func__);
 
        /* Stop the timer in case it's a reset to an active apic */
        hrtimer_cancel(&apic->lapic_timer.timer);
@@ -2165,7 +2164,6 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
         */
        vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
        static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
-       kvm_lapic_reset(vcpu, false);
        kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
 
        return 0;
@@ -2569,7 +2567,6 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
 
        pe = xchg(&apic->pending_events, 0);
        if (test_bit(KVM_APIC_INIT, &pe)) {
-               kvm_lapic_reset(vcpu, true);
                kvm_vcpu_reset(vcpu, true);
                if (kvm_vcpu_is_bsp(apic->vcpu))
                        vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
index 46ff304140c71fad1fa818324c0cda017257d2a5..f551962ac29488431b80d56aaabcec7a576a791f 100644 (file)
@@ -3029,7 +3029,7 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
                return RET_PF_RETRY;
        }
 
-       return -EFAULT;
+       return RET_PF_EMULATE;
 }
 
 static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
index b3e488a748281aa5f3d319861ae2ab4bfca3e65c..be9c839e2c89967d689485dbb8e51763980dd151 100644 (file)
@@ -49,6 +49,7 @@
 #include <asm/debugreg.h>
 #include <asm/kvm_para.h>
 #include <asm/irq_remapping.h>
+#include <asm/microcode.h>
 #include <asm/nospec-branch.h>
 
 #include <asm/virtext.h>
@@ -178,6 +179,8 @@ struct vcpu_svm {
        uint64_t sysenter_eip;
        uint64_t tsc_aux;
 
+       u64 msr_decfg;
+
        u64 next_rip;
 
        u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
@@ -300,6 +303,8 @@ module_param(vgif, int, 0444);
 static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
 module_param(sev, int, 0444);
 
+static u8 rsm_ins_bytes[] = "\x0f\xaa";
+
 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa);
 static void svm_complete_interrupts(struct vcpu_svm *svm);
@@ -1383,6 +1388,7 @@ static void init_vmcb(struct vcpu_svm *svm)
        set_intercept(svm, INTERCEPT_SKINIT);
        set_intercept(svm, INTERCEPT_WBINVD);
        set_intercept(svm, INTERCEPT_XSETBV);
+       set_intercept(svm, INTERCEPT_RSM);
 
        if (!kvm_mwait_in_guest()) {
                set_intercept(svm, INTERCEPT_MONITOR);
@@ -1902,6 +1908,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
        u32 dummy;
        u32 eax = 1;
 
+       vcpu->arch.microcode_version = 0x01000065;
        svm->spec_ctrl = 0;
 
        if (!init_event) {
@@ -3699,6 +3706,12 @@ static int emulate_on_interception(struct vcpu_svm *svm)
        return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
 }
 
+static int rsm_interception(struct vcpu_svm *svm)
+{
+       return x86_emulate_instruction(&svm->vcpu, 0, 0,
+                                      rsm_ins_bytes, 2) == EMULATE_DONE;
+}
+
 static int rdpmc_interception(struct vcpu_svm *svm)
 {
        int err;
@@ -3860,6 +3873,22 @@ static int cr8_write_interception(struct vcpu_svm *svm)
        return 0;
 }
 
+static int svm_get_msr_feature(struct kvm_msr_entry *msr)
+{
+       msr->data = 0;
+
+       switch (msr->index) {
+       case MSR_F10H_DECFG:
+               if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
+                       msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
+               break;
+       default:
+               return 1;
+       }
+
+       return 0;
+}
+
 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -3935,9 +3964,6 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
                msr_info->data = svm->spec_ctrl;
                break;
-       case MSR_IA32_UCODE_REV:
-               msr_info->data = 0x01000065;
-               break;
        case MSR_F15H_IC_CFG: {
 
                int family, model;
@@ -3955,6 +3981,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        msr_info->data = 0x1E;
                }
                break;
+       case MSR_F10H_DECFG:
+               msr_info->data = svm->msr_decfg;
+               break;
        default:
                return kvm_get_msr_common(vcpu, msr_info);
        }
@@ -4133,6 +4162,24 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
        case MSR_VM_IGNNE:
                vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
                break;
+       case MSR_F10H_DECFG: {
+               struct kvm_msr_entry msr_entry;
+
+               msr_entry.index = msr->index;
+               if (svm_get_msr_feature(&msr_entry))
+                       return 1;
+
+               /* Check the supported bits */
+               if (data & ~msr_entry.data)
+                       return 1;
+
+               /* Don't allow the guest to change a bit, #GP */
+               if (!msr->host_initiated && (data ^ msr_entry.data))
+                       return 1;
+
+               svm->msr_decfg = data;
+               break;
+       }
        case MSR_IA32_APICBASE:
                if (kvm_vcpu_apicv_active(vcpu))
                        avic_update_vapic_bar(to_svm(vcpu), data);
@@ -4541,7 +4588,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
        [SVM_EXIT_MWAIT]                        = mwait_interception,
        [SVM_EXIT_XSETBV]                       = xsetbv_interception,
        [SVM_EXIT_NPF]                          = npf_interception,
-       [SVM_EXIT_RSM]                          = emulate_on_interception,
+       [SVM_EXIT_RSM]                          = rsm_interception,
        [SVM_EXIT_AVIC_INCOMPLETE_IPI]          = avic_incomplete_ipi_interception,
        [SVM_EXIT_AVIC_UNACCELERATED_ACCESS]    = avic_unaccelerated_access_interception,
 };
@@ -5355,7 +5402,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
         * being speculatively taken.
         */
        if (svm->spec_ctrl)
-               wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+               native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
 
        asm volatile (
                "push %%" _ASM_BP "; \n\t"
@@ -5464,11 +5511,11 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
         * If the L02 MSR bitmap does not intercept the MSR, then we need to
         * save it.
         */
-       if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
-               rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+       if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+               svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
        if (svm->spec_ctrl)
-               wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+               native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
 
        /* Eliminate branch target predictions from guest mode */
        vmexit_fill_RSB();
@@ -6236,16 +6283,18 @@ e_free:
 
 static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
 {
+       void __user *measure = (void __user *)(uintptr_t)argp->data;
        struct kvm_sev_info *sev = &kvm->arch.sev_info;
        struct sev_data_launch_measure *data;
        struct kvm_sev_launch_measure params;
+       void __user *p = NULL;
        void *blob = NULL;
        int ret;
 
        if (!sev_guest(kvm))
                return -ENOTTY;
 
-       if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
+       if (copy_from_user(&params, measure, sizeof(params)))
                return -EFAULT;
 
        data = kzalloc(sizeof(*data), GFP_KERNEL);
@@ -6256,17 +6305,13 @@ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
        if (!params.len)
                goto cmd;
 
-       if (params.uaddr) {
+       p = (void __user *)(uintptr_t)params.uaddr;
+       if (p) {
                if (params.len > SEV_FW_BLOB_MAX_SIZE) {
                        ret = -EINVAL;
                        goto e_free;
                }
 
-               if (!access_ok(VERIFY_WRITE, params.uaddr, params.len)) {
-                       ret = -EFAULT;
-                       goto e_free;
-               }
-
                ret = -ENOMEM;
                blob = kmalloc(params.len, GFP_KERNEL);
                if (!blob)
@@ -6290,13 +6335,13 @@ cmd:
                goto e_free_blob;
 
        if (blob) {
-               if (copy_to_user((void __user *)(uintptr_t)params.uaddr, blob, params.len))
+               if (copy_to_user(p, blob, params.len))
                        ret = -EFAULT;
        }
 
 done:
        params.len = data->len;
-       if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
+       if (copy_to_user(measure, &params, sizeof(params)))
                ret = -EFAULT;
 e_free_blob:
        kfree(blob);
@@ -6597,7 +6642,7 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
        struct page **pages;
        void *blob, *hdr;
        unsigned long n;
-       int ret;
+       int ret, offset;
 
        if (!sev_guest(kvm))
                return -ENOTTY;
@@ -6623,6 +6668,10 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
        if (!data)
                goto e_unpin_memory;
 
+       offset = params.guest_uaddr & (PAGE_SIZE - 1);
+       data->guest_address = __sme_page_pa(pages[0]) + offset;
+       data->guest_len = params.guest_len;
+
        blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
        if (IS_ERR(blob)) {
                ret = PTR_ERR(blob);
@@ -6637,8 +6686,8 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
                ret = PTR_ERR(hdr);
                goto e_free_blob;
        }
-       data->trans_address = __psp_pa(blob);
-       data->trans_len = params.trans_len;
+       data->hdr_address = __psp_pa(hdr);
+       data->hdr_len = params.hdr_len;
 
        data->handle = sev->handle;
        ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);
@@ -6821,6 +6870,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
        .vcpu_unblocking = svm_vcpu_unblocking,
 
        .update_bp_intercept = update_bp_intercept,
+       .get_msr_feature = svm_get_msr_feature,
        .get_msr = svm_get_msr,
        .set_msr = svm_set_msr,
        .get_segment_base = svm_get_segment_base,
index 3dec126aa3022eb11f49d5de695d3658183a48ee..051dab74e4e928ac7bf90598c0b9a9a8d8855f13 100644 (file)
@@ -51,6 +51,7 @@
 #include <asm/apic.h>
 #include <asm/irq_remapping.h>
 #include <asm/mmu_context.h>
+#include <asm/microcode.h>
 #include <asm/nospec-branch.h>
 
 #include "trace.h"
@@ -3226,6 +3227,11 @@ static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,
        return !(val & ~valid_bits);
 }
 
+static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
+{
+       return 1;
+}
+
 /*
  * Reads an msr value (of 'msr_index') into 'pdata'.
  * Returns 0 on success, non-0 otherwise.
@@ -4485,7 +4491,8 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
                              SECONDARY_EXEC_DESC);
                hw_cr4 &= ~X86_CR4_UMIP;
-       } else
+       } else if (!is_guest_mode(vcpu) ||
+                  !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))
                vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
                                SECONDARY_EXEC_DESC);
 
@@ -5765,6 +5772,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
        vmx->rmode.vm86_active = 0;
        vmx->spec_ctrl = 0;
 
+       vcpu->arch.microcode_version = 0x100000000ULL;
        vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
        kvm_set_cr8(vcpu, 0);
 
@@ -9452,7 +9460,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
         * being speculatively taken.
         */
        if (vmx->spec_ctrl)
-               wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+               native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
 
        vmx->__launched = vmx->loaded_vmcs->launched;
        asm(
@@ -9587,11 +9595,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
         * If the L02 MSR bitmap does not intercept the MSR, then we need to
         * save it.
         */
-       if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
-               rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+       if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+               vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
        if (vmx->spec_ctrl)
-               wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+               native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
 
        /* Eliminate branch target predictions from guest mode */
        vmexit_fill_RSB();
@@ -11199,7 +11207,12 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
        if (ret)
                return ret;
 
-       if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
+       /*
+        * If we're entering a halted L2 vcpu and the L2 vcpu won't be woken
+        * by event injection, halt vcpu.
+        */
+       if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
+           !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK))
                return kvm_vcpu_halt(vcpu);
 
        vmx->nested.nested_run_pending = 1;
@@ -12290,6 +12303,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
        .vcpu_put = vmx_vcpu_put,
 
        .update_bp_intercept = update_exception_bitmap,
+       .get_msr_feature = vmx_get_msr_feature,
        .get_msr = vmx_get_msr,
        .set_msr = vmx_set_msr,
        .get_segment_base = vmx_get_segment_base,
index c8a0b545ac20c71a464738a1dd0cd7e1c3df388e..18b5ca7a31974757f0dfdd43f5d7e0805f4f3aaa 100644 (file)
@@ -1049,6 +1049,45 @@ static u32 emulated_msrs[] = {
 
 static unsigned num_emulated_msrs;
 
+/*
+ * List of msr numbers which are used to expose MSR-based features that
+ * can be used by a hypervisor to validate requested CPU features.
+ */
+static u32 msr_based_features[] = {
+       MSR_F10H_DECFG,
+       MSR_IA32_UCODE_REV,
+};
+
+static unsigned int num_msr_based_features;
+
+static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
+{
+       switch (msr->index) {
+       case MSR_IA32_UCODE_REV:
+               rdmsrl(msr->index, msr->data);
+               break;
+       default:
+               if (kvm_x86_ops->get_msr_feature(msr))
+                       return 1;
+       }
+       return 0;
+}
+
+static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
+{
+       struct kvm_msr_entry msr;
+       int r;
+
+       msr.index = index;
+       r = kvm_get_msr_feature(&msr);
+       if (r)
+               return r;
+
+       *data = msr.data;
+
+       return 0;
+}
+
 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
        if (efer & efer_reserved_bits)
@@ -2222,7 +2261,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
        switch (msr) {
        case MSR_AMD64_NB_CFG:
-       case MSR_IA32_UCODE_REV:
        case MSR_IA32_UCODE_WRITE:
        case MSR_VM_HSAVE_PA:
        case MSR_AMD64_PATCH_LOADER:
@@ -2230,6 +2268,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_AMD64_DC_CFG:
                break;
 
+       case MSR_IA32_UCODE_REV:
+               if (msr_info->host_initiated)
+                       vcpu->arch.microcode_version = data;
+               break;
        case MSR_EFER:
                return set_efer(vcpu, data);
        case MSR_K7_HWCR:
@@ -2525,7 +2567,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                msr_info->data = 0;
                break;
        case MSR_IA32_UCODE_REV:
-               msr_info->data = 0x100000000ULL;
+               msr_info->data = vcpu->arch.microcode_version;
                break;
        case MSR_MTRRcap:
        case 0x200 ... 0x2ff:
@@ -2680,13 +2722,11 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
                    int (*do_msr)(struct kvm_vcpu *vcpu,
                                  unsigned index, u64 *data))
 {
-       int i, idx;
+       int i;
 
-       idx = srcu_read_lock(&vcpu->kvm->srcu);
        for (i = 0; i < msrs->nmsrs; ++i)
                if (do_msr(vcpu, entries[i].index, &entries[i].data))
                        break;
-       srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
        return i;
 }
@@ -2785,6 +2825,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_SET_BOOT_CPU_ID:
        case KVM_CAP_SPLIT_IRQCHIP:
        case KVM_CAP_IMMEDIATE_EXIT:
+       case KVM_CAP_GET_MSR_FEATURES:
                r = 1;
                break;
        case KVM_CAP_ADJUST_CLOCK:
@@ -2899,6 +2940,31 @@ long kvm_arch_dev_ioctl(struct file *filp,
                        goto out;
                r = 0;
                break;
+       case KVM_GET_MSR_FEATURE_INDEX_LIST: {
+               struct kvm_msr_list __user *user_msr_list = argp;
+               struct kvm_msr_list msr_list;
+               unsigned int n;
+
+               r = -EFAULT;
+               if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list)))
+                       goto out;
+               n = msr_list.nmsrs;
+               msr_list.nmsrs = num_msr_based_features;
+               if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list)))
+                       goto out;
+               r = -E2BIG;
+               if (n < msr_list.nmsrs)
+                       goto out;
+               r = -EFAULT;
+               if (copy_to_user(user_msr_list->indices, &msr_based_features,
+                                num_msr_based_features * sizeof(u32)))
+                       goto out;
+               r = 0;
+               break;
+       }
+       case KVM_GET_MSRS:
+               r = msr_io(NULL, argp, do_get_msr_feature, 1);
+               break;
        }
        default:
                r = -EINVAL;
@@ -3636,12 +3702,18 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                r = 0;
                break;
        }
-       case KVM_GET_MSRS:
+       case KVM_GET_MSRS: {
+               int idx = srcu_read_lock(&vcpu->kvm->srcu);
                r = msr_io(vcpu, argp, do_get_msr, 1);
+               srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
-       case KVM_SET_MSRS:
+       }
+       case KVM_SET_MSRS: {
+               int idx = srcu_read_lock(&vcpu->kvm->srcu);
                r = msr_io(vcpu, argp, do_set_msr, 0);
+               srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
+       }
        case KVM_TPR_ACCESS_REPORTING: {
                struct kvm_tpr_access_ctl tac;
 
@@ -4464,6 +4536,19 @@ static void kvm_init_msr_list(void)
                j++;
        }
        num_emulated_msrs = j;
+
+       for (i = j = 0; i < ARRAY_SIZE(msr_based_features); i++) {
+               struct kvm_msr_entry msr;
+
+               msr.index = msr_based_features[i];
+               if (kvm_get_msr_feature(&msr))
+                       continue;
+
+               if (j < i)
+                       msr_based_features[j] = msr_based_features[i];
+               j++;
+       }
+       num_msr_based_features = j;
 }
 
 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
@@ -8017,6 +8102,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 {
+       kvm_lapic_reset(vcpu, init_event);
+
        vcpu->arch.hflags = 0;
 
        vcpu->arch.smi_pending = 0;
@@ -8460,10 +8547,8 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
                        return r;
        }
 
-       if (!size) {
-               r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
-               WARN_ON(r < 0);
-       }
+       if (!size)
+               vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
 
        return 0;
 }
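
For orientation only (not part of the series): a minimal userspace sketch of the ABI the arch/x86/kvm/x86.c hunks above introduce. It assumes a <linux/kvm.h> that already carries KVM_CAP_GET_MSR_FEATURES and KVM_GET_MSR_FEATURE_INDEX_LIST, probes the feature-MSR index list on the /dev/kvm fd, and then reads each value through the system-scope KVM_GET_MSRS path added above.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR);

        if (kvm < 0 || ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_GET_MSR_FEATURES) <= 0)
                return 1;

        /* First call with nmsrs == 0: the kernel reports the real count via -E2BIG. */
        struct kvm_msr_list probe = { .nmsrs = 0 };
        ioctl(kvm, KVM_GET_MSR_FEATURE_INDEX_LIST, &probe);

        struct kvm_msr_list *list = calloc(1, sizeof(*list) + probe.nmsrs * sizeof(__u32));
        if (!list)
                return 1;
        list->nmsrs = probe.nmsrs;
        if (ioctl(kvm, KVM_GET_MSR_FEATURE_INDEX_LIST, list) < 0)
                return 1;

        /* Read each feature MSR through the system-level KVM_GET_MSRS handler. */
        for (__u32 i = 0; i < list->nmsrs; i++) {
                struct {
                        struct kvm_msrs hdr;
                        struct kvm_msr_entry entry;
                } req = { .hdr.nmsrs = 1 };

                req.entry.index = list->indices[i];
                if (ioctl(kvm, KVM_GET_MSRS, &req) == 1)
                        printf("feature MSR 0x%x = 0x%llx\n", req.entry.index,
                               (unsigned long long)req.entry.data);
        }
        free(list);
        return 0;
}
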
index 91e9700cc6dcd2152f7488740ad3f9a6e1b766c4..25a972c61b0ae9816a817eb9681f4cd374e9e32a 100644 (file)
@@ -28,7 +28,6 @@ lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
 lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
 lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
 lib-$(CONFIG_RETPOLINE) += retpoline.o
-OBJECT_FILES_NON_STANDARD_retpoline.o :=y
 
 obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
 
index 480edc3a5e03002dd6f0a0316477cbd7b0971cc8..c909961e678a594bd3812cb14936bdf035af2bb9 100644 (file)
@@ -7,7 +7,6 @@
 #include <asm/alternative-asm.h>
 #include <asm/export.h>
 #include <asm/nospec-branch.h>
-#include <asm/bitsperlong.h>
 
 .macro THUNK reg
        .section .text.__x86.indirect_thunk
@@ -47,58 +46,3 @@ GENERATE_THUNK(r13)
 GENERATE_THUNK(r14)
 GENERATE_THUNK(r15)
 #endif
-
-/*
- * Fill the CPU return stack buffer.
- *
- * Each entry in the RSB, if used for a speculative 'ret', contains an
- * infinite 'pause; lfence; jmp' loop to capture speculative execution.
- *
- * This is required in various cases for retpoline and IBRS-based
- * mitigations for the Spectre variant 2 vulnerability. Sometimes to
- * eliminate potentially bogus entries from the RSB, and sometimes
- * purely to ensure that it doesn't get empty, which on some CPUs would
- * allow predictions from other (unwanted!) sources to be used.
- *
- * Google experimented with loop-unrolling and this turned out to be
- * the optimal version - two calls, each with their own speculation
- * trap should their return address end up getting used, in a loop.
- */
-.macro STUFF_RSB nr:req sp:req
-       mov     $(\nr / 2), %_ASM_BX
-       .align 16
-771:
-       call    772f
-773:                                           /* speculation trap */
-       pause
-       lfence
-       jmp     773b
-       .align 16
-772:
-       call    774f
-775:                                           /* speculation trap */
-       pause
-       lfence
-       jmp     775b
-       .align 16
-774:
-       dec     %_ASM_BX
-       jnz     771b
-       add     $((BITS_PER_LONG/8) * \nr), \sp
-.endm
-
-#define RSB_FILL_LOOPS         16      /* To avoid underflow */
-
-ENTRY(__fill_rsb)
-       STUFF_RSB RSB_FILL_LOOPS, %_ASM_SP
-       ret
-END(__fill_rsb)
-EXPORT_SYMBOL_GPL(__fill_rsb)
-
-#define RSB_CLEAR_LOOPS                32      /* To forcibly overwrite all entries */
-
-ENTRY(__clear_rsb)
-       STUFF_RSB RSB_CLEAR_LOOPS, %_ASM_SP
-       ret
-END(__clear_rsb)
-EXPORT_SYMBOL_GPL(__clear_rsb)
index 800de815519cd1061c090e70fab09bd62bbd8ddd..c88573d90f3e9d3a521049d4655fd4857b99be3d 100644 (file)
@@ -1248,10 +1248,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
        tsk = current;
        mm = tsk->mm;
 
-       /*
-        * Detect and handle instructions that would cause a page fault for
-        * both a tracked kernel page and a userspace page.
-        */
        prefetchw(&mm->mmap_sem);
 
        if (unlikely(kmmio_fault(regs, address)))
index 01f682cf77a8b36bff9a2b45a8f8beb734b4b6c4..40a6085063d6fe87958f260bf9cbc1be3b647424 100644 (file)
@@ -15,6 +15,7 @@
 #include <asm/page.h>
 #include <asm/processor-flags.h>
 #include <asm/msr-index.h>
+#include <asm/nospec-branch.h>
 
        .text
        .code64
@@ -59,6 +60,7 @@ ENTRY(sme_encrypt_execute)
        movq    %rax, %r8               /* Workarea encryption routine */
        addq    $PAGE_SIZE, %r8         /* Workarea intermediate copy buffer */
 
+       ANNOTATE_RETPOLINE_SAFE
        call    *%rax                   /* Call the encryption routine */
 
        pop     %r12
index 4923d92f918d58fa3daddd915f49b383533a161e..45e4eb5bcbb2ab4894b12d6a948b19eb25250af4 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/if_vlan.h>
 #include <asm/cacheflush.h>
 #include <asm/set_memory.h>
+#include <asm/nospec-branch.h>
 #include <linux/bpf.h>
 
 /*
@@ -290,7 +291,7 @@ static void emit_bpf_tail_call(u8 **pprog)
        EMIT2(0x89, 0xD2);                        /* mov edx, edx */
        EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
              offsetof(struct bpf_array, map.max_entries));
-#define OFFSET1 43 /* number of bytes to jump */
+#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* number of bytes to jump */
        EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
        label1 = cnt;
 
@@ -299,7 +300,7 @@ static void emit_bpf_tail_call(u8 **pprog)
         */
        EMIT2_off32(0x8B, 0x85, 36);              /* mov eax, dword ptr [rbp + 36] */
        EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
-#define OFFSET2 32
+#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
        EMIT2(X86_JA, OFFSET2);                   /* ja out */
        label2 = cnt;
        EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
@@ -313,7 +314,7 @@ static void emit_bpf_tail_call(u8 **pprog)
         *   goto out;
         */
        EMIT3(0x48, 0x85, 0xC0);                  /* test rax,rax */
-#define OFFSET3 10
+#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
        EMIT2(X86_JE, OFFSET3);                   /* je out */
        label3 = cnt;
 
@@ -326,7 +327,7 @@ static void emit_bpf_tail_call(u8 **pprog)
         * rdi == ctx (1st arg)
         * rax == prog->bpf_func + prologue_size
         */
-       EMIT2(0xFF, 0xE0);                        /* jmp rax */
+       RETPOLINE_RAX_BPF_JIT();
 
        /* out: */
        BUILD_BUG_ON(cnt - label1 != OFFSET1);
index 174c59774cc935430583160c6c158d5de363c7ef..a7a7677265b6f73779d486da8bb5ae0b83e48062 100644 (file)
@@ -460,7 +460,7 @@ static int nmi_setup(void)
                goto fail;
 
        for_each_possible_cpu(cpu) {
-               if (!cpu)
+               if (!IS_ENABLED(CONFIG_SMP) || !cpu)
                        continue;
 
                memcpy(per_cpu(cpu_msrs, cpu).counters,
index de53bd15df5a865589e7eda181a1771fbe7297e4..24bb7598774e6aa94203e601f41329d31308692e 100644 (file)
@@ -102,7 +102,7 @@ ENTRY(startup_32)
         * don't we'll eventually crash trying to execute encrypted
         * instructions.
         */
-       bt      $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags
+       btl     $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags
        jnc     .Ldone
        movl    $MSR_K8_SYSCFG, %ecx
        rdmsr
index 5d73c443e778b3a0e2fd79b4bf0f301cd1c8d878..220e97841e494c41e21b242a053b9ffcefeb2b4a 100644 (file)
@@ -770,9 +770,12 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
                break;
 
        case R_X86_64_PC32:
+       case R_X86_64_PLT32:
                /*
                 * PC relative relocations don't need to be adjusted unless
                 * referencing a percpu symbol.
+                *
+                * NB: R_X86_64_PLT32 can be treated as R_X86_64_PC32.
                 */
                if (is_percpu_sym(sym, symname))
                        add_reloc(&relocs32neg, offset);
index c047f42552e1a61ed0a5787d904681974cc05af1..3c2c2530737efc717c9945ee1e2990e5b5bfd4c5 100644 (file)
@@ -1376,8 +1376,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
 
        if (!xen_initial_domain()) {
                add_preferred_console("xenboot", 0, NULL);
-               add_preferred_console("tty", 0, NULL);
-               add_preferred_console("hvc", 0, NULL);
                if (pci_xen)
                        x86_init.pci.arch_init = pci_xen_init;
        } else {
@@ -1410,6 +1408,10 @@ asmlinkage __visible void __init xen_start_kernel(void)
 
                xen_boot_params_init_edd();
        }
+
+       add_preferred_console("tty", 0, NULL);
+       add_preferred_console("hvc", 0, NULL);
+
 #ifdef CONFIG_PCI
        /* PCI BIOS service won't work from a PV guest. */
        pci_probe &= ~PCI_PROBE_BIOS;
index 623720a111432f68e8f8c080e4085d5cab151565..732631ce250fcab756d7e4d8d39ee054cf7a4de9 100644 (file)
@@ -16,6 +16,7 @@
  */
 
 #include <linux/dma-contiguous.h>
+#include <linux/dma-direct.h>
 #include <linux/gfp.h>
 #include <linux/highmem.h>
 #include <linux/mm.h>
@@ -123,7 +124,7 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
                              unsigned long attrs)
 {
        unsigned long ret;
-       unsigned long uncached = 0;
+       unsigned long uncached;
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct page *page = NULL;
 
@@ -144,15 +145,27 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
        if (!page)
                return NULL;
 
-       ret = (unsigned long)page_address(page);
+       *handle = phys_to_dma(dev, page_to_phys(page));
 
-       /* We currently don't support coherent memory outside KSEG */
+#ifdef CONFIG_MMU
+       if (PageHighMem(page)) {
+               void *p;
 
+               p = dma_common_contiguous_remap(page, size, VM_MAP,
+                                               pgprot_noncached(PAGE_KERNEL),
+                                               __builtin_return_address(0));
+               if (!p) {
+                       if (!dma_release_from_contiguous(dev, page, count))
+                               __free_pages(page, get_order(size));
+               }
+               return p;
+       }
+#endif
+       ret = (unsigned long)page_address(page);
        BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
               ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
 
        uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
-       *handle = virt_to_bus((void *)ret);
        __invalidate_dcache_range(ret, size);
 
        return (void *)uncached;
@@ -161,13 +174,20 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
 static void xtensa_dma_free(struct device *dev, size_t size, void *vaddr,
                            dma_addr_t dma_handle, unsigned long attrs)
 {
-       unsigned long addr = (unsigned long)vaddr +
-               XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
-       struct page *page = virt_to_page(addr);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-       BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR ||
-              addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
+       unsigned long addr = (unsigned long)vaddr;
+       struct page *page;
+
+       if (addr >= XCHAL_KSEG_BYPASS_VADDR &&
+           addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE) {
+               addr += XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
+               page = virt_to_page(addr);
+       } else {
+#ifdef CONFIG_MMU
+               dma_common_free_remap(vaddr, size, VM_MAP);
+#endif
+               page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
+       }
 
        if (!dma_release_from_contiguous(dev, page, count))
                __free_pages(page, get_order(size));
index d776ec0d7b22d375c4df5121a3a18f95babca749..34aead7dcb4878bf18f48cc2087fd93b5bee95ca 100644 (file)
@@ -79,19 +79,75 @@ void __init zones_init(void)
        free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
 }
 
+#ifdef CONFIG_HIGHMEM
+static void __init free_area_high(unsigned long pfn, unsigned long end)
+{
+       for (; pfn < end; pfn++)
+               free_highmem_page(pfn_to_page(pfn));
+}
+
+static void __init free_highpages(void)
+{
+       unsigned long max_low = max_low_pfn;
+       struct memblock_region *mem, *res;
+
+       reset_all_zones_managed_pages();
+       /* set highmem page free */
+       for_each_memblock(memory, mem) {
+               unsigned long start = memblock_region_memory_base_pfn(mem);
+               unsigned long end = memblock_region_memory_end_pfn(mem);
+
+               /* Ignore complete lowmem entries */
+               if (end <= max_low)
+                       continue;
+
+               if (memblock_is_nomap(mem))
+                       continue;
+
+               /* Truncate partial highmem entries */
+               if (start < max_low)
+                       start = max_low;
+
+               /* Find and exclude any reserved regions */
+               for_each_memblock(reserved, res) {
+                       unsigned long res_start, res_end;
+
+                       res_start = memblock_region_reserved_base_pfn(res);
+                       res_end = memblock_region_reserved_end_pfn(res);
+
+                       if (res_end < start)
+                               continue;
+                       if (res_start < start)
+                               res_start = start;
+                       if (res_start > end)
+                               res_start = end;
+                       if (res_end > end)
+                               res_end = end;
+                       if (res_start != start)
+                               free_area_high(start, res_start);
+                       start = res_end;
+                       if (start == end)
+                               break;
+               }
+
+               /* And now free anything which remains */
+               if (start < end)
+                       free_area_high(start, end);
+       }
+}
+#else
+static void __init free_highpages(void)
+{
+}
+#endif
+
 /*
  * Initialize memory pages.
  */
 
 void __init mem_init(void)
 {
-#ifdef CONFIG_HIGHMEM
-       unsigned long tmp;
-
-       reset_all_zones_managed_pages();
-       for (tmp = max_low_pfn; tmp < max_pfn; tmp++)
-               free_highmem_page(pfn_to_page(tmp));
-#endif
+       free_highpages();
 
        max_mapnr = max_pfn - ARCH_PFN_OFFSET;
        high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
index 4117524ca45bc76cceb163c42f752ffd8d7e1659..c2033a232a443a3b54def7e9d88b646a7ac950d5 100644 (file)
@@ -812,7 +812,6 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
        struct gendisk *disk;
        struct request_queue *q;
        struct blkcg_gq *blkg;
-       struct module *owner;
        unsigned int major, minor;
        int key_len, part, ret;
        char *body;
@@ -904,9 +903,7 @@ fail_unlock:
        spin_unlock_irq(q->queue_lock);
        rcu_read_unlock();
 fail:
-       owner = disk->fops->owner;
-       put_disk(disk);
-       module_put(owner);
+       put_disk_and_module(disk);
        /*
         * If queue was bypassing, we should retry.  Do so after a
         * short msleep().  It isn't strictly necessary but queue
@@ -931,13 +928,9 @@ EXPORT_SYMBOL_GPL(blkg_conf_prep);
 void blkg_conf_finish(struct blkg_conf_ctx *ctx)
        __releases(ctx->disk->queue->queue_lock) __releases(rcu)
 {
-       struct module *owner;
-
        spin_unlock_irq(ctx->disk->queue->queue_lock);
        rcu_read_unlock();
-       owner = ctx->disk->fops->owner;
-       put_disk(ctx->disk);
-       module_put(owner);
+       put_disk_and_module(ctx->disk);
 }
 EXPORT_SYMBOL_GPL(blkg_conf_finish);
 
index 2d1a7bbe063437bfacfca43ad479c305fccf56c7..6d82c4f7fadd9466f84a2405352f8296597eaa09 100644 (file)
@@ -2434,7 +2434,7 @@ blk_qc_t submit_bio(struct bio *bio)
                unsigned int count;
 
                if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
-                       count = queue_logical_block_size(bio->bi_disk->queue);
+                       count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
                else
                        count = bio_sectors(bio);
 
index 357492712b0ea85362d2bbd6b2aa1687459cba66..16e83e6df404a24fd1a59baeb77b9c7b7cc9890c 100644 (file)
@@ -712,7 +712,6 @@ static void __blk_mq_requeue_request(struct request *rq)
 
        trace_block_rq_requeue(q, rq);
        wbt_requeue(q->rq_wb, &rq->issue_stat);
-       blk_mq_sched_requeue_request(rq);
 
        if (blk_mq_rq_state(rq) != MQ_RQ_IDLE) {
                blk_mq_rq_update_state(rq, MQ_RQ_IDLE);
@@ -725,6 +724,9 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
 {
        __blk_mq_requeue_request(rq);
 
+       /* this request will be re-inserted into the io scheduler queue */
+       blk_mq_sched_requeue_request(rq);
+
        BUG_ON(blk_queued_rq(rq));
        blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
 }
index 88a53c188cb7338e74a9a55a44d8d0e5a2a758da..9656f9e9f99e20af13f1e175aba15f9447d9ef87 100644 (file)
@@ -547,7 +547,7 @@ static int exact_lock(dev_t devt, void *data)
 {
        struct gendisk *p = data;
 
-       if (!get_disk(p))
+       if (!get_disk_and_module(p))
                return -1;
        return 0;
 }
@@ -717,6 +717,11 @@ void del_gendisk(struct gendisk *disk)
        blk_integrity_del(disk);
        disk_del_events(disk);
 
+       /*
+        * Block lookups of the disk until all bdevs are unhashed and the
+        * disk is marked as dead (GENHD_FL_UP cleared).
+        */
+       down_write(&disk->lookup_sem);
        /* invalidate stuff */
        disk_part_iter_init(&piter, disk,
                             DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE);
@@ -731,6 +736,7 @@ void del_gendisk(struct gendisk *disk)
        bdev_unhash_inode(disk_devt(disk));
        set_capacity(disk, 0);
        disk->flags &= ~GENHD_FL_UP;
+       up_write(&disk->lookup_sem);
 
        if (!(disk->flags & GENHD_FL_HIDDEN))
                sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
@@ -809,16 +815,28 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
 
                spin_lock_bh(&ext_devt_lock);
                part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
-               if (part && get_disk(part_to_disk(part))) {
+               if (part && get_disk_and_module(part_to_disk(part))) {
                        *partno = part->partno;
                        disk = part_to_disk(part);
                }
                spin_unlock_bh(&ext_devt_lock);
        }
 
-       if (disk && unlikely(disk->flags & GENHD_FL_HIDDEN)) {
-               put_disk(disk);
+       if (!disk)
+               return NULL;
+
+       /*
+        * Synchronize with del_gendisk() to not return a disk that is being
+        * destroyed.
+        */
+       down_read(&disk->lookup_sem);
+       if (unlikely((disk->flags & GENHD_FL_HIDDEN) ||
+                    !(disk->flags & GENHD_FL_UP))) {
+               up_read(&disk->lookup_sem);
+               put_disk_and_module(disk);
                disk = NULL;
+       } else {
+               up_read(&disk->lookup_sem);
        }
        return disk;
 }
@@ -1418,6 +1436,7 @@ struct gendisk *__alloc_disk_node(int minors, int node_id)
                        kfree(disk);
                        return NULL;
                }
+               init_rwsem(&disk->lookup_sem);
                disk->node_id = node_id;
                if (disk_expand_part_tbl(disk, 0)) {
                        free_part_stats(&disk->part0);
@@ -1453,7 +1472,7 @@ struct gendisk *__alloc_disk_node(int minors, int node_id)
 }
 EXPORT_SYMBOL(__alloc_disk_node);
 
-struct kobject *get_disk(struct gendisk *disk)
+struct kobject *get_disk_and_module(struct gendisk *disk)
 {
        struct module *owner;
        struct kobject *kobj;
@@ -1471,17 +1490,30 @@ struct kobject *get_disk(struct gendisk *disk)
        return kobj;
 
 }
-
-EXPORT_SYMBOL(get_disk);
+EXPORT_SYMBOL(get_disk_and_module);
 
 void put_disk(struct gendisk *disk)
 {
        if (disk)
                kobject_put(&disk_to_dev(disk)->kobj);
 }
-
 EXPORT_SYMBOL(put_disk);
 
+/*
+ * This is a counterpart of get_disk_and_module() and thus also of
+ * get_gendisk().
+ */
+void put_disk_and_module(struct gendisk *disk)
+{
+       if (disk) {
+               struct module *owner = disk->fops->owner;
+
+               put_disk(disk);
+               module_put(owner);
+       }
+}
+EXPORT_SYMBOL(put_disk_and_module);
+
 static void set_disk_ro_uevent(struct gendisk *gd, int ro)
 {
        char event[] = "DISK_RO=1";
index 1668506d8ed80607b6e3c279fa22195fb73571de..3884d810efd27fc73bb07659b91296ea46265252 100644 (file)
@@ -225,7 +225,7 @@ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
 
        if (start + len > i_size_read(bdev->bd_inode))
                return -EINVAL;
-       truncate_inode_pages_range(mapping, start, start + len);
+       truncate_inode_pages_range(mapping, start, start + len - 1);
        return blkdev_issue_discard(bdev, start >> 9, len >> 9,
                                    GFP_KERNEL, flags);
 }
index f95c60774ce8ca613417d3ccf54bee52010752ee..0d6d25e32e1f44fda0049bb3e213b85a3debfa1f 100644 (file)
@@ -833,6 +833,7 @@ static struct elevator_type kyber_sched = {
                .limit_depth = kyber_limit_depth,
                .prepare_request = kyber_prepare_request,
                .finish_request = kyber_finish_request,
+               .requeue_request = kyber_finish_request,
                .completed_request = kyber_completed_request,
                .dispatch_request = kyber_dispatch_request,
                .has_work = kyber_has_work,
index c56f211c84400662f3e18c137b51c3f5406e20fd..8ec0ba9f538619f177eef3416711689323161a65 100644 (file)
@@ -535,13 +535,22 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
        spin_unlock(&dd->lock);
 }
 
+/*
+ * Nothing to do here. This is defined only to ensure that the .finish_request
+ * method is called upon request completion.
+ */
+static void dd_prepare_request(struct request *rq, struct bio *bio)
+{
+}
+
 /*
  * For zoned block devices, write unlock the target zone of
  * completed write requests. Do this while holding the zone lock
  * spinlock so that the zone is never unlocked while deadline_fifo_request()
- * while deadline_next_request() are executing.
+ * or deadline_next_request() are executing. This function is called for
+ * all requests, whether or not these requests complete successfully.
  */
-static void dd_completed_request(struct request *rq)
+static void dd_finish_request(struct request *rq)
 {
        struct request_queue *q = rq->q;
 
@@ -756,7 +765,8 @@ static struct elevator_type mq_deadline = {
        .ops.mq = {
                .insert_requests        = dd_insert_requests,
                .dispatch_request       = dd_dispatch_request,
-               .completed_request      = dd_completed_request,
+               .prepare_request        = dd_prepare_request,
+               .finish_request         = dd_finish_request,
                .next_request           = elv_rb_latter_request,
                .former_request         = elv_rb_former_request,
                .bio_merge              = dd_bio_merge,
index 91622db9aedffd997947642a5872802312346869..08dabcd8b6aefc6844bbb9d9e9c001e6ff71fb33 100644 (file)
@@ -51,6 +51,12 @@ const char *bdevname(struct block_device *bdev, char *buf)
 
 EXPORT_SYMBOL(bdevname);
 
+const char *bio_devname(struct bio *bio, char *buf)
+{
+       return disk_name(bio->bi_disk, bio->bi_partno, buf);
+}
+EXPORT_SYMBOL(bio_devname);
+
 /*
  * There's very little reason to use this, you should really
  * have a struct block_device just about everywhere and use
index 9ed51d0c6b1d171fc2eab785ef854b64f93721fe..e4929eec547fc4141a6b078e79d5fe2688a20198 100644 (file)
@@ -490,7 +490,7 @@ static int opal_discovery0_end(struct opal_dev *dev)
 
        if (!found_com_id) {
                pr_debug("Could not find OPAL comid for device. Returning early\n");
-               return -EOPNOTSUPP;;
+               return -EOPNOTSUPP;
        }
 
        dev->comid = comid;
index 73fd99098ad7ce5f4eb12eef6111500f4a89063e..753b703ef0ef8d711e04150467e5fb3643bce799 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include "blacklist.h"
 
-const char __initdata *const blacklist_hashes[] = {
+const char __initconst *const blacklist_hashes[] = {
        NULL
 };
index 1f4e25f10049c2645c2a421b07bb21fdaf1c0857..598906b1e28d37e264058276a2dfd3d9263af4b1 100644 (file)
@@ -106,6 +106,7 @@ static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7,
                pr_devel("sinfo %u: Direct signer is key %x\n",
                         sinfo->index, key_serial(key));
                x509 = NULL;
+               sig = sinfo->sig;
                goto matched;
        }
        if (PTR_ERR(key) != -ENOKEY)
index 39e6de0c2761fb03efab8f144fa1d1693e7cb035..97c77f66b20d680f9fc40dfcd6154881ef983a62 100644 (file)
@@ -270,7 +270,7 @@ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7,
                                sinfo->index);
                        return 0;
                }
-               ret = public_key_verify_signature(p->pub, p->sig);
+               ret = public_key_verify_signature(p->pub, x509->sig);
                if (ret < 0)
                        return ret;
                x509->signer = p;
@@ -366,8 +366,7 @@ static int pkcs7_verify_one(struct pkcs7_message *pkcs7,
  *
  *  (*) -EBADMSG if some part of the message was invalid, or:
  *
- *  (*) 0 if no signature chains were found to be blacklisted or to contain
- *     unsupported crypto, or:
+ *  (*) 0 if a signature chain passed verification, or:
  *
  *  (*) -EKEYREJECTED if a blacklisted key was encountered, or:
  *
@@ -423,8 +422,11 @@ int pkcs7_verify(struct pkcs7_message *pkcs7,
 
        for (sinfo = pkcs7->signed_infos; sinfo; sinfo = sinfo->next) {
                ret = pkcs7_verify_one(pkcs7, sinfo);
-               if (sinfo->blacklisted && actual_ret == -ENOPKG)
-                       actual_ret = -EKEYREJECTED;
+               if (sinfo->blacklisted) {
+                       if (actual_ret == -ENOPKG)
+                               actual_ret = -EKEYREJECTED;
+                       continue;
+               }
                if (ret < 0) {
                        if (ret == -ENOPKG) {
                                sinfo->unsupported_crypto = true;
index de996586762a83c0b3214aaf5fa561bde92f65c0..e929fe1e4106c7dfaff7c2bcf3186952f449b764 100644 (file)
@@ -79,9 +79,11 @@ int public_key_verify_signature(const struct public_key *pkey,
 
        BUG_ON(!pkey);
        BUG_ON(!sig);
-       BUG_ON(!sig->digest);
        BUG_ON(!sig->s);
 
+       if (!sig->digest)
+               return -ENOPKG;
+
        alg_name = sig->pkey_algo;
        if (strcmp(sig->pkey_algo, "rsa") == 0) {
                /* The data wangled by the RSA algorithm is typically padded
index 86fb6850895283d28659127f6934b8611bcef636..7c93c7728454a224c7348cfd36df868ba5067722 100644 (file)
@@ -67,8 +67,9 @@ __setup("ca_keys=", ca_keys_setup);
  *
  * Returns 0 if the new certificate was accepted, -ENOKEY if we couldn't find a
  * matching parent certificate in the trusted list, -EKEYREJECTED if the
- * signature check fails or the key is blacklisted and some other error if
- * there is a matching certificate but the signature check cannot be performed.
+ * signature check fails or the key is blacklisted, -ENOPKG if the signature
+ * uses unsupported crypto, or some other error if there is a matching
+ * certificate but the signature check cannot be performed.
  */
 int restrict_link_by_signature(struct key *dest_keyring,
                               const struct key_type *type,
@@ -88,6 +89,8 @@ int restrict_link_by_signature(struct key *dest_keyring,
                return -EOPNOTSUPP;
 
        sig = payload->data[asym_auth];
+       if (!sig)
+               return -ENOPKG;
        if (!sig->auth_ids[0] && !sig->auth_ids[1])
                return -ENOKEY;
 
@@ -139,6 +142,8 @@ static int key_or_keyring_common(struct key *dest_keyring,
                return -EOPNOTSUPP;
 
        sig = payload->data[asym_auth];
+       if (!sig)
+               return -ENOPKG;
        if (!sig->auth_ids[0] && !sig->auth_ids[1])
                return -ENOKEY;
 
@@ -222,9 +227,9 @@ static int key_or_keyring_common(struct key *dest_keyring,
  *
  * Returns 0 if the new certificate was accepted, -ENOKEY if we
  * couldn't find a matching parent certificate in the trusted list,
- * -EKEYREJECTED if the signature check fails, and some other error if
- * there is a matching certificate but the signature check cannot be
- * performed.
+ * -EKEYREJECTED if the signature check fails, -ENOPKG if the signature uses
+ * unsupported crypto, or some other error if there is a matching certificate
+ * but the signature check cannot be performed.
  */
 int restrict_link_by_key_or_keyring(struct key *dest_keyring,
                                    const struct key_type *type,
@@ -249,9 +254,9 @@ int restrict_link_by_key_or_keyring(struct key *dest_keyring,
  *
  * Returns 0 if the new certificate was accepted, -ENOKEY if we
  * couldn't find a matching parent certificate in the trusted list,
- * -EKEYREJECTED if the signature check fails, and some other error if
- * there is a matching certificate but the signature check cannot be
- * performed.
+ * -EKEYREJECTED if the signature check fails, -ENOPKG if the signature uses
+ * unsupported crypto, or some other error if there is a matching certificate
+ * but the signature check cannot be performed.
  */
 int restrict_link_by_key_or_keyring_chain(struct key *dest_keyring,
                                          const struct key_type *type,
index 15e3d3c2260ddc915f49732f1076f3a7ee0f540e..764b63a5aadefe5c73ef849ee5b377b134e3841e 100644 (file)
@@ -1991,8 +1991,14 @@ static void binder_send_failed_reply(struct binder_transaction *t,
                                        &target_thread->reply_error.work);
                                wake_up_interruptible(&target_thread->wait);
                        } else {
-                               WARN(1, "Unexpected reply error: %u\n",
-                                               target_thread->reply_error.cmd);
+                               /*
+                                * Cannot get here for normal operation, but
+                                * we can if multiple synchronous transactions
+                                * are sent without blocking for responses.
+                                * Just ignore the 2nd error in this case.
+                                */
+                               pr_warn("Unexpected reply error: %u\n",
+                                       target_thread->reply_error.cmd);
                        }
                        binder_inner_proc_unlock(target_thread->proc);
                        binder_thread_dec_tmpref(target_thread);
@@ -2193,7 +2199,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
        int debug_id = buffer->debug_id;
 
        binder_debug(BINDER_DEBUG_TRANSACTION,
-                    "%d buffer release %d, size %zd-%zd, failed at %p\n",
+                    "%d buffer release %d, size %zd-%zd, failed at %pK\n",
                     proc->pid, buffer->debug_id,
                     buffer->data_size, buffer->offsets_size, failed_at);
 
@@ -3705,7 +3711,7 @@ static int binder_thread_write(struct binder_proc *proc,
                                }
                        }
                        binder_debug(BINDER_DEBUG_DEAD_BINDER,
-                                    "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
+                                    "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
                                     proc->pid, thread->pid, (u64)cookie,
                                     death);
                        if (death == NULL) {
@@ -4376,6 +4382,15 @@ static int binder_thread_release(struct binder_proc *proc,
 
        binder_inner_proc_unlock(thread->proc);
 
+       /*
+        * This is needed to avoid races between wake_up_poll() above and
+        * ep_remove_waitqueue() called for other reasons (e.g. the epoll file
+        * descriptor being closed); ep_remove_waitqueue() holds an RCU read
+        * lock, so we can be sure it's done after calling synchronize_rcu().
+        */
+       if (thread->looper & BINDER_LOOPER_STATE_POLL)
+               synchronize_rcu();
+
        if (send_reply)
                binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
        binder_release_work(proc, &thread->todo);
@@ -4391,6 +4406,8 @@ static __poll_t binder_poll(struct file *filp,
        bool wait_for_proc_work;
 
        thread = binder_get_thread(proc);
+       if (!thread)
+               return POLLERR;
 
        binder_inner_proc_lock(thread->proc);
        thread->looper |= BINDER_LOOPER_STATE_POLL;
@@ -5034,7 +5051,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
        spin_lock(&t->lock);
        to_proc = t->to_proc;
        seq_printf(m,
-                  "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
+                  "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
                   prefix, t->debug_id, t,
                   t->from ? t->from->proc->pid : 0,
                   t->from ? t->from->pid : 0,
@@ -5058,7 +5075,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
        }
        if (buffer->target_node)
                seq_printf(m, " node %d", buffer->target_node->debug_id);
-       seq_printf(m, " size %zd:%zd data %p\n",
+       seq_printf(m, " size %zd:%zd data %pK\n",
                   buffer->data_size, buffer->offsets_size,
                   buffer->data);
 }
index e5aa62fcf5a8380d02866b7d9c31aa0a72f6573e..3aaf6af3ec23d7d54d5f45deb97ede64bec7d3db 100644 (file)
@@ -1758,7 +1758,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
        if (unit[drive].type->code == FD_NODRIVE)
                return NULL;
        *part = 0;
-       return get_disk(unit[drive].gendisk);
+       return get_disk_and_module(unit[drive].gendisk);
 }
 
 static int __init amiga_floppy_probe(struct platform_device *pdev)
index 8bc3b9fd8dd2be0df64d166dbc4c2b65eef03305..dfb2c2622e5a64d77e85ca9d14059c25f1840878 100644 (file)
@@ -1917,7 +1917,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
        if (drive >= FD_MAX_UNITS || type > NUM_DISK_MINORS)
                return NULL;
        *part = 0;
-       return get_disk(unit[drive].disk);
+       return get_disk_and_module(unit[drive].disk);
 }
 
 static int __init atari_floppy_init (void)
index 8028a3a7e7fd63cabb8ce47c021e90e26d09a7ba..deea78e485da05e90436d78c334da2b40e9c6bce 100644 (file)
@@ -456,7 +456,7 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
 
        mutex_lock(&brd_devices_mutex);
        brd = brd_init_one(MINOR(dev) / max_part, &new);
-       kobj = brd ? get_disk(brd->brd_disk) : NULL;
+       kobj = brd ? get_disk_and_module(brd->brd_disk) : NULL;
        mutex_unlock(&brd_devices_mutex);
 
        if (new)
index eae484acfbbc1d4c8228b5524b4d30070793620a..8ec7235fc93be49c48291725aec29f2c36818ade 100644 (file)
@@ -4505,7 +4505,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
        if (((*part >> 2) & 0x1f) >= ARRAY_SIZE(floppy_type))
                return NULL;
        *part = 0;
-       return get_disk(disks[drive]);
+       return get_disk_and_module(disks[drive]);
 }
 
 static int __init do_floppy_init(void)
index d5fe720cf14940b668f8764de2bad6cf95549528..87855b5123a6307cb014f78af2ae07b43ef77cf4 100644 (file)
@@ -1922,7 +1922,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
        if (err < 0)
                kobj = NULL;
        else
-               kobj = get_disk(lo->lo_disk);
+               kobj = get_disk_and_module(lo->lo_disk);
        mutex_unlock(&loop_index_mutex);
 
        *part = 0;
index 5f2a4240a204d54fc6fe87e569dc6165d5190530..86258b00a1d4d1960a67fb021babc668884a72a7 100644 (file)
@@ -1591,7 +1591,7 @@ again:
                        if (new_index < 0) {
                                mutex_unlock(&nbd_index_mutex);
                                printk(KERN_ERR "nbd: failed to add new device\n");
-                               return ret;
+                               return new_index;
                        }
                        nbd = idr_find(&nbd_index_idr, new_index);
                }
index 531a0915066b313462d6208359ecea4102397215..c61d20c9f3f8092f3aed69d859f137e7d67aef33 100644 (file)
@@ -1122,7 +1122,7 @@ static int pkt_start_recovery(struct packet_data *pkt)
        pkt->sector = new_sector;
 
        bio_reset(pkt->bio);
-       bio_set_set(pkt->bio, pd->bdev);
+       bio_set_dev(pkt->bio, pd->bdev);
        bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);
        pkt->bio->bi_iter.bi_sector = new_sector;
        pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
index 84434d3ea19b8f3a7500972219b952a6b84072a2..64e066eba72e03abec38d49036d0f3677aafb2a3 100644 (file)
@@ -799,7 +799,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
                return NULL;
 
        *part = 0;
-       return get_disk(swd->unit[drive].disk);
+       return get_disk_and_module(swd->unit[drive].disk);
 }
 
 static int swim_add_floppy(struct swim_priv *swd, enum drive_location location)
index 41c95c9b2ab436e5917eb6f83f055b91ee521044..8f9130ab5887273d8feba518fabb76524fa7b579 100644 (file)
@@ -332,7 +332,7 @@ static const struct block_device_operations z2_fops =
 static struct kobject *z2_find(dev_t dev, int *part, void *data)
 {
        *part = 0;
-       return get_disk(z2ram_gendisk);
+       return get_disk_and_module(z2ram_gendisk);
 }
 
 static struct request_queue *z2_queue;
index 4d46003c46cfe5f4599a17cf726cb3d86fd8399c..cdaeeea7999cd20759dca0d5cd7f88a3ff431440 100644 (file)
@@ -630,7 +630,7 @@ static int sysc_init_dts_quirks(struct sysc *ddata)
        for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) {
                prop = of_get_property(np, sysc_dts_quirks[i].name, &len);
                if (!prop)
-                       break;
+                       continue;
 
                ddata->cfg.quirks |= sysc_dts_quirks[i].mask;
        }
index 4d1dc8b46877ca497a198a3d988d060b9235100a..f95b9c75175bcf27825b16891ca405886e51c379 100644 (file)
@@ -457,7 +457,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
                            size_t count)
 {
        int size = 0;
-       int expected;
+       u32 expected;
 
        if (!chip)
                return -EBUSY;
@@ -474,7 +474,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
        }
 
        expected = be32_to_cpu(*(__be32 *)(buf + 2));
-       if (expected > count) {
+       if (expected > count || expected < TPM_HEADER_SIZE) {
                size = -EIO;
                goto out;
        }
index 76df4fbcf089c2371b2ad8b08f31bf446b5c488b..9e80a953d6933ea05668636b6abcd444eff72b0a 100644 (file)
@@ -1190,6 +1190,10 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
                        break;
 
                recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len);
+               if (recd > num_bytes) {
+                       total = -EFAULT;
+                       break;
+               }
 
                rlength = be32_to_cpu(tpm_cmd.header.out.length);
                if (rlength < offsetof(struct tpm_getrandom_out, rng_data) +
index c17e75348a991e355236f2040f931e940541049f..a700f8f9ead797df39de4b5ef946fb5c2c65c95b 100644 (file)
@@ -683,6 +683,10 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
        if (!rc) {
                data_len = be16_to_cpup(
                        (__be16 *) &buf.data[TPM_HEADER_SIZE + 4]);
+               if (data_len < MIN_KEY_SIZE ||  data_len > MAX_KEY_SIZE + 1) {
+                       rc = -EFAULT;
+                       goto out;
+               }
 
                rlength = be32_to_cpu(((struct tpm2_cmd *)&buf)
                                        ->header.out.length);
index c1dd39eaaeebb1f474d2d9f8b0d1e98e3559f27f..6116cd05e2287999c69ea4c5432098f159a44379 100644 (file)
@@ -473,7 +473,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
 static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 {
        int size = 0;
-       int expected, status;
+       int status;
+       u32 expected;
 
        if (count < TPM_HEADER_SIZE) {
                size = -EIO;
@@ -488,7 +489,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
        }
 
        expected = be32_to_cpu(*(__be32 *)(buf + 2));
-       if ((size_t) expected > count) {
+       if (((size_t) expected > count) || (expected < TPM_HEADER_SIZE)) {
                size = -EIO;
                goto out;
        }
index c6428771841f814a891719fab16e92d7e0723fc7..caa86b19c76dd7007a3975756dec8fe069a31db3 100644 (file)
@@ -281,7 +281,11 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
        struct device *dev = chip->dev.parent;
        struct i2c_client *client = to_i2c_client(dev);
        s32 rc;
-       int expected, status, burst_count, retries, size = 0;
+       int status;
+       int burst_count;
+       int retries;
+       int size = 0;
+       u32 expected;
 
        if (count < TPM_HEADER_SIZE) {
                i2c_nuvoton_ready(chip);    /* return to idle */
@@ -323,7 +327,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
                 * to machine native
                 */
                expected = be32_to_cpu(*(__be32 *) (buf + 2));
-               if (expected > count) {
+               if (expected > count || expected < size) {
                        dev_err(dev, "%s() expected > count\n", __func__);
                        size = -EIO;
                        continue;
index 183a5f54d875d072b98e7aead6563f3dd3d95a62..da074e3db19be5c895f80a403aabb992e9a14d29 100644 (file)
@@ -270,7 +270,8 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 {
        struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
        int size = 0;
-       int expected, status;
+       int status;
+       u32 expected;
 
        if (count < TPM_HEADER_SIZE) {
                size = -EIO;
@@ -285,7 +286,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
        }
 
        expected = be32_to_cpu(*(__be32 *) (buf + 2));
-       if (expected > count) {
+       if (expected > count || expected < TPM_HEADER_SIZE) {
                size = -EIO;
                goto out;
        }
index a04808a21d4ec9eef5d3c6d5e7ed140945d1001b..65e18c86d9b9c1724394f63861752f5bd74645b7 100644 (file)
@@ -205,12 +205,12 @@ static int __init gic_clocksource_of_init(struct device_node *node)
        } else if (of_property_read_u32(node, "clock-frequency",
                                        &gic_frequency)) {
                pr_err("GIC frequency not specified.\n");
-               return -EINVAL;;
+               return -EINVAL;
        }
        gic_timer_irq = irq_of_parse_and_map(node, 0);
        if (!gic_timer_irq) {
                pr_err("GIC timer IRQ not specified.\n");
-               return -EINVAL;;
+               return -EINVAL;
        }
 
        ret = __gic_clocksource_init();
index 2a3fe83ec3377cc160570fc9ed6e0e5375f57f7a..3b56ea3f52afc8ee47ca21c38cd0c6aa7ec80021 100644 (file)
@@ -334,7 +334,7 @@ static int __init sun5i_timer_init(struct device_node *node)
        timer_base = of_io_request_and_map(node, 0, of_node_full_name(node));
        if (IS_ERR(timer_base)) {
                pr_err("Can't map registers\n");
-               return PTR_ERR(timer_base);;
+               return PTR_ERR(timer_base);
        }
 
        irq = irq_of_parse_and_map(node, 0);
index 3a88e33b0cfed86e487888bc203d9823fcdeb9c0..fb586e09682d84704a28748673cdc4c571c28105 100644 (file)
@@ -44,10 +44,10 @@ config ARM_DT_BL_CPUFREQ
 
 config ARM_SCPI_CPUFREQ
        tristate "SCPI based CPUfreq driver"
-       depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
+       depends on ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
        help
-         This adds the CPUfreq driver support for ARM big.LITTLE platforms
-         using SCPI protocol for CPU power management.
+         This adds the CPUfreq driver support for ARM platforms using SCPI
+         protocol for CPU power management.
 
          This driver uses SCPI Message Protocol driver to interact with the
          firmware providing the CPU DVFS functionality.
index 7b596fa38ad2de4b7d9cf075ce69a876b4f2e35f..6bebc1f9f55aa65dc1718e47ddea4c0439b42cb0 100644 (file)
@@ -351,7 +351,13 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
 static int s3c_cpufreq_init(struct cpufreq_policy *policy)
 {
        policy->clk = clk_arm;
-       return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency);
+
+       policy->cpuinfo.transition_latency = cpu_cur.info->latency;
+
+       if (ftab)
+               return cpufreq_table_validate_and_show(policy, ftab);
+
+       return 0;
 }
 
 static int __init s3c_cpufreq_initclks(void)
index c32a833e1b00542fd3f49bf758e96f57ee6fa478..d300a163945f53476b4a751f3c208d423717d55f 100644 (file)
@@ -51,15 +51,23 @@ static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
 static int
 scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
 {
+       unsigned long freq = policy->freq_table[index].frequency;
        struct scpi_data *priv = policy->driver_data;
-       u64 rate = policy->freq_table[index].frequency * 1000;
+       u64 rate = freq * 1000;
        int ret;
 
        ret = clk_set_rate(priv->clk, rate);
-       if (!ret && (clk_get_rate(priv->clk) != rate))
-               ret = -EIO;
 
-       return ret;
+       if (ret)
+               return ret;
+
+       if (clk_get_rate(priv->clk) != rate)
+               return -EIO;
+
+       arch_set_freq_scale(policy->related_cpus, freq,
+                           policy->cpuinfo.max_freq);
+
+       return 0;
 }
 
 static int
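
The scpi-cpufreq hunk untangles the error handling in set_target(): a clk_set_rate() failure is propagated as-is, -EIO is reported only when the clock really settled at a different rate, and only after that confirmation is the new frequency published to the scheduler via arch_set_freq_scale(). A sketch of that set / verify / publish ordering, with hypothetical shims standing in for the clk and frequency-scale calls from the hunk:

    #include <errno.h>

    int  clk_set(unsigned long long rate);           /* hypothetical shim */
    unsigned long long clk_get(void);                /* hypothetical shim */
    void publish_freq_scale(unsigned long freq_khz); /* hypothetical shim */

    static int set_target(unsigned long freq_khz)
    {
            unsigned long long rate = (unsigned long long)freq_khz * 1000;
            int ret = clk_set(rate);

            if (ret)
                    return ret;             /* keep the real error code */
            if (clk_get() != rate)
                    return -EIO;            /* clock silently settled elsewhere */
            publish_freq_scale(freq_khz);   /* only after the rate is confirmed */
            return 0;
    }
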
index fcfa5b1eae6169b44398b20442d95532d924821c..b3afb6cc9d72278b35eadb239e280b5aa311c3f9 100644 (file)
@@ -211,7 +211,7 @@ static int __sev_platform_shutdown_locked(int *error)
 {
        int ret;
 
-       ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, 0, error);
+       ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
        if (ret)
                return ret;
 
@@ -271,7 +271,7 @@ static int sev_ioctl_do_reset(struct sev_issue_cmd *argp)
                        return rc;
        }
 
-       return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, 0, &argp->error);
+       return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error);
 }
 
 static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
@@ -299,7 +299,7 @@ static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
                        return rc;
        }
 
-       return __sev_do_cmd_locked(cmd, 0, &argp->error);
+       return __sev_do_cmd_locked(cmd, NULL, &argp->error);
 }
 
 static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
@@ -624,7 +624,7 @@ EXPORT_SYMBOL_GPL(sev_guest_decommission);
 
 int sev_guest_df_flush(int *error)
 {
-       return sev_do_cmd(SEV_CMD_DF_FLUSH, 0, error);
+       return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error);
 }
 EXPORT_SYMBOL_GPL(sev_guest_df_flush);
 
index 188f44b7eb27edd2f45977b4e59ca17f2cd3f997..5d64c08b7f47ef412916b804256716f6f8016779 100644 (file)
@@ -1922,15 +1922,21 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
        uint32_t aes_control;
        unsigned long flags;
        int err;
+       u8 *iv;
 
        aes_control = SSS_AES_KEY_CHANGE_MODE;
        if (mode & FLAGS_AES_DECRYPT)
                aes_control |= SSS_AES_MODE_DECRYPT;
 
-       if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC)
+       if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
                aes_control |= SSS_AES_CHAIN_MODE_CBC;
-       else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR)
+               iv = req->info;
+       } else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
                aes_control |= SSS_AES_CHAIN_MODE_CTR;
+               iv = req->info;
+       } else {
+               iv = NULL; /* AES_ECB */
+       }
 
        if (dev->ctx->keylen == AES_KEYSIZE_192)
                aes_control |= SSS_AES_KEY_SIZE_192;
@@ -1961,7 +1967,7 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
                goto outdata_error;
 
        SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
-       s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen);
+       s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen);
 
        s5p_set_dma_indata(dev,  dev->sg_src);
        s5p_set_dma_outdata(dev, dev->sg_dst);
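
In the s5p-sss hunk the IV pointer is now chosen per mode: CBC and CTR take it from req->info, ECB passes NULL, because an ECB request carries no IV and dereferencing req->info for it reads garbage (or a NULL pointer). A small sketch of the selection, with hw_set_key_iv() as a hypothetical stand-in for s5p_set_aes():

    #include <stdint.h>
    #include <stddef.h>

    enum aes_mode { AES_ECB, AES_CBC, AES_CTR };

    void hw_set_key_iv(const uint8_t *key, size_t keylen, const uint8_t *iv); /* hypothetical */

    static void program_key(enum aes_mode mode, const uint8_t *key,
                            size_t keylen, const uint8_t *req_iv)
    {
            const uint8_t *iv = NULL;       /* ECB: no IV at all */

            if (mode == AES_CBC || mode == AES_CTR)
                    iv = req_iv;
            hw_set_key_iv(key, keylen, iv);
    }
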
index f34430f99fd805414085fea26540f3c152dd6b0c..872100215ca00f0f4703c025b8e52e637c85c710 100644 (file)
@@ -279,7 +279,7 @@ static const u32 correrrthrsld[] = {
  * sbridge structs
  */
 
-#define NUM_CHANNELS           4       /* Max channels per MC */
+#define NUM_CHANNELS           6       /* Max channels per MC */
 #define MAX_DIMMS              3       /* Max DIMMS per channel */
 #define KNL_MAX_CHAS           38      /* KNL max num. of Cache Home Agents */
 #define KNL_MAX_CHANNELS       6       /* KNL max num. of PCI channels */
index 0a44d43802fe1221971d5e61fc0810e2090627cc..3ec4c715e24053c9cbe8ea770d0f4bdfbb69dffb 100644 (file)
@@ -1,7 +1,6 @@
 /*
  * extcon-axp288.c - X-Power AXP288 PMIC extcon cable detection driver
  *
- * Copyright (C) 2016-2017 Hans de Goede <hdegoede@redhat.com>
  * Copyright (C) 2015 Intel Corporation
  * Author: Ramakrishna Pallala <ramakrishna.pallala@intel.com>
  *
@@ -98,15 +97,13 @@ struct axp288_extcon_info {
        struct device *dev;
        struct regmap *regmap;
        struct regmap_irq_chip_data *regmap_irqc;
-       struct delayed_work det_work;
        int irq[EXTCON_IRQ_END];
        struct extcon_dev *edev;
        unsigned int previous_cable;
-       bool first_detect_done;
 };
 
 /* Power up/down reason string array */
-static char *axp288_pwr_up_down_info[] = {
+static const char * const axp288_pwr_up_down_info[] = {
        "Last wake caused by user pressing the power button",
        "Last wake caused by a charger insertion",
        "Last wake caused by a battery insertion",
@@ -124,7 +121,7 @@ static char *axp288_pwr_up_down_info[] = {
  */
 static void axp288_extcon_log_rsi(struct axp288_extcon_info *info)
 {
-       char **rsi;
+       const char * const *rsi;
        unsigned int val, i, clear_mask = 0;
        int ret;
 
@@ -140,25 +137,6 @@ static void axp288_extcon_log_rsi(struct axp288_extcon_info *info)
        regmap_write(info->regmap, AXP288_PS_BOOT_REASON_REG, clear_mask);
 }
 
-static void axp288_chrg_detect_complete(struct axp288_extcon_info *info)
-{
-       /*
-        * We depend on other drivers to do things like mux the data lines,
-        * enable/disable vbus based on the id-pin, etc. Sometimes the BIOS has
-        * not set these things up correctly resulting in the initial charger
-        * cable type detection giving a wrong result and we end up not charging
-        * or charging at only 0.5A.
-        *
-        * So we schedule a second cable type detection after 2 seconds to
-        * give the other drivers time to load and do their thing.
-        */
-       if (!info->first_detect_done) {
-               queue_delayed_work(system_wq, &info->det_work,
-                                  msecs_to_jiffies(2000));
-               info->first_detect_done = true;
-       }
-}
-
 static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
 {
        int ret, stat, cfg, pwr_stat;
@@ -223,8 +201,6 @@ no_vbus:
                info->previous_cable = cable;
        }
 
-       axp288_chrg_detect_complete(info);
-
        return 0;
 
 dev_det_ret:
@@ -246,11 +222,8 @@ static irqreturn_t axp288_extcon_isr(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-static void axp288_extcon_det_work(struct work_struct *work)
+static void axp288_extcon_enable(struct axp288_extcon_info *info)
 {
-       struct axp288_extcon_info *info =
-               container_of(work, struct axp288_extcon_info, det_work.work);
-
        regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG,
                                                BC_GLOBAL_RUN, 0);
        /* Enable the charger detection logic */
@@ -272,7 +245,6 @@ static int axp288_extcon_probe(struct platform_device *pdev)
        info->regmap = axp20x->regmap;
        info->regmap_irqc = axp20x->regmap_irqc;
        info->previous_cable = EXTCON_NONE;
-       INIT_DELAYED_WORK(&info->det_work, axp288_extcon_det_work);
 
        platform_set_drvdata(pdev, info);
 
@@ -318,7 +290,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)
        }
 
        /* Start charger cable type detection */
-       queue_delayed_work(system_wq, &info->det_work, 0);
+       axp288_extcon_enable(info);
 
        return 0;
 }
index c8691b5a9cb00c0c77f95d278db541fa97e4f42b..191e99f06a9a5d55a2433b3daa31978fa454e533 100644 (file)
@@ -153,8 +153,9 @@ static int int3496_probe(struct platform_device *pdev)
                return ret;
        }
 
-       /* queue initial processing of id-pin */
+       /* process id-pin so that we start with the right status */
        queue_delayed_work(system_wq, &data->work, 0);
+       flush_delayed_work(&data->work);
 
        platform_set_drvdata(pdev, data);
 
index 564bb7a31da43b4e924daf7501c36ec9a4eb8fc2..84e5a9df234433b430fc950a04d80dc9cd43bad8 100644 (file)
@@ -241,6 +241,19 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
 
                desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx,
                                                &of_flags);
+               /*
+                * -EPROBE_DEFER in our case means that we found a
+                * valid GPIO property, but no controller has been
+                * registered so far.
+                *
+                * This means we don't need to look any further for
+                * alternate name conventions, and we should really
+                * preserve the return code for our user to be able to
+                * retry probing later.
+                */
+               if (IS_ERR(desc) && PTR_ERR(desc) == -EPROBE_DEFER)
+                       return desc;
+
                if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT))
                        break;
        }
@@ -250,7 +263,7 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
                desc = of_find_spi_gpio(dev, con_id, &of_flags);
 
        /* Special handling for regulator GPIOs if used */
-       if (IS_ERR(desc))
+       if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER)
                desc = of_find_regulator_gpio(dev, con_id, &of_flags);
 
        if (IS_ERR(desc))
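
The gpiolib-of hunk makes -EPROBE_DEFER terminal for the whole lookup: it means a valid GPIO property was found but its controller has not registered yet, so trying the next naming convention (or the SPI/regulator fallbacks further down) would only mask the deferral and stop the consumer from ever being retried. A plain-C sketch of the triage, with a stubbed per-name lookup standing in for of_get_named_gpiod_flags():

    #include <errno.h>
    #include <stddef.h>

    #define ERR_DEFER  (-1000)  /* stands in for -EPROBE_DEFER */

    static int lookup_one(const char *name) { (void)name; return -ENOENT; } /* stub */

    static int find_gpio(const char *const *names, size_t n)
    {
            int ret = -ENOENT;

            for (size_t i = 0; i < n; i++) {
                    ret = lookup_one(names[i]);
                    if (ret == ERR_DEFER)
                            return ret;     /* property exists, controller not ready: stop here */
                    if (ret != -ENOENT)
                            break;          /* found it, or a real error worth reporting */
            }
            return ret;                     /* only fall back to other schemes on -ENOENT */
    }
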
index d5a2eefd6c3e9c634597dba673a6ee25a60c830d..74edba18b1596504ab76a45e71146dc7084e6c13 100644 (file)
@@ -1156,7 +1156,7 @@ static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
 /*
  * Writeback
  */
-#define AMDGPU_MAX_WB 512      /* Reserve at most 512 WB slots for amdgpu-owned rings. */
+#define AMDGPU_MAX_WB 128      /* Reserve at most 128 WB slots for amdgpu-owned rings. */
 
 struct amdgpu_wb {
        struct amdgpu_bo        *wb_obj;
index 8ca3783f2debefbc32f1ad3d6bc475cfbc16b986..74d2efaec52f86a5dac357d2c52f775db67bdb83 100644 (file)
@@ -736,9 +736,11 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
        enum drm_connector_status ret = connector_status_disconnected;
        int r;
 
-       r = pm_runtime_get_sync(connector->dev->dev);
-       if (r < 0)
-               return connector_status_disconnected;
+       if (!drm_kms_helper_is_poll_worker()) {
+               r = pm_runtime_get_sync(connector->dev->dev);
+               if (r < 0)
+                       return connector_status_disconnected;
+       }
 
        if (encoder) {
                struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
@@ -757,8 +759,12 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
        /* check acpi lid status ??? */
 
        amdgpu_connector_update_scratch_regs(connector, ret);
-       pm_runtime_mark_last_busy(connector->dev->dev);
-       pm_runtime_put_autosuspend(connector->dev->dev);
+
+       if (!drm_kms_helper_is_poll_worker()) {
+               pm_runtime_mark_last_busy(connector->dev->dev);
+               pm_runtime_put_autosuspend(connector->dev->dev);
+       }
+
        return ret;
 }
 
@@ -868,9 +874,11 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
        enum drm_connector_status ret = connector_status_disconnected;
        int r;
 
-       r = pm_runtime_get_sync(connector->dev->dev);
-       if (r < 0)
-               return connector_status_disconnected;
+       if (!drm_kms_helper_is_poll_worker()) {
+               r = pm_runtime_get_sync(connector->dev->dev);
+               if (r < 0)
+                       return connector_status_disconnected;
+       }
 
        encoder = amdgpu_connector_best_single_encoder(connector);
        if (!encoder)
@@ -924,8 +932,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
        amdgpu_connector_update_scratch_regs(connector, ret);
 
 out:
-       pm_runtime_mark_last_busy(connector->dev->dev);
-       pm_runtime_put_autosuspend(connector->dev->dev);
+       if (!drm_kms_helper_is_poll_worker()) {
+               pm_runtime_mark_last_busy(connector->dev->dev);
+               pm_runtime_put_autosuspend(connector->dev->dev);
+       }
 
        return ret;
 }
@@ -988,9 +998,11 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
        enum drm_connector_status ret = connector_status_disconnected;
        bool dret = false, broken_edid = false;
 
-       r = pm_runtime_get_sync(connector->dev->dev);
-       if (r < 0)
-               return connector_status_disconnected;
+       if (!drm_kms_helper_is_poll_worker()) {
+               r = pm_runtime_get_sync(connector->dev->dev);
+               if (r < 0)
+                       return connector_status_disconnected;
+       }
 
        if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
                ret = connector->status;
@@ -1115,8 +1127,10 @@ out:
        amdgpu_connector_update_scratch_regs(connector, ret);
 
 exit:
-       pm_runtime_mark_last_busy(connector->dev->dev);
-       pm_runtime_put_autosuspend(connector->dev->dev);
+       if (!drm_kms_helper_is_poll_worker()) {
+               pm_runtime_mark_last_busy(connector->dev->dev);
+               pm_runtime_put_autosuspend(connector->dev->dev);
+       }
 
        return ret;
 }
@@ -1359,9 +1373,11 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
        struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
        int r;
 
-       r = pm_runtime_get_sync(connector->dev->dev);
-       if (r < 0)
-               return connector_status_disconnected;
+       if (!drm_kms_helper_is_poll_worker()) {
+               r = pm_runtime_get_sync(connector->dev->dev);
+               if (r < 0)
+                       return connector_status_disconnected;
+       }
 
        if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
                ret = connector->status;
@@ -1429,8 +1445,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
 
        amdgpu_connector_update_scratch_regs(connector, ret);
 out:
-       pm_runtime_mark_last_busy(connector->dev->dev);
-       pm_runtime_put_autosuspend(connector->dev->dev);
+       if (!drm_kms_helper_is_poll_worker()) {
+               pm_runtime_mark_last_busy(connector->dev->dev);
+               pm_runtime_put_autosuspend(connector->dev->dev);
+       }
 
        return ret;
 }
index 00a50cc5ec9a31a77b7476020e5f47089c83c5de..af1b879a9ee9bf38b30e3394715d764dc55aaee0 100644 (file)
@@ -492,7 +492,7 @@ static int amdgpu_device_wb_init(struct amdgpu_device *adev)
                memset(&adev->wb.used, 0, sizeof(adev->wb.used));
 
                /* clear wb memory */
-               memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
+               memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
        }
 
        return 0;
@@ -530,8 +530,9 @@ int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
  */
 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
 {
+       wb >>= 3;
        if (wb < adev->wb.num_wb)
-               __clear_bit(wb >> 3, adev->wb.used);
+               __clear_bit(wb, adev->wb.used);
 }
 
 /**
@@ -1455,11 +1456,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_blocks[i].status.hw)
                        continue;
-               if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
-                       amdgpu_free_static_csa(adev);
-                       amdgpu_device_wb_fini(adev);
-                       amdgpu_device_vram_scratch_fini(adev);
-               }
 
                if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
                        adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
@@ -1486,6 +1482,13 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_blocks[i].status.sw)
                        continue;
+
+               if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+                       amdgpu_free_static_csa(adev);
+                       amdgpu_device_wb_fini(adev);
+                       amdgpu_device_vram_scratch_fini(adev);
+               }
+
                r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
                /* XXX handle errors */
                if (r) {
@@ -2284,14 +2287,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
                                drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
                        }
                        drm_modeset_unlock_all(dev);
-               } else {
-                       /*
-                        * There is no equivalent atomic helper to turn on
-                        * display, so we defined our own function for this,
-                        * once suspend resume is supported by the atomic
-                        * framework this will be reworked
-                        */
-                       amdgpu_dm_display_resume(adev);
                }
        }
 
@@ -2726,7 +2721,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
        if (amdgpu_device_has_dc_support(adev)) {
                if (drm_atomic_helper_resume(adev->ddev, state))
                        dev_info(adev->dev, "drm resume failed:%d\n", r);
-               amdgpu_dm_display_resume(adev);
        } else {
                drm_helper_resume_force_mode(adev->ddev);
        }
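
The amdgpu_device.c hunks keep the write-back bookkeeping in consistent units: the clear now covers the whole pool (8 dwords per slot, hence the * 8, with AMDGPU_MAX_WB dropped to 128 slots), and the free path converts the handed-out dword offset back to a slot index before the bounds check, since checking the raw offset against the slot count skipped the clear for most slots and leaked them. The GMC-owned teardown also moves from the hw-fini pass to the sw-fini pass, and the direct amdgpu_dm_display_resume() calls disappear because dm_resume() now makes that call (see the amdgpu_dm hunk further down). A sketch of the offset/slot conversion, assuming the 8-dwords-per-slot layout the hunks imply:

    #include <stdint.h>

    #define MAX_WB        128   /* slots */
    #define WB_DWORDS       8   /* dwords per slot; offsets are handed out as slot << 3 */

    static unsigned long wb_used[MAX_WB / (8 * sizeof(unsigned long))];

    /* 'offset' is the dword offset given to the caller, so convert it back to
     * a slot index before bounds-checking; comparing the raw offset against
     * MAX_WB would skip the clear for most slots and leak them. */
    static void wb_free(uint32_t offset)
    {
            uint32_t slot = offset >> 3;

            if (slot < MAX_WB)
                    wb_used[slot / (8 * sizeof(unsigned long))] &=
                            ~(1UL << (slot % (8 * sizeof(unsigned long))));
    }
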
index e14ab34d8262418084abdafe4015ad5ddee5c0a7..7c2be32c5aea6fd55a5747fd22d80e913d356c1c 100644 (file)
@@ -75,7 +75,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
 static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
 {
        struct amdgpu_gtt_mgr *mgr = man->priv;
-
+       spin_lock(&mgr->lock);
        drm_mm_takedown(&mgr->mm);
        spin_unlock(&mgr->lock);
        kfree(mgr);
index 56bcd59c3399a0a912ea1fa8f869c105b8827327..36483e0d3c9729e555163e9f9a0b1cfe7dc319a8 100644 (file)
@@ -257,7 +257,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
        r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
        if (r) {
                adev->irq.installed = false;
-               flush_work(&adev->hotplug_work);
+               if (!amdgpu_device_has_dc_support(adev))
+                       flush_work(&adev->hotplug_work);
                cancel_work_sync(&adev->reset_work);
                return r;
        }
@@ -282,7 +283,8 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
                adev->irq.installed = false;
                if (adev->irq.msi_enabled)
                        pci_disable_msi(adev->pdev);
-               flush_work(&adev->hotplug_work);
+               if (!amdgpu_device_has_dc_support(adev))
+                       flush_work(&adev->hotplug_work);
                cancel_work_sync(&adev->reset_work);
        }
 
index 2719937e09d6bf00a4f78c47cd970ce5f5a51000..3b7e7af09ead1b8ea7ec8168b1252e71baaccb57 100644 (file)
@@ -634,7 +634,7 @@ static int gmc_v9_0_late_init(void *handle)
        for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
                BUG_ON(vm_inv_eng[i] > 16);
 
-       if (adev->asic_type == CHIP_VEGA10) {
+       if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) {
                r = gmc_v9_0_ecc_available(adev);
                if (r == 1) {
                        DRM_INFO("ECC is active.\n");
@@ -682,7 +682,10 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
        adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
        if (!adev->mc.vram_width) {
                /* hbm memory channel size */
-               chansize = 128;
+               if (adev->flags & AMD_IS_APU)
+                       chansize = 64;
+               else
+                       chansize = 128;
 
                tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
                tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
index e92fb372bc99738dad957334ba40d3a138c57636..91cf95a8c39c832d50b378bf7464a99f19c839c8 100644 (file)
@@ -238,31 +238,27 @@ static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
 static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
-       u64 *wptr = NULL;
-       uint64_t local_wptr = 0;
+       u64 wptr;
 
        if (ring->use_doorbell) {
                /* XXX check if swapping is necessary on BE */
-               wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]);
-               DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr);
-               *wptr = (*wptr) >> 2;
-               DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr);
+               wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
+               DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
        } else {
                u32 lowbit, highbit;
                int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
 
-               wptr = &local_wptr;
                lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR)) >> 2;
                highbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
 
                DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
                                me, highbit, lowbit);
-               *wptr = highbit;
-               *wptr = (*wptr) << 32;
-               *wptr |= lowbit;
+               wptr = highbit;
+               wptr = wptr << 32;
+               wptr |= lowbit;
        }
 
-       return *wptr;
+       return wptr >> 2;
 }
 
 /**
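
The sdma_v4_0 get_wptr hunk fixes an unpleasant pattern: the old code pointed wptr at the shared write-back slot and then did *wptr = (*wptr) >> 2, modifying memory the hardware owns while reading it. The rework snapshots the doorbell value once with READ_ONCE() and applies the shift to the local copy only. A userspace-flavoured sketch of the rule:

    #include <stdint.h>

    /* Userspace stand-in for READ_ONCE(): a single volatile load the compiler
     * cannot re-read or split behind our back. */
    #define READ_ONCE_U64(p) (*(const volatile uint64_t *)(p))

    /* The slot is published by the hardware; read it once, never write it
     * back, and do any post-processing (the >> 2 from the hunk) on the copy. */
    static uint64_t get_wptr(const uint64_t *shared_slot)
    {
            uint64_t wptr = READ_ONCE_U64(shared_slot);

            return wptr >> 2;
    }
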
index b2bfedaf57f197b38e98dce034d12c1d3359ca64..9bab4842cd4411f8e282c083f526937b2979f6a0 100644 (file)
@@ -1618,7 +1618,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
        .set_wptr = uvd_v6_0_enc_ring_set_wptr,
        .emit_frame_size =
                4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
-               6 + /* uvd_v6_0_enc_ring_emit_vm_flush */
+               5 + /* uvd_v6_0_enc_ring_emit_vm_flush */
                5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
                1, /* uvd_v6_0_enc_ring_insert_end */
        .emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
index 1ce4c98385e3a17385a00b076a699cd64462d4dd..862835dc054e1ce66c76f114c5c9ccc2cf098d38 100644 (file)
@@ -629,11 +629,13 @@ static int dm_resume(void *handle)
 {
        struct amdgpu_device *adev = handle;
        struct amdgpu_display_manager *dm = &adev->dm;
+       int ret = 0;
 
        /* power on hardware */
        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
 
-       return 0;
+       ret = amdgpu_dm_display_resume(adev);
+       return ret;
 }
 
 int amdgpu_dm_display_resume(struct amdgpu_device *adev)
index 61e8c3e02d1696bf08f53bb24cefdc074a74cf10..639421a00ab629eefce3b7333126632f4d2b0848 100644 (file)
@@ -718,7 +718,7 @@ static enum link_training_result perform_channel_equalization_sequence(
        uint32_t retries_ch_eq;
        enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
        union lane_align_status_updated dpcd_lane_status_updated = {{0}};
-       union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};;
+       union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};
 
        hw_tr_pattern = get_supported_tp(link);
 
@@ -1465,7 +1465,7 @@ void decide_link_settings(struct dc_stream_state *stream,
        /* MST doesn't perform link training for now
         * TODO: add MST specific link training routine
         */
-       if (is_mst_supported(link)) {
+       if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
                *link_setting = link->verified_link_cap;
                return;
        }
index 261811e0c094a81a0a1c0c1c6420f83d4db4a3bc..539c3e0a62922b7c7b774a1d8a115d6fcc4ab2b4 100644 (file)
@@ -197,7 +197,8 @@ bool dc_stream_set_cursor_attributes(
        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
 
-               if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp))
+               if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm &&
+                   !pipe_ctx->plane_res.dpp) || !pipe_ctx->plane_res.ipp)
                        continue;
                if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
                        continue;
@@ -273,7 +274,8 @@ bool dc_stream_set_cursor_position(
                if (pipe_ctx->stream != stream ||
                                (!pipe_ctx->plane_res.mi  && !pipe_ctx->plane_res.hubp) ||
                                !pipe_ctx->plane_state ||
-                               (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp))
+                               (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp) ||
+                               !pipe_ctx->plane_res.ipp)
                        continue;
 
                if (pipe_ctx->plane_state->address.type
index 4c3223a4d62b04d0264686e2eb6e493e6daf7c23..adb6e7b9280ce48650b4bb287a1b122e4c06002b 100644 (file)
@@ -162,7 +162,7 @@ static int pp_hw_init(void *handle)
                if(hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) {
                        pr_err("smc start failed\n");
                        hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr);
-                       return -EINVAL;;
+                       return -EINVAL;
                }
                if (ret == PP_DPM_DISABLED)
                        goto exit;
index 41e42beff213905837c15db1841800b2021ec502..08e8a793714f26dc38e571a51b52f8421bb72444 100644 (file)
@@ -2756,10 +2756,13 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
                                    PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
 
 
-       disable_mclk_switching = ((1 < info.display_count) ||
-                                 disable_mclk_switching_for_frame_lock ||
-                                 smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
-                                 (mode_info.refresh_rate > 120));
+       if (info.display_count == 0)
+               disable_mclk_switching = false;
+       else
+               disable_mclk_switching = ((1 < info.display_count) ||
+                                         disable_mclk_switching_for_frame_lock ||
+                                         smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
+                                         (mode_info.refresh_rate > 120));
 
        sclk = smu7_ps->performance_levels[0].engine_clock;
        mclk = smu7_ps->performance_levels[0].memory_clock;
@@ -4534,13 +4537,6 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr,
        int tmp_result, result = 0;
        uint32_t sclk_mask = 0, mclk_mask = 0;
 
-       if (hwmgr->chip_id == CHIP_FIJI) {
-               if (request->type == AMD_PP_GFX_PROFILE)
-                       smu7_enable_power_containment(hwmgr);
-               else if (request->type == AMD_PP_COMPUTE_PROFILE)
-                       smu7_disable_power_containment(hwmgr);
-       }
-
        if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO)
                return -EINVAL;
 
index 2d55dabc77d4112d0b1c50bf1e1ae38ac0505dd0..5f9c3efb532f6c48471b75a0e5510369b98fffff 100644 (file)
@@ -3168,10 +3168,13 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
        disable_mclk_switching_for_vr = PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
        force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh);
 
-       disable_mclk_switching = (info.display_count > 1) ||
-                                   disable_mclk_switching_for_frame_lock ||
-                                   disable_mclk_switching_for_vr ||
-                                   force_mclk_high;
+       if (info.display_count == 0)
+               disable_mclk_switching = false;
+       else
+               disable_mclk_switching = (info.display_count > 1) ||
+                       disable_mclk_switching_for_frame_lock ||
+                       disable_mclk_switching_for_vr ||
+                       force_mclk_high;
 
        sclk = vega10_ps->performance_levels[0].gfx_clock;
        mclk = vega10_ps->performance_levels[0].mem_clock;
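
Both the smu7 and vega10 powerplay hunks special-case display_count == 0: with no display attached there is nothing a memory-clock switch could glitch, so switching stays enabled regardless of the frame-lock, VR or refresh-rate conditions that would otherwise force it off. Boiled down to a predicate (using the smu7 variant of the conditions):

    #include <stdbool.h>

    static bool disable_mclk_switching(unsigned int display_count,
                                       bool frame_lock, bool vblank_too_short,
                                       unsigned int refresh_rate)
    {
            if (display_count == 0)
                    return false;   /* headless: nothing to protect, keep switching */
            return display_count > 1 || frame_lock || vblank_too_short ||
                   refresh_rate > 120;
    }
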
index cd23b1b28259435602efeaf99fcc885366291efe..c91b9b054e3f77805bd3ae00b0db3bf74144f6d1 100644 (file)
@@ -294,22 +294,7 @@ static void cirrus_crtc_prepare(struct drm_crtc *crtc)
 {
 }
 
-/*
- * This is called after a mode is programmed. It should reverse anything done
- * by the prepare function
- */
-static void cirrus_crtc_commit(struct drm_crtc *crtc)
-{
-}
-
-/*
- * The core can pass us a set of gamma values to program. We actually only
- * use this for 8-bit mode so can't perform smooth fades on deeper modes,
- * but it's a requirement that we provide the function
- */
-static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
-                                u16 *blue, uint32_t size,
-                                struct drm_modeset_acquire_ctx *ctx)
+static void cirrus_crtc_load_lut(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
        struct cirrus_device *cdev = dev->dev_private;
@@ -317,7 +302,7 @@ static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
        int i;
 
        if (!crtc->enabled)
-               return 0;
+               return;
 
        r = crtc->gamma_store;
        g = r + crtc->gamma_size;
@@ -330,6 +315,27 @@ static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
                WREG8(PALETTE_DATA, *g++ >> 8);
                WREG8(PALETTE_DATA, *b++ >> 8);
        }
+}
+
+/*
+ * This is called after a mode is programmed. It should reverse anything done
+ * by the prepare function
+ */
+static void cirrus_crtc_commit(struct drm_crtc *crtc)
+{
+       cirrus_crtc_load_lut(crtc);
+}
+
+/*
+ * The core can pass us a set of gamma values to program. We actually only
+ * use this for 8-bit mode so can't perform smooth fades on deeper modes,
+ * but it's a requirement that we provide the function
+ */
+static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+                                u16 *blue, uint32_t size,
+                                struct drm_modeset_acquire_ctx *ctx)
+{
+       cirrus_crtc_load_lut(crtc);
 
        return 0;
 }
index ab4032167094cca0ddeb3583915af6fe07ae5301..ae3cbfe9e01cd6a52cc7c6a15ec809e971139ee4 100644 (file)
@@ -1878,6 +1878,8 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
                new_crtc_state->event->base.completion = &commit->flip_done;
                new_crtc_state->event->base.completion_release = release_crtc_commit;
                drm_crtc_commit_get(commit);
+
+               commit->abort_completion = true;
        }
 
        for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
@@ -3421,8 +3423,21 @@ EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
 void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state)
 {
        if (state->commit) {
+               /*
+                * In the event that a non-blocking commit returns
+                * -ERESTARTSYS before the commit_tail work is queued, we will
+                * have an extra reference to the commit object. Release it, if
+                * the event has not been consumed by the worker.
+                *
+                * state->event may be freed, so we can't directly look at
+                * state->event->base.completion.
+                */
+               if (state->event && state->commit->abort_completion)
+                       drm_crtc_commit_put(state->commit);
+
                kfree(state->commit->event);
                state->commit->event = NULL;
+
                drm_crtc_commit_put(state->commit);
        }
 
index ddd5379145758014f1a5a9915abb1528edc1cf17..4f751a9d71a34b858950ddae058bb50eaa281afa 100644 (file)
@@ -113,6 +113,9 @@ static const struct edid_quirk {
        /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
        { "AEO", 0, EDID_QUIRK_FORCE_6BPC },
 
+       /* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
+       { "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC },
+
        /* Belinea 10 15 55 */
        { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
        { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
@@ -162,6 +165,24 @@ static const struct edid_quirk {
 
        /* HTC Vive VR Headset */
        { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
+
+       /* Oculus Rift DK1, DK2, and CV1 VR Headsets */
+       { "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP },
+       { "OVR", 0x0003, EDID_QUIRK_NON_DESKTOP },
+       { "OVR", 0x0004, EDID_QUIRK_NON_DESKTOP },
+
+       /* Windows Mixed Reality Headsets */
+       { "ACR", 0x7fce, EDID_QUIRK_NON_DESKTOP },
+       { "HPN", 0x3515, EDID_QUIRK_NON_DESKTOP },
+       { "LEN", 0x0408, EDID_QUIRK_NON_DESKTOP },
+       { "LEN", 0xb800, EDID_QUIRK_NON_DESKTOP },
+       { "FUJ", 0x1970, EDID_QUIRK_NON_DESKTOP },
+       { "DEL", 0x7fce, EDID_QUIRK_NON_DESKTOP },
+       { "SEC", 0x144a, EDID_QUIRK_NON_DESKTOP },
+       { "AUS", 0xc102, EDID_QUIRK_NON_DESKTOP },
+
+       /* Sony PlayStation VR Headset */
+       { "SNY", 0x0704, EDID_QUIRK_NON_DESKTOP },
 };
 
 /*
index 5a13ff29f4f04af99c61fa07261a35a44ac0dd8e..c0530a1af5e39da421b87ba14d1b5f31f1a864f6 100644 (file)
@@ -121,6 +121,10 @@ int drm_mode_addfb(struct drm_device *dev,
        r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
        r.handles[0] = or->handle;
 
+       if (r.pixel_format == DRM_FORMAT_XRGB2101010 &&
+           dev->driver->driver_features & DRIVER_PREFER_XBGR_30BPP)
+               r.pixel_format = DRM_FORMAT_XBGR2101010;
+
        ret = drm_mode_addfb2(dev, &r, file_priv);
        if (ret)
                return ret;
index 186c4e90cc1cd25e06a19a89da1bf88a7d9e0cf7..89eef1bb4ddc3f606dea32ac24a3702c7293152c 100644 (file)
@@ -836,9 +836,24 @@ struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
        if (!mm->color_adjust)
                return NULL;
 
-       hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack);
-       hole_start = __drm_mm_hole_node_start(hole);
-       hole_end = hole_start + hole->hole_size;
+       /*
+        * The hole found during scanning should ideally be the first element
+        * in the hole_stack list, but due to side-effects in the driver it
+        * may not be.
+        */
+       list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
+               hole_start = __drm_mm_hole_node_start(hole);
+               hole_end = hole_start + hole->hole_size;
+
+               if (hole_start <= scan->hit_start &&
+                   hole_end >= scan->hit_end)
+                       break;
+       }
+
+       /* We should only be called after we found the hole previously */
+       DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
+       if (unlikely(&hole->hole_stack == &mm->hole_stack))
+               return NULL;
 
        DRM_MM_BUG_ON(hole_start > scan->hit_start);
        DRM_MM_BUG_ON(hole_end < scan->hit_end);
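
drm_mm_scan_color_evict() used to assume the hole produced by the eviction scan is the first entry on hole_stack; driver side effects can reorder that list, so the hunk walks it and picks the hole that actually spans [hit_start, hit_end]. The containment test, reduced to plain C over a singly linked free list:

    #include <stddef.h>

    struct hole {
            unsigned long start, end;   /* [start, end) of the free range */
            struct hole *next;
    };

    /* Return the hole that fully contains the scanned hit range, instead of
     * trusting whatever happens to sit at the head of the free list. */
    static struct hole *find_scan_hole(struct hole *head,
                                       unsigned long hit_start,
                                       unsigned long hit_end)
    {
            for (struct hole *h = head; h; h = h->next)
                    if (h->start <= hit_start && h->end >= hit_end)
                            return h;
            return NULL;    /* a successful scan should make this unreachable */
    }
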
index 555fbe54d6e224e2b58a7d263353efac2d7e0104..00b8445ba8192b63bf6015eb1442342c39b47378 100644 (file)
@@ -653,6 +653,26 @@ out:
                schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
 }
 
+/**
+ * drm_kms_helper_is_poll_worker - is %current task an output poll worker?
+ *
+ * Determine if %current task is an output poll worker.  This can be used
+ * to select distinct code paths for output polling versus other contexts.
+ *
+ * One use case is to avoid a deadlock between the output poll worker and
+ * the autosuspend worker wherein the latter waits for polling to finish
+ * upon calling drm_kms_helper_poll_disable(), while the former waits for
+ * runtime suspend to finish upon calling pm_runtime_get_sync() in a
+ * connector ->detect hook.
+ */
+bool drm_kms_helper_is_poll_worker(void)
+{
+       struct work_struct *work = current_work();
+
+       return work && work->func == output_poll_execute;
+}
+EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
+
 /**
  * drm_kms_helper_poll_disable - disable output polling
  * @dev: drm_device
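
drm_kms_helper_is_poll_worker() exists so a connector ->detect hook can tell whether it was invoked from the output-poll worker. The amdgpu hunks above and the nouveau/radeon hunks below all use it the same way: only take (and later drop) a runtime-PM reference when not running in the poll worker, since that worker is exactly what runtime suspend waits on. A sketch of the consumer shape, using only calls that appear in those hunks, with probing and error handling trimmed:

    static enum drm_connector_status example_detect(struct drm_connector *connector,
                                                    bool force)
    {
            enum drm_connector_status status = connector_status_disconnected;
            int r;

            if (!drm_kms_helper_is_poll_worker()) {
                    r = pm_runtime_get_sync(connector->dev->dev);
                    if (r < 0)
                            return connector_status_disconnected;
            }

            /* ... probe DDC / HPD and set status ... */

            if (!drm_kms_helper_is_poll_worker()) {
                    pm_runtime_mark_last_busy(connector->dev->dev);
                    pm_runtime_put_autosuspend(connector->dev->dev);
            }
            return status;
    }
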
index 2b8bf2dd63874e36e0d29c4c383a74c2d365489d..f68ef1b3a28c7fac6c427d975d8df015cd09b915 100644 (file)
@@ -286,7 +286,6 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
 
        node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL);
        if (!node) {
-               dev_err(dev, "failed to allocate memory\n");
                ret = -ENOMEM;
                goto err;
        }
@@ -926,7 +925,7 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
        struct drm_device *drm_dev = g2d->subdrv.drm_dev;
        struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
        struct drm_exynos_pending_g2d_event *e;
-       struct timeval now;
+       struct timespec64 now;
 
        if (list_empty(&runqueue_node->event_list))
                return;
@@ -934,9 +933,9 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
        e = list_first_entry(&runqueue_node->event_list,
                             struct drm_exynos_pending_g2d_event, base.link);
 
-       do_gettimeofday(&now);
+       ktime_get_ts64(&now);
        e->event.tv_sec = now.tv_sec;
-       e->event.tv_usec = now.tv_usec;
+       e->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
        e->event.cmdlist_no = cmdlist_no;
 
        drm_send_event(drm_dev, &e->base);
@@ -1358,10 +1357,9 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
                return -EFAULT;
 
        runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL);
-       if (!runqueue_node) {
-               dev_err(dev, "failed to allocate memory\n");
+       if (!runqueue_node)
                return -ENOMEM;
-       }
+
        run_cmdlist = &runqueue_node->run_cmdlist;
        event_list = &runqueue_node->event_list;
        INIT_LIST_HEAD(run_cmdlist);
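
The g2d event-timestamp hunk swaps the deprecated do_gettimeofday()/struct timeval pair for ktime_get_ts64()/struct timespec64, deriving the microseconds field from tv_nsec and moving the event timestamps onto a monotonic clock in the process. A userspace analogue of the conversion, with clock_gettime() standing in for ktime_get_ts64():

    #include <time.h>

    struct event_stamp { long long tv_sec; long tv_usec; };

    static void stamp_event(struct event_stamp *e)
    {
            struct timespec now;

            clock_gettime(CLOCK_MONOTONIC, &now);
            e->tv_sec  = now.tv_sec;
            e->tv_usec = now.tv_nsec / 1000;    /* NSEC_PER_USEC */
    }
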
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.h b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
deleted file mode 100644 (file)
index 71a0b4c..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- *
- * Authors:
- *     YoungJun Cho <yj44.cho@samsung.com>
- *     Eunchul Kim <chulspro.kim@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#ifndef        _EXYNOS_DRM_ROTATOR_H_
-#define        _EXYNOS_DRM_ROTATOR_H_
-
-/* TODO */
-
-#endif
index a4b75a46f946306165e7140fa4b753fe116868ca..abd84cbcf1c2ca763e71d4e493661f8bef22170f 100644 (file)
@@ -1068,10 +1068,13 @@ static void hdmi_audio_config(struct hdmi_context *hdata)
        /* Configuration I2S input ports. Configure I2S_PIN_SEL_0~4 */
        hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_0, HDMI_I2S_SEL_SCLK(5)
                        | HDMI_I2S_SEL_LRCK(6));
-       hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_1, HDMI_I2S_SEL_SDATA1(1)
-                       | HDMI_I2S_SEL_SDATA2(4));
+
+       hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_1, HDMI_I2S_SEL_SDATA1(3)
+                       | HDMI_I2S_SEL_SDATA0(4));
+
        hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_2, HDMI_I2S_SEL_SDATA3(1)
                        | HDMI_I2S_SEL_SDATA2(2));
+
        hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_3, HDMI_I2S_SEL_DSD(0));
 
        /* I2S_CON_1 & 2 */
index 30496134a3d0720bcc649711bd8d2ef5d35cf47a..d7cbe53c4c01f440cf9c0aecec153dc3d4f3f912 100644 (file)
 #define EXYNOS_CIIMGEFF_FIN_EMBOSSING          (4 << 26)
 #define EXYNOS_CIIMGEFF_FIN_SILHOUETTE         (5 << 26)
 #define EXYNOS_CIIMGEFF_FIN_MASK                       (7 << 26)
-#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK          ((0xff < 13) | (0xff < 0))
+#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK          ((0xff << 13) | (0xff << 0))
 
 /* Real input DMA size register */
 #define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE    (1 << 31)
index 04be0f7e81932682e7f5d9427580f46191061d9e..4420c203ac85e54e1afee35f0ef73c2aff550876 100644 (file)
 
 /* I2S_PIN_SEL_1 */
 #define HDMI_I2S_SEL_SDATA1(x)         (((x) & 0x7) << 4)
-#define HDMI_I2S_SEL_SDATA2(x)         ((x) & 0x7)
+#define HDMI_I2S_SEL_SDATA0(x)         ((x) & 0x7)
 
 /* I2S_PIN_SEL_2 */
 #define HDMI_I2S_SEL_SDATA3(x)         (((x) & 0x7) << 4)
index 4401068ff468ad36aef1d5df5277e2f008f01bb3..3ab1ace2a6bdd8a4be32828e58262d9ef9e20ae7 100644 (file)
@@ -505,6 +505,8 @@ eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
                list_add_tail(&vma->exec_link, &eb->unbound);
                if (drm_mm_node_allocated(&vma->node))
                        err = i915_vma_unbind(vma);
+               if (unlikely(err))
+                       vma->exec_flags = NULL;
        }
        return err;
 }
@@ -2410,7 +2412,7 @@ err_request:
        if (out_fence) {
                if (err == 0) {
                        fd_install(out_fence_fd, out_fence->file);
-                       args->rsvd2 &= GENMASK_ULL(0, 31); /* keep in-fence */
+                       args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
                        args->rsvd2 |= (u64)out_fence_fd << 32;
                        out_fence_fd = -1;
                } else {
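
GENMASK_ULL(h, l) takes the high bit first, so GENMASK_ULL(0, 31) expands to an empty mask and the "keep in-fence" step silently wiped the stored fd; GENMASK_ULL(31, 0) keeps the low 32 bits as intended before the out-fence fd is packed into the upper half of rsvd2. A plain-C equivalent of the intended mask:

    #include <stdint.h>
    #include <assert.h>

    /* Plain-C version of GENMASK_ULL(h, l): set bits h..l inclusive, h >= l. */
    static inline uint64_t genmask_ull(unsigned int h, unsigned int l)
    {
            return (~0ULL >> (63 - h)) & (~0ULL << l);
    }

    int main(void)
    {
            assert(genmask_ull(31, 0) == 0x00000000ffffffffULL);   /* keep in-fence */
            return 0;
    }
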
index e09d18df8b7f18ca0b17f65797e5cbdc4baec072..a3e93d46316a267571f199cfcbd665434f41003f 100644 (file)
@@ -476,8 +476,6 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
        GEM_BUG_ON(!irqs_disabled());
        lockdep_assert_held(&engine->timeline->lock);
 
-       trace_i915_gem_request_execute(request);
-
        /* Transfer from per-context onto the global per-engine timeline */
        timeline = engine->timeline;
        GEM_BUG_ON(timeline == request->timeline);
@@ -501,6 +499,8 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
        list_move_tail(&request->link, &timeline->requests);
        spin_unlock(&request->timeline->lock);
 
+       trace_i915_gem_request_execute(request);
+
        wake_up_all(&request->execute);
 }
 
index a2108e35c599982831cab1eab39c6b6721af1d80..33eb0c5b1d3244d944a9e80fa883352952cb5ac0 100644 (file)
@@ -2027,7 +2027,7 @@ enum i915_power_well_id {
 #define _CNL_PORT_TX_DW5_LN0_AE                0x162454
 #define _CNL_PORT_TX_DW5_LN0_B         0x162654
 #define _CNL_PORT_TX_DW5_LN0_C         0x162C54
-#define _CNL_PORT_TX_DW5_LN0_D         0x162ED4
+#define _CNL_PORT_TX_DW5_LN0_D         0x162E54
 #define _CNL_PORT_TX_DW5_LN0_F         0x162854
 #define CNL_PORT_TX_DW5_GRP(port)      _MMIO_PORT6(port, \
                                                    _CNL_PORT_TX_DW5_GRP_AE, \
@@ -2058,7 +2058,7 @@ enum i915_power_well_id {
 #define _CNL_PORT_TX_DW7_LN0_AE                0x16245C
 #define _CNL_PORT_TX_DW7_LN0_B         0x16265C
 #define _CNL_PORT_TX_DW7_LN0_C         0x162C5C
-#define _CNL_PORT_TX_DW7_LN0_D         0x162EDC
+#define _CNL_PORT_TX_DW7_LN0_D         0x162E5C
 #define _CNL_PORT_TX_DW7_LN0_F         0x16285C
 #define CNL_PORT_TX_DW7_GRP(port)      _MMIO_PORT6(port, \
                                                    _CNL_PORT_TX_DW7_GRP_AE, \
index 522d54fecb53489193eb2b72dbf9fdd41009ac67..4a01f62a392dd18569ca0f3503c87bd1bbbe59b9 100644 (file)
@@ -779,11 +779,11 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
 {
        struct intel_encoder *encoder;
 
-       if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
-               return NULL;
-
        /* MST */
        if (pipe >= 0) {
+               if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
+                       return NULL;
+
                encoder = dev_priv->av_enc_map[pipe];
                /*
                 * when bootup, audio driver may not know it is
index 5155f0179b61744f41f18922a9d0c1b39ec28b10..05520202c96778c1401dac07a9b9ff768ba97b91 100644 (file)
@@ -36,6 +36,7 @@
 #include "meson_venc.h"
 #include "meson_vpp.h"
 #include "meson_viu.h"
+#include "meson_canvas.h"
 #include "meson_registers.h"
 
 /* CRTC definition */
@@ -192,6 +193,11 @@ void meson_crtc_irq(struct meson_drm *priv)
                } else
                        meson_vpp_disable_interlace_vscaler_osd1(priv);
 
+               meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1,
+                          priv->viu.osd1_addr, priv->viu.osd1_stride,
+                          priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
+                          MESON_CANVAS_BLKMODE_LINEAR);
+
                /* Enable OSD1 */
                writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND,
                                    priv->io_base + _REG(VPP_MISC));
index 5e8b392b9d1ff0da66a429b386a308019467c62a..8450d6ac8c9bc1dcd049fd8c2205d1c5a8c7c924 100644 (file)
@@ -43,6 +43,9 @@ struct meson_drm {
                bool osd1_commit;
                uint32_t osd1_ctrl_stat;
                uint32_t osd1_blk0_cfg[5];
+               uint32_t osd1_addr;
+               uint32_t osd1_stride;
+               uint32_t osd1_height;
        } viu;
 
        struct {
index d0a6ac8390f39fd64dd194b76c3078e374ef51f2..27bd3503e1e49eb82b51a9b2cf11bfd7593da20a 100644 (file)
@@ -164,10 +164,9 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
        /* Update Canvas with buffer address */
        gem = drm_fb_cma_get_gem_obj(fb, 0);
 
-       meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1,
-                          gem->paddr, fb->pitches[0],
-                          fb->height, MESON_CANVAS_WRAP_NONE,
-                          MESON_CANVAS_BLKMODE_LINEAR);
+       priv->viu.osd1_addr = gem->paddr;
+       priv->viu.osd1_stride = fb->pitches[0];
+       priv->viu.osd1_height = fb->height;
 
        spin_unlock_irqrestore(&priv->drm->event_lock, flags);
 }
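
The three meson hunks work together: the plane's atomic update no longer programs the OSD1 canvas directly, it just latches the framebuffer address, stride and height into driver state, and the CRTC vblank interrupt replays those values through meson_canvas_setup(), presumably so the scanout address changes at vblank rather than mid-frame. A generic latch-then-commit sketch of that split, with hw_program_canvas() as a hypothetical stand-in for the canvas call:

    #include <stdint.h>

    struct osd_state {
            uint32_t addr, stride, height;  /* latched by the plane update */
    };

    void hw_program_canvas(uint32_t addr, uint32_t stride, uint32_t height); /* hypothetical */

    /* Atomic plane update: record only. */
    static void plane_update(struct osd_state *s, uint32_t addr,
                             uint32_t stride, uint32_t height)
    {
            s->addr = addr;
            s->stride = stride;
            s->height = height;
    }

    /* Vblank interrupt: apply the latched values to the hardware. */
    static void vblank_irq(const struct osd_state *s)
    {
            hw_program_canvas(s->addr, s->stride, s->height);
    }
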
index 3e9bba4d66246b10a2f4a914fbddf97513681de0..6d8e3a9a6fc093164adc20e37e7b70db54cfe7bf 100644 (file)
@@ -680,7 +680,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
        } else {
                dev_info(&pdev->dev,
                         "no iommu, fallback to phys contig buffers for scanout\n");
-               aspace = NULL;;
+               aspace = NULL;
        }
 
        pm_runtime_put_sync(&pdev->dev);
index 69d6e61a01ecfb3e48d6ecaa7749b532227ec0bf..6ed9cb053dfa5fd3bff312b155541ef17a4aadad 100644 (file)
@@ -570,9 +570,15 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
                nv_connector->edid = NULL;
        }
 
-       ret = pm_runtime_get_sync(connector->dev->dev);
-       if (ret < 0 && ret != -EACCES)
-               return conn_status;
+       /* Outputs are only polled while runtime active, so acquiring a
+        * runtime PM ref here is unnecessary (and would deadlock upon
+        * runtime suspend because it waits for polling to finish).
+        */
+       if (!drm_kms_helper_is_poll_worker()) {
+               ret = pm_runtime_get_sync(connector->dev->dev);
+               if (ret < 0 && ret != -EACCES)
+                       return conn_status;
+       }
 
        nv_encoder = nouveau_connector_ddc_detect(connector);
        if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
@@ -647,8 +653,10 @@ detect_analog:
 
  out:
 
-       pm_runtime_mark_last_busy(connector->dev->dev);
-       pm_runtime_put_autosuspend(connector->dev->dev);
+       if (!drm_kms_helper_is_poll_worker()) {
+               pm_runtime_mark_last_busy(connector->dev->dev);
+               pm_runtime_put_autosuspend(connector->dev->dev);
+       }
 
        return conn_status;
 }
index dd8d4352ed998e7f09d36a981098edae84830808..caddce88d2d8b3e5eaf0528c609fc2a190c8294f 100644 (file)
@@ -4477,6 +4477,7 @@ nv50_display_create(struct drm_device *dev)
        nouveau_display(dev)->fini = nv50_display_fini;
        disp->disp = &nouveau_display(dev)->disp;
        dev->mode_config.funcs = &nv50_disp_func;
+       dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
        if (nouveau_atomic)
                dev->driver->driver_features |= DRIVER_ATOMIC;
 
index 5012f5e47a1e599d1758b91957d79925487de260..2e2ca3c6b47d374fc6650d15bfab5d7934740a2f 100644 (file)
@@ -899,9 +899,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
        enum drm_connector_status ret = connector_status_disconnected;
        int r;
 
-       r = pm_runtime_get_sync(connector->dev->dev);
-       if (r < 0)
-               return connector_status_disconnected;
+       if (!drm_kms_helper_is_poll_worker()) {
+               r = pm_runtime_get_sync(connector->dev->dev);
+               if (r < 0)
+                       return connector_status_disconnected;
+       }
 
        if (encoder) {
                struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -924,8 +926,12 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
        /* check acpi lid status ??? */
 
        radeon_connector_update_scratch_regs(connector, ret);
-       pm_runtime_mark_last_busy(connector->dev->dev);
-       pm_runtime_put_autosuspend(connector->dev->dev);
+
+       if (!drm_kms_helper_is_poll_worker()) {
+               pm_runtime_mark_last_busy(connector->dev->dev);
+               pm_runtime_put_autosuspend(connector->dev->dev);
+       }
+
        return ret;
 }
 
@@ -1039,9 +1045,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
        enum drm_connector_status ret = connector_status_disconnected;
        int r;
 
-       r = pm_runtime_get_sync(connector->dev->dev);
-       if (r < 0)
-               return connector_status_disconnected;
+       if (!drm_kms_helper_is_poll_worker()) {
+               r = pm_runtime_get_sync(connector->dev->dev);
+               if (r < 0)
+                       return connector_status_disconnected;
+       }
 
        encoder = radeon_best_single_encoder(connector);
        if (!encoder)
@@ -1108,8 +1116,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
        radeon_connector_update_scratch_regs(connector, ret);
 
 out:
-       pm_runtime_mark_last_busy(connector->dev->dev);
-       pm_runtime_put_autosuspend(connector->dev->dev);
+       if (!drm_kms_helper_is_poll_worker()) {
+               pm_runtime_mark_last_busy(connector->dev->dev);
+               pm_runtime_put_autosuspend(connector->dev->dev);
+       }
 
        return ret;
 }
@@ -1173,9 +1183,11 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
        if (!radeon_connector->dac_load_detect)
                return ret;
 
-       r = pm_runtime_get_sync(connector->dev->dev);
-       if (r < 0)
-               return connector_status_disconnected;
+       if (!drm_kms_helper_is_poll_worker()) {
+               r = pm_runtime_get_sync(connector->dev->dev);
+               if (r < 0)
+                       return connector_status_disconnected;
+       }
 
        encoder = radeon_best_single_encoder(connector);
        if (!encoder)
@@ -1187,8 +1199,12 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
        if (ret == connector_status_connected)
                ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
        radeon_connector_update_scratch_regs(connector, ret);
-       pm_runtime_mark_last_busy(connector->dev->dev);
-       pm_runtime_put_autosuspend(connector->dev->dev);
+
+       if (!drm_kms_helper_is_poll_worker()) {
+               pm_runtime_mark_last_busy(connector->dev->dev);
+               pm_runtime_put_autosuspend(connector->dev->dev);
+       }
+
        return ret;
 }
 
@@ -1251,9 +1267,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
        enum drm_connector_status ret = connector_status_disconnected;
        bool dret = false, broken_edid = false;
 
-       r = pm_runtime_get_sync(connector->dev->dev);
-       if (r < 0)
-               return connector_status_disconnected;
+       if (!drm_kms_helper_is_poll_worker()) {
+               r = pm_runtime_get_sync(connector->dev->dev);
+               if (r < 0)
+                       return connector_status_disconnected;
+       }
 
        if (radeon_connector->detected_hpd_without_ddc) {
                force = true;
@@ -1436,8 +1454,10 @@ out:
        }
 
 exit:
-       pm_runtime_mark_last_busy(connector->dev->dev);
-       pm_runtime_put_autosuspend(connector->dev->dev);
+       if (!drm_kms_helper_is_poll_worker()) {
+               pm_runtime_mark_last_busy(connector->dev->dev);
+               pm_runtime_put_autosuspend(connector->dev->dev);
+       }
 
        return ret;
 }
@@ -1688,9 +1708,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
        if (radeon_dig_connector->is_mst)
                return connector_status_disconnected;
 
-       r = pm_runtime_get_sync(connector->dev->dev);
-       if (r < 0)
-               return connector_status_disconnected;
+       if (!drm_kms_helper_is_poll_worker()) {
+               r = pm_runtime_get_sync(connector->dev->dev);
+               if (r < 0)
+                       return connector_status_disconnected;
+       }
 
        if (!force && radeon_check_hpd_status_unchanged(connector)) {
                ret = connector->status;
@@ -1777,8 +1799,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
        }
 
 out:
-       pm_runtime_mark_last_busy(connector->dev->dev);
-       pm_runtime_put_autosuspend(connector->dev->dev);
+       if (!drm_kms_helper_is_poll_worker()) {
+               pm_runtime_mark_last_busy(connector->dev->dev);
+               pm_runtime_put_autosuspend(connector->dev->dev);
+       }
 
        return ret;
 }
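
The radeon hunks above all apply the same guard: a runtime-PM reference is only taken when the ->detect() callback is *not* running from the DRM output-poll worker, which avoids the detect path waiting on a runtime resume that is itself waiting for the poll worker. A minimal sketch of the guard pattern (foo_detect() is a hypothetical connector callback; drm_kms_helper_is_poll_worker() and the pm_runtime helpers are the real kernel APIs used in the hunks):

    #include <drm/drm_crtc_helper.h>
    #include <linux/pm_runtime.h>

    static enum drm_connector_status
    foo_detect(struct drm_connector *connector, bool force)
    {
            enum drm_connector_status status = connector_status_disconnected;
            int r;

            /* Only take a runtime-PM reference outside of the poll worker. */
            if (!drm_kms_helper_is_poll_worker()) {
                    r = pm_runtime_get_sync(connector->dev->dev);
                    if (r < 0)
                            return connector_status_disconnected;
            }

            /* ... probe the hardware and update status ... */

            if (!drm_kms_helper_is_poll_worker()) {
                    pm_runtime_mark_last_busy(connector->dev->dev);
                    pm_runtime_put_autosuspend(connector->dev->dev);
            }

            return status;
    }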
index 8d3e3d2e0090938c2ec895d0836871622fad0bd7..7828a5e1062999b1bae507440b6dd8847095546b 100644 (file)
@@ -1365,6 +1365,10 @@ int radeon_device_init(struct radeon_device *rdev,
        if ((rdev->flags & RADEON_IS_PCI) &&
            (rdev->family <= CHIP_RS740))
                rdev->need_dma32 = true;
+#ifdef CONFIG_PPC64
+       if (rdev->family == CHIP_CEDAR)
+               rdev->need_dma32 = true;
+#endif
 
        dma_bits = rdev->need_dma32 ? 32 : 40;
        r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
index 326ad068c15aa63ef38a6c85fbcc368ad52bd801..4b6542538ff91581272deadb6e971c50f2429033 100644 (file)
@@ -47,7 +47,6 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev);
 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
 static void radeon_pm_update_profile(struct radeon_device *rdev);
 static void radeon_pm_set_clocks(struct radeon_device *rdev);
-static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev);
 
 int radeon_pm_get_type_index(struct radeon_device *rdev,
                             enum radeon_pm_state_type ps_type,
@@ -80,8 +79,6 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
                                radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
                }
                mutex_unlock(&rdev->pm.mutex);
-               /* allow new DPM state to be picked */
-               radeon_pm_compute_clocks_dpm(rdev);
        } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
                if (rdev->pm.profile == PM_PROFILE_AUTO) {
                        mutex_lock(&rdev->pm.mutex);
@@ -885,8 +882,7 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
                dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
        /* balanced states don't exist at the moment */
        if (dpm_state == POWER_STATE_TYPE_BALANCED)
-               dpm_state = rdev->pm.dpm.ac_power ?
-                       POWER_STATE_TYPE_PERFORMANCE : POWER_STATE_TYPE_BATTERY;
+               dpm_state = POWER_STATE_TYPE_PERFORMANCE;
 
 restart_search:
        /* Pick the best power state based on current conditions */
index 2c18996d59c58e6247a24936da9dc155f76d78cf..0d95888ccc3e778bb9752376682c5cccba525e7b 100644 (file)
@@ -461,7 +461,7 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
 {
        struct drm_sched_job *s_job;
        struct drm_sched_entity *entity, *tmp;
-       int i;;
+       int i;
 
        spin_lock(&sched->job_list_lock);
        list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
index 3c15cf24b50360918253ed5bcff4d4a73a1e6cd0..b3960118deb9eb22da1134ca9525e0c87dc17275 100644 (file)
@@ -260,7 +260,7 @@ static void sun4i_tcon0_mode_set_common(struct sun4i_tcon *tcon,
                                        const struct drm_display_mode *mode)
 {
        /* Configure the dot clock */
-       clk_set_rate(tcon->dclk, mode->crtc_clock * 1000);
+       clk_set_rate_exclusive(tcon->dclk, mode->crtc_clock * 1000);
 
        /* Set the resolution */
        regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG,
@@ -335,6 +335,9 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
        regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
                           SUN4I_TCON_GCTL_IOMAP_MASK,
                           SUN4I_TCON_GCTL_IOMAP_TCON0);
+
+       /* Enable the output on the pins */
+       regmap_write(tcon->regs, SUN4I_TCON0_IO_TRI_REG, 0xe0000000);
 }
 
 static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
@@ -418,7 +421,7 @@ static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
        WARN_ON(!tcon->quirks->has_channel_1);
 
        /* Configure the dot clock */
-       clk_set_rate(tcon->sclk1, mode->crtc_clock * 1000);
+       clk_set_rate_exclusive(tcon->sclk1, mode->crtc_clock * 1000);
 
        /* Adjust clock delay */
        clk_delay = sun4i_tcon_get_clk_delay(mode, 1);
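
Both TCON channels now set the dot clock with clk_set_rate_exclusive(), which both sets the rate and takes an exclusivity claim so other consumers cannot re-rate the clock underneath an active mode. A small sketch of the pairing, assuming hypothetical foo_tcon_* helpers; clk_set_rate_exclusive() and clk_rate_exclusive_put() are the common clock framework calls:

    #include <linux/clk.h>

    /* Sketch: hold the dot clock rate for the lifetime of the mode. */
    static int foo_tcon_claim_dotclock(struct clk *dclk, unsigned long hz)
    {
            /* Sets the rate and takes an exclusivity reference... */
            return clk_set_rate_exclusive(dclk, hz);
    }

    static void foo_tcon_release_dotclock(struct clk *dclk)
    {
            /* ...which must be dropped when the output is torn down. */
            clk_rate_exclusive_put(dclk);
    }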
index 5720a0d4ac0a9ebf242755921bc1ac6fea8c2630..677ac16c8a6dea750e80ec914973344d972bb765 100644 (file)
@@ -197,6 +197,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
        case VIRTGPU_PARAM_3D_FEATURES:
                value = vgdev->has_virgl_3d == true ? 1 : 0;
                break;
+       case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
+               value = 1;
+               break;
        default:
                return -EINVAL;
        }
@@ -472,7 +475,7 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
 {
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_get_caps *args = data;
-       int size;
+       unsigned size, host_caps_size;
        int i;
        int found_valid = -1;
        int ret;
@@ -481,6 +484,10 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
        if (vgdev->num_capsets == 0)
                return -ENOSYS;
 
+       /* don't allow userspace to pass 0 */
+       if (args->size == 0)
+               return -EINVAL;
+
        spin_lock(&vgdev->display_info_lock);
        for (i = 0; i < vgdev->num_capsets; i++) {
                if (vgdev->capsets[i].id == args->cap_set_id) {
@@ -496,11 +503,9 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
                return -EINVAL;
        }
 
-       size = vgdev->capsets[found_valid].max_size;
-       if (args->size > size) {
-               spin_unlock(&vgdev->display_info_lock);
-               return -EINVAL;
-       }
+       host_caps_size = vgdev->capsets[found_valid].max_size;
+       /* only copy to user the minimum of the host caps size or the guest caps size */
+       size = min(args->size, host_caps_size);
 
        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->id == args->cap_set_id &&
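
Rather than rejecting a user buffer larger than the host capset, the ioctl now rejects only a zero size and copies min(guest size, host size). The clamp in isolation, with hypothetical parameter names:

    #include <linux/kernel.h>   /* min() */
    #include <linux/errno.h>

    static int foo_clamp_caps_copy(unsigned int user_size,
                                   unsigned int host_caps_size,
                                   unsigned int *copy_size)
    {
            /* A zero-length destination buffer is never valid. */
            if (user_size == 0)
                    return -EINVAL;

            /* Copy no more than either side can hold. */
            *copy_size = min(user_size, host_caps_size);
            return 0;
    }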
index 658fa2d3e40c260d051d4299bda4eddb0af5abeb..48685cddbad1b7218519841c58478e12333bb90e 100644 (file)
@@ -1089,7 +1089,7 @@ static void ipu_irq_handler(struct irq_desc *desc)
 {
        struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
        struct irq_chip *chip = irq_desc_get_chip(desc);
-       const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};
+       static const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};
 
        chained_irq_enter(chip, desc);
 
@@ -1102,7 +1102,7 @@ static void ipu_err_irq_handler(struct irq_desc *desc)
 {
        struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
        struct irq_chip *chip = irq_desc_get_chip(desc);
-       const int int_reg[] = { 4, 5, 8, 9};
+       static const int int_reg[] = { 4, 5, 8, 9};
 
        chained_irq_enter(chip, desc);
 
index bb9c087e6c0d2e81dddabc47f4174a91af67220c..9f2d9ec42add6731069cad774762efc06eab0139 100644 (file)
@@ -788,12 +788,14 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
        case V4L2_PIX_FMT_SGBRG8:
        case V4L2_PIX_FMT_SGRBG8:
        case V4L2_PIX_FMT_SRGGB8:
+       case V4L2_PIX_FMT_GREY:
                offset = image->rect.left + image->rect.top * pix->bytesperline;
                break;
        case V4L2_PIX_FMT_SBGGR16:
        case V4L2_PIX_FMT_SGBRG16:
        case V4L2_PIX_FMT_SGRBG16:
        case V4L2_PIX_FMT_SRGGB16:
+       case V4L2_PIX_FMT_Y16:
                offset = image->rect.left * 2 +
                         image->rect.top * pix->bytesperline;
                break;
index 24e12b87a0cbe387b24557a230a1ed1d05e150ca..caa05b0702e1671c39c15edff2e3801a37e83074 100644 (file)
@@ -288,6 +288,7 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
        case MEDIA_BUS_FMT_SGBRG10_1X10:
        case MEDIA_BUS_FMT_SGRBG10_1X10:
        case MEDIA_BUS_FMT_SRGGB10_1X10:
+       case MEDIA_BUS_FMT_Y10_1X10:
                cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
                cfg->mipi_dt = MIPI_DT_RAW10;
                cfg->data_width = IPU_CSI_DATA_WIDTH_10;
@@ -296,6 +297,7 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
        case MEDIA_BUS_FMT_SGBRG12_1X12:
        case MEDIA_BUS_FMT_SGRBG12_1X12:
        case MEDIA_BUS_FMT_SRGGB12_1X12:
+       case MEDIA_BUS_FMT_Y12_1X12:
                cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
                cfg->mipi_dt = MIPI_DT_RAW12;
                cfg->data_width = IPU_CSI_DATA_WIDTH_12;
index f1cec3d70498ab526f9765f66422e0835f338dc0..0f70e88475409f8795dfdb6f36e5a1a452badc7d 100644 (file)
@@ -129,11 +129,14 @@ ipu_pre_lookup_by_phandle(struct device *dev, const char *name, int index)
                if (pre_node == pre->dev->of_node) {
                        mutex_unlock(&ipu_pre_list_mutex);
                        device_link_add(dev, pre->dev, DL_FLAG_AUTOREMOVE);
+                       of_node_put(pre_node);
                        return pre;
                }
        }
        mutex_unlock(&ipu_pre_list_mutex);
 
+       of_node_put(pre_node);
+
        return NULL;
 }
 
index 067365c733c63b257f3be41102d7f54354d52f18..97b99500153d3e1477e40e3c0c4d5958c37a9c71 100644 (file)
@@ -102,11 +102,14 @@ ipu_prg_lookup_by_phandle(struct device *dev, const char *name, int ipu_id)
                        mutex_unlock(&ipu_prg_list_mutex);
                        device_link_add(dev, prg->dev, DL_FLAG_AUTOREMOVE);
                        prg->id = ipu_id;
+                       of_node_put(prg_node);
                        return prg;
                }
        }
        mutex_unlock(&ipu_prg_list_mutex);
 
+       of_node_put(prg_node);
+
        return NULL;
 }
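
Both lookup helpers obtain the target node from of_parse_phandle() earlier in the function (outside these hunks), which returns it with a reference held; the fix drops that reference on every exit path. The usual balanced pattern, sketched with a hypothetical matcher:

    #include <linux/device.h>
    #include <linux/of.h>

    static bool foo_provider_matches(struct device *dev, const char *name,
                                     struct device_node *provider)
    {
            struct device_node *node;
            bool match;

            /* of_parse_phandle() returns the target with a reference held. */
            node = of_parse_phandle(dev->of_node, name, 0);
            if (!node)
                    return false;

            match = (node == provider);

            /* Drop the reference on every exit path once the node is done with. */
            of_node_put(node);
            return match;
    }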
 
index 43ddcdfbd0da415b4fc397a89582290f3f37c786..9454ac134ce22fee3658d210c4046dbe2d0c5626 100644 (file)
 #define USB_DEVICE_ID_LD_MICROCASSYTIME                0x1033
 #define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035
 #define USB_DEVICE_ID_LD_MICROCASSYPH          0x1038
+#define USB_DEVICE_ID_LD_POWERANALYSERCASSY    0x1040
+#define USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY      0x1042
+#define USB_DEVICE_ID_LD_MACHINETESTCASSY      0x1043
 #define USB_DEVICE_ID_LD_JWM           0x1080
 #define USB_DEVICE_ID_LD_DMMP          0x1081
 #define USB_DEVICE_ID_LD_UMIP          0x1090
index 5f6035a5ce367a947aa8fb773de8b39c5bd22bd7..e92b77fa574a940439ec8059265ba440dc91b993 100644 (file)
@@ -809,6 +809,9 @@ static const struct hid_device_id hid_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERANALYSERCASSY) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETESTCASSY) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) },
index a9805c7cb305ac56162519a65b4398cb2f39fd73..e2954fb86d659f366a250ab65640ed2e0ad002ec 100644 (file)
@@ -123,8 +123,10 @@ config I2C_I801
            Wildcat Point (PCH)
            Wildcat Point-LP (PCH)
            BayTrail (SOC)
+           Braswell (SOC)
            Sunrise Point-H (PCH)
            Sunrise Point-LP (PCH)
+           Kaby Lake-H (PCH)
            DNV (SOC)
            Broxton (SOC)
            Lewisburg (PCH)
index cd07a69e2e9355540442785f95e90823b05c9d10..44deae78913e5fa259927d1b5e8b6bf9aee60138 100644 (file)
@@ -50,6 +50,9 @@
 #define BCM2835_I2C_S_CLKT     BIT(9)
 #define BCM2835_I2C_S_LEN      BIT(10) /* Fake bit for SW error reporting */
 
+#define BCM2835_I2C_FEDL_SHIFT 16
+#define BCM2835_I2C_REDL_SHIFT 0
+
 #define BCM2835_I2C_CDIV_MIN   0x0002
 #define BCM2835_I2C_CDIV_MAX   0xFFFE
 
@@ -81,7 +84,7 @@ static inline u32 bcm2835_i2c_readl(struct bcm2835_i2c_dev *i2c_dev, u32 reg)
 
 static int bcm2835_i2c_set_divider(struct bcm2835_i2c_dev *i2c_dev)
 {
-       u32 divider;
+       u32 divider, redl, fedl;
 
        divider = DIV_ROUND_UP(clk_get_rate(i2c_dev->clk),
                               i2c_dev->bus_clk_rate);
@@ -100,6 +103,22 @@ static int bcm2835_i2c_set_divider(struct bcm2835_i2c_dev *i2c_dev)
 
        bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DIV, divider);
 
+       /*
+        * Number of core clocks to wait after falling edge before
+        * outputting the next data bit.  Note that both FEDL and REDL
+        * can't be greater than CDIV/2.
+        */
+       fedl = max(divider / 16, 1u);
+
+       /*
+        * Number of core clocks to wait after rising edge before
+        * sampling the next incoming data bit.
+        */
+       redl = max(divider / 4, 1u);
+
+       bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DEL,
+                          (fedl << BCM2835_I2C_FEDL_SHIFT) |
+                          (redl << BCM2835_I2C_REDL_SHIFT));
        return 0;
 }
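
As a worked example of the formulas above (the 150 MHz core clock is only an assumed figure, not a claim about any particular board): a 100 kHz bus gives divider = DIV_ROUND_UP(150000000, 100000) = 1500, so fedl = max(1500 / 16, 1) = 93 and redl = max(1500 / 4, 1) = 375, both below CDIV/2 = 750. The computation as a self-contained sketch:

    #include <linux/kernel.h>

    #define FOO_FEDL_SHIFT  16
    #define FOO_REDL_SHIFT  0

    /* Sketch: derive the DEL register value from the core and bus clocks. */
    static u32 foo_i2c_del(u32 core_hz, u32 bus_hz)
    {
            u32 divider = DIV_ROUND_UP(core_hz, bus_hz);
            u32 fedl = max(divider / 16, 1u);   /* delay after falling edge */
            u32 redl = max(divider / 4, 1u);    /* delay after rising edge  */

            return (fedl << FOO_FEDL_SHIFT) | (redl << FOO_REDL_SHIFT);
    }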
 
index ae691884d07161c9940aad5162d8b6dda6eadd9c..05732531829fe090185d5f133ea2d7958a94e87a 100644 (file)
@@ -209,7 +209,7 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
        i2c_dw_disable_int(dev);
 
        /* Enable the adapter */
-       __i2c_dw_enable(dev, true);
+       __i2c_dw_enable_and_wait(dev, true);
 
        /* Clear and enable interrupts */
        dw_readl(dev, DW_IC_CLR_INTR);
@@ -644,7 +644,7 @@ static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
        gpio = devm_gpiod_get(dev->dev, "scl", GPIOD_OUT_HIGH);
        if (IS_ERR(gpio)) {
                r = PTR_ERR(gpio);
-               if (r == -ENOENT)
+               if (r == -ENOENT || r == -ENOSYS)
                        return 0;
                return r;
        }
index 8eac00efadc1ad8f8e477094e26790a1ec317117..692b341258667493a4eb0ae1996528d5fee52c3b 100644 (file)
@@ -58,6 +58,7 @@
  * Wildcat Point (PCH)         0x8ca2  32      hard    yes     yes     yes
  * Wildcat Point-LP (PCH)      0x9ca2  32      hard    yes     yes     yes
  * BayTrail (SOC)              0x0f12  32      hard    yes     yes     yes
+ * Braswell (SOC)              0x2292  32      hard    yes     yes     yes
  * Sunrise Point-H (PCH)       0xa123  32      hard    yes     yes     yes
  * Sunrise Point-LP (PCH)      0x9d23  32      hard    yes     yes     yes
  * DNV (SOC)                   0x19df  32      hard    yes     yes     yes
index 2fd8b6d0039106976ced7443b184b5dcacd81576..87197ece0f9034192aaf36e4baa3520d8eeb772f 100644 (file)
@@ -341,7 +341,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, adap);
        init_completion(&siic->done);
 
-       /* Controller Initalisation */
+       /* Controller initialisation */
 
        writel(SIRFSOC_I2C_RESET, siic->base + SIRFSOC_I2C_CTRL);
        while (readl(siic->base + SIRFSOC_I2C_CTRL) & SIRFSOC_I2C_RESET)
@@ -369,7 +369,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
         * but they start to affect the speed when clock is set to faster
         * frequencies.
         * Through the actual tests, use the different user_div value(which
-        * in the divider formular 'Fio / (Fi2c * user_div)') to adapt
+        * in the divider formula 'Fio / (Fi2c * user_div)') to adapt
         * the different ranges of i2c bus clock frequency, to make the SCL
         * more accurate.
         */
index 17fd55af4d9247ee89c0b581c5de3ab3437860a2..caa20eb5f26b03c7e02569e54dbb599fd2e73ec1 100644 (file)
@@ -928,7 +928,7 @@ static int exact_lock(dev_t dev, void *data)
 {
        struct gendisk *p = data;
 
-       if (!get_disk(p))
+       if (!get_disk_and_module(p))
                return -1;
        return 0;
 }
index 327a49ba19916e1405c41980ef7677f5bf7a8bdb..9515ca165dfdbc17258be4b0c578efb851a883a5 100644 (file)
@@ -243,7 +243,7 @@ static int aspeed_adc_probe(struct platform_device *pdev)
                                         ASPEED_ADC_INIT_POLLING_TIME,
                                         ASPEED_ADC_INIT_TIMEOUT);
                if (ret)
-                       goto scaler_error;
+                       goto poll_timeout_error;
        }
 
        /* Start all channels in normal mode. */
@@ -274,9 +274,10 @@ iio_register_error:
        writel(ASPEED_OPERATION_MODE_POWER_DOWN,
                data->base + ASPEED_REG_ENGINE_CONTROL);
        clk_disable_unprepare(data->clk_scaler->clk);
-reset_error:
-       reset_control_assert(data->rst);
 clk_enable_error:
+poll_timeout_error:
+       reset_control_assert(data->rst);
+reset_error:
        clk_hw_unregister_divider(data->clk_scaler);
 scaler_error:
        clk_hw_unregister_divider(data->clk_prescaler);
index 7f5def465340e6444e8023a2e11d8b0b877d89b0..9a2583caedaaefd8c028081ff947c04a5f612496 100644 (file)
@@ -722,8 +722,6 @@ static int stm32h7_adc_enable(struct stm32_adc *adc)
        int ret;
        u32 val;
 
-       /* Clear ADRDY by writing one, then enable ADC */
-       stm32_adc_set_bits(adc, STM32H7_ADC_ISR, STM32H7_ADRDY);
        stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADEN);
 
        /* Poll for ADRDY to be set (after adc startup time) */
@@ -731,8 +729,11 @@ static int stm32h7_adc_enable(struct stm32_adc *adc)
                                           val & STM32H7_ADRDY,
                                           100, STM32_ADC_TIMEOUT_US);
        if (ret) {
-               stm32_adc_clr_bits(adc, STM32H7_ADC_CR, STM32H7_ADEN);
+               stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADDIS);
                dev_err(&indio_dev->dev, "Failed to enable ADC\n");
+       } else {
+               /* Clear ADRDY by writing one */
+               stm32_adc_set_bits(adc, STM32H7_ADC_ISR, STM32H7_ADRDY);
        }
 
        return ret;
index 0dd5a381be64f6db3e538553cc030a664b8d3ccd..457372f36791b9efa6787a53f823fda62d4ca5d1 100644 (file)
@@ -46,6 +46,10 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
        if (adis->trig == NULL)
                return -ENOMEM;
 
+       adis->trig->dev.parent = &adis->spi->dev;
+       adis->trig->ops = &adis_trigger_ops;
+       iio_trigger_set_drvdata(adis->trig, adis);
+
        ret = request_irq(adis->spi->irq,
                          &iio_trigger_generic_data_rdy_poll,
                          IRQF_TRIGGER_RISING,
@@ -54,9 +58,6 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
        if (ret)
                goto error_free_trig;
 
-       adis->trig->dev.parent = &adis->spi->dev;
-       adis->trig->ops = &adis_trigger_ops;
-       iio_trigger_set_drvdata(adis->trig, adis);
        ret = iio_trigger_register(adis->trig);
 
        indio_dev->trig = iio_trigger_get(adis->trig);
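
The reorder matters because request_irq() arms the line immediately: the generic data-ready handler dereferences the trigger, so its parent, ops and drvdata must be populated first. The safe ordering, sketched with placeholder names:

    #include <linux/interrupt.h>
    #include <linux/iio/trigger.h>
    #include <linux/iio/trigger_consumer.h>

    static int foo_probe_trigger(struct iio_trigger *trig, struct device *parent,
                                 const struct iio_trigger_ops *ops, void *drvdata,
                                 int irq, const char *name)
    {
            /* Fully initialise the trigger before the IRQ can fire... */
            trig->dev.parent = parent;
            trig->ops = ops;
            iio_trigger_set_drvdata(trig, drvdata);

            /* ...and only then request the interrupt. */
            return request_irq(irq, &iio_trigger_generic_data_rdy_poll,
                               IRQF_TRIGGER_RISING, name, trig);
    }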
index 79abf70a126ddde78e0a60fe5adecb829f0423ce..cd5bfe39591bb2b2d44b3848cc2d84ef2d4a38f2 100644 (file)
@@ -175,7 +175,7 @@ __poll_t iio_buffer_poll(struct file *filp,
        struct iio_dev *indio_dev = filp->private_data;
        struct iio_buffer *rb = indio_dev->buffer;
 
-       if (!indio_dev->info)
+       if (!indio_dev->info || rb == NULL)
                return 0;
 
        poll_wait(filp, &rb->pollq, wait);
index fcb1c4ba5e4143d2c8457b5a436355ca2158899a..f726f9427602f9ddff3820797ac40111702c937c 100644 (file)
@@ -68,6 +68,8 @@ config SX9500
 
 config SRF08
        tristate "Devantech SRF02/SRF08/SRF10 ultrasonic ranger sensor"
+       select IIO_BUFFER
+       select IIO_TRIGGERED_BUFFER
        depends on I2C
        help
          Say Y here to build a driver for Devantech SRF02/SRF08/SRF10
index c4560d84dfaebd550f504f2ed6b5f30bfb7ad520..25bb178f60742233c018dc7edb51c0e6520689af 100644 (file)
@@ -305,16 +305,21 @@ void nldev_exit(void);
 static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
                                          struct ib_pd *pd,
                                          struct ib_qp_init_attr *attr,
-                                         struct ib_udata *udata)
+                                         struct ib_udata *udata,
+                                         struct ib_uobject *uobj)
 {
        struct ib_qp *qp;
 
+       if (!dev->create_qp)
+               return ERR_PTR(-EOPNOTSUPP);
+
        qp = dev->create_qp(pd, attr, udata);
        if (IS_ERR(qp))
                return qp;
 
        qp->device = dev;
        qp->pd = pd;
+       qp->uobject = uobj;
        /*
         * We don't track XRC QPs for now, because they don't have PD
         * and more importantly they are created internaly by driver,
index 85b5ee4defa4bed1cabaeaa1fe0f1121bc91c242..d8eead5d106df4326c476699cb44ee8756ddeec1 100644 (file)
@@ -141,7 +141,12 @@ static struct ib_uobject *alloc_uobj(struct ib_ucontext *context,
         */
        uobj->context = context;
        uobj->type = type;
-       atomic_set(&uobj->usecnt, 0);
+       /*
+        * Allocated objects start out as write locked to deny any other
+        * syscalls from accessing them until they are committed. See
+        * rdma_alloc_commit_uobject
+        */
+       atomic_set(&uobj->usecnt, -1);
        kref_init(&uobj->ref);
 
        return uobj;
@@ -196,7 +201,15 @@ static struct ib_uobject *lookup_get_idr_uobject(const struct uverbs_obj_type *t
                goto free;
        }
 
-       uverbs_uobject_get(uobj);
+       /*
+        * The idr_find is guaranteed to return a pointer to something that
+        * isn't freed yet, or NULL, as the free after idr_remove goes through
+        * kfree_rcu(). However the object may still have been released and
+        * kfree() could be called at any time.
+        */
+       if (!kref_get_unless_zero(&uobj->ref))
+               uobj = ERR_PTR(-ENOENT);
+
 free:
        rcu_read_unlock();
        return uobj;
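
idr_find() under rcu_read_lock() can return an object whose last reference is already being dropped; the memory stays valid only because the free goes through kfree_rcu(). kref_get_unless_zero() refuses to resurrect such a dying object. The general lookup shape, sketched with a hypothetical object type:

    #include <linux/idr.h>
    #include <linux/kref.h>
    #include <linux/rcupdate.h>
    #include <linux/err.h>

    struct foo_obj {
            struct kref ref;
            /* ... */
    };

    static struct foo_obj *foo_lookup(struct idr *idr, int id)
    {
            struct foo_obj *obj;

            rcu_read_lock();
            obj = idr_find(idr, id);
            /* Valid memory under RCU, but possibly already released: only
             * take a reference if the refcount has not reached zero. */
            if (obj && !kref_get_unless_zero(&obj->ref))
                    obj = NULL;
            rcu_read_unlock();

            return obj ? obj : ERR_PTR(-ENOENT);
    }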
@@ -399,13 +412,13 @@ static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj,
        return ret;
 }
 
-static void lockdep_check(struct ib_uobject *uobj, bool exclusive)
+static void assert_uverbs_usecnt(struct ib_uobject *uobj, bool exclusive)
 {
 #ifdef CONFIG_LOCKDEP
        if (exclusive)
-               WARN_ON(atomic_read(&uobj->usecnt) > 0);
+               WARN_ON(atomic_read(&uobj->usecnt) != -1);
        else
-               WARN_ON(atomic_read(&uobj->usecnt) == -1);
+               WARN_ON(atomic_read(&uobj->usecnt) <= 0);
 #endif
 }
 
@@ -444,7 +457,7 @@ int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj)
                WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
                return 0;
        }
-       lockdep_check(uobj, true);
+       assert_uverbs_usecnt(uobj, true);
        ret = _rdma_remove_commit_uobject(uobj, RDMA_REMOVE_DESTROY);
 
        up_read(&ucontext->cleanup_rwsem);
@@ -474,16 +487,17 @@ int rdma_explicit_destroy(struct ib_uobject *uobject)
                WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
                return 0;
        }
-       lockdep_check(uobject, true);
+       assert_uverbs_usecnt(uobject, true);
        ret = uobject->type->type_class->remove_commit(uobject,
                                                       RDMA_REMOVE_DESTROY);
        if (ret)
-               return ret;
+               goto out;
 
        uobject->type = &null_obj_type;
 
+out:
        up_read(&ucontext->cleanup_rwsem);
-       return 0;
+       return ret;
 }
 
 static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
@@ -527,6 +541,10 @@ int rdma_alloc_commit_uobject(struct ib_uobject *uobj)
                return ret;
        }
 
+       /* matches atomic_set(-1) in alloc_uobj */
+       assert_uverbs_usecnt(uobj, true);
+       atomic_set(&uobj->usecnt, 0);
+
        uobj->type->type_class->alloc_commit(uobj);
        up_read(&uobj->context->cleanup_rwsem);
 
@@ -561,7 +579,7 @@ static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool exclusive)
 
 void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive)
 {
-       lockdep_check(uobj, exclusive);
+       assert_uverbs_usecnt(uobj, exclusive);
        uobj->type->type_class->lookup_put(uobj, exclusive);
        /*
         * In order to unlock an object, either decrease its usecnt for
index 857637bf46da275262f3d4c023b27411f86e9e40..3dbc4e4cca415fd94a24443e44d1430a5cbe63d7 100644 (file)
@@ -7,7 +7,6 @@
 #include <rdma/restrack.h>
 #include <linux/mutex.h>
 #include <linux/sched/task.h>
-#include <linux/uaccess.h>
 #include <linux/pid_namespace.h>
 
 void rdma_restrack_init(struct rdma_restrack_root *res)
@@ -63,7 +62,6 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
 {
        enum rdma_restrack_type type = res->type;
        struct ib_device *dev;
-       struct ib_xrcd *xrcd;
        struct ib_pd *pd;
        struct ib_cq *cq;
        struct ib_qp *qp;
@@ -81,10 +79,6 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
                qp = container_of(res, struct ib_qp, res);
                dev = qp->device;
                break;
-       case RDMA_RESTRACK_XRCD:
-               xrcd = container_of(res, struct ib_xrcd, res);
-               dev = xrcd->device;
-               break;
        default:
                WARN_ONCE(true, "Wrong resource tracking type %u\n", type);
                return NULL;
@@ -93,6 +87,21 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
        return dev;
 }
 
+static bool res_is_user(struct rdma_restrack_entry *res)
+{
+       switch (res->type) {
+       case RDMA_RESTRACK_PD:
+               return container_of(res, struct ib_pd, res)->uobject;
+       case RDMA_RESTRACK_CQ:
+               return container_of(res, struct ib_cq, res)->uobject;
+       case RDMA_RESTRACK_QP:
+               return container_of(res, struct ib_qp, res)->uobject;
+       default:
+               WARN_ONCE(true, "Wrong resource tracking type %u\n", res->type);
+               return false;
+       }
+}
+
 void rdma_restrack_add(struct rdma_restrack_entry *res)
 {
        struct ib_device *dev = res_to_dev(res);
@@ -100,7 +109,7 @@ void rdma_restrack_add(struct rdma_restrack_entry *res)
        if (!dev)
                return;
 
-       if (!uaccess_kernel()) {
+       if (res_is_user(res)) {
                get_task_struct(current);
                res->task = current;
                res->kern_name = NULL;
index 256934d1f64fba0e6461f857d372e31f82049b4c..a148de35df8d4008bf2534c4920f6ff075c0e698 100644 (file)
@@ -562,9 +562,10 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
        if (f.file)
                fdput(f);
 
+       mutex_unlock(&file->device->xrcd_tree_mutex);
+
        uobj_alloc_commit(&obj->uobject);
 
-       mutex_unlock(&file->device->xrcd_tree_mutex);
        return in_len;
 
 err_copy:
@@ -603,10 +604,8 @@ ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
 
        uobj  = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle,
                               file->ucontext);
-       if (IS_ERR(uobj)) {
-               mutex_unlock(&file->device->xrcd_tree_mutex);
+       if (IS_ERR(uobj))
                return PTR_ERR(uobj);
-       }
 
        ret = uobj_remove_commit(uobj);
        return ret ?: in_len;
@@ -979,6 +978,9 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
        struct ib_uverbs_ex_create_cq_resp resp;
        struct ib_cq_init_attr attr = {};
 
+       if (!ib_dev->create_cq)
+               return ERR_PTR(-EOPNOTSUPP);
+
        if (cmd->comp_vector >= file->device->num_comp_vectors)
                return ERR_PTR(-EINVAL);
 
@@ -1030,14 +1032,14 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
        resp.response_length = offsetof(typeof(resp), response_length) +
                sizeof(resp.response_length);
 
+       cq->res.type = RDMA_RESTRACK_CQ;
+       rdma_restrack_add(&cq->res);
+
        ret = cb(file, obj, &resp, ucore, context);
        if (ret)
                goto err_cb;
 
        uobj_alloc_commit(&obj->uobject);
-       cq->res.type = RDMA_RESTRACK_CQ;
-       rdma_restrack_add(&cq->res);
-
        return obj;
 
 err_cb:
@@ -1518,7 +1520,8 @@ static int create_qp(struct ib_uverbs_file *file,
        if (cmd->qp_type == IB_QPT_XRC_TGT)
                qp = ib_create_qp(pd, &attr);
        else
-               qp = _ib_create_qp(device, pd, &attr, uhw);
+               qp = _ib_create_qp(device, pd, &attr, uhw,
+                                  &obj->uevent.uobject);
 
        if (IS_ERR(qp)) {
                ret = PTR_ERR(qp);
@@ -1550,8 +1553,10 @@ static int create_qp(struct ib_uverbs_file *file,
                        atomic_inc(&attr.srq->usecnt);
                if (ind_tbl)
                        atomic_inc(&ind_tbl->usecnt);
+       } else {
+               /* It is done in _ib_create_qp for other QP types */
+               qp->uobject = &obj->uevent.uobject;
        }
-       qp->uobject = &obj->uevent.uobject;
 
        obj->uevent.uobject.object = qp;
 
@@ -1971,8 +1976,15 @@ static int modify_qp(struct ib_uverbs_file *file,
                goto release_qp;
        }
 
+       if ((cmd->base.attr_mask & IB_QP_AV) &&
+           !rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
+               ret = -EINVAL;
+               goto release_qp;
+       }
+
        if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
-           !rdma_is_port_valid(qp->device, cmd->base.alt_port_num)) {
+           (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
+           !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num))) {
                ret = -EINVAL;
                goto release_qp;
        }
@@ -2941,6 +2953,11 @@ int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
                wq_init_attr.create_flags = cmd.create_flags;
        obj->uevent.events_reported = 0;
        INIT_LIST_HEAD(&obj->uevent.event_list);
+
+       if (!pd->device->create_wq) {
+               err = -EOPNOTSUPP;
+               goto err_put_cq;
+       }
        wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
        if (IS_ERR(wq)) {
                err = PTR_ERR(wq);
@@ -3084,7 +3101,12 @@ int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
                wq_attr.flags = cmd.flags;
                wq_attr.flags_mask = cmd.flags_mask;
        }
+       if (!wq->device->modify_wq) {
+               ret = -EOPNOTSUPP;
+               goto out;
+       }
        ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
+out:
        uobj_put_obj_read(wq);
        return ret;
 }
@@ -3181,6 +3203,11 @@ int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
 
        init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
        init_attr.ind_tbl = wqs;
+
+       if (!ib_dev->create_rwq_ind_table) {
+               err = -EOPNOTSUPP;
+               goto err_uobj;
+       }
        rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);
 
        if (IS_ERR(rwq_ind_tbl)) {
@@ -3770,6 +3797,9 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
        struct ib_device_attr attr = {0};
        int err;
 
+       if (!ib_dev->query_device)
+               return -EOPNOTSUPP;
+
        if (ucore->inlen < sizeof(cmd))
                return -EINVAL;
 
index d96dc1d17be189a287475a076929af9b9cedd7f1..339b851450446567d34780aec5779d0aa76fedeb 100644 (file)
@@ -59,6 +59,9 @@ static int uverbs_process_attr(struct ib_device *ibdev,
                        return 0;
        }
 
+       if (test_bit(attr_id, attr_bundle_h->valid_bitmap))
+               return -EINVAL;
+
        spec = &attr_spec_bucket->attrs[attr_id];
        e = &elements[attr_id];
        e->uattr = uattr_ptr;
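
The added test_bit() rejects a command that supplies the same attribute ID twice before the first occurrence can be silently overwritten. The bitmap idiom in isolation (a generic sketch, not the driver's exact flow):

    #include <linux/bitops.h>
    #include <linux/errno.h>

    /* Sketch: accept each attribute ID at most once per bundle. */
    static int foo_claim_attr(unsigned long *valid_bitmap, unsigned int attr_id)
    {
            if (test_bit(attr_id, valid_bitmap))
                    return -EINVAL; /* duplicate attribute in the same command */

            set_bit(attr_id, valid_bitmap);
            return 0;
    }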
index 062485f9300dc05fdd581fcf487032bafe9317d7..62e1eb1d2a28ad1bd854699c62a1da6149443286 100644 (file)
@@ -114,6 +114,7 @@ static size_t get_elements_above_id(const void **iters,
        short min = SHRT_MAX;
        const void *elem;
        int i, j, last_stored = -1;
+       unsigned int equal_min = 0;
 
        for_each_element(elem, i, j, elements, num_elements, num_offset,
                         data_offset) {
@@ -136,6 +137,10 @@ static size_t get_elements_above_id(const void **iters,
                 */
                iters[last_stored == i ? num_iters - 1 : num_iters++] = elem;
                last_stored = i;
+               if (min == GET_ID(id))
+                       equal_min++;
+               else
+                       equal_min = 1;
                min = GET_ID(id);
        }
 
@@ -146,15 +151,10 @@ static size_t get_elements_above_id(const void **iters,
         * Therefore, we need to clean the beginning of the array to make sure
         * all ids of final elements are equal to min.
         */
-       for (i = num_iters - 1; i >= 0 &&
-            GET_ID(*(u16 *)(iters[i] + id_offset)) == min; i--)
-               ;
-
-       num_iters -= i + 1;
-       memmove(iters, iters + i + 1, sizeof(*iters) * num_iters);
+       memmove(iters, iters + num_iters - equal_min, sizeof(*iters) * equal_min);
 
        *min_id = min;
-       return num_iters;
+       return equal_min;
 }
 
 #define find_max_element_entry_id(num_elements, elements, num_objects_fld, \
@@ -322,7 +322,7 @@ static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_me
                hash = kzalloc(sizeof(*hash) +
                               ALIGN(sizeof(*hash->attrs) * (attr_max_bucket + 1),
                                     sizeof(long)) +
-                              BITS_TO_LONGS(attr_max_bucket) * sizeof(long),
+                              BITS_TO_LONGS(attr_max_bucket + 1) * sizeof(long),
                               GFP_KERNEL);
                if (!hash) {
                        res = -ENOMEM;
@@ -509,7 +509,7 @@ static struct uverbs_object_spec *build_object_with_methods(const struct uverbs_
                         * first handler which != NULL. This also defines the
                         * set of flags used for this handler.
                         */
-                       for (i = num_object_defs - 1;
+                       for (i = num_method_defs - 1;
                             i >= 0 && !method_defs[i]->handler; i--)
                                ;
                        hash->methods[min_id++] = method;
index 395a3b091229fbec064e17698e8dcc5967b9f9e5..b1ca223aa3808425cb0b435c65478b7fce85371f 100644 (file)
@@ -650,12 +650,21 @@ static int verify_command_mask(struct ib_device *ib_dev, __u32 command)
        return -1;
 }
 
+static bool verify_command_idx(u32 command, bool extended)
+{
+       if (extended)
+               return command < ARRAY_SIZE(uverbs_ex_cmd_table);
+
+       return command < ARRAY_SIZE(uverbs_cmd_table);
+}
+
 static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
                             size_t count, loff_t *pos)
 {
        struct ib_uverbs_file *file = filp->private_data;
        struct ib_device *ib_dev;
        struct ib_uverbs_cmd_hdr hdr;
+       bool extended_command;
        __u32 command;
        __u32 flags;
        int srcu_key;
@@ -688,6 +697,15 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
        }
 
        command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
+       flags = (hdr.command &
+                IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;
+
+       extended_command = flags & IB_USER_VERBS_CMD_FLAG_EXTENDED;
+       if (!verify_command_idx(command, extended_command)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        if (verify_command_mask(ib_dev, command)) {
                ret = -EOPNOTSUPP;
                goto out;
@@ -699,12 +717,8 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
                goto out;
        }
 
-       flags = (hdr.command &
-                IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;
-
        if (!flags) {
-               if (command >= ARRAY_SIZE(uverbs_cmd_table) ||
-                   !uverbs_cmd_table[command]) {
+               if (!uverbs_cmd_table[command]) {
                        ret = -EINVAL;
                        goto out;
                }
@@ -725,8 +739,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
                struct ib_udata uhw;
                size_t written_count = count;
 
-               if (command >= ARRAY_SIZE(uverbs_ex_cmd_table) ||
-                   !uverbs_ex_cmd_table[command]) {
+               if (!uverbs_ex_cmd_table[command]) {
                        ret = -ENOSYS;
                        goto out;
                }
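
Moving the ARRAY_SIZE() comparisons into verify_command_idx() means the command index is validated once, before either dispatch table is dereferenced, for both the legacy and extended paths. A generic sketch of that guard (table and handler types are placeholders):

    #include <linux/kernel.h>
    #include <linux/errno.h>

    typedef int (*foo_handler_t)(void *ctx);

    static int foo_dispatch(const foo_handler_t *table, size_t table_size,
                            u32 command, void *ctx)
    {
            /* Validate the index before touching the table... */
            if (command >= table_size)
                    return -EINVAL;
            /* ...then check whether a handler is actually wired up. */
            if (!table[command])
                    return -EINVAL;

            return table[command](ctx);
    }

    /* Call-site sketch:
     *   foo_dispatch(cmd_table, ARRAY_SIZE(cmd_table), command, ctx);
     */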
@@ -942,6 +955,7 @@ static const struct file_operations uverbs_fops = {
        .llseek  = no_llseek,
 #if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS)
        .unlocked_ioctl = ib_uverbs_ioctl,
+       .compat_ioctl = ib_uverbs_ioctl,
 #endif
 };
 
@@ -954,6 +968,7 @@ static const struct file_operations uverbs_mmap_fops = {
        .llseek  = no_llseek,
 #if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS)
        .unlocked_ioctl = ib_uverbs_ioctl,
+       .compat_ioctl = ib_uverbs_ioctl,
 #endif
 };
 
index cab0ac3556eb0392b77ef7309b2da732ee06cfc4..df1360e6774f4e3d05631b353cf0f4352037832f 100644 (file)
@@ -234,15 +234,18 @@ static void create_udata(struct uverbs_attr_bundle *ctx,
                uverbs_attr_get(ctx, UVERBS_UHW_OUT);
 
        if (!IS_ERR(uhw_in)) {
-               udata->inbuf = uhw_in->ptr_attr.ptr;
                udata->inlen = uhw_in->ptr_attr.len;
+               if (uverbs_attr_ptr_is_inline(uhw_in))
+                       udata->inbuf = &uhw_in->uattr->data;
+               else
+                       udata->inbuf = u64_to_user_ptr(uhw_in->ptr_attr.data);
        } else {
                udata->inbuf = NULL;
                udata->inlen = 0;
        }
 
        if (!IS_ERR(uhw_out)) {
-               udata->outbuf = uhw_out->ptr_attr.ptr;
+               udata->outbuf = u64_to_user_ptr(uhw_out->ptr_attr.data);
                udata->outlen = uhw_out->ptr_attr.len;
        } else {
                udata->outbuf = NULL;
@@ -323,7 +326,8 @@ static int uverbs_create_cq_handler(struct ib_device *ib_dev,
        cq->res.type = RDMA_RESTRACK_CQ;
        rdma_restrack_add(&cq->res);
 
-       ret = uverbs_copy_to(attrs, CREATE_CQ_RESP_CQE, &cq->cqe);
+       ret = uverbs_copy_to(attrs, CREATE_CQ_RESP_CQE, &cq->cqe,
+                            sizeof(cq->cqe));
        if (ret)
                goto err_cq;
 
@@ -375,7 +379,7 @@ static int uverbs_destroy_cq_handler(struct ib_device *ib_dev,
        resp.comp_events_reported  = obj->comp_events_reported;
        resp.async_events_reported = obj->async_events_reported;
 
-       return uverbs_copy_to(attrs, DESTROY_CQ_RESP, &resp);
+       return uverbs_copy_to(attrs, DESTROY_CQ_RESP, &resp, sizeof(resp));
 }
 
 static DECLARE_UVERBS_METHOD(
index 16ebc6372c31abe449f4e892d26e2d384cec56fc..93025d2009b89f8679a19001357b53bdc4733184 100644 (file)
@@ -887,7 +887,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
        if (qp_init_attr->cap.max_rdma_ctxs)
                rdma_rw_init_qp(device, qp_init_attr);
 
-       qp = _ib_create_qp(device, pd, qp_init_attr, NULL);
+       qp = _ib_create_qp(device, pd, qp_init_attr, NULL, NULL);
        if (IS_ERR(qp))
                return qp;
 
@@ -898,7 +898,6 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
        }
 
        qp->real_qp    = qp;
-       qp->uobject    = NULL;
        qp->qp_type    = qp_init_attr->qp_type;
        qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;
 
index ca32057e886f00af4287cbe586c81866a3367ae4..3eb7a8387116d4653cda1afc75a1ab8cabd29f65 100644 (file)
@@ -120,7 +120,6 @@ struct bnxt_re_dev {
 #define BNXT_RE_FLAG_HAVE_L2_REF               3
 #define BNXT_RE_FLAG_RCFW_CHANNEL_EN           4
 #define BNXT_RE_FLAG_QOS_WORK_REG              5
-#define BNXT_RE_FLAG_TASK_IN_PROG              6
 #define BNXT_RE_FLAG_ISSUE_ROCE_STATS          29
        struct net_device               *netdev;
        unsigned int                    version, major, minor;
@@ -158,6 +157,7 @@ struct bnxt_re_dev {
        atomic_t                        srq_count;
        atomic_t                        mr_count;
        atomic_t                        mw_count;
+       atomic_t                        sched_count;
        /* Max of 2 lossless traffic class supported per port */
        u16                             cosq[2];
 
index ae9e9ff54826b322c97c061d1d6cc51c240f570e..643174d949a8c979a1f5878a258933ba7a939645 100644 (file)
@@ -174,10 +174,8 @@ int bnxt_re_query_device(struct ib_device *ibdev,
        ib_attr->max_pd = dev_attr->max_pd;
        ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
        ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
-       if (dev_attr->is_atomic) {
-               ib_attr->atomic_cap = IB_ATOMIC_HCA;
-               ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
-       }
+       ib_attr->atomic_cap = IB_ATOMIC_NONE;
+       ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
 
        ib_attr->max_ee_rd_atom = 0;
        ib_attr->max_res_rd_atom = 0;
@@ -787,20 +785,51 @@ int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
        return 0;
 }
 
+static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
+       __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&qp->scq->cq_lock, flags);
+       if (qp->rcq != qp->scq)
+               spin_lock(&qp->rcq->cq_lock);
+       else
+               __acquire(&qp->rcq->cq_lock);
+
+       return flags;
+}
+
+static void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
+                              unsigned long flags)
+       __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
+{
+       if (qp->rcq != qp->scq)
+               spin_unlock(&qp->rcq->cq_lock);
+       else
+               __release(&qp->rcq->cq_lock);
+       spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
+}
+
 /* Queue Pairs */
 int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
 {
        struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
        struct bnxt_re_dev *rdev = qp->rdev;
        int rc;
+       unsigned int flags;
 
        bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
-       bnxt_qplib_del_flush_qp(&qp->qplib_qp);
        rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
        if (rc) {
                dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
                return rc;
        }
+
+       flags = bnxt_re_lock_cqs(qp);
+       bnxt_qplib_clean_qp(&qp->qplib_qp);
+       bnxt_re_unlock_cqs(qp, flags);
+       bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
+
        if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
                rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
                                           &rdev->sqp_ah->qplib_ah);
@@ -810,7 +839,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
                        return rc;
                }
 
-               bnxt_qplib_del_flush_qp(&qp->qplib_qp);
+               bnxt_qplib_clean_qp(&qp->qplib_qp);
                rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
                                           &rdev->qp1_sqp->qplib_qp);
                if (rc) {
@@ -1069,6 +1098,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
                        goto fail;
                }
                qp->qplib_qp.scq = &cq->qplib_cq;
+               qp->scq = cq;
        }
 
        if (qp_init_attr->recv_cq) {
@@ -1080,6 +1110,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
                        goto fail;
                }
                qp->qplib_qp.rcq = &cq->qplib_cq;
+               qp->rcq = cq;
        }
 
        if (qp_init_attr->srq) {
@@ -1185,7 +1216,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
                rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
                if (rc) {
                        dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
-                       goto fail;
+                       goto free_umem;
                }
        }
 
@@ -1213,6 +1244,13 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
        return &qp->ib_qp;
 qp_destroy:
        bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
+free_umem:
+       if (udata) {
+               if (qp->rumem)
+                       ib_umem_release(qp->rumem);
+               if (qp->sumem)
+                       ib_umem_release(qp->sumem);
+       }
 fail:
        kfree(qp);
        return ERR_PTR(rc);
@@ -1603,7 +1641,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
                        dev_dbg(rdev_to_dev(rdev),
                                "Move QP = %p out of flush list\n",
                                qp);
-                       bnxt_qplib_del_flush_qp(&qp->qplib_qp);
+                       bnxt_qplib_clean_qp(&qp->qplib_qp);
                }
        }
        if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
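
bnxt_re_lock_cqs()/bnxt_re_unlock_cqs() above take both completion-queue locks, or just one when the send and receive CQ are the same object, so the QP can be scrubbed from them atomically; the __acquire()/__release() calls only keep sparse's lock-balance checking happy. A stripped-down sketch of the same dual-lock shape with plain spinlocks:

    #include <linux/spinlock.h>

    static unsigned long foo_lock_pair(spinlock_t *a, spinlock_t *b)
            __acquires(a) __acquires(b)
    {
            unsigned long flags;

            spin_lock_irqsave(a, flags);
            if (b != a)
                    spin_lock(b);   /* nested under the first lock */
            else
                    __acquire(b);   /* tell sparse we "hold" it anyway */
            return flags;
    }

    static void foo_unlock_pair(spinlock_t *a, spinlock_t *b, unsigned long flags)
            __releases(a) __releases(b)
    {
            if (b != a)
                    spin_unlock(b);
            else
                    __release(b);
            spin_unlock_irqrestore(a, flags);
    }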
index 423ebe012f957fa5f1007e204f5831725ec885aa..b88a48d43a9dddd49c210d0a16779f3d4d0369b9 100644 (file)
@@ -89,6 +89,8 @@ struct bnxt_re_qp {
        /* QP1 */
        u32                     send_psn;
        struct ib_ud_header     qp1_hdr;
+       struct bnxt_re_cq       *scq;
+       struct bnxt_re_cq       *rcq;
 };
 
 struct bnxt_re_cq {
index 508d00a5a1066cabd3e6ac7fc4855abbf19235e8..33a448036c2ebd99217a40b9bb9d6372a798af0d 100644 (file)
@@ -656,7 +656,6 @@ static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
        mutex_unlock(&bnxt_re_dev_lock);
 
        synchronize_rcu();
-       flush_workqueue(bnxt_re_wq);
 
        ib_dealloc_device(&rdev->ibdev);
        /* rdev is gone */
@@ -1441,7 +1440,7 @@ static void bnxt_re_task(struct work_struct *work)
                break;
        }
        smp_mb__before_atomic();
-       clear_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags);
+       atomic_dec(&rdev->sched_count);
        kfree(re_work);
 }
 
@@ -1503,7 +1502,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
                /* netdev notifier will call NETDEV_UNREGISTER again later since
                 * we are still holding the reference to the netdev
                 */
-               if (test_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags))
+               if (atomic_read(&rdev->sched_count) > 0)
                        goto exit;
                bnxt_re_ib_unreg(rdev, false);
                bnxt_re_remove_one(rdev);
@@ -1523,7 +1522,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
                        re_work->vlan_dev = (real_dev == netdev ?
                                             NULL : netdev);
                        INIT_WORK(&re_work->work, bnxt_re_task);
-                       set_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags);
+                       atomic_inc(&rdev->sched_count);
                        queue_work(bnxt_re_wq, &re_work->work);
                }
        }
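
A single flag bit cannot represent more than one outstanding work item, so the driver now counts scheduled tasks with an atomic_t: incremented before queue_work(), decremented at the end of the work function, and compared against zero in the netdev notifier. The counting pattern on its own (names are placeholders):

    #include <linux/atomic.h>
    #include <linux/workqueue.h>

    static atomic_t foo_sched_count = ATOMIC_INIT(0);

    static void foo_task(struct work_struct *work)
    {
            /* ... perform the deferred work ... */

            smp_mb__before_atomic();
            atomic_dec(&foo_sched_count);   /* this item is no longer in flight */
    }

    static void foo_schedule(struct workqueue_struct *wq, struct work_struct *work)
    {
            atomic_inc(&foo_sched_count);   /* count before queueing */
            queue_work(wq, work);
    }

    static bool foo_work_pending(void)
    {
            return atomic_read(&foo_sched_count) > 0;
    }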
@@ -1578,6 +1577,11 @@ static void __exit bnxt_re_mod_exit(void)
        */
        list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) {
                dev_info(rdev_to_dev(rdev), "Unregistering Device");
+               /*
+                * Flush out any scheduled tasks before destroying the
+                * resources
+                */
+               flush_workqueue(bnxt_re_wq);
                bnxt_re_dev_stop(rdev);
                bnxt_re_ib_unreg(rdev, true);
                bnxt_re_remove_one(rdev);
index 1b0e94697fe34ceea2f7218deb394944458fcc78..3ea5b9624f6b79c3f54c6d069b4af7ab133d1f31 100644 (file)
@@ -173,7 +173,7 @@ static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
        }
 }
 
-void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
+void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
 {
        unsigned long flags;
 
@@ -1419,7 +1419,6 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct cmdq_destroy_qp req;
        struct creq_destroy_qp_resp resp;
-       unsigned long flags;
        u16 cmd_flags = 0;
        int rc;
 
@@ -1437,19 +1436,12 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
                return rc;
        }
 
-       /* Must walk the associated CQs to nullified the QP ptr */
-       spin_lock_irqsave(&qp->scq->hwq.lock, flags);
-
-       __clean_cq(qp->scq, (u64)(unsigned long)qp);
-
-       if (qp->rcq && qp->rcq != qp->scq) {
-               spin_lock(&qp->rcq->hwq.lock);
-               __clean_cq(qp->rcq, (u64)(unsigned long)qp);
-               spin_unlock(&qp->rcq->hwq.lock);
-       }
-
-       spin_unlock_irqrestore(&qp->scq->hwq.lock, flags);
+       return 0;
+}
 
+void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
+                           struct bnxt_qplib_qp *qp)
+{
        bnxt_qplib_free_qp_hdr_buf(res, qp);
        bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq);
        kfree(qp->sq.swq);
@@ -1462,7 +1454,6 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
        if (qp->orrq.max_elements)
                bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
 
-       return 0;
 }
 
 void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
index 211b27a8f9e275a88464f4de3d14292764038739..ca0a2ffa3509093a7953a80ea310f3f9314ace03 100644 (file)
@@ -478,6 +478,9 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
 int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
 int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
 int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
+void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp);
+void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
+                           struct bnxt_qplib_qp *qp);
 void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
                                struct bnxt_qplib_sge *sge);
 void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
@@ -500,7 +503,6 @@ void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
 void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
 int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
 void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp);
-void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp);
 void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
                                 unsigned long *flags);
 void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
index c015c1861351ae91d1455b97ee493a97e5ee5645..03057983341f78b251370888095a6c9492112040 100644 (file)
@@ -52,18 +52,6 @@ const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
 
 /* Device */
 
-static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
-{
-       int rc;
-       u16 pcie_ctl2;
-
-       rc = pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2,
-                                      &pcie_ctl2);
-       if (rc)
-               return false;
-       return !!(pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
-}
-
 static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
                                     char *fw_ver)
 {
@@ -165,7 +153,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
                attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
        }
 
-       attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw);
+       attr->is_atomic = 0;
 bail:
        bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
        return rc;
index faa9478c14a6bda55d0fb518f03598d86454a259..f95b97646c25268c38f93807b37e4af18cee218b 100644 (file)
@@ -114,6 +114,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
        union pvrdma_cmd_resp rsp;
        struct pvrdma_cmd_create_cq *cmd = &req.create_cq;
        struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp;
+       struct pvrdma_create_cq_resp cq_resp = {0};
        struct pvrdma_create_cq ucmd;
 
        BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64);
@@ -197,6 +198,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 
        cq->ibcq.cqe = resp->cqe;
        cq->cq_handle = resp->cq_handle;
+       cq_resp.cqn = resp->cq_handle;
        spin_lock_irqsave(&dev->cq_tbl_lock, flags);
        dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
        spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
@@ -205,7 +207,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
                cq->uar = &(to_vucontext(context)->uar);
 
                /* Copy udata back. */
-               if (ib_copy_to_udata(udata, &cq->cq_handle, sizeof(__u32))) {
+               if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) {
                        dev_warn(&dev->pdev->dev,
                                 "failed to copy back udata\n");
                        pvrdma_destroy_cq(&cq->ibcq);
index 5acebb1ef631ae0070d28917530345689a2654de..af235967a9c2e9ac366c943c10f11b8c2003e7c5 100644 (file)
@@ -113,6 +113,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
        union pvrdma_cmd_resp rsp;
        struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
        struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
+       struct pvrdma_create_srq_resp srq_resp = {0};
        struct pvrdma_create_srq ucmd;
        unsigned long flags;
        int ret;
@@ -204,12 +205,13 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
        }
 
        srq->srq_handle = resp->srqn;
+       srq_resp.srqn = resp->srqn;
        spin_lock_irqsave(&dev->srq_tbl_lock, flags);
        dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq;
        spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);
 
        /* Copy udata back. */
-       if (ib_copy_to_udata(udata, &srq->srq_handle, sizeof(__u32))) {
+       if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) {
                dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
                pvrdma_destroy_srq(&srq->ibsrq);
                return ERR_PTR(-EINVAL);
index 16b96616ef7e61b074d339e6ec8dd5fbab4f2c5e..a51463cd2f374729cf57a448a26d4be06fc0c342 100644 (file)
@@ -447,6 +447,7 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
        union pvrdma_cmd_resp rsp;
        struct pvrdma_cmd_create_pd *cmd = &req.create_pd;
        struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp;
+       struct pvrdma_alloc_pd_resp pd_resp = {0};
        int ret;
        void *ptr;
 
@@ -475,9 +476,10 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
        pd->privileged = !context;
        pd->pd_handle = resp->pd_handle;
        pd->pdn = resp->pd_handle;
+       pd_resp.pdn = resp->pd_handle;
 
        if (context) {
-               if (ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
+               if (ib_copy_to_udata(udata, &pd_resp, sizeof(pd_resp))) {
                        dev_warn(&dev->pdev->dev,
                                 "failed to copy back protection domain\n");
                        pvrdma_dealloc_pd(&pd->ibpd);
index 11f74cbe6660bbe0aa33fc7db2bac09bb2f7ad9e..ea302b0546016886e691fdaa38d85051ba36f23f 100644 (file)
@@ -281,8 +281,6 @@ void ipoib_delete_debug_files(struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
 
-       WARN_ONCE(!priv->mcg_dentry, "null mcg debug file\n");
-       WARN_ONCE(!priv->path_dentry, "null path debug file\n");
        debugfs_remove(priv->mcg_dentry);
        debugfs_remove(priv->path_dentry);
        priv->mcg_dentry = priv->path_dentry = NULL;
index 35a408d0ae4fae9e4ea33e184dfc60a6d0ddc0b4..99bc9bd64b9ecc1de640cba1432a750db6e9491f 100644 (file)
@@ -205,7 +205,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
                         * for example, an "address" value of 0x12345f000 will
                         * flush from 0x123440000 to 0x12347ffff (256KiB). */
                        unsigned long last = address + ((unsigned long)(pages - 1) << VTD_PAGE_SHIFT);
-                       unsigned long mask = __rounddown_pow_of_two(address ^ last);;
+                       unsigned long mask = __rounddown_pow_of_two(address ^ last);
 
                        desc.high = QI_DEV_EIOTLB_ADDR((address & ~mask) | (mask - 1)) | QI_DEV_EIOTLB_SIZE;
                } else {
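
The comment in this hunk quotes only the end result; for a concrete walk-through, assume pages = 2 (any page count that keeps "last" inside the same 256KiB window yields the same mask):

        address = 0x12345f000
        last    = address + ((pages - 1) << VTD_PAGE_SHIFT) = 0x123460000
        address ^ last                            = 0x3f000
        mask    = __rounddown_pow_of_two(0x3f000) = 0x20000
        (address & ~mask) | (mask - 1)            = 0x12345ffff

The descriptor address ends up with bits 12..16 all set and bit 17 clear, which the device-IOTLB invalidation format reads as the 256KiB-aligned window 0x123440000..0x12347ffff quoted in the comment.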
index 1a46b41dac7018bbf43e1dc610abeacab21cb7a1..6422846b546ed27122dc277bd65568421e6986fc 100644 (file)
@@ -659,11 +659,11 @@ static void do_bio_hook(struct search *s, struct bio *orig_bio)
 static void search_free(struct closure *cl)
 {
        struct search *s = container_of(cl, struct search, cl);
-       bio_complete(s);
 
        if (s->iop.bio)
                bio_put(s->iop.bio);
 
+       bio_complete(s);
        closure_debug_destroy(cl);
        mempool_free(s, s->d->c->search);
 }
index 312895788036705cb0426d1af5b5662a4797faf3..4d1d8dfb2d2a4d1de7d6ee59e2b8d802a68827e5 100644 (file)
@@ -1274,7 +1274,7 @@ static int flash_devs_run(struct cache_set *c)
        struct uuid_entry *u;
 
        for (u = c->uuids;
-            u < c->uuids + c->devices_max_used && !ret;
+            u < c->uuids + c->nr_uuids && !ret;
             u++)
                if (UUID_FLASH_ONLY(u))
                        ret = flash_dev_run(c, u);
index e40065bdbfc84ef8455dbcc88d6a3208a6780571..0a7e99d62c69048d7000d3dc6bbcaa3320c742b5 100644 (file)
@@ -157,7 +157,7 @@ static void multipath_status(struct seq_file *seq, struct mddev *mddev)
                seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
        }
        rcu_read_unlock();
-       seq_printf (seq, "]");
+       seq_putc(seq, ']');
 }
 
 static int multipath_congested(struct mddev *mddev, int bits)
index bc67ab6844f02d540cf4ae3725ebdad13ee568b8..254e44e44668f5fff8cc2d95bdb3b682450a204f 100644 (file)
@@ -801,6 +801,9 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
        struct bio *bio;
        int ff = 0;
 
+       if (!page)
+               return;
+
        if (test_bit(Faulty, &rdev->flags))
                return;
 
@@ -5452,6 +5455,7 @@ int md_run(struct mddev *mddev)
         * the only valid external interface is through the md
         * device.
         */
+       mddev->has_superblocks = false;
        rdev_for_each(rdev, mddev) {
                if (test_bit(Faulty, &rdev->flags))
                        continue;
@@ -5465,6 +5469,9 @@ int md_run(struct mddev *mddev)
                                set_disk_ro(mddev->gendisk, 1);
                }
 
+               if (rdev->sb_page)
+                       mddev->has_superblocks = true;
+
                /* perform some consistency tests on the device.
                 * We don't want the data to overlap the metadata,
                 * Internal Bitmap issues have been handled elsewhere.
@@ -5497,8 +5504,10 @@ int md_run(struct mddev *mddev)
        }
        if (mddev->sync_set == NULL) {
                mddev->sync_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
-               if (!mddev->sync_set)
-                       return -ENOMEM;
+               if (!mddev->sync_set) {
+                       err = -ENOMEM;
+                       goto abort;
+               }
        }
 
        spin_lock(&pers_lock);
@@ -5511,7 +5520,8 @@ int md_run(struct mddev *mddev)
                else
                        pr_warn("md: personality for level %s is not loaded!\n",
                                mddev->clevel);
-               return -EINVAL;
+               err = -EINVAL;
+               goto abort;
        }
        spin_unlock(&pers_lock);
        if (mddev->level != pers->level) {
@@ -5524,7 +5534,8 @@ int md_run(struct mddev *mddev)
            pers->start_reshape == NULL) {
                /* This personality cannot handle reshaping... */
                module_put(pers->owner);
-               return -EINVAL;
+               err = -EINVAL;
+               goto abort;
        }
 
        if (pers->sync_request) {
@@ -5593,7 +5604,7 @@ int md_run(struct mddev *mddev)
                mddev->private = NULL;
                module_put(pers->owner);
                bitmap_destroy(mddev);
-               return err;
+               goto abort;
        }
        if (mddev->queue) {
                bool nonrot = true;
@@ -5655,6 +5666,18 @@ int md_run(struct mddev *mddev)
        sysfs_notify_dirent_safe(mddev->sysfs_action);
        sysfs_notify(&mddev->kobj, NULL, "degraded");
        return 0;
+
+abort:
+       if (mddev->bio_set) {
+               bioset_free(mddev->bio_set);
+               mddev->bio_set = NULL;
+       }
+       if (mddev->sync_set) {
+               bioset_free(mddev->sync_set);
+               mddev->sync_set = NULL;
+       }
+
+       return err;
 }
 EXPORT_SYMBOL_GPL(md_run);
 
@@ -8049,6 +8072,7 @@ EXPORT_SYMBOL(md_done_sync);
 bool md_write_start(struct mddev *mddev, struct bio *bi)
 {
        int did_change = 0;
+
        if (bio_data_dir(bi) != WRITE)
                return true;
 
@@ -8081,6 +8105,8 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
        rcu_read_unlock();
        if (did_change)
                sysfs_notify_dirent_safe(mddev->sysfs_state);
+       if (!mddev->has_superblocks)
+               return true;
        wait_event(mddev->sb_wait,
                   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
                   mddev->suspended);
@@ -8543,6 +8569,19 @@ void md_do_sync(struct md_thread *thread)
        set_mask_bits(&mddev->sb_flags, 0,
                      BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
 
+       if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+                       !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
+                       mddev->delta_disks > 0 &&
+                       mddev->pers->finish_reshape &&
+                       mddev->pers->size &&
+                       mddev->queue) {
+               mddev_lock_nointr(mddev);
+               md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
+               mddev_unlock(mddev);
+               set_capacity(mddev->gendisk, mddev->array_sectors);
+               revalidate_disk(mddev->gendisk);
+       }
+
        spin_lock(&mddev->lock);
        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
                /* We completed so min/max setting can be forgotten if used. */
@@ -8569,6 +8608,10 @@ static int remove_and_add_spares(struct mddev *mddev,
        int removed = 0;
        bool remove_some = false;
 
+       if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+               /* Mustn't remove devices when resync thread is running */
+               return 0;
+
        rdev_for_each(rdev, mddev) {
                if ((this == NULL || rdev == this) &&
                    rdev->raid_disk >= 0 &&
index 58cd20a5e85edb1db853661dcd749d99682a796c..fbc925cce8107019dcc4b2aa5310c7d100431a46 100644 (file)
@@ -468,6 +468,8 @@ struct mddev {
        void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
        struct md_cluster_info          *cluster_info;
        unsigned int                    good_device_nr; /* good device num within cluster raid */
+
+       bool    has_superblocks:1;
 };
 
 enum recovery_flags {
index b2eae332e1a29ee585c04ba6d22c2a23b8b99ed3..fe872dc6712ed0c5c00caa60e5f152876f0b1025 100644 (file)
@@ -1108,7 +1108,7 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
 
        bio_copy_data(behind_bio, bio);
 skip_copy:
-       r1_bio->behind_master_bio = behind_bio;;
+       r1_bio->behind_master_bio = behind_bio;
        set_bit(R1BIO_BehindIO, &r1_bio->state);
 
        return;
@@ -1809,6 +1809,17 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
                        struct md_rdev *repl =
                                conf->mirrors[conf->raid_disks + number].rdev;
                        freeze_array(conf, 0);
+                       if (atomic_read(&repl->nr_pending)) {
+                               /* Some queued IO on retry_list still holds a
+                                * reference on repl, so we must not clear the
+                                * replacement slot (set it to NULL) yet;
+                                * otherwise that IO could hit a NULL rdev
+                                * dereference in sync_request_write and
+                                * handle_write_finished.
+                                */
+                               err = -EBUSY;
+                               unfreeze_array(conf);
+                               goto abort;
+                       }
                        clear_bit(Replacement, &repl->flags);
                        p->rdev = repl;
                        conf->mirrors[conf->raid_disks + number].rdev = NULL;
index c7294e7557e038bf5248e8c8bf9ce84cb6afad58..eb84bc68e2fd4c31cc996bc4d399183cd852b8f2 100644 (file)
 #define BARRIER_BUCKETS_NR_BITS                (PAGE_SHIFT - ilog2(sizeof(atomic_t)))
 #define BARRIER_BUCKETS_NR             (1<<BARRIER_BUCKETS_NR_BITS)
 
+/* Note: raid1_info.rdev can be set to NULL asynchronously by raid1_remove_disk.
+ * There are three safe ways to access raid1_info.rdev.
+ * 1/ when holding mddev->reconfig_mutex
+ * 2/ when resync/recovery is known to be happening - i.e. in code that is
+ *    called as part of performing resync/recovery.
+ * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer
+ *    and if it is non-NULL, increment rdev->nr_pending before dropping the
+ *    RCU lock.
+ * When .rdev is set to NULL, the nr_pending count is checked again and, if it
+ * has been incremented, the pointer is put back in .rdev.
+ */
+
 struct raid1_info {
        struct md_rdev  *rdev;
        sector_t        head_position;
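
The locking rules spelled out above (and repeated for raid10 and raid5 further down) are easiest to see as code. A minimal sketch of the third, RCU-based rule, using the helpers the comment names plus rdev_dec_pending() to drop the pin; this is an illustration of the pattern, not an excerpt from the driver:

	struct md_rdev *rdev;

	rcu_read_lock();
	rdev = rcu_dereference(conf->mirrors[i].rdev);
	if (rdev)
		atomic_inc(&rdev->nr_pending);	/* pin before leaving the RCU section */
	rcu_read_unlock();

	if (rdev) {
		/* rdev cannot be freed or swapped to NULL under us here */
		...
		rdev_dec_pending(rdev, conf->mddev);
	}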
index 99c9207899a777c615880f286a6f617ffc4a70bf..c5e6c60fc0d41b53a578874087ae87259e3ebcce 100644 (file)
@@ -141,7 +141,7 @@ static void r10bio_pool_free(void *r10_bio, void *data)
 #define RESYNC_WINDOW (1024*1024)
 /* maximum number of concurrent requests, memory permitting */
 #define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
-#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
+#define CLUSTER_RESYNC_WINDOW (32 * RESYNC_WINDOW)
 #define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
 
 /*
@@ -2655,7 +2655,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
                for (m = 0; m < conf->copies; m++) {
                        int dev = r10_bio->devs[m].devnum;
                        rdev = conf->mirrors[dev].rdev;
-                       if (r10_bio->devs[m].bio == NULL)
+                       if (r10_bio->devs[m].bio == NULL ||
+                               r10_bio->devs[m].bio->bi_end_io == NULL)
                                continue;
                        if (!r10_bio->devs[m].bio->bi_status) {
                                rdev_clear_badblocks(
@@ -2670,7 +2671,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
                                        md_error(conf->mddev, rdev);
                        }
                        rdev = conf->mirrors[dev].replacement;
-                       if (r10_bio->devs[m].repl_bio == NULL)
+                       if (r10_bio->devs[m].repl_bio == NULL ||
+                               r10_bio->devs[m].repl_bio->bi_end_io == NULL)
                                continue;
 
                        if (!r10_bio->devs[m].repl_bio->bi_status) {
@@ -3782,7 +3784,7 @@ static int raid10_run(struct mddev *mddev)
                if (fc > 1 || fo > 0) {
                        pr_err("only near layout is supported by clustered"
                                " raid10\n");
-                       goto out;
+                       goto out_free_conf;
                }
        }
 
@@ -4830,17 +4832,11 @@ static void raid10_finish_reshape(struct mddev *mddev)
                return;
 
        if (mddev->delta_disks > 0) {
-               sector_t size = raid10_size(mddev, 0, 0);
-               md_set_array_sectors(mddev, size);
                if (mddev->recovery_cp > mddev->resync_max_sectors) {
                        mddev->recovery_cp = mddev->resync_max_sectors;
                        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                }
-               mddev->resync_max_sectors = size;
-               if (mddev->queue) {
-                       set_capacity(mddev->gendisk, mddev->array_sectors);
-                       revalidate_disk(mddev->gendisk);
-               }
+               mddev->resync_max_sectors = mddev->array_sectors;
        } else {
                int d;
                rcu_read_lock();
index db2ac22ac1b42801a14e5eab8641109a3aa61505..e2e8840de9bfab734e9238bc59ca8e9058c7de0b 100644 (file)
@@ -2,6 +2,19 @@
 #ifndef _RAID10_H
 #define _RAID10_H
 
+/* Note: raid10_info.rdev can be set to NULL asynchronously by
+ * raid10_remove_disk.
+ * There are three safe ways to access raid10_info.rdev.
+ * 1/ when holding mddev->reconfig_mutex
+ * 2/ when resync/recovery/reshape is known to be happening - i.e. in code
+ *    that is called as part of performing resync/recovery/reshape.
+ * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer
+ *    and if it is non-NULL, increment rdev->nr_pending before dropping the
+ *    RCU lock.
+ * When .rdev is set to NULL, the nr_pending count is checked again and, if it
+ * has been incremented, the pointer is put back in .rdev.
+ */
+
 struct raid10_info {
        struct md_rdev  *rdev, *replacement;
        sector_t        head_position;
index 0c76bcedfc1cbd8b0af33008652095e3862f22c9..a001808a2b77da16bc2ae3c1d4aa32b5f944a939 100644 (file)
@@ -44,6 +44,7 @@ extern void ppl_write_stripe_run(struct r5conf *conf);
 extern void ppl_stripe_write_finished(struct stripe_head *sh);
 extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add);
 extern void ppl_quiesce(struct r5conf *conf, int quiesce);
+extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio);
 
 static inline bool raid5_has_ppl(struct r5conf *conf)
 {
@@ -104,7 +105,7 @@ static inline int log_handle_flush_request(struct r5conf *conf, struct bio *bio)
        if (conf->log)
                ret = r5l_handle_flush_request(conf->log, bio);
        else if (raid5_has_ppl(conf))
-               ret = 0;
+               ret = ppl_handle_flush_request(conf->log, bio);
 
        return ret;
 }
index 2764c2290062862a607dca9145ef8f0e24cb5d89..42890a08375bc73b6bcdba7961e9c88062dded5d 100644 (file)
@@ -693,6 +693,16 @@ void ppl_quiesce(struct r5conf *conf, int quiesce)
        }
 }
 
+int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio)
+{
+       if (bio->bi_iter.bi_size == 0) {
+               bio_endio(bio);
+               return 0;
+       }
+       bio->bi_opf &= ~REQ_PREFLUSH;
+       return -EAGAIN;
+}
+
 void ppl_stripe_write_finished(struct stripe_head *sh)
 {
        struct ppl_io_unit *io;
index 50d01144b80535e2e937c16021cdeeba632b201e..b5d2601483e34fa97fc8cfb7faeff6f014c0e035 100644 (file)
@@ -2196,15 +2196,16 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
 static int grow_stripes(struct r5conf *conf, int num)
 {
        struct kmem_cache *sc;
+       size_t namelen = sizeof(conf->cache_name[0]);
        int devs = max(conf->raid_disks, conf->previous_raid_disks);
 
        if (conf->mddev->gendisk)
-               sprintf(conf->cache_name[0],
+               snprintf(conf->cache_name[0], namelen,
                        "raid%d-%s", conf->level, mdname(conf->mddev));
        else
-               sprintf(conf->cache_name[0],
+               snprintf(conf->cache_name[0], namelen,
                        "raid%d-%p", conf->level, conf->mddev);
-       sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
+       snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]);
 
        conf->active_name = 0;
        sc = kmem_cache_create(conf->cache_name[conf->active_name],
@@ -6764,9 +6765,7 @@ static void free_conf(struct r5conf *conf)
 
        log_exit(conf);
 
-       if (conf->shrinker.nr_deferred)
-               unregister_shrinker(&conf->shrinker);
-
+       unregister_shrinker(&conf->shrinker);
        free_thread_groups(conf);
        shrink_stripes(conf);
        raid5_free_percpu(conf);
@@ -8001,13 +8000,7 @@ static void raid5_finish_reshape(struct mddev *mddev)
 
        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
 
-               if (mddev->delta_disks > 0) {
-                       md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
-                       if (mddev->queue) {
-                               set_capacity(mddev->gendisk, mddev->array_sectors);
-                               revalidate_disk(mddev->gendisk);
-                       }
-               } else {
+               if (mddev->delta_disks <= 0) {
                        int d;
                        spin_lock_irq(&conf->device_lock);
                        mddev->degraded = raid5_calc_degraded(conf);
index 2e6123825095296555e8a73a52d63872f2fa2bff..3f8da26032accce9fdf492b58fc120a94ac31582 100644 (file)
@@ -450,6 +450,18 @@ enum {
  * HANDLE gets cleared if stripe_handle leaves nothing locked.
  */
 
+/* Note: disk_info.rdev can be set to NULL asynchronously by raid5_remove_disk.
+ * There are three safe ways to access disk_info.rdev.
+ * 1/ when holding mddev->reconfig_mutex
+ * 2/ when resync/recovery/reshape is known to be happening - i.e. in code that
+ *    is called as part of performing resync/recovery/reshape.
+ * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer
+ *    and if it is non-NULL, increment rdev->nr_pending before dropping the RCU
+ *    lock.
+ * When .rdev is set to NULL, the nr_pending count is checked again and, if
+ * it has been incremented, the pointer is put back in .rdev.
+ */
+
 struct disk_info {
        struct md_rdev  *rdev, *replacement;
        struct page     *extra_page; /* extra page to use in prexor */
index 145e12bfb8190ab8274645c43493b9978fff6e1a..372c074bb1b90fa0d1010d697faa18a7005231d7 100644 (file)
@@ -147,6 +147,8 @@ config DVB_CORE
 config DVB_MMAP
        bool "Enable DVB memory-mapped API (EXPERIMENTAL)"
        depends on DVB_CORE
+       depends on VIDEO_V4L2=y || VIDEO_V4L2=DVB_CORE
+       select VIDEOBUF2_VMALLOC
        default n
        help
          This option enables DVB experimental memory-mapped API, with
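
The added dependency uses a Kconfig construct that is easy to misread: "VIDEO_V4L2=DVB_CORE" compares the values of the two tristate symbols, so the line is satisfied when V4L2 is built in, or when V4L2 and the DVB core are both =y or both =m. Roughly (my reading of the expression, not wording from the patch):

	VIDEO_V4L2=y                -> DVB_MMAP selectable for DVB_CORE =y or =m
	VIDEO_V4L2=m, DVB_CORE=m    -> allowed, both end up as modules
	VIDEO_V4L2=m, DVB_CORE=y    -> blocked; a built-in DVB core could not use
	                               symbols exported by modular V4L2/videobuf2 code

Together with "select VIDEOBUF2_VMALLOC" this keeps the vb2 backend available whenever the memory-mapped API is compiled in.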
index 5df05250de947d7cf2bc396dc5104851f52fcb36..17c32ea58395d78f9fafe5525bbfbf1bdfe4b690 100644 (file)
@@ -3,6 +3,9 @@ config VIDEOBUF2_CORE
        select DMA_SHARED_BUFFER
        tristate
 
+config VIDEOBUF2_V4L2
+       tristate
+
 config VIDEOBUF2_MEMOPS
        tristate
        select FRAME_VECTOR
index 19de5ccda20b3c9de62a616077cfd11c6d743c58..77bebe8b202f46695ef239caaec98a567e088a03 100644 (file)
@@ -1,5 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+videobuf2-common-objs := videobuf2-core.o
 
-obj-$(CONFIG_VIDEOBUF2_CORE) += videobuf2-core.o videobuf2-v4l2.o
+ifeq ($(CONFIG_TRACEPOINTS),y)
+  videobuf2-common-objs += vb2-trace.o
+endif
+
+obj-$(CONFIG_VIDEOBUF2_CORE) += videobuf2-common.o
+obj-$(CONFIG_VIDEOBUF2_V4L2) += videobuf2-v4l2.o
 obj-$(CONFIG_VIDEOBUF2_MEMOPS) += videobuf2-memops.o
 obj-$(CONFIG_VIDEOBUF2_VMALLOC) += videobuf2-vmalloc.o
 obj-$(CONFIG_VIDEOBUF2_DMA_CONTIG) += videobuf2-dma-contig.o
index 3a105d82019a0288b37f7a7143b1e8c79adffb42..62b028ded9f784438175969610608e60f35a9353 100644 (file)
@@ -4,7 +4,7 @@
 #
 
 dvb-net-$(CONFIG_DVB_NET) := dvb_net.o
-dvb-vb2-$(CONFIG_DVB_MMSP) := dvb_vb2.o
+dvb-vb2-$(CONFIG_DVB_MMAP) := dvb_vb2.o
 
 dvb-core-objs := dvbdev.o dmxdev.o dvb_demux.o                 \
                 dvb_ca_en50221.o dvb_frontend.o                \
index 6d53af00190e396fda03bab661b56d4e4a1fc4a2..61a750fae4653be8da2ee47e60ae94e36e2143e6 100644 (file)
@@ -128,11 +128,7 @@ static int dvb_dvr_open(struct inode *inode, struct file *file)
        struct dvb_device *dvbdev = file->private_data;
        struct dmxdev *dmxdev = dvbdev->priv;
        struct dmx_frontend *front;
-#ifndef DVB_MMAP
        bool need_ringbuffer = false;
-#else
-       const bool need_ringbuffer = true;
-#endif
 
        dprintk("%s\n", __func__);
 
@@ -144,17 +140,31 @@ static int dvb_dvr_open(struct inode *inode, struct file *file)
                return -ENODEV;
        }
 
-#ifndef DVB_MMAP
+       dmxdev->may_do_mmap = 0;
+
+       /*
+        * The logic here is a little tricky due to the ifdef.
+        *
+        * The ringbuffer is used for both read and mmap.
+        *
+        * It is not needed, however, in two situations:
+        *      - write-only device opens (O_WRONLY);
+        *      - duplex device nodes opened with O_RDWR.
+        */
+
        if ((file->f_flags & O_ACCMODE) == O_RDONLY)
                need_ringbuffer = true;
-#else
-       if ((file->f_flags & O_ACCMODE) == O_RDWR) {
+       else if ((file->f_flags & O_ACCMODE) == O_RDWR) {
                if (!(dmxdev->capabilities & DMXDEV_CAP_DUPLEX)) {
+#ifdef CONFIG_DVB_MMAP
+                       dmxdev->may_do_mmap = 1;
+                       need_ringbuffer = true;
+#else
                        mutex_unlock(&dmxdev->mutex);
                        return -EOPNOTSUPP;
+#endif
                }
        }
-#endif
 
        if (need_ringbuffer) {
                void *mem;
@@ -169,8 +179,9 @@ static int dvb_dvr_open(struct inode *inode, struct file *file)
                        return -ENOMEM;
                }
                dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE);
-               dvb_vb2_init(&dmxdev->dvr_vb2_ctx, "dvr",
-                            file->f_flags & O_NONBLOCK);
+               if (dmxdev->may_do_mmap)
+                       dvb_vb2_init(&dmxdev->dvr_vb2_ctx, "dvr",
+                                    file->f_flags & O_NONBLOCK);
                dvbdev->readers--;
        }
 
@@ -200,11 +211,6 @@ static int dvb_dvr_release(struct inode *inode, struct file *file)
 {
        struct dvb_device *dvbdev = file->private_data;
        struct dmxdev *dmxdev = dvbdev->priv;
-#ifndef DVB_MMAP
-       bool need_ringbuffer = false;
-#else
-       const bool need_ringbuffer = true;
-#endif
 
        mutex_lock(&dmxdev->mutex);
 
@@ -213,15 +219,14 @@ static int dvb_dvr_release(struct inode *inode, struct file *file)
                dmxdev->demux->connect_frontend(dmxdev->demux,
                                                dmxdev->dvr_orig_fe);
        }
-#ifndef DVB_MMAP
-       if ((file->f_flags & O_ACCMODE) == O_RDONLY)
-               need_ringbuffer = true;
-#endif
 
-       if (need_ringbuffer) {
-               if (dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx))
-                       dvb_vb2_stream_off(&dmxdev->dvr_vb2_ctx);
-               dvb_vb2_release(&dmxdev->dvr_vb2_ctx);
+       if (((file->f_flags & O_ACCMODE) == O_RDONLY) ||
+           dmxdev->may_do_mmap) {
+               if (dmxdev->may_do_mmap) {
+                       if (dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx))
+                               dvb_vb2_stream_off(&dmxdev->dvr_vb2_ctx);
+                       dvb_vb2_release(&dmxdev->dvr_vb2_ctx);
+               }
                dvbdev->readers++;
                if (dmxdev->dvr_buffer.data) {
                        void *mem = dmxdev->dvr_buffer.data;
@@ -380,7 +385,8 @@ static void dvb_dmxdev_filter_timer(struct dmxdev_filter *dmxdevfilter)
 
 static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
                                       const u8 *buffer2, size_t buffer2_len,
-                                      struct dmx_section_filter *filter)
+                                      struct dmx_section_filter *filter,
+                                      u32 *buffer_flags)
 {
        struct dmxdev_filter *dmxdevfilter = filter->priv;
        int ret;
@@ -399,10 +405,12 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
        dprintk("section callback %*ph\n", 6, buffer1);
        if (dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx)) {
                ret = dvb_vb2_fill_buffer(&dmxdevfilter->vb2_ctx,
-                                         buffer1, buffer1_len);
+                                         buffer1, buffer1_len,
+                                         buffer_flags);
                if (ret == buffer1_len)
                        ret = dvb_vb2_fill_buffer(&dmxdevfilter->vb2_ctx,
-                                                 buffer2, buffer2_len);
+                                                 buffer2, buffer2_len,
+                                                 buffer_flags);
        } else {
                ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer,
                                              buffer1, buffer1_len);
@@ -422,11 +430,12 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
 
 static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
                                  const u8 *buffer2, size_t buffer2_len,
-                                 struct dmx_ts_feed *feed)
+                                 struct dmx_ts_feed *feed,
+                                 u32 *buffer_flags)
 {
        struct dmxdev_filter *dmxdevfilter = feed->priv;
        struct dvb_ringbuffer *buffer;
-#ifdef DVB_MMAP
+#ifdef CONFIG_DVB_MMAP
        struct dvb_vb2_ctx *ctx;
 #endif
        int ret;
@@ -440,20 +449,22 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
        if (dmxdevfilter->params.pes.output == DMX_OUT_TAP ||
            dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP) {
                buffer = &dmxdevfilter->buffer;
-#ifdef DVB_MMAP
+#ifdef CONFIG_DVB_MMAP
                ctx = &dmxdevfilter->vb2_ctx;
 #endif
        } else {
                buffer = &dmxdevfilter->dev->dvr_buffer;
-#ifdef DVB_MMAP
+#ifdef CONFIG_DVB_MMAP
                ctx = &dmxdevfilter->dev->dvr_vb2_ctx;
 #endif
        }
 
        if (dvb_vb2_is_streaming(ctx)) {
-               ret = dvb_vb2_fill_buffer(ctx, buffer1, buffer1_len);
+               ret = dvb_vb2_fill_buffer(ctx, buffer1, buffer1_len,
+                                         buffer_flags);
                if (ret == buffer1_len)
-                       ret = dvb_vb2_fill_buffer(ctx, buffer2, buffer2_len);
+                       ret = dvb_vb2_fill_buffer(ctx, buffer2, buffer2_len,
+                                                 buffer_flags);
        } else {
                if (buffer->error) {
                        spin_unlock(&dmxdevfilter->dev->lock);
@@ -802,6 +813,12 @@ static int dvb_demux_open(struct inode *inode, struct file *file)
        mutex_init(&dmxdevfilter->mutex);
        file->private_data = dmxdevfilter;
 
+#ifdef CONFIG_DVB_MMAP
+       dmxdev->may_do_mmap = 1;
+#else
+       dmxdev->may_do_mmap = 0;
+#endif
+
        dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
        dvb_vb2_init(&dmxdevfilter->vb2_ctx, "demux_filter",
                     file->f_flags & O_NONBLOCK);
@@ -1111,7 +1128,7 @@ static int dvb_demux_do_ioctl(struct file *file,
                mutex_unlock(&dmxdevfilter->mutex);
                break;
 
-#ifdef DVB_MMAP
+#ifdef CONFIG_DVB_MMAP
        case DMX_REQBUFS:
                if (mutex_lock_interruptible(&dmxdevfilter->mutex)) {
                        mutex_unlock(&dmxdev->mutex);
@@ -1160,7 +1177,7 @@ static int dvb_demux_do_ioctl(struct file *file,
                break;
 #endif
        default:
-               ret = -EINVAL;
+               ret = -ENOTTY;
                break;
        }
        mutex_unlock(&dmxdev->mutex);
@@ -1199,13 +1216,16 @@ static __poll_t dvb_demux_poll(struct file *file, poll_table *wait)
        return mask;
 }
 
-#ifdef DVB_MMAP
+#ifdef CONFIG_DVB_MMAP
 static int dvb_demux_mmap(struct file *file, struct vm_area_struct *vma)
 {
        struct dmxdev_filter *dmxdevfilter = file->private_data;
        struct dmxdev *dmxdev = dmxdevfilter->dev;
        int ret;
 
+       if (!dmxdev->may_do_mmap)
+               return -ENOTTY;
+
        if (mutex_lock_interruptible(&dmxdev->mutex))
                return -ERESTARTSYS;
 
@@ -1249,7 +1269,7 @@ static const struct file_operations dvb_demux_fops = {
        .release = dvb_demux_release,
        .poll = dvb_demux_poll,
        .llseek = default_llseek,
-#ifdef DVB_MMAP
+#ifdef CONFIG_DVB_MMAP
        .mmap = dvb_demux_mmap,
 #endif
 };
@@ -1280,7 +1300,7 @@ static int dvb_dvr_do_ioctl(struct file *file,
                ret = dvb_dvr_set_buffer_size(dmxdev, arg);
                break;
 
-#ifdef DVB_MMAP
+#ifdef CONFIG_DVB_MMAP
        case DMX_REQBUFS:
                ret = dvb_vb2_reqbufs(&dmxdev->dvr_vb2_ctx, parg);
                break;
@@ -1304,7 +1324,7 @@ static int dvb_dvr_do_ioctl(struct file *file,
                break;
 #endif
        default:
-               ret = -EINVAL;
+               ret = -ENOTTY;
                break;
        }
        mutex_unlock(&dmxdev->mutex);
@@ -1322,11 +1342,6 @@ static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait)
        struct dvb_device *dvbdev = file->private_data;
        struct dmxdev *dmxdev = dvbdev->priv;
        __poll_t mask = 0;
-#ifndef DVB_MMAP
-       bool need_ringbuffer = false;
-#else
-       const bool need_ringbuffer = true;
-#endif
 
        dprintk("%s\n", __func__);
 
@@ -1337,11 +1352,8 @@ static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait)
 
        poll_wait(file, &dmxdev->dvr_buffer.queue, wait);
 
-#ifndef DVB_MMAP
-       if ((file->f_flags & O_ACCMODE) == O_RDONLY)
-               need_ringbuffer = true;
-#endif
-       if (need_ringbuffer) {
+       if (((file->f_flags & O_ACCMODE) == O_RDONLY) ||
+           dmxdev->may_do_mmap) {
                if (dmxdev->dvr_buffer.error)
                        mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI | EPOLLERR);
 
@@ -1353,13 +1365,16 @@ static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait)
        return mask;
 }
 
-#ifdef DVB_MMAP
+#ifdef CONFIG_DVB_MMAP
 static int dvb_dvr_mmap(struct file *file, struct vm_area_struct *vma)
 {
        struct dvb_device *dvbdev = file->private_data;
        struct dmxdev *dmxdev = dvbdev->priv;
        int ret;
 
+       if (!dmxdev->may_do_mmap)
+               return -ENOTTY;
+
        if (dmxdev->exit)
                return -ENODEV;
 
@@ -1381,7 +1396,7 @@ static const struct file_operations dvb_dvr_fops = {
        .release = dvb_dvr_release,
        .poll = dvb_dvr_poll,
        .llseek = default_llseek,
-#ifdef DVB_MMAP
+#ifdef CONFIG_DVB_MMAP
        .mmap = dvb_dvr_mmap,
 #endif
 };
index 210eed0269b085d5b8c99d5f8941928cf0613a30..f45091246bdca90d870dc0a10bd35e8fd4d73eea 100644 (file)
@@ -55,6 +55,17 @@ MODULE_PARM_DESC(dvb_demux_feed_err_pkts,
                dprintk(x);                             \
 } while (0)
 
+#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
+#  define dprintk_sect_loss(x...) dprintk(x)
+#else
+#  define dprintk_sect_loss(x...)
+#endif
+
+#define set_buf_flags(__feed, __flag)                  \
+       do {                                            \
+               (__feed)->buffer_flags |= (__flag);     \
+       } while (0)
+
 /******************************************************************************
  * static inlined helper functions
  ******************************************************************************/
@@ -104,31 +115,30 @@ static inline int dvb_dmx_swfilter_payload(struct dvb_demux_feed *feed,
 {
        int count = payload(buf);
        int p;
-#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
        int ccok;
        u8 cc;
-#endif
 
        if (count == 0)
                return -1;
 
        p = 188 - count;
 
-#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
        cc = buf[3] & 0x0f;
        ccok = ((feed->cc + 1) & 0x0f) == cc;
        feed->cc = cc;
-       if (!ccok)
-               dprintk("missed packet: %d instead of %d!\n",
-                       cc, (feed->cc + 1) & 0x0f);
-#endif
+       if (!ccok) {
+               set_buf_flags(feed, DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED);
+               dprintk_sect_loss("missed packet: %d instead of %d!\n",
+                                 cc, (feed->cc + 1) & 0x0f);
+       }
 
        if (buf[1] & 0x40)      // PUSI ?
                feed->peslen = 0xfffa;
 
        feed->peslen += count;
 
-       return feed->cb.ts(&buf[p], count, NULL, 0, &feed->feed.ts);
+       return feed->cb.ts(&buf[p], count, NULL, 0, &feed->feed.ts,
+                          &feed->buffer_flags);
 }
 
 static int dvb_dmx_swfilter_sectionfilter(struct dvb_demux_feed *feed,
@@ -150,7 +160,7 @@ static int dvb_dmx_swfilter_sectionfilter(struct dvb_demux_feed *feed,
                return 0;
 
        return feed->cb.sec(feed->feed.sec.secbuf, feed->feed.sec.seclen,
-                           NULL, 0, &f->filter);
+                           NULL, 0, &f->filter, &feed->buffer_flags);
 }
 
 static inline int dvb_dmx_swfilter_section_feed(struct dvb_demux_feed *feed)
@@ -169,8 +179,10 @@ static inline int dvb_dmx_swfilter_section_feed(struct dvb_demux_feed *feed)
        if (sec->check_crc) {
                section_syntax_indicator = ((sec->secbuf[1] & 0x80) != 0);
                if (section_syntax_indicator &&
-                   demux->check_crc32(feed, sec->secbuf, sec->seclen))
+                   demux->check_crc32(feed, sec->secbuf, sec->seclen)) {
+                       set_buf_flags(feed, DMX_BUFFER_FLAG_HAD_CRC32_DISCARD);
                        return -1;
+               }
        }
 
        do {
@@ -187,7 +199,6 @@ static void dvb_dmx_swfilter_section_new(struct dvb_demux_feed *feed)
 {
        struct dmx_section_feed *sec = &feed->feed.sec;
 
-#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
        if (sec->secbufp < sec->tsfeedp) {
                int n = sec->tsfeedp - sec->secbufp;
 
@@ -197,12 +208,13 @@ static void dvb_dmx_swfilter_section_new(struct dvb_demux_feed *feed)
                 * but just first and last.
                 */
                if (sec->secbuf[0] != 0xff || sec->secbuf[n - 1] != 0xff) {
-                       dprintk("section ts padding loss: %d/%d\n",
-                              n, sec->tsfeedp);
-                       dprintk("pad data: %*ph\n", n, sec->secbuf);
+                       set_buf_flags(feed,
+                                     DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED);
+                       dprintk_sect_loss("section ts padding loss: %d/%d\n",
+                                         n, sec->tsfeedp);
+                       dprintk_sect_loss("pad data: %*ph\n", n, sec->secbuf);
                }
        }
-#endif
 
        sec->tsfeedp = sec->secbufp = sec->seclen = 0;
        sec->secbuf = sec->secbuf_base;
@@ -237,11 +249,10 @@ static int dvb_dmx_swfilter_section_copy_dump(struct dvb_demux_feed *feed,
                return 0;
 
        if (sec->tsfeedp + len > DMX_MAX_SECFEED_SIZE) {
-#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
-               dprintk("section buffer full loss: %d/%d\n",
-                       sec->tsfeedp + len - DMX_MAX_SECFEED_SIZE,
-                       DMX_MAX_SECFEED_SIZE);
-#endif
+               set_buf_flags(feed, DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED);
+               dprintk_sect_loss("section buffer full loss: %d/%d\n",
+                                 sec->tsfeedp + len - DMX_MAX_SECFEED_SIZE,
+                                 DMX_MAX_SECFEED_SIZE);
                len = DMX_MAX_SECFEED_SIZE - sec->tsfeedp;
        }
 
@@ -269,12 +280,13 @@ static int dvb_dmx_swfilter_section_copy_dump(struct dvb_demux_feed *feed,
                sec->seclen = seclen;
                sec->crc_val = ~0;
                /* dump [secbuf .. secbuf+seclen) */
-               if (feed->pusi_seen)
+               if (feed->pusi_seen) {
                        dvb_dmx_swfilter_section_feed(feed);
-#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
-               else
-                       dprintk("pusi not seen, discarding section data\n");
-#endif
+               } else {
+                       set_buf_flags(feed,
+                                     DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED);
+                       dprintk_sect_loss("pusi not seen, discarding section data\n");
+               }
                sec->secbufp += seclen; /* secbufp and secbuf moving together is */
                sec->secbuf += seclen;  /* redundant but saves pointer arithmetic */
        }
@@ -307,18 +319,22 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
        }
 
        if (!ccok || dc_i) {
-#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
-               if (dc_i)
-                       dprintk("%d frame with disconnect indicator\n",
+               if (dc_i) {
+                       set_buf_flags(feed,
+                                     DMX_BUFFER_FLAG_DISCONTINUITY_INDICATOR);
+                       dprintk_sect_loss("%d frame with disconnect indicator\n",
                                cc);
-               else
-                       dprintk("discontinuity: %d instead of %d. %d bytes lost\n",
+               } else {
+                       set_buf_flags(feed,
+                                     DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED);
+                       dprintk_sect_loss("discontinuity: %d instead of %d. %d bytes lost\n",
                                cc, (feed->cc + 1) & 0x0f, count + 4);
+               }
                /*
-                * those bytes under sume circumstances will again be reported
+                * those bytes under some circumstances will again be reported
                 * in the following dvb_dmx_swfilter_section_new
                 */
-#endif
+
                /*
                 * Discontinuity detected. Reset pusi_seen to
                 * stop feeding of suspicious data until next PUSI=1 arrives
@@ -326,6 +342,7 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
                 * FIXME: does it make sense if the MPEG-TS is the one
                 *      reporting discontinuity?
                 */
+
                feed->pusi_seen = false;
                dvb_dmx_swfilter_section_new(feed);
        }
@@ -345,11 +362,11 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
                        dvb_dmx_swfilter_section_new(feed);
                        dvb_dmx_swfilter_section_copy_dump(feed, after,
                                                           after_len);
+               } else if (count > 0) {
+                       set_buf_flags(feed,
+                                     DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED);
+                       dprintk_sect_loss("PUSI=1 but %d bytes lost\n", count);
                }
-#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
-               else if (count > 0)
-                       dprintk("PUSI=1 but %d bytes lost\n", count);
-#endif
        } else {
                /* PUSI=0 (is not set), no section boundary */
                dvb_dmx_swfilter_section_copy_dump(feed, &buf[p], count);
@@ -369,7 +386,8 @@ static inline void dvb_dmx_swfilter_packet_type(struct dvb_demux_feed *feed,
                        if (feed->ts_type & TS_PAYLOAD_ONLY)
                                dvb_dmx_swfilter_payload(feed, buf);
                        else
-                               feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts);
+                               feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts,
+                                           &feed->buffer_flags);
                }
                /* Used only on full-featured devices */
                if (feed->ts_type & TS_DECODER)
@@ -430,6 +448,11 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
        }
 
        if (buf[1] & 0x80) {
+               list_for_each_entry(feed, &demux->feed_list, list_head) {
+                       if ((feed->pid != pid) && (feed->pid != 0x2000))
+                               continue;
+                       set_buf_flags(feed, DMX_BUFFER_FLAG_TEI);
+               }
                dprintk_tscheck("TEI detected. PID=0x%x data1=0x%x\n",
                                pid, buf[1]);
                /* data in this packet can't be trusted - drop it unless
@@ -445,6 +468,13 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
                                                (demux->cnt_storage[pid] + 1) & 0xf;
 
                                if ((buf[3] & 0xf) != demux->cnt_storage[pid]) {
+                                       list_for_each_entry(feed, &demux->feed_list, list_head) {
+                                               if ((feed->pid != pid) && (feed->pid != 0x2000))
+                                                       continue;
+                                               set_buf_flags(feed,
+                                                             DMX_BUFFER_PKT_COUNTER_MISMATCH);
+                                       }
+
                                        dprintk_tscheck("TS packet counter mismatch. PID=0x%x expected 0x%x got 0x%x\n",
                                                        pid, demux->cnt_storage[pid],
                                                        buf[3] & 0xf);
@@ -466,7 +496,8 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
                if (feed->pid == pid)
                        dvb_dmx_swfilter_packet_type(feed, buf);
                else if (feed->pid == 0x2000)
-                       feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts);
+                       feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts,
+                                   &feed->buffer_flags);
        }
 }
 
@@ -585,7 +616,8 @@ void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf, size_t count)
 
        spin_lock_irqsave(&demux->lock, flags);
 
-       demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts);
+       demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts,
+                          &demux->feed->buffer_flags);
 
        spin_unlock_irqrestore(&demux->lock, flags);
 }
@@ -785,6 +817,7 @@ static int dvbdmx_allocate_ts_feed(struct dmx_demux *dmx,
        feed->demux = demux;
        feed->pid = 0xffff;
        feed->peslen = 0xfffa;
+       feed->buffer_flags = 0;
 
        (*ts_feed) = &feed->feed.ts;
        (*ts_feed)->parent = dmx;
@@ -1042,6 +1075,7 @@ static int dvbdmx_allocate_section_feed(struct dmx_demux *demux,
        dvbdmxfeed->cb.sec = callback;
        dvbdmxfeed->demux = dvbdmx;
        dvbdmxfeed->pid = 0xffff;
+       dvbdmxfeed->buffer_flags = 0;
        dvbdmxfeed->feed.sec.secbuf = dvbdmxfeed->feed.sec.secbuf_base;
        dvbdmxfeed->feed.sec.secbufp = dvbdmxfeed->feed.sec.seclen = 0;
        dvbdmxfeed->feed.sec.tsfeedp = 0;
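
Every demux callback now carries a u32 *buffer_flags argument, and the demux core ORs DMX_BUFFER_FLAG_* bits into the pointed-to word when it spots TEI, continuity-counter jumps or section loss. A rough sketch of a consumer on the new signature (a hypothetical callback, not one added by this patch; buffer_flags may legitimately be NULL, as the av7110 caller further down shows):

	static int my_ts_cb(const u8 *buf1, size_t len1,
			    const u8 *buf2, size_t len2,
			    struct dmx_ts_feed *feed, u32 *buffer_flags)
	{
		if (buffer_flags &&
		    (*buffer_flags & DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED))
			pr_debug("demux reported a TS discontinuity\n");

		/* consume buf1/len1 (and buf2/len2 when the ring buffer wrapped) */
		return 0;
	}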
index b6c7eec863b9232b5fb30d07b809a01298d85c22..ba39f9942e1db06f0eeadc6473287f641ff4af85 100644 (file)
@@ -883,7 +883,8 @@ static void dvb_net_ule(struct net_device *dev, const u8 *buf, size_t buf_len)
 
 static int dvb_net_ts_callback(const u8 *buffer1, size_t buffer1_len,
                               const u8 *buffer2, size_t buffer2_len,
-                              struct dmx_ts_feed *feed)
+                              struct dmx_ts_feed *feed,
+                              u32 *buffer_flags)
 {
        struct net_device *dev = feed->priv;
 
@@ -992,7 +993,7 @@ static void dvb_net_sec(struct net_device *dev,
 
 static int dvb_net_sec_callback(const u8 *buffer1, size_t buffer1_len,
                 const u8 *buffer2, size_t buffer2_len,
-                struct dmx_section_filter *filter)
+                struct dmx_section_filter *filter, u32 *buffer_flags)
 {
        struct net_device *dev = filter->priv;
 
index 763145d74e836aba5ae400e20522cce0c84a308d..b811adf88afa3424d14b6584969928d2b5cdae54 100644 (file)
@@ -256,7 +256,8 @@ int dvb_vb2_is_streaming(struct dvb_vb2_ctx *ctx)
 }
 
 int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx,
-                       const unsigned char *src, int len)
+                       const unsigned char *src, int len,
+                       enum dmx_buffer_flags *buffer_flags)
 {
        unsigned long flags = 0;
        void *vbuf = NULL;
@@ -264,15 +265,17 @@ int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx,
        unsigned char *psrc = (unsigned char *)src;
        int ll = 0;
 
-       dprintk(3, "[%s] %d bytes are rcvd\n", ctx->name, len);
-       if (!src) {
-               dprintk(3, "[%s]:NULL pointer src\n", ctx->name);
-               /**normal case: This func is called twice from demux driver
-                * once with valid src pointer, second time with NULL pointer
-                */
+       /*
+        * Normal case: this function is called twice by the demux driver,
+        * once with a valid src pointer and a second time with a NULL pointer.
+        */
+       if (!src || !len)
                return 0;
-       }
        spin_lock_irqsave(&ctx->slock, flags);
+       if (buffer_flags && *buffer_flags) {
+               ctx->flags |= *buffer_flags;
+               *buffer_flags = 0;
+       }
        while (todo) {
                if (!ctx->buf) {
                        if (list_empty(&ctx->dvb_q)) {
@@ -395,6 +398,7 @@ int dvb_vb2_qbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
 
 int dvb_vb2_dqbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
 {
+       unsigned long flags;
        int ret;
 
        ret = vb2_core_dqbuf(&ctx->vb_q, &b->index, b, ctx->nonblocking);
@@ -402,7 +406,16 @@ int dvb_vb2_dqbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
                dprintk(1, "[%s] errno=%d\n", ctx->name, ret);
                return ret;
        }
-       dprintk(5, "[%s] index=%d\n", ctx->name, b->index);
+
+       spin_lock_irqsave(&ctx->slock, flags);
+       b->count = ctx->count++;
+       b->flags = ctx->flags;
+       ctx->flags = 0;
+       spin_unlock_irqrestore(&ctx->slock, flags);
+
+       dprintk(5, "[%s] index=%d, count=%d, flags=%d\n",
+               ctx->name, b->index, ctx->count, b->flags);
+
 
        return 0;
 }
index 50bce68ffd6646526fa86e1eaa5c65692b02a7b6..65d157fe76d19dad45ac94fa9eb21cbdad5dd575 100644 (file)
@@ -1262,11 +1262,12 @@ static int m88ds3103_select(struct i2c_mux_core *muxc, u32 chan)
  * New users must use I2C client binding directly!
  */
 struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
-               struct i2c_adapter *i2c, struct i2c_adapter **tuner_i2c_adapter)
+                                     struct i2c_adapter *i2c,
+                                     struct i2c_adapter **tuner_i2c_adapter)
 {
        struct i2c_client *client;
        struct i2c_board_info board_info;
-       struct m88ds3103_platform_data pdata;
+       struct m88ds3103_platform_data pdata = {};
 
        pdata.clk = cfg->clock;
        pdata.i2c_wr_max = cfg->i2c_wr_max;
@@ -1409,6 +1410,8 @@ static int m88ds3103_probe(struct i2c_client *client,
        case M88DS3103_CHIP_ID:
                break;
        default:
+               ret = -ENODEV;
+               dev_err(&client->dev, "Unknown device. Chip_id=%02x\n", dev->chip_id);
                goto err_kfree;
        }
 
index 3c1851984b907a54b22920dec24e865731006f96..2476d812f669476a286672de807009df8f4980fb 100644 (file)
@@ -505,80 +505,77 @@ static struct i2c_vbi_ram_value vbi_ram_default[] =
        /* FIXME: Current api doesn't handle all VBI types, those not
           yet supported are placed under #if 0 */
 #if 0
-       {0x010, /* Teletext, SECAM, WST System A */
+       [0] = {0x010, /* Teletext, SECAM, WST System A */
                {V4L2_SLICED_TELETEXT_SECAM,6,23,1},
                { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x26,
                  0xe6, 0xb4, 0x0e, 0x00, 0x00, 0x00, 0x10, 0x00 }
        },
 #endif
-       {0x030, /* Teletext, PAL, WST System B */
+       [1] = {0x030, /* Teletext, PAL, WST System B */
                {V4L2_SLICED_TELETEXT_B,6,22,1},
                { 0xaa, 0xaa, 0xff, 0xff, 0x27, 0x2e, 0x20, 0x2b,
                  0xa6, 0x72, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00 }
        },
 #if 0
-       {0x050, /* Teletext, PAL, WST System C */
+       [2] = {0x050, /* Teletext, PAL, WST System C */
                {V4L2_SLICED_TELETEXT_PAL_C,6,22,1},
                { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x22,
                  0xa6, 0x98, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 }
        },
-       {0x070, /* Teletext, NTSC, WST System B */
+       [3] = {0x070, /* Teletext, NTSC, WST System B */
                {V4L2_SLICED_TELETEXT_NTSC_B,10,21,1},
                { 0xaa, 0xaa, 0xff, 0xff, 0x27, 0x2e, 0x20, 0x23,
                  0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 }
        },
-       {0x090, /* Tetetext, NTSC NABTS System C */
+       [4] = {0x090, /* Tetetext, NTSC NABTS System C */
                {V4L2_SLICED_TELETEXT_NTSC_C,10,21,1},
                { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x22,
                  0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x15, 0x00 }
        },
-       {0x0b0, /* Teletext, NTSC-J, NABTS System D */
+       [5] = {0x0b0, /* Teletext, NTSC-J, NABTS System D */
                {V4L2_SLICED_TELETEXT_NTSC_D,10,21,1},
                { 0xaa, 0xaa, 0xff, 0xff, 0xa7, 0x2e, 0x20, 0x23,
                  0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 }
        },
-       {0x0d0, /* Closed Caption, PAL/SECAM */
+       [6] = {0x0d0, /* Closed Caption, PAL/SECAM */
                {V4L2_SLICED_CAPTION_625,22,22,1},
                { 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02,
                  0xa6, 0x7b, 0x09, 0x00, 0x00, 0x00, 0x27, 0x00 }
        },
 #endif
-       {0x0f0, /* Closed Caption, NTSC */
+       [7] = {0x0f0, /* Closed Caption, NTSC */
                {V4L2_SLICED_CAPTION_525,21,21,1},
                { 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02,
                  0x69, 0x8c, 0x09, 0x00, 0x00, 0x00, 0x27, 0x00 }
        },
-       {0x110, /* Wide Screen Signal, PAL/SECAM */
+       [8] = {0x110, /* Wide Screen Signal, PAL/SECAM */
                {V4L2_SLICED_WSS_625,23,23,1},
                { 0x5b, 0x55, 0xc5, 0xff, 0x00, 0x71, 0x6e, 0x42,
                  0xa6, 0xcd, 0x0f, 0x00, 0x00, 0x00, 0x3a, 0x00 }
        },
 #if 0
-       {0x130, /* Wide Screen Signal, NTSC C */
+       [9] = {0x130, /* Wide Screen Signal, NTSC C */
                {V4L2_SLICED_WSS_525,20,20,1},
                { 0x38, 0x00, 0x3f, 0x00, 0x00, 0x71, 0x6e, 0x43,
                  0x69, 0x7c, 0x08, 0x00, 0x00, 0x00, 0x39, 0x00 }
        },
-       {0x150, /* Vertical Interval Timecode (VITC), PAL/SECAM */
+       [10] = {0x150, /* Vertical Interval Timecode (VITC), PAL/SECAM */
                {V4l2_SLICED_VITC_625,6,22,0},
                { 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x6d, 0x49,
                  0xa6, 0x85, 0x08, 0x00, 0x00, 0x00, 0x4c, 0x00 }
        },
-       {0x170, /* Vertical Interval Timecode (VITC), NTSC */
+       [11] = {0x170, /* Vertical Interval Timecode (VITC), NTSC */
                {V4l2_SLICED_VITC_525,10,20,0},
                { 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x6d, 0x49,
                  0x69, 0x94, 0x08, 0x00, 0x00, 0x00, 0x4c, 0x00 }
        },
 #endif
-       {0x190, /* Video Program System (VPS), PAL */
+       [12] = {0x190, /* Video Program System (VPS), PAL */
                {V4L2_SLICED_VPS,16,16,0},
                { 0xaa, 0xaa, 0xff, 0xff, 0xba, 0xce, 0x2b, 0x0d,
                  0xa6, 0xda, 0x0b, 0x00, 0x00, 0x00, 0x60, 0x00 }
        },
        /* 0x1d0 User programmable */
-
-       /* End of struct */
-       { (u16)-1 }
 };
 
 static int tvp5150_write_inittab(struct v4l2_subdev *sd,
@@ -591,10 +588,10 @@ static int tvp5150_write_inittab(struct v4l2_subdev *sd,
        return 0;
 }
 
-static int tvp5150_vdp_init(struct v4l2_subdev *sd,
-                               const struct i2c_vbi_ram_value *regs)
+static int tvp5150_vdp_init(struct v4l2_subdev *sd)
 {
        unsigned int i;
+       int j;
 
        /* Disable Full Field */
        tvp5150_write(sd, TVP5150_FULL_FIELD_ENA, 0);
@@ -604,14 +601,17 @@ static int tvp5150_vdp_init(struct v4l2_subdev *sd,
                tvp5150_write(sd, i, 0xff);
 
        /* Load Ram Table */
-       while (regs->reg != (u16)-1) {
+       for (j = 0; j < ARRAY_SIZE(vbi_ram_default); j++) {
+               const struct i2c_vbi_ram_value *regs = &vbi_ram_default[j];
+
+               if (!regs->type.vbi_type)
+                       continue;
+
                tvp5150_write(sd, TVP5150_CONF_RAM_ADDR_HIGH, regs->reg >> 8);
                tvp5150_write(sd, TVP5150_CONF_RAM_ADDR_LOW, regs->reg);
 
                for (i = 0; i < 16; i++)
                        tvp5150_write(sd, TVP5150_VDP_CONF_RAM_DATA, regs->values[i]);
-
-               regs++;
        }
        return 0;
 }
@@ -620,19 +620,23 @@ static int tvp5150_vdp_init(struct v4l2_subdev *sd,
 static int tvp5150_g_sliced_vbi_cap(struct v4l2_subdev *sd,
                                struct v4l2_sliced_vbi_cap *cap)
 {
-       const struct i2c_vbi_ram_value *regs = vbi_ram_default;
-       int line;
+       int line, i;
 
        dev_dbg_lvl(sd->dev, 1, debug, "g_sliced_vbi_cap\n");
        memset(cap, 0, sizeof *cap);
 
-       while (regs->reg != (u16)-1 ) {
-               for (line=regs->type.ini_line;line<=regs->type.end_line;line++) {
+       for (i = 0; i < ARRAY_SIZE(vbi_ram_default); i++) {
+               const struct i2c_vbi_ram_value *regs = &vbi_ram_default[i];
+
+               if (!regs->type.vbi_type)
+                       continue;
+
+               for (line = regs->type.ini_line;
+                    line <= regs->type.end_line;
+                    line++) {
                        cap->service_lines[0][line] |= regs->type.vbi_type;
                }
                cap->service_set |= regs->type.vbi_type;
-
-               regs++;
        }
        return 0;
 }
@@ -651,14 +655,13 @@ static int tvp5150_g_sliced_vbi_cap(struct v4l2_subdev *sd,
  *     MSB = field2
  */
 static int tvp5150_set_vbi(struct v4l2_subdev *sd,
-                       const struct i2c_vbi_ram_value *regs,
                        unsigned int type,u8 flags, int line,
                        const int fields)
 {
        struct tvp5150 *decoder = to_tvp5150(sd);
        v4l2_std_id std = decoder->norm;
        u8 reg;
-       int pos = 0;
+       int i, pos = 0;
 
        if (std == V4L2_STD_ALL) {
                dev_err(sd->dev, "VBI can't be configured without knowing number of lines\n");
@@ -671,19 +674,19 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd,
        if (line < 6 || line > 27)
                return 0;
 
-       while (regs->reg != (u16)-1) {
+       for (i = 0; i < ARRAY_SIZE(vbi_ram_default); i++) {
+               const struct i2c_vbi_ram_value *regs =  &vbi_ram_default[i];
+
+               if (!regs->type.vbi_type)
+                       continue;
+
                if ((type & regs->type.vbi_type) &&
                    (line >= regs->type.ini_line) &&
                    (line <= regs->type.end_line))
                        break;
-
-               regs++;
                pos++;
        }
 
-       if (regs->reg == (u16)-1)
-               return 0;
-
        type = pos | (flags & 0xf0);
        reg = ((line - 6) << 1) + TVP5150_LINE_MODE_INI;
 
@@ -696,8 +699,7 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd,
        return type;
 }
 
-static int tvp5150_get_vbi(struct v4l2_subdev *sd,
-                       const struct i2c_vbi_ram_value *regs, int line)
+static int tvp5150_get_vbi(struct v4l2_subdev *sd, int line)
 {
        struct tvp5150 *decoder = to_tvp5150(sd);
        v4l2_std_id std = decoder->norm;
@@ -726,8 +728,8 @@ static int tvp5150_get_vbi(struct v4l2_subdev *sd,
                        return 0;
                }
                pos = ret & 0x0f;
-               if (pos < 0x0f)
-                       type |= regs[pos].type.vbi_type;
+               if (pos < ARRAY_SIZE(vbi_ram_default))
+                       type |= vbi_ram_default[pos].type.vbi_type;
        }
 
        return type;
@@ -788,7 +790,7 @@ static int tvp5150_reset(struct v4l2_subdev *sd, u32 val)
        tvp5150_write_inittab(sd, tvp5150_init_default);
 
        /* Initializes VDP registers */
-       tvp5150_vdp_init(sd, vbi_ram_default);
+       tvp5150_vdp_init(sd);
 
        /* Selects decoder input */
        tvp5150_selmux(sd);
@@ -1121,8 +1123,8 @@ static int tvp5150_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_f
                for (i = 0; i <= 23; i++) {
                        svbi->service_lines[1][i] = 0;
                        svbi->service_lines[0][i] =
-                               tvp5150_set_vbi(sd, vbi_ram_default,
-                                      svbi->service_lines[0][i], 0xf0, i, 3);
+                               tvp5150_set_vbi(sd, svbi->service_lines[0][i],
+                                               0xf0, i, 3);
                }
                /* Enables FIFO */
                tvp5150_write(sd, TVP5150_FIFO_OUT_CTRL, 1);
@@ -1148,7 +1150,7 @@ static int tvp5150_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_f
 
        for (i = 0; i <= 23; i++) {
                svbi->service_lines[0][i] =
-                       tvp5150_get_vbi(sd, vbi_ram_default, i);
+                       tvp5150_get_vbi(sd, i);
                mask |= svbi->service_lines[0][i];
        }
        svbi->service_set = mask;
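
The tvp5150 hunks above replace sentinel-terminated pointer walks over vbi_ram_default with loops bounded by ARRAY_SIZE() that skip empty entries. A minimal, self-contained sketch of that iteration pattern, assuming hypothetical names (vbi_entry, table, lookup_type) rather than the driver's own types:

#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct vbi_entry {
        unsigned short reg;
        unsigned int vbi_type;          /* 0 marks an unused slot */
};

static const struct vbi_entry table[] = {
        { 0x010, 1 },
        { 0x000, 0 },                   /* empty slot: skipped, no sentinel needed */
        { 0x030, 4 },
};

/* Walk the fixed-size table by index instead of chasing a (u16)-1 sentinel. */
static unsigned int lookup_type(unsigned short reg)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(table); i++) {
                if (!table[i].vbi_type)
                        continue;
                if (table[i].reg == reg)
                        return table[i].vbi_type;
        }
        return 0;
}

int main(void)
{
        printf("%u\n", lookup_type(0x030));     /* prints 4 */
        return 0;
}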
index dc8e577b2f748a4d287dacaf817beed1096e903e..d6816effb87866b80eae57d7c45004524d8cc031 100644 (file)
@@ -324,14 +324,15 @@ static int DvbDmxFilterCallback(u8 *buffer1, size_t buffer1_len,
                }
                return dvbdmxfilter->feed->cb.sec(buffer1, buffer1_len,
                                                  buffer2, buffer2_len,
-                                                 &dvbdmxfilter->filter);
+                                                 &dvbdmxfilter->filter, NULL);
        case DMX_TYPE_TS:
                if (!(dvbdmxfilter->feed->ts_type & TS_PACKET))
                        return 0;
                if (dvbdmxfilter->feed->ts_type & TS_PAYLOAD_ONLY)
                        return dvbdmxfilter->feed->cb.ts(buffer1, buffer1_len,
                                                         buffer2, buffer2_len,
-                                                        &dvbdmxfilter->feed->feed.ts);
+                                                        &dvbdmxfilter->feed->feed.ts,
+                                                        NULL);
                else
                        av7110_p2t_write(buffer1, buffer1_len,
                                         dvbdmxfilter->feed->pid,
index 4daba76ec240bc382e3b48e17f3d1cbafaaa6858..ef1bc17cdc4d37a8549b86d7085a8fdfd76abca3 100644 (file)
@@ -99,7 +99,7 @@ int av7110_record_cb(struct dvb_filter_pes2ts *p2t, u8 *buf, size_t len)
                buf[4] = buf[5] = 0;
        if (dvbdmxfeed->ts_type & TS_PAYLOAD_ONLY)
                return dvbdmxfeed->cb.ts(buf, len, NULL, 0,
-                                        &dvbdmxfeed->feed.ts);
+                                        &dvbdmxfeed->feed.ts, NULL);
        else
                return dvb_filter_pes2ts(p2t, buf, len, 1);
 }
@@ -109,7 +109,7 @@ static int dvb_filter_pes2ts_cb(void *priv, unsigned char *data)
        struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *) priv;
 
        dvbdmxfeed->cb.ts(data, 188, NULL, 0,
-                         &dvbdmxfeed->feed.ts);
+                         &dvbdmxfeed->feed.ts, NULL);
        return 0;
 }
 
@@ -814,7 +814,7 @@ static void p_to_t(u8 const *buf, long int length, u16 pid, u8 *counter,
                        memcpy(obuf + l, buf + c, TS_SIZE - l);
                        c = length;
                }
-               feed->cb.ts(obuf, 188, NULL, 0, &feed->feed.ts);
+               feed->cb.ts(obuf, 188, NULL, 0, &feed->feed.ts, NULL);
                pes_start = 0;
        }
 }
index 70521e0b4c5348b6a1ab563e122b557fb58c111b..bfaa806633df740ee72047d5ea81158eedfcb4b0 100644 (file)
@@ -1,7 +1,7 @@
 
 config VIDEO_AU0828
        tristate "Auvitek AU0828 support"
-       depends on I2C && INPUT && DVB_CORE && USB
+       depends on I2C && INPUT && DVB_CORE && USB && VIDEO_V4L2
        select I2C_ALGOBIT
        select VIDEO_TVEEPROM
        select VIDEOBUF2_VMALLOC
index a8900f5571f784014d273eb29058fa04ca08a55c..44ca66cb9b8f141e15d7ca0ccc0b7ae5c2abee60 100644 (file)
@@ -428,7 +428,7 @@ static int ttusb_dec_audio_pes2ts_cb(void *priv, unsigned char *data)
        struct ttusb_dec *dec = priv;
 
        dec->audio_filter->feed->cb.ts(data, 188, NULL, 0,
-                                      &dec->audio_filter->feed->feed.ts);
+                                      &dec->audio_filter->feed->feed.ts, NULL);
 
        return 0;
 }
@@ -438,7 +438,7 @@ static int ttusb_dec_video_pes2ts_cb(void *priv, unsigned char *data)
        struct ttusb_dec *dec = priv;
 
        dec->video_filter->feed->cb.ts(data, 188, NULL, 0,
-                                      &dec->video_filter->feed->feed.ts);
+                                      &dec->video_filter->feed->feed.ts, NULL);
 
        return 0;
 }
@@ -490,7 +490,7 @@ static void ttusb_dec_process_pva(struct ttusb_dec *dec, u8 *pva, int length)
 
                if (output_pva) {
                        dec->video_filter->feed->cb.ts(pva, length, NULL, 0,
-                               &dec->video_filter->feed->feed.ts);
+                               &dec->video_filter->feed->feed.ts, NULL);
                        return;
                }
 
@@ -551,7 +551,7 @@ static void ttusb_dec_process_pva(struct ttusb_dec *dec, u8 *pva, int length)
        case 0x02:              /* MainAudioStream */
                if (output_pva) {
                        dec->audio_filter->feed->cb.ts(pva, length, NULL, 0,
-                               &dec->audio_filter->feed->feed.ts);
+                               &dec->audio_filter->feed->feed.ts, NULL);
                        return;
                }
 
@@ -589,7 +589,7 @@ static void ttusb_dec_process_filter(struct ttusb_dec *dec, u8 *packet,
 
        if (filter)
                filter->feed->cb.sec(&packet[2], length - 2, NULL, 0,
-                                    &filter->filter);
+                                    &filter->filter, NULL);
 }
 
 static void ttusb_dec_process_packet(struct ttusb_dec *dec)
index bf52fbd07aeddc3f228cef870d05e9622699bd03..8e37e7c5e0f7e25aca250b8b052f274f2b8f3c09 100644 (file)
@@ -7,6 +7,7 @@ config VIDEO_V4L2
        tristate
        depends on (I2C || I2C=n) && VIDEO_DEV
        select RATIONAL
+       select VIDEOBUF2_V4L2 if VIDEOBUF2_CORE
        default (I2C || I2C=n) && VIDEO_DEV
 
 config VIDEO_ADV_DEBUG
index 80de2cb9c476a256634815155a4d033a428228af..7df54582e95689ef2c189652dd5c33ea735a8023 100644 (file)
@@ -13,7 +13,7 @@ ifeq ($(CONFIG_COMPAT),y)
 endif
 obj-$(CONFIG_V4L2_FWNODE) += v4l2-fwnode.o
 ifeq ($(CONFIG_TRACEPOINTS),y)
-  videodev-objs += vb2-trace.o v4l2-trace.o
+  videodev-objs += v4l2-trace.o
 endif
 videodev-$(CONFIG_MEDIA_CONTROLLER) += v4l2-mc.o
 
@@ -35,4 +35,3 @@ obj-$(CONFIG_VIDEOBUF_DVB) += videobuf-dvb.o
 
 ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
 ccflags-y += -I$(srctree)/drivers/media/tuners
-
index 0a7bdbed3a6f0185ce35ec49d2e6ccce80592a9c..e9c1485c32b95c66c670e698557632964a946072 100644 (file)
 #define REG_TO_DCPU_MBOX       0x10
 #define REG_TO_HOST_MBOX       0x14
 
+/* Macros to process offsets returned by the DCPU */
+#define DRAM_MSG_ADDR_OFFSET   0x0
+#define DRAM_MSG_TYPE_OFFSET   0x1c
+#define DRAM_MSG_ADDR_MASK     ((1UL << DRAM_MSG_TYPE_OFFSET) - 1)
+#define DRAM_MSG_TYPE_MASK     ((1UL << \
+                                (BITS_PER_LONG - DRAM_MSG_TYPE_OFFSET)) - 1)
+
 /* Message RAM */
-#define DCPU_MSG_RAM(x)                (0x100 + (x) * sizeof(u32))
+#define DCPU_MSG_RAM_START     0x100
+#define DCPU_MSG_RAM(x)                (DCPU_MSG_RAM_START + (x) * sizeof(u32))
 
 /* DRAM Info Offsets & Masks */
 #define DRAM_INFO_INTERVAL     0x0
@@ -255,6 +263,40 @@ static unsigned int get_msg_chksum(const u32 msg[])
        return sum;
 }
 
+static void __iomem *get_msg_ptr(struct private_data *priv, u32 response,
+                                char *buf, ssize_t *size)
+{
+       unsigned int msg_type;
+       unsigned int offset;
+       void __iomem *ptr = NULL;
+
+       msg_type = (response >> DRAM_MSG_TYPE_OFFSET) & DRAM_MSG_TYPE_MASK;
+       offset = (response >> DRAM_MSG_ADDR_OFFSET) & DRAM_MSG_ADDR_MASK;
+
+       /*
+        * msg_type == 1: the offset is relative to the message RAM
+        * msg_type == 0: the offset is relative to the data RAM (this is the
+        *                previous way of passing data)
+        * msg_type is anything else: there's a critical hardware problem
+        */
+       switch (msg_type) {
+       case 1:
+               ptr = priv->regs + DCPU_MSG_RAM_START + offset;
+               break;
+       case 0:
+               ptr = priv->dmem + offset;
+               break;
+       default:
+               dev_emerg(priv->dev, "invalid message reply from DCPU: %#x\n",
+                       response);
+               if (buf && size)
+                       *size = sprintf(buf,
+                               "FATAL: communication error with DCPU\n");
+       }
+
+       return ptr;
+}
+
 static int __send_command(struct private_data *priv, unsigned int cmd,
                          u32 result[])
 {
@@ -507,7 +549,7 @@ static ssize_t show_info(struct device *dev, struct device_attribute *devattr,
 {
        u32 response[MSG_FIELD_MAX];
        unsigned int info;
-       int ret;
+       ssize_t ret;
 
        ret = generic_show(DPFE_CMD_GET_INFO, response, dev, buf);
        if (ret)
@@ -528,18 +570,19 @@ static ssize_t show_refresh(struct device *dev,
        u32 response[MSG_FIELD_MAX];
        void __iomem *info;
        struct private_data *priv;
-       unsigned int offset;
        u8 refresh, sr_abort, ppre, thermal_offs, tuf;
        u32 mr4;
-       int ret;
+       ssize_t ret;
 
        ret = generic_show(DPFE_CMD_GET_REFRESH, response, dev, buf);
        if (ret)
                return ret;
 
        priv = dev_get_drvdata(dev);
-       offset = response[MSG_ARG0];
-       info = priv->dmem + offset;
+
+       info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret);
+       if (!info)
+               return ret;
 
        mr4 = readl_relaxed(info + DRAM_INFO_MR4) & DRAM_INFO_MR4_MASK;
 
@@ -561,7 +604,6 @@ static ssize_t store_refresh(struct device *dev, struct device_attribute *attr,
        u32 response[MSG_FIELD_MAX];
        struct private_data *priv;
        void __iomem *info;
-       unsigned int offset;
        unsigned long val;
        int ret;
 
@@ -574,8 +616,10 @@ static ssize_t store_refresh(struct device *dev, struct device_attribute *attr,
        if (ret)
                return ret;
 
-       offset = response[MSG_ARG0];
-       info = priv->dmem + offset;
+       info = get_msg_ptr(priv, response[MSG_ARG0], NULL, NULL);
+       if (!info)
+               return -EIO;
+
        writel_relaxed(val, info + DRAM_INFO_INTERVAL);
 
        return count;
@@ -587,23 +631,25 @@ static ssize_t show_vendor(struct device *dev, struct device_attribute *devattr,
        u32 response[MSG_FIELD_MAX];
        struct private_data *priv;
        void __iomem *info;
-       unsigned int offset;
-       int ret;
+       ssize_t ret;
 
        ret = generic_show(DPFE_CMD_GET_VENDOR, response, dev, buf);
        if (ret)
                return ret;
 
-       offset = response[MSG_ARG0];
        priv = dev_get_drvdata(dev);
-       info = priv->dmem + offset;
+
+       info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret);
+       if (!info)
+               return ret;
 
        return sprintf(buf, "%#x %#x %#x %#x %#x\n",
                       readl_relaxed(info + DRAM_VENDOR_MR5) & DRAM_VENDOR_MASK,
                       readl_relaxed(info + DRAM_VENDOR_MR6) & DRAM_VENDOR_MASK,
                       readl_relaxed(info + DRAM_VENDOR_MR7) & DRAM_VENDOR_MASK,
                       readl_relaxed(info + DRAM_VENDOR_MR8) & DRAM_VENDOR_MASK,
-                      readl_relaxed(info + DRAM_VENDOR_ERROR));
+                      readl_relaxed(info + DRAM_VENDOR_ERROR) &
+                                    DRAM_VENDOR_MASK);
 }
 
 static int brcmstb_dpfe_resume(struct platform_device *pdev)
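
The get_msg_ptr() helper added above splits the DCPU response word into a message-type field and an offset field before deciding which RAM region the offset is relative to. A standalone sketch of that decode, assuming a 32-bit response word and illustrative constants rather than the driver's headers:

#include <stdint.h>
#include <stdio.h>

#define MSG_ADDR_OFFSET 0
#define MSG_TYPE_OFFSET 28
#define MSG_ADDR_MASK   ((1UL << MSG_TYPE_OFFSET) - 1)
#define MSG_TYPE_MASK   ((1UL << (32 - MSG_TYPE_OFFSET)) - 1)

int main(void)
{
        /* Illustrative response: type 1 (message RAM), offset 0x1234 */
        uint32_t response = (1U << MSG_TYPE_OFFSET) | 0x1234;
        unsigned int type = (response >> MSG_TYPE_OFFSET) & MSG_TYPE_MASK;
        unsigned int offset = (response >> MSG_ADDR_OFFSET) & MSG_ADDR_MASK;

        /* type 1: offset relative to message RAM; type 0: data RAM;
         * anything else would be treated as a fatal communication error.
         */
        printf("type=%u offset=0x%x\n", type, offset);
        return 0;
}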
index 8d12017b9893a2bf0a6f2aefd7d62acca22d939a..4470630dd54552032a3d92dc73c101289ab62d41 100644 (file)
@@ -2687,6 +2687,8 @@ mptctl_hp_targetinfo(unsigned long arg)
                                __FILE__, __LINE__, iocnum);
                return -ENODEV;
        }
+       if (karg.hdr.id >= MPT_MAX_FC_DEVICES)
+               return -EINVAL;
        dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n",
            ioc->name));
 
index 3e5eabdae8d968d92ac9cedd5c83225267513126..772d02922529e2722184d23df7e65891f1b2b596 100644 (file)
@@ -548,12 +548,6 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
                goto out;
        }
 
-       if (bus->dev_state == MEI_DEV_POWER_DOWN) {
-               dev_dbg(bus->dev, "Device is powering down, don't bother with disconnection\n");
-               err = 0;
-               goto out;
-       }
-
        err = mei_cl_disconnect(cl);
        if (err < 0)
                dev_err(bus->dev, "Could not disconnect from the ME client\n");
index be64969d986abcd6e6da6fcec6bb2f019b404a22..7e60c1817c311e0b54a0711c116adb8eea89cf00 100644 (file)
@@ -945,6 +945,12 @@ int mei_cl_disconnect(struct mei_cl *cl)
                return 0;
        }
 
+       if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+               cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
+               mei_cl_set_disconnected(cl);
+               return 0;
+       }
+
        rets = pm_runtime_get(dev->dev);
        if (rets < 0 && rets != -EINPROGRESS) {
                pm_runtime_put_noidle(dev->dev);
index 0ccccbaf530d258a08acf633d34d9861c630ba64..e4b10b2d1a0838af03f16f29922b86c653cf99ba 100644 (file)
 #define MEI_DEV_ID_KBP        0xA2BA  /* Kaby Point */
 #define MEI_DEV_ID_KBP_2      0xA2BB  /* Kaby Point 2 */
 
+#define MEI_DEV_ID_CNP_LP     0x9DE0  /* Cannon Point LP */
+#define MEI_DEV_ID_CNP_LP_4   0x9DE4  /* Cannon Point LP 4 (iTouch) */
+#define MEI_DEV_ID_CNP_H      0xA360  /* Cannon Point H */
+#define MEI_DEV_ID_CNP_H_4    0xA364  /* Cannon Point H 4 (iTouch) */
+
 /*
  * MEI HW Section
  */
index 4a0ccda4d04b9a898c4cd3163ada9d94f581242e..ea4e152270a3b0270b1dc23edd8c3620af0ffd26 100644 (file)
@@ -98,6 +98,11 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
        {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},
 
+       {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH8_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_4, MEI_ME_PCH8_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH8_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},
+
        /* required last entry */
        {0, }
 };
index 2dd2db9bc1c90f771ab19a4feabb167a6c367071..337462e1569fee2628fc6126c29c3acc9cc904ae 100644 (file)
@@ -133,8 +133,10 @@ static long afu_ioctl(struct file *file, unsigned int cmd,
                if (!rc) {
                        rc = copy_to_user((u64 __user *) args, &irq_offset,
                                        sizeof(irq_offset));
-                       if (rc)
+                       if (rc) {
                                ocxl_afu_irq_free(ctx, irq_offset);
+                               return -EFAULT;
+                       }
                }
                break;
 
@@ -329,7 +331,7 @@ static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
 
        used += sizeof(header);
 
-       rc = (ssize_t) used;
+       rc = used;
        return rc;
 }
 
index 908e4db03535b038d548563188c0a11ac1ce4761..42d6aa89a48a95ad82bc60445f73971ac48730d2 100644 (file)
@@ -848,7 +848,6 @@ int mmc_interrupt_hpi(struct mmc_card *card)
                return 1;
        }
 
-       mmc_claim_host(card->host);
        err = mmc_send_status(card, &status);
        if (err) {
                pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
@@ -890,7 +889,6 @@ int mmc_interrupt_hpi(struct mmc_card *card)
        } while (!err);
 
 out:
-       mmc_release_host(card->host);
        return err;
 }
 
@@ -932,9 +930,7 @@ static int mmc_read_bkops_status(struct mmc_card *card)
        int err;
        u8 *ext_csd;
 
-       mmc_claim_host(card->host);
        err = mmc_get_ext_csd(card, &ext_csd);
-       mmc_release_host(card->host);
        if (err)
                return err;
 
index 35026795be2803c7387203c5719f4442c15b7c50..fa41d9422d57e04e9b12ef097a74d52418bdd87b 100644 (file)
@@ -487,6 +487,7 @@ static unsigned long exynos_dwmmc_caps[4] = {
 
 static const struct dw_mci_drv_data exynos_drv_data = {
        .caps                   = exynos_dwmmc_caps,
+       .num_caps               = ARRAY_SIZE(exynos_dwmmc_caps),
        .init                   = dw_mci_exynos_priv_init,
        .set_ios                = dw_mci_exynos_set_ios,
        .parse_dt               = dw_mci_exynos_parse_dt,
index 73fd75c3c824904d7171a51f16943192f6c1bc03..89cdb3d533bb519f57e3a4c7d6a7570f54f3077c 100644 (file)
@@ -135,6 +135,9 @@ static int dw_mci_hi6220_parse_dt(struct dw_mci *host)
        if (priv->ctrl_id < 0)
                priv->ctrl_id = 0;
 
+       if (priv->ctrl_id >= TIMING_MODE)
+               return -EINVAL;
+
        host->priv = priv;
        return 0;
 }
@@ -207,6 +210,7 @@ static int dw_mci_hi6220_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
 
 static const struct dw_mci_drv_data hi6220_data = {
        .caps                   = dw_mci_hi6220_caps,
+       .num_caps               = ARRAY_SIZE(dw_mci_hi6220_caps),
        .switch_voltage         = dw_mci_hi6220_switch_voltage,
        .set_ios                = dw_mci_hi6220_set_ios,
        .parse_dt               = dw_mci_hi6220_parse_dt,
index a3f1c2b3014534515db2246b0921b1be3a8b9c7d..3392952129355b2752f3131c0ff38a96b4e9d088 100644 (file)
@@ -319,6 +319,7 @@ static const struct dw_mci_drv_data rk2928_drv_data = {
 
 static const struct dw_mci_drv_data rk3288_drv_data = {
        .caps                   = dw_mci_rk3288_dwmmc_caps,
+       .num_caps               = ARRAY_SIZE(dw_mci_rk3288_dwmmc_caps),
        .set_ios                = dw_mci_rk3288_set_ios,
        .execute_tuning         = dw_mci_rk3288_execute_tuning,
        .parse_dt               = dw_mci_rk3288_parse_dt,
index d38e94ae2b855ccdbcd8c74db19950b9b9ea310e..c06b5393312ff185ffa9487931578665c74efd62 100644 (file)
@@ -195,6 +195,7 @@ static unsigned long zx_dwmmc_caps[3] = {
 
 static const struct dw_mci_drv_data zx_drv_data = {
        .caps                   = zx_dwmmc_caps,
+       .num_caps               = ARRAY_SIZE(zx_dwmmc_caps),
        .execute_tuning         = dw_mci_zx_execute_tuning,
        .prepare_hs400_tuning   = dw_mci_zx_prepare_hs400_tuning,
        .parse_dt               = dw_mci_zx_parse_dt,
index 0aa39975f33b8fbf36f0995cb56ffe283c7ca1b2..d9b4acefed31e6303aa614577f6b455afa3c04ba 100644 (file)
@@ -165,6 +165,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v)
 {
        struct dw_mci *host = s->private;
 
+       pm_runtime_get_sync(host->dev);
+
        seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
        seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
        seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
@@ -172,6 +174,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v)
        seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
        seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
 
+       pm_runtime_put_autosuspend(host->dev);
+
        return 0;
 }
 
@@ -2778,12 +2782,57 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
+{
+       struct dw_mci *host = slot->host;
+       const struct dw_mci_drv_data *drv_data = host->drv_data;
+       struct mmc_host *mmc = slot->mmc;
+       int ctrl_id;
+
+       if (host->pdata->caps)
+               mmc->caps = host->pdata->caps;
+
+       /*
+        * Support MMC_CAP_ERASE by default.
+        * It needs to use trim/discard/erase commands.
+        */
+       mmc->caps |= MMC_CAP_ERASE;
+
+       if (host->pdata->pm_caps)
+               mmc->pm_caps = host->pdata->pm_caps;
+
+       if (host->dev->of_node) {
+               ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
+               if (ctrl_id < 0)
+                       ctrl_id = 0;
+       } else {
+               ctrl_id = to_platform_device(host->dev)->id;
+       }
+
+       if (drv_data && drv_data->caps) {
+               if (ctrl_id >= drv_data->num_caps) {
+                       dev_err(host->dev, "invalid controller id %d\n",
+                               ctrl_id);
+                       return -EINVAL;
+               }
+               mmc->caps |= drv_data->caps[ctrl_id];
+       }
+
+       if (host->pdata->caps2)
+               mmc->caps2 = host->pdata->caps2;
+
+       /* Process SDIO IRQs through the sdio_irq_work. */
+       if (mmc->caps & MMC_CAP_SDIO_IRQ)
+               mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
+
+       return 0;
+}
+
 static int dw_mci_init_slot(struct dw_mci *host)
 {
        struct mmc_host *mmc;
        struct dw_mci_slot *slot;
-       const struct dw_mci_drv_data *drv_data = host->drv_data;
-       int ctrl_id, ret;
+       int ret;
        u32 freq[2];
 
        mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
@@ -2817,38 +2866,13 @@ static int dw_mci_init_slot(struct dw_mci *host)
        if (!mmc->ocr_avail)
                mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
 
-       if (host->pdata->caps)
-               mmc->caps = host->pdata->caps;
-
-       /*
-        * Support MMC_CAP_ERASE by default.
-        * It needs to use trim/discard/erase commands.
-        */
-       mmc->caps |= MMC_CAP_ERASE;
-
-       if (host->pdata->pm_caps)
-               mmc->pm_caps = host->pdata->pm_caps;
-
-       if (host->dev->of_node) {
-               ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
-               if (ctrl_id < 0)
-                       ctrl_id = 0;
-       } else {
-               ctrl_id = to_platform_device(host->dev)->id;
-       }
-       if (drv_data && drv_data->caps)
-               mmc->caps |= drv_data->caps[ctrl_id];
-
-       if (host->pdata->caps2)
-               mmc->caps2 = host->pdata->caps2;
-
        ret = mmc_of_parse(mmc);
        if (ret)
                goto err_host_allocated;
 
-       /* Process SDIO IRQs through the sdio_irq_work. */
-       if (mmc->caps & MMC_CAP_SDIO_IRQ)
-               mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
+       ret = dw_mci_init_slot_caps(slot);
+       if (ret)
+               goto err_host_allocated;
 
        /* Useful defaults if platform data is unset. */
        if (host->use_dma == TRANS_MODE_IDMAC) {
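
dw_mci_init_slot_caps() above now validates the controller id against the new num_caps field before using it to index the per-controller caps[] table. A rough userspace sketch of that bounds-check pattern, with a hypothetical structure and illustrative values:

#include <stdio.h>

struct drv_data {
        const unsigned long *caps;
        unsigned int num_caps;
};

static const unsigned long example_caps[4] = { 0x1, 0x2, 0x4, 0x8 };
static const struct drv_data example_drv = {
        .caps = example_caps,
        .num_caps = 4,                  /* would be ARRAY_SIZE(example_caps) */
};

static int init_slot_caps(const struct drv_data *drv, int ctrl_id,
                          unsigned long *mmc_caps)
{
        if (drv && drv->caps) {
                /* Reject ids that would index past the caps table. */
                if (ctrl_id < 0 || (unsigned int)ctrl_id >= drv->num_caps)
                        return -1;
                *mmc_caps |= drv->caps[ctrl_id];
        }
        return 0;
}

int main(void)
{
        unsigned long caps = 0;

        printf("%d\n", init_slot_caps(&example_drv, 5, &caps)); /* -1 */
        printf("%d\n", init_slot_caps(&example_drv, 2, &caps)); /*  0 */
        return 0;
}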
index e3124f06a47ef52840dc5abb77a7febd104b083d..1424bd490dd1bc1bcbd71dd4b2d803508db337b0 100644 (file)
@@ -543,6 +543,7 @@ struct dw_mci_slot {
 /**
  * dw_mci driver data - dw-mshc implementation specific driver data.
  * @caps: mmc subsystem specified capabilities of the controller(s).
+ * @num_caps: number of capabilities specified by @caps.
  * @init: early implementation specific initialization.
  * @set_ios: handle bus specific extensions.
  * @parse_dt: parse implementation specific device tree properties.
@@ -554,6 +555,7 @@ struct dw_mci_slot {
  */
 struct dw_mci_drv_data {
        unsigned long   *caps;
+       u32             num_caps;
        int             (*init)(struct dw_mci *host);
        void            (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
        int             (*parse_dt)(struct dw_mci *host);
index 6d1a983e622722b527d8a3c1b6cdea449c88d180..82c4f05f91d8781528a365d97b3033472df987f4 100644 (file)
@@ -654,9 +654,36 @@ static void byt_read_dsm(struct sdhci_pci_slot *slot)
        slot->chip->rpm_retune = intel_host->d3_retune;
 }
 
-static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
+static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+       int err = sdhci_execute_tuning(mmc, opcode);
+       struct sdhci_host *host = mmc_priv(mmc);
+
+       if (err)
+               return err;
+
+       /*
+        * Tuning can leave the IP in an active state (Buffer Read Enable bit
+        * set) which prevents the entry to low power states (i.e. S0i3). Data
+        * reset will clear it.
+        */
+       sdhci_reset(host, SDHCI_RESET_DATA);
+
+       return 0;
+}
+
+static void byt_probe_slot(struct sdhci_pci_slot *slot)
 {
+       struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
+
        byt_read_dsm(slot);
+
+       ops->execute_tuning = intel_execute_tuning;
+}
+
+static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
+{
+       byt_probe_slot(slot);
        slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
                                 MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
                                 MMC_CAP_CMD_DURING_TFR |
@@ -779,7 +806,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
 {
        int err;
 
-       byt_read_dsm(slot);
+       byt_probe_slot(slot);
 
        err = ni_set_max_freq(slot);
        if (err)
@@ -792,7 +819,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
 
 static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
 {
-       byt_read_dsm(slot);
+       byt_probe_slot(slot);
        slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
                                 MMC_CAP_WAIT_WHILE_BUSY;
        return 0;
@@ -800,7 +827,7 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
 
 static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
 {
-       byt_read_dsm(slot);
+       byt_probe_slot(slot);
        slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY |
                                 MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE;
        slot->cd_idx = 0;
index 3e5833cf1fabaa35f1fde959bcd4b7ed988ca44c..eb23f9ba1a9a10091f8d42bb71e35291e224c4f1 100644 (file)
@@ -426,6 +426,8 @@ static int xgbe_pci_resume(struct pci_dev *pdev)
        struct net_device *netdev = pdata->netdev;
        int ret = 0;
 
+       XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
+
        pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
        XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
 
index 22889fc158f2779d57f990f1fb52641d55cfe550..87c4308b52a7cc7666a88984712d24198214a741 100644 (file)
@@ -226,6 +226,10 @@ static int aq_pci_probe(struct pci_dev *pdev,
                goto err_ioremap;
 
        self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL);
+       if (!self->aq_hw) {
+               err = -ENOMEM;
+               goto err_ioremap;
+       }
        self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self);
 
        for (bar = 0; bar < 4; ++bar) {
@@ -235,19 +239,19 @@ static int aq_pci_probe(struct pci_dev *pdev,
                        mmio_pa = pci_resource_start(pdev, bar);
                        if (mmio_pa == 0U) {
                                err = -EIO;
-                               goto err_ioremap;
+                               goto err_free_aq_hw;
                        }
 
                        reg_sz = pci_resource_len(pdev, bar);
                        if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) {
                                err = -EIO;
-                               goto err_ioremap;
+                               goto err_free_aq_hw;
                        }
 
                        self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz);
                        if (!self->aq_hw->mmio) {
                                err = -EIO;
-                               goto err_ioremap;
+                               goto err_free_aq_hw;
                        }
                        break;
                }
@@ -255,7 +259,7 @@ static int aq_pci_probe(struct pci_dev *pdev,
 
        if (bar == 4) {
                err = -EIO;
-               goto err_ioremap;
+               goto err_free_aq_hw;
        }
 
        numvecs = min((u8)AQ_CFG_VECS_DEF,
@@ -290,6 +294,8 @@ err_register:
        aq_pci_free_irq_vectors(self);
 err_hwinit:
        iounmap(self->aq_hw->mmio);
+err_free_aq_hw:
+       kfree(self->aq_hw);
 err_ioremap:
        free_netdev(ndev);
 err_pci_func:
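
The aq_pci_probe() hunks above add an err_free_aq_hw unwind label so that failures after the kzalloc() release the allocation before reaching the earlier cleanup steps. A minimal sketch of that goto-unwind ordering, with standard C allocation standing in for the driver calls:

#include <stdlib.h>
#include <stdio.h>

static int probe(int fail_late)
{
        void *hw;
        int err;

        hw = calloc(1, 64);
        if (!hw) {
                err = -12;              /* -ENOMEM */
                goto err_ioremap;
        }

        if (fail_late) {
                err = -5;               /* -EIO */
                goto err_free_hw;       /* jumping to err_ioremap here would leak hw */
        }
        return 0;                       /* on success the allocation is kept */

err_free_hw:
        free(hw);
err_ioremap:
        /* earlier teardown (iounmap, free_netdev, ...) would follow here */
        return err;
}

int main(void)
{
        printf("%d\n", probe(1));       /* -5, with hw freed on the way out */
        return 0;
}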
index a77ee2f8fb8d223cc121a36a2ffb703cc32a02d9..c1841db1b500fa49f823c79e56cb3bc05f3f9199 100644 (file)
@@ -820,7 +820,7 @@ static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
 
                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 
-               udelay(10);
+               usleep_range(10, 20);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }
 
@@ -922,8 +922,8 @@ static int tg3_ape_send_event(struct tg3 *tp, u32 event)
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;
 
-       /* Wait for up to 1 millisecond for APE to service previous event. */
-       err = tg3_ape_event_lock(tp, 1000);
+       /* Wait for up to 20 milliseconds for APE to service the previous event. */
+       err = tg3_ape_event_lock(tp, 20000);
        if (err)
                return err;
 
@@ -946,6 +946,7 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
 
        switch (kind) {
        case RESET_KIND_INIT:
+               tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
@@ -962,13 +963,6 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
-               /* With the interface we are currently using,
-                * APE does not track driver state.  Wiping
-                * out the HOST SEGMENT SIGNATURE forces
-                * the APE to assume OS absent status.
-                */
-               tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
-
                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
@@ -990,6 +984,18 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
        tg3_ape_send_event(tp, event);
 }
 
+static void tg3_send_ape_heartbeat(struct tg3 *tp,
+                                  unsigned long interval)
+{
+       /* Check if the heartbeat interval has elapsed */
+       if (!tg3_flag(tp, ENABLE_APE) ||
+           time_before(jiffies, tp->ape_hb_jiffies + interval))
+               return;
+
+       tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
+       tp->ape_hb_jiffies = jiffies;
+}
+
 static void tg3_disable_ints(struct tg3 *tp)
 {
        int i;
@@ -7262,6 +7268,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
                }
        }
 
+       tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
        return work_done;
 
 tx_recovery:
@@ -7344,6 +7351,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)
                }
        }
 
+       tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
        return work_done;
 
 tx_recovery:
@@ -10732,7 +10740,7 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
        if (tg3_flag(tp, ENABLE_APE))
                /* Write our heartbeat update interval to APE. */
                tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
-                               APE_HOST_HEARTBEAT_INT_DISABLE);
+                               APE_HOST_HEARTBEAT_INT_5SEC);
 
        tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
 
@@ -11077,6 +11085,9 @@ static void tg3_timer(struct timer_list *t)
                tp->asf_counter = tp->asf_multiplier;
        }
 
+       /* Update the APE heartbeat every 5 seconds. */
+       tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
+
        spin_unlock(&tp->lock);
 
 restart_timer:
@@ -16653,6 +16664,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
                                       pci_state_reg);
 
                tg3_ape_lock_init(tp);
+               tp->ape_hb_interval =
+                       msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
        }
 
        /* Set up tp->grc_local_ctrl before calling
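
tg3_send_ape_heartbeat() above rate-limits the APE heartbeat write by comparing jiffies against the last-sent timestamp plus the configured interval. A userspace sketch of the same idea, with time() values standing in for jiffies and an illustrative 5-unit interval:

#include <stdio.h>
#include <time.h>

struct hb_state {
        unsigned int count;     /* stands in for the APE heartbeat counter */
        time_t last;            /* plays the role of tp->ape_hb_jiffies */
};

static void send_heartbeat(struct hb_state *hb, time_t now, time_t interval)
{
        if (now < hb->last + interval)
                return;         /* interval not yet reached, skip the write */

        hb->count++;            /* the real code writes HOST_HEARTBEAT_COUNT */
        hb->last = now;
}

int main(void)
{
        struct hb_state hb = { 0, 0 };
        time_t t;

        for (t = 0; t < 12; t++)
                send_heartbeat(&hb, t, 5);

        printf("heartbeats sent: %u\n", hb.count);      /* 2: at t=5 and t=10 */
        return 0;
}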
index 47f51cc0566d490b1ab3347ddbbebe080156677c..1d61aa3efda177c64c69465f0b72c0df5221ba37 100644 (file)
 #define TG3_APE_LOCK_PHY3              5
 #define TG3_APE_LOCK_GPIO              7
 
+#define TG3_APE_HB_INTERVAL             (tp->ape_hb_interval)
 #define TG3_EEPROM_SB_F1R2_MBA_OFF     0x10
 
 
@@ -3423,6 +3424,10 @@ struct tg3 {
        struct device                   *hwmon_dev;
        bool                            link_up;
        bool                            pcierr_recovery;
+
+       u32                             ape_hb;
+       unsigned long                   ape_hb_interval;
+       unsigned long                   ape_hb_jiffies;
 };
 
 /* Accessor macros for chip and asic attributes
index c87c9c684a337ea3651ac668d693c73a34238a9b..d59497a7bdcebd6e3c93194daf4a073334fab786 100644 (file)
@@ -75,6 +75,8 @@ EXPORT_SYMBOL(cavium_ptp_get);
 
 void cavium_ptp_put(struct cavium_ptp *ptp)
 {
+       if (!ptp)
+               return;
        pci_dev_put(ptp->pdev);
 }
 EXPORT_SYMBOL(cavium_ptp_put);
index b68cde9f17d2b10c08ca60c7a4d8c80f2d095b97..7d9c5ffbd0412191fc112a5eb7174ef318786276 100644 (file)
@@ -67,11 +67,6 @@ module_param(cpi_alg, int, S_IRUGO);
 MODULE_PARM_DESC(cpi_alg,
                 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
 
-struct nicvf_xdp_tx {
-       u64 dma_addr;
-       u8  qidx;
-};
-
 static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
 {
        if (nic->sqs_mode)
@@ -507,29 +502,14 @@ static int nicvf_init_resources(struct nicvf *nic)
        return 0;
 }
 
-static void nicvf_unmap_page(struct nicvf *nic, struct page *page, u64 dma_addr)
-{
-       /* Check if it's a recycled page, if not unmap the DMA mapping.
-        * Recycled page holds an extra reference.
-        */
-       if (page_ref_count(page) == 1) {
-               dma_addr &= PAGE_MASK;
-               dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
-                                    RCV_FRAG_LEN + XDP_HEADROOM,
-                                    DMA_FROM_DEVICE,
-                                    DMA_ATTR_SKIP_CPU_SYNC);
-       }
-}
-
 static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
                                struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
                                struct rcv_queue *rq, struct sk_buff **skb)
 {
        struct xdp_buff xdp;
        struct page *page;
-       struct nicvf_xdp_tx *xdp_tx = NULL;
        u32 action;
-       u16 len, err, offset = 0;
+       u16 len, offset = 0;
        u64 dma_addr, cpu_addr;
        void *orig_data;
 
@@ -543,7 +523,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
        cpu_addr = (u64)phys_to_virt(cpu_addr);
        page = virt_to_page((void *)cpu_addr);
 
-       xdp.data_hard_start = page_address(page) + RCV_BUF_HEADROOM;
+       xdp.data_hard_start = page_address(page);
        xdp.data = (void *)cpu_addr;
        xdp_set_data_meta_invalid(&xdp);
        xdp.data_end = xdp.data + len;
@@ -563,7 +543,18 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
 
        switch (action) {
        case XDP_PASS:
-               nicvf_unmap_page(nic, page, dma_addr);
+               /* Check if it's a recycled page, if not
+                * unmap the DMA mapping.
+                *
+                * Recycled page holds an extra reference.
+                */
+               if (page_ref_count(page) == 1) {
+                       dma_addr &= PAGE_MASK;
+                       dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
+                                            RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
+                                            DMA_FROM_DEVICE,
+                                            DMA_ATTR_SKIP_CPU_SYNC);
+               }
 
                /* Build SKB and pass on packet to network stack */
                *skb = build_skb(xdp.data,
@@ -576,20 +567,6 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
        case XDP_TX:
                nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
                return true;
-       case XDP_REDIRECT:
-               /* Save DMA address for use while transmitting */
-               xdp_tx = (struct nicvf_xdp_tx *)page_address(page);
-               xdp_tx->dma_addr = dma_addr;
-               xdp_tx->qidx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);
-
-               err = xdp_do_redirect(nic->pnicvf->netdev, &xdp, prog);
-               if (!err)
-                       return true;
-
-               /* Free the page on error */
-               nicvf_unmap_page(nic, page, dma_addr);
-               put_page(page);
-               break;
        default:
                bpf_warn_invalid_xdp_action(action);
                /* fall through */
@@ -597,7 +574,18 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
                trace_xdp_exception(nic->netdev, prog, action);
                /* fall through */
        case XDP_DROP:
-               nicvf_unmap_page(nic, page, dma_addr);
+               /* Check if it's a recycled page, if not
+                * unmap the DMA mapping.
+                *
+                * Recycled page holds an extra reference.
+                */
+               if (page_ref_count(page) == 1) {
+                       dma_addr &= PAGE_MASK;
+                       dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
+                                            RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
+                                            DMA_FROM_DEVICE,
+                                            DMA_ATTR_SKIP_CPU_SYNC);
+               }
                put_page(page);
                return true;
        }
@@ -1864,50 +1852,6 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
        }
 }
 
-static int nicvf_xdp_xmit(struct net_device *netdev, struct xdp_buff *xdp)
-{
-       struct nicvf *nic = netdev_priv(netdev);
-       struct nicvf *snic = nic;
-       struct nicvf_xdp_tx *xdp_tx;
-       struct snd_queue *sq;
-       struct page *page;
-       int err, qidx;
-
-       if (!netif_running(netdev) || !nic->xdp_prog)
-               return -EINVAL;
-
-       page = virt_to_page(xdp->data);
-       xdp_tx = (struct nicvf_xdp_tx *)page_address(page);
-       qidx = xdp_tx->qidx;
-
-       if (xdp_tx->qidx >= nic->xdp_tx_queues)
-               return -EINVAL;
-
-       /* Get secondary Qset's info */
-       if (xdp_tx->qidx >= MAX_SND_QUEUES_PER_QS) {
-               qidx = xdp_tx->qidx / MAX_SND_QUEUES_PER_QS;
-               snic = (struct nicvf *)nic->snicvf[qidx - 1];
-               if (!snic)
-                       return -EINVAL;
-               qidx = xdp_tx->qidx % MAX_SND_QUEUES_PER_QS;
-       }
-
-       sq = &snic->qs->sq[qidx];
-       err = nicvf_xdp_sq_append_pkt(snic, sq, (u64)xdp->data,
-                                     xdp_tx->dma_addr,
-                                     xdp->data_end - xdp->data);
-       if (err)
-               return -ENOMEM;
-
-       nicvf_xdp_sq_doorbell(snic, sq, qidx);
-       return 0;
-}
-
-static void nicvf_xdp_flush(struct net_device *dev)
-{
-       return;
-}
-
 static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
 {
        struct hwtstamp_config config;
@@ -1986,8 +1930,6 @@ static const struct net_device_ops nicvf_netdev_ops = {
        .ndo_fix_features       = nicvf_fix_features,
        .ndo_set_features       = nicvf_set_features,
        .ndo_bpf                = nicvf_xdp,
-       .ndo_xdp_xmit           = nicvf_xdp_xmit,
-       .ndo_xdp_flush          = nicvf_xdp_flush,
        .ndo_do_ioctl           = nicvf_ioctl,
 };
 
index 3eae9ff9b53a69413c986bb189ece2dc84f597ea..d42704d0748434d92eb17c74b0b0530aaa50f536 100644 (file)
@@ -204,7 +204,7 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
 
        /* Reserve space for header modifications by BPF program */
        if (rbdr->is_xdp)
-               buf_len += XDP_HEADROOM;
+               buf_len += XDP_PACKET_HEADROOM;
 
        /* Check if it's recycled */
        if (pgcache)
@@ -224,9 +224,8 @@ ret:
                        nic->rb_page = NULL;
                        return -ENOMEM;
                }
-
                if (pgcache)
-                       pgcache->dma_addr = *rbuf + XDP_HEADROOM;
+                       pgcache->dma_addr = *rbuf + XDP_PACKET_HEADROOM;
                nic->rb_page_offset += buf_len;
        }
 
@@ -1244,7 +1243,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
        int qentry;
 
        if (subdesc_cnt > sq->xdp_free_cnt)
-               return -1;
+               return 0;
 
        qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
 
@@ -1255,7 +1254,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
 
        sq->xdp_desc_cnt += subdesc_cnt;
 
-       return 0;
+       return 1;
 }
 
 /* Calculate no of SQ subdescriptors needed to transmit all
@@ -1656,7 +1655,7 @@ static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr,
                if (page_ref_count(page) != 1)
                        return;
 
-               len += XDP_HEADROOM;
+               len += XDP_PACKET_HEADROOM;
                /* Receive buffers in XDP mode are mapped from page start */
                dma_addr &= PAGE_MASK;
        }
index ce1eed7a6d63bd2671b5a3706d03e1f7e9e08010..5e9a03cf1b4d30f220f789934275fb9f2c9ce660 100644 (file)
@@ -11,7 +11,6 @@
 
 #include <linux/netdevice.h>
 #include <linux/iommu.h>
-#include <linux/bpf.h>
 #include <net/xdp.h>
 #include "q_struct.h"
 
@@ -94,9 +93,6 @@
 #define RCV_FRAG_LEN    (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 
-#define RCV_BUF_HEADROOM       128 /* To store dma address for XDP redirect */
-#define XDP_HEADROOM           (XDP_PACKET_HEADROOM + RCV_BUF_HEADROOM)
-
 #define MAX_CQES_FOR_TX                ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
                                 MAX_CQE_PER_PKT_XMIT)
 
index 557fd8bfd54e575646f083e5c426ba6c28ecf338..00a1d2d13169901becc9ffd4c283a8e696372524 100644 (file)
@@ -472,7 +472,7 @@ int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
 
        if (is_t6(padap->params.chip)) {
                size = padap->params.cim_la_size / 10 + 1;
-               size *= 11 * sizeof(u32);
+               size *= 10 * sizeof(u32);
        } else {
                size = padap->params.cim_la_size / 8;
                size *= 8 * sizeof(u32);
index 30485f9a598f159fb5d44bba3cdd1d9cf1a36fdb..143686c60234ebfc23fe84762f40dbe4bdf98d18 100644 (file)
@@ -102,7 +102,7 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
        case CUDBG_CIM_LA:
                if (is_t6(adap->params.chip)) {
                        len = adap->params.cim_la_size / 10 + 1;
-                       len *= 11 * sizeof(u32);
+                       len *= 10 * sizeof(u32);
                } else {
                        len = adap->params.cim_la_size / 8;
                        len *= 8 * sizeof(u32);
index 56bc626ef00688a5a03f4ccdd349e3acd44555b7..7b452e85de2ad18a5613ac41687a9eaeab872f9f 100644 (file)
@@ -4982,9 +4982,10 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
 
        pcie_fw = readl(adap->regs + PCIE_FW_A);
        /* Check if cxgb4 is the MASTER and fw is initialized */
-       if (!(pcie_fw & PCIE_FW_INIT_F) ||
+       if (num_vfs &&
+           (!(pcie_fw & PCIE_FW_INIT_F) ||
            !(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
-           PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF) {
+           PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF)) {
                dev_warn(&pdev->dev,
                         "cxgb4 driver needs to be MASTER to support SRIOV\n");
                return -EOPNOTSUPP;
@@ -5599,24 +5600,24 @@ static void remove_one(struct pci_dev *pdev)
 #if IS_ENABLED(CONFIG_IPV6)
                t4_cleanup_clip_tbl(adapter);
 #endif
-               iounmap(adapter->regs);
                if (!is_t4(adapter->params.chip))
                        iounmap(adapter->bar2);
-               pci_disable_pcie_error_reporting(pdev);
-               if ((adapter->flags & DEV_ENABLED)) {
-                       pci_disable_device(pdev);
-                       adapter->flags &= ~DEV_ENABLED;
-               }
-               pci_release_regions(pdev);
-               kfree(adapter->mbox_log);
-               synchronize_rcu();
-               kfree(adapter);
        }
 #ifdef CONFIG_PCI_IOV
        else {
                cxgb4_iov_configure(adapter->pdev, 0);
        }
 #endif
+       iounmap(adapter->regs);
+       pci_disable_pcie_error_reporting(pdev);
+       if ((adapter->flags & DEV_ENABLED)) {
+               pci_disable_device(pdev);
+               adapter->flags &= ~DEV_ENABLED;
+       }
+       pci_release_regions(pdev);
+       kfree(adapter->mbox_log);
+       synchronize_rcu();
+       kfree(adapter);
 }
 
 /* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
index 047609ef0515a5019673e3b6f5b2cdad9da15b38..920bccd6bc406515574bafdbd6b573d65fa02e6c 100644 (file)
@@ -2637,7 +2637,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
 }
 
 #define EEPROM_STAT_ADDR   0x7bfc
-#define VPD_SIZE           0x800
 #define VPD_BASE           0x400
 #define VPD_BASE_OLD       0
 #define VPD_LEN            1024
@@ -2704,15 +2703,6 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
        if (!vpd)
                return -ENOMEM;
 
-       /* We have two VPD data structures stored in the adapter VPD area.
-        * By default, Linux calculates the size of the VPD area by traversing
-        * the first VPD area at offset 0x0, so we need to tell the OS what
-        * our real VPD size is.
-        */
-       ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE);
-       if (ret < 0)
-               goto out;
-
        /* Card information normally starts at VPD_BASE but early cards had
         * it at 0.
         */
index 3bdeb295514bde273404c516d07b6c0c024aa025..f5c87bd35fa1ae52e854ea1bc75e4e9d80a8b4dc 100644 (file)
@@ -2934,29 +2934,17 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
 {
        int size = lstatus & BD_LENGTH_MASK;
        struct page *page = rxb->page;
-       bool last = !!(lstatus & BD_LFLAG(RXBD_LAST));
-
-       /* Remove the FCS from the packet length */
-       if (last)
-               size -= ETH_FCS_LEN;
 
        if (likely(first)) {
                skb_put(skb, size);
        } else {
                /* the last fragments' length contains the full frame length */
-               if (last)
+               if (lstatus & BD_LFLAG(RXBD_LAST))
                        size -= skb->len;
 
-               /* Add the last fragment if it contains something other than
-                * the FCS, otherwise drop it and trim off any part of the FCS
-                * that was already received.
-                */
-               if (size > 0)
-                       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-                                       rxb->page_offset + RXBUF_ALIGNMENT,
-                                       size, GFAR_RXB_TRUESIZE);
-               else if (size < 0)
-                       pskb_trim(skb, skb->len + size);
+               skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+                               rxb->page_offset + RXBUF_ALIGNMENT,
+                               size, GFAR_RXB_TRUESIZE);
        }
 
        /* try reuse page */
@@ -3069,6 +3057,9 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
        if (priv->padding)
                skb_pull(skb, priv->padding);
 
+       /* Trim off the FCS */
+       pskb_trim(skb, skb->len - ETH_FCS_LEN);
+
        if (ndev->features & NETIF_F_RXCSUM)
                gfar_rx_checksum(skb, fcb);
 
index 27447260215d1bb36a9d0cd5540efadf52514cb6..1b3cc8bb07050f6856eb4f3e7fe28e4296219d49 100644 (file)
@@ -791,6 +791,18 @@ static int ibmvnic_login(struct net_device *netdev)
        return 0;
 }
 
+static void release_login_buffer(struct ibmvnic_adapter *adapter)
+{
+       kfree(adapter->login_buf);
+       adapter->login_buf = NULL;
+}
+
+static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
+{
+       kfree(adapter->login_rsp_buf);
+       adapter->login_rsp_buf = NULL;
+}
+
 static void release_resources(struct ibmvnic_adapter *adapter)
 {
        int i;
@@ -813,6 +825,10 @@ static void release_resources(struct ibmvnic_adapter *adapter)
                        }
                }
        }
+       kfree(adapter->napi);
+       adapter->napi = NULL;
+
+       release_login_rsp_buffer(adapter);
 }
 
 static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
@@ -1057,6 +1073,35 @@ static int ibmvnic_open(struct net_device *netdev)
        return rc;
 }
 
+static void clean_rx_pools(struct ibmvnic_adapter *adapter)
+{
+       struct ibmvnic_rx_pool *rx_pool;
+       u64 rx_entries;
+       int rx_scrqs;
+       int i, j;
+
+       if (!adapter->rx_pool)
+               return;
+
+       rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+       rx_entries = adapter->req_rx_add_entries_per_subcrq;
+
+       /* Free any remaining skbs in the rx buffer pools */
+       for (i = 0; i < rx_scrqs; i++) {
+               rx_pool = &adapter->rx_pool[i];
+               if (!rx_pool)
+                       continue;
+
+               netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
+               for (j = 0; j < rx_entries; j++) {
+                       if (rx_pool->rx_buff[j].skb) {
+                               dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
+                               rx_pool->rx_buff[j].skb = NULL;
+                       }
+               }
+       }
+}
+
 static void clean_tx_pools(struct ibmvnic_adapter *adapter)
 {
        struct ibmvnic_tx_pool *tx_pool;
@@ -1134,7 +1179,7 @@ static int __ibmvnic_close(struct net_device *netdev)
                        }
                }
        }
-
+       clean_rx_pools(adapter);
        clean_tx_pools(adapter);
        adapter->state = VNIC_CLOSED;
        return rc;
@@ -1670,8 +1715,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
                return 0;
        }
 
-       netif_carrier_on(netdev);
-
        /* kick napi */
        for (i = 0; i < adapter->req_rx_queues; i++)
                napi_schedule(&adapter->napi[i]);
@@ -1679,6 +1722,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
        if (adapter->reset_reason != VNIC_RESET_FAILOVER)
                netdev_notify_peers(netdev);
 
+       netif_carrier_on(netdev);
+
        return 0;
 }
 
@@ -1853,6 +1898,12 @@ restart_poll:
                                   be16_to_cpu(next->rx_comp.rc));
                        /* free the entry */
                        next->rx_comp.first = 0;
+                       dev_kfree_skb_any(rx_buff->skb);
+                       remove_buff_from_pool(adapter, rx_buff);
+                       continue;
+               } else if (!rx_buff->skb) {
+                       /* free the entry */
+                       next->rx_comp.first = 0;
                        remove_buff_from_pool(adapter, rx_buff);
                        continue;
                }
@@ -3013,6 +3064,7 @@ static void send_login(struct ibmvnic_adapter *adapter)
        struct vnic_login_client_data *vlcd;
        int i;
 
+       release_login_rsp_buffer(adapter);
        client_data_len = vnic_client_data_len(adapter);
 
        buffer_size =
@@ -3738,6 +3790,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
                ibmvnic_remove(adapter->vdev);
                return -EIO;
        }
+       release_login_buffer(adapter);
        complete(&adapter->init_done);
 
        return 0;
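
The ibmvnic hunks above add release_login_buffer()/release_login_rsp_buffer() helpers that free a buffer and clear its pointer in one place, so the same helper can be called from several teardown paths. A small sketch of that free-and-NULL idiom, with a hypothetical adapter structure:

#include <stdlib.h>
#include <stdio.h>

struct adapter {
        char *login_buf;
};

static void release_login_buffer(struct adapter *a)
{
        free(a->login_buf);     /* free(NULL) is a no-op, so repeat calls are safe */
        a->login_buf = NULL;
}

int main(void)
{
        struct adapter a = { .login_buf = malloc(32) };

        release_login_buffer(&a);       /* normal teardown */
        release_login_buffer(&a);       /* duplicate call is harmless */
        printf("%s\n", a.login_buf ? "still set" : "cleared");
        return 0;
}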
index a1d7b88cf0835de86455f21b1f967d739207b572..5a1668cdb461c7437cf510b5592f7ab54134b0f7 100644 (file)
@@ -7137,6 +7137,7 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
        int id = port->id;
        bool allmulti = dev->flags & IFF_ALLMULTI;
 
+retry:
        mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
        mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
        mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
@@ -7144,9 +7145,13 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
        /* Remove all port->id's mcast entries */
        mvpp2_prs_mcast_del_all(priv, id);
 
-       if (allmulti && !netdev_mc_empty(dev)) {
-               netdev_for_each_mc_addr(ha, dev)
-                       mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
+       if (!allmulti) {
+               netdev_for_each_mc_addr(ha, dev) {
+                       if (mvpp2_prs_mac_da_accept(priv, id, ha->addr, true)) {
+                               allmulti = true;
+                               goto retry;
+                       }
+               }
        }
 }
 
index 0be4575b58a27261084ee25058ed56039611e31b..fd509160c8f6cc2f1ee81ae2594feb8952e37a8e 100644 (file)
@@ -96,10 +96,10 @@ static void print_lyr_2_4_hdrs(struct trace_seq *p,
                                          "%pI4");
                } else if (ethertype.v == ETH_P_IPV6) {
                        static const struct in6_addr full_ones = {
-                               .in6_u.u6_addr32 = {htonl(0xffffffff),
-                                                   htonl(0xffffffff),
-                                                   htonl(0xffffffff),
-                                                   htonl(0xffffffff)},
+                               .in6_u.u6_addr32 = {__constant_htonl(0xffffffff),
+                                                   __constant_htonl(0xffffffff),
+                                                   __constant_htonl(0xffffffff),
+                                                   __constant_htonl(0xffffffff)},
                        };
                        DECLARE_MASK_VAL(struct in6_addr, src_ipv6);
                        DECLARE_MASK_VAL(struct in6_addr, dst_ipv6);
index 47bab842c5eea8656a68002440d616c7c94c4611..da94c8cba5ee1b7e8f6309d81fa7db29a1db10b6 100644 (file)
@@ -1768,13 +1768,16 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
        param->wq.linear = 1;
 }
 
-static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
+static void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
+                                     struct mlx5e_rq_param *param)
 {
        void *rqc = param->rqc;
        void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
 
        MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
        MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
+
+       param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
 }
 
 static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
@@ -2634,6 +2637,9 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
                               struct mlx5e_cq *cq,
                               struct mlx5e_cq_param *param)
 {
+       param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
+       param->wq.db_numa_node  = dev_to_node(&mdev->pdev->dev);
+
        return mlx5e_alloc_cq_common(mdev, param, cq);
 }
 
@@ -2645,7 +2651,7 @@ static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
        struct mlx5e_cq *cq = &drop_rq->cq;
        int err;
 
-       mlx5e_build_drop_rq_param(&rq_param);
+       mlx5e_build_drop_rq_param(mdev, &rq_param);
 
        err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
        if (err)
@@ -2994,8 +3000,8 @@ static int mlx5e_setup_tc_block(struct net_device *dev,
 }
 #endif
 
-int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
-                  void *type_data)
+static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
+                         void *type_data)
 {
        switch (type) {
 #ifdef CONFIG_MLX5_ESWITCH
index 0d4bb0688faa1349913d25c13f2366b130a06682..e5c3ab46a24a5134de9349a8e761504cfa49a33d 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/tcp.h>
 #include <linux/bpf_trace.h>
 #include <net/busy_poll.h>
+#include <net/ip6_checksum.h>
 #include "en.h"
 #include "en_tc.h"
 #include "eswitch.h"
@@ -546,20 +547,33 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
        return true;
 }
 
+static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
+{
+       u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
+       u8 tcp_ack     = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
+                        (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
+
+       tcp->check                      = 0;
+       tcp->psh                        = get_cqe_lro_tcppsh(cqe);
+
+       if (tcp_ack) {
+               tcp->ack                = 1;
+               tcp->ack_seq            = cqe->lro_ack_seq_num;
+               tcp->window             = cqe->lro_tcp_win;
+       }
+}
+
 static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
                                 u32 cqe_bcnt)
 {
        struct ethhdr   *eth = (struct ethhdr *)(skb->data);
        struct tcphdr   *tcp;
        int network_depth = 0;
+       __wsum check;
        __be16 proto;
        u16 tot_len;
        void *ip_p;
 
-       u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
-       u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
-               (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
-
        proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
 
        tot_len = cqe_bcnt - network_depth;
@@ -576,23 +590,30 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
                ipv4->check             = 0;
                ipv4->check             = ip_fast_csum((unsigned char *)ipv4,
                                                       ipv4->ihl);
+
+               mlx5e_lro_update_tcp_hdr(cqe, tcp);
+               check = csum_partial(tcp, tcp->doff * 4,
+                                    csum_unfold((__force __sum16)cqe->check_sum));
+               /* Almost done, don't forget the pseudo header */
+               tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
+                                              tot_len - sizeof(struct iphdr),
+                                              IPPROTO_TCP, check);
        } else {
+               u16 payload_len = tot_len - sizeof(struct ipv6hdr);
                struct ipv6hdr *ipv6 = ip_p;
 
                tcp = ip_p + sizeof(struct ipv6hdr);
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
 
                ipv6->hop_limit         = cqe->lro_min_ttl;
-               ipv6->payload_len       = cpu_to_be16(tot_len -
-                                                     sizeof(struct ipv6hdr));
-       }
-
-       tcp->psh = get_cqe_lro_tcppsh(cqe);
-
-       if (tcp_ack) {
-               tcp->ack                = 1;
-               tcp->ack_seq            = cqe->lro_ack_seq_num;
-               tcp->window             = cqe->lro_tcp_win;
+               ipv6->payload_len       = cpu_to_be16(payload_len);
+
+               mlx5e_lro_update_tcp_hdr(cqe, tcp);
+               check = csum_partial(tcp, tcp->doff * 4,
+                                    csum_unfold((__force __sum16)cqe->check_sum));
+               /* Almost done, don't forget the pseudo header */
+               tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
+                                            IPPROTO_TCP, check);
        }
 }
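
Since LRO coalescing changes the IP and TCP lengths, the hunk above zeroes tcp->check and rebuilds it with csum_partial() over the TCP header plus the pseudo header supplied by csum_tcpudp_magic()/csum_ipv6_magic(). As a reminder of what those helpers compute, here is a stand-alone RFC 1071 one's-complement checksum over an IPv4 pseudo header followed by a TCP segment (illustrative values only):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* 16-bit one's-complement sum over a byte buffer (RFC 1071). */
static uint32_t sum16(uint32_t sum, const void *buf, size_t len)
{
        const uint8_t *p = buf;

        while (len > 1) {
                sum += (uint32_t)p[0] << 8 | p[1];
                p += 2;
                len -= 2;
        }
        if (len)
                sum += (uint32_t)p[0] << 8;     /* pad odd byte with zero */
        return sum;
}

static uint16_t fold16(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

/* TCP checksum = one's-complement sum of pseudo header + TCP segment. */
static uint16_t tcp_v4_csum(uint32_t saddr, uint32_t daddr,
                            const uint8_t *tcp, uint16_t tcp_len)
{
        uint8_t pseudo[12];

        memcpy(&pseudo[0], &saddr, 4);          /* already network order */
        memcpy(&pseudo[4], &daddr, 4);
        pseudo[8]  = 0;
        pseudo[9]  = 6;                         /* IPPROTO_TCP */
        pseudo[10] = tcp_len >> 8;
        pseudo[11] = tcp_len & 0xff;

        return fold16(sum16(sum16(0, pseudo, sizeof(pseudo)), tcp, tcp_len));
}

int main(void)
{
        uint8_t seg[20] = { 0 };                /* dummy TCP header */

        printf("csum=0x%04x\n",
               tcp_v4_csum(htonl(0xc0a80001), htonl(0xc0a80002),
                           seg, sizeof(seg)));
        return 0;
}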
 
index 5a4608281f38d236d77d8f86f81f52d683f3eadc..707976482c0987c33b5183804e152309cc57d6d4 100644 (file)
@@ -216,7 +216,8 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
        if (iph->protocol != IPPROTO_UDP)
                goto out;
 
-       udph = udp_hdr(skb);
+       /* Don't assume skb_transport_header() was set */
+       udph = (struct udphdr *)((u8 *)iph + 4 * iph->ihl);
        if (udph->dest != htons(9))
                goto out;
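
The loopback-validate fix computes the UDP header position straight from the IPv4 header because skb_transport_header() may not have been set yet; ihl counts 32-bit words, so the transport header starts 4 * ihl bytes into the IP header. A self-contained version of the same arithmetic on a raw buffer (dummy packet bytes, minimal struct layouts):

#include <stdint.h>
#include <stdio.h>

struct iphdr_min {
        uint8_t ver_ihl;        /* version (4 bits) | ihl (4 bits) */
        uint8_t rest[19];       /* remainder of a 20-byte header   */
};

struct udphdr_min {
        uint16_t source, dest, len, check;
};

int main(void)
{
        /* Minimal fake packet: 20-byte IPv4 header (ihl = 5), then UDP. */
        uint8_t pkt[28] = { 0x45 };
        const struct iphdr_min *iph = (const void *)pkt;
        unsigned int ihl = iph->ver_ihl & 0x0f;

        /* Same arithmetic as the fix: the transport header starts
         * 4 * ihl bytes after the start of the IP header. */
        const struct udphdr_min *udph = (const void *)(pkt + 4 * ihl);

        printf("ihl=%u, UDP header at offset %ld\n",
               ihl, (long)((const uint8_t *)udph - pkt));
        return 0;
}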
 
index fd98b0dc610fffce72c7702598271825be6e9160..fa86a1466718037f89a5bdb4b2d67e9bbe2b341e 100644 (file)
@@ -2529,7 +2529,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                        if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
                        } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
-                               if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
+                               if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
+                                   tcf_vlan_push_prio(a))
                                        return -EOPNOTSUPP;
 
                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
index 569b42a010265bec16510b1a860c456b3f76a0e3..11b4f1089d1ceffc05b703118e911895d08644c8 100644 (file)
@@ -176,7 +176,7 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
        default:
                hlen = mlx5e_skb_l2_header_offset(skb);
        }
-       return min_t(u16, hlen, skb->len);
+       return min_t(u16, hlen, skb_headlen(skb));
 }
 
 static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
index 5ecf2cddc16dfef2e53df1d8617e15fced37c377..c2b1d7d351fc29362e4ce4de641a280b92b96284 100644 (file)
@@ -1529,6 +1529,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
 
        esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
 
+       /* Create steering drop counters for ingress and egress ACLs */
+       if (vport_num && esw->mode == SRIOV_LEGACY)
+               esw_vport_create_drop_counters(vport);
+
        /* Restore old vport configuration */
        esw_apply_vport_conf(esw, vport);
 
@@ -1545,10 +1549,6 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
        if (!vport_num)
                vport->info.trusted = true;
 
-       /* create steering drop counters for ingress and egress ACLs */
-       if (vport_num && esw->mode == SRIOV_LEGACY)
-               esw_vport_create_drop_counters(vport);
-
        esw_vport_change_handle_locked(vport);
 
        esw->enabled_vports++;
index c025c98700e4cd627a503d8a982f7854421da5b6..31fc2cfac3b3beea8c8bc28851cd5172f880926e 100644 (file)
@@ -1429,7 +1429,8 @@ static bool check_conflicting_actions(u32 action1, u32 action2)
 
        if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP  |
                             MLX5_FLOW_CONTEXT_ACTION_ENCAP |
-                            MLX5_FLOW_CONTEXT_ACTION_DECAP))
+                            MLX5_FLOW_CONTEXT_ACTION_DECAP |
+                            MLX5_FLOW_CONTEXT_ACTION_MOD_HDR))
                return true;
 
        return false;
@@ -1758,8 +1759,11 @@ search_again_locked:
 
        /* Collect all fgs which has a matching match_criteria */
        err = build_match_list(&match_head, ft, spec);
-       if (err)
+       if (err) {
+               if (take_write)
+                       up_write_ref_node(&ft->node);
                return ERR_PTR(err);
+       }
 
        if (!take_write)
                up_read_ref_node(&ft->node);
@@ -1768,8 +1772,11 @@ search_again_locked:
                                      dest_num, version);
        free_match_list(&match_head);
        if (!IS_ERR(rule) ||
-           (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN))
+           (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
+               if (take_write)
+                       up_write_ref_node(&ft->node);
                return rule;
+       }
 
        if (!take_write) {
                nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
index e159243e0fcfb2dadbb79cb1f0895c172ec43cf2..857035583ccdd156c7913c001c1509e0cde044bb 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/highmem.h>
 #include <rdma/mlx5-abi.h>
 #include "en.h"
+#include "clock.h"
 
 enum {
        MLX5_CYCLES_SHIFT       = 23
index 2ef641c91c267ceec1cacb744f492ef2a1ff8dc0..ae391e4b70706ec81e2dedaf003858c24be57954 100644 (file)
@@ -551,7 +551,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
                MLX5_SET(cmd_hca_cap,
                         set_hca_cap,
                         cache_line_128byte,
-                        cache_line_size() == 128 ? 1 : 0);
+                        cache_line_size() >= 128 ? 1 : 0);
 
        if (MLX5_CAP_GEN_MAX(dev, dct))
                MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1);
index f0b25baba09a66fe3a1011f91e2c3e8e96637498..f7948e983637da5e887d47cd3af80ebdc599c872 100644 (file)
@@ -788,6 +788,9 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
                                              u32 tb_id,
                                              struct netlink_ext_ack *extack)
 {
+       struct mlxsw_sp_mr_table *mr4_table;
+       struct mlxsw_sp_fib *fib4;
+       struct mlxsw_sp_fib *fib6;
        struct mlxsw_sp_vr *vr;
        int err;
 
@@ -796,29 +799,30 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
                NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
                return ERR_PTR(-EBUSY);
        }
-       vr->fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
-       if (IS_ERR(vr->fib4))
-               return ERR_CAST(vr->fib4);
-       vr->fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
-       if (IS_ERR(vr->fib6)) {
-               err = PTR_ERR(vr->fib6);
+       fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
+       if (IS_ERR(fib4))
+               return ERR_CAST(fib4);
+       fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
+       if (IS_ERR(fib6)) {
+               err = PTR_ERR(fib6);
                goto err_fib6_create;
        }
-       vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
-                                                MLXSW_SP_L3_PROTO_IPV4);
-       if (IS_ERR(vr->mr4_table)) {
-               err = PTR_ERR(vr->mr4_table);
+       mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
+                                            MLXSW_SP_L3_PROTO_IPV4);
+       if (IS_ERR(mr4_table)) {
+               err = PTR_ERR(mr4_table);
                goto err_mr_table_create;
        }
+       vr->fib4 = fib4;
+       vr->fib6 = fib6;
+       vr->mr4_table = mr4_table;
        vr->tb_id = tb_id;
        return vr;
 
 err_mr_table_create:
-       mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
-       vr->fib6 = NULL;
+       mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
 err_fib6_create:
-       mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
-       vr->fib4 = NULL;
+       mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
        return ERR_PTR(err);
 }
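
The mlxsw_sp_vr_create() rework above follows a common error-handling rule: build each sub-object into a local, unwind the locals on failure, and publish them into the long-lived structure only after everything succeeded, so an error path never leaves stale pointers behind in *vr. A generic sketch of that shape with hypothetical make_a()/make_b() constructors:

#include <stdlib.h>

struct part { int dummy; };
struct thing { struct part *a, *b; };

/* Hypothetical constructors/destructor standing in for
 * mlxsw_sp_fib_create()/mlxsw_sp_mr_table_create() and friends. */
static struct part *make_a(void) { return calloc(1, sizeof(struct part)); }
static struct part *make_b(void) { return calloc(1, sizeof(struct part)); }
static void destroy_part(struct part *p) { free(p); }

static int thing_init(struct thing *t)
{
        struct part *a, *b;

        a = make_a();
        if (!a)
                return -1;

        b = make_b();
        if (!b)
                goto err_b;

        /* Publish only after every allocation succeeded: on any error
         * above, *t is left completely untouched. */
        t->a = a;
        t->b = b;
        return 0;

err_b:
        destroy_part(a);
        return -1;
}

int main(void)
{
        struct thing t = { 0 };

        return thing_init(&t);
}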
 
@@ -3790,6 +3794,9 @@ mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
        struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
        int i;
 
+       if (!list_is_singular(&nh_grp->fib_list))
+               return;
+
        for (i = 0; i < nh_grp->count; i++) {
                struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
 
index 7e7704daf5f1e4928d69c1fa397a5f13354af2b8..c4949183eef3f0654cf8e5855ba6b85bce6b35f3 100644 (file)
 
 /* Local Definitions and Declarations */
 
-struct rmnet_walk_data {
-       struct net_device *real_dev;
-       struct list_head *head;
-       struct rmnet_port *port;
-};
-
 static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
 {
        return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
@@ -112,17 +106,14 @@ static int rmnet_register_real_device(struct net_device *real_dev)
 static void rmnet_unregister_bridge(struct net_device *dev,
                                    struct rmnet_port *port)
 {
-       struct net_device *rmnet_dev, *bridge_dev;
        struct rmnet_port *bridge_port;
+       struct net_device *bridge_dev;
 
        if (port->rmnet_mode != RMNET_EPMODE_BRIDGE)
                return;
 
        /* bridge slave handling */
        if (!port->nr_rmnet_devs) {
-               rmnet_dev = netdev_master_upper_dev_get_rcu(dev);
-               netdev_upper_dev_unlink(dev, rmnet_dev);
-
                bridge_dev = port->bridge_ep;
 
                bridge_port = rmnet_get_port_rtnl(bridge_dev);
@@ -132,9 +123,6 @@ static void rmnet_unregister_bridge(struct net_device *dev,
                bridge_dev = port->bridge_ep;
 
                bridge_port = rmnet_get_port_rtnl(bridge_dev);
-               rmnet_dev = netdev_master_upper_dev_get_rcu(bridge_dev);
-               netdev_upper_dev_unlink(bridge_dev, rmnet_dev);
-
                rmnet_unregister_real_device(bridge_dev, bridge_port);
        }
 }
@@ -173,10 +161,6 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
        if (err)
                goto err1;
 
-       err = netdev_master_upper_dev_link(dev, real_dev, NULL, NULL, extack);
-       if (err)
-               goto err2;
-
        port->rmnet_mode = mode;
 
        hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
@@ -193,8 +177,6 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
 
        return 0;
 
-err2:
-       rmnet_vnd_dellink(mux_id, port, ep);
 err1:
        rmnet_unregister_real_device(real_dev, port);
 err0:
@@ -204,14 +186,13 @@ err0:
 
 static void rmnet_dellink(struct net_device *dev, struct list_head *head)
 {
+       struct rmnet_priv *priv = netdev_priv(dev);
        struct net_device *real_dev;
        struct rmnet_endpoint *ep;
        struct rmnet_port *port;
        u8 mux_id;
 
-       rcu_read_lock();
-       real_dev = netdev_master_upper_dev_get_rcu(dev);
-       rcu_read_unlock();
+       real_dev = priv->real_dev;
 
        if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
                return;
@@ -219,7 +200,6 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head)
        port = rmnet_get_port_rtnl(real_dev);
 
        mux_id = rmnet_vnd_get_mux(dev);
-       netdev_upper_dev_unlink(dev, real_dev);
 
        ep = rmnet_get_endpoint(port, mux_id);
        if (ep) {
@@ -233,30 +213,13 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head)
        unregister_netdevice_queue(dev, head);
 }
 
-static int rmnet_dev_walk_unreg(struct net_device *rmnet_dev, void *data)
-{
-       struct rmnet_walk_data *d = data;
-       struct rmnet_endpoint *ep;
-       u8 mux_id;
-
-       mux_id = rmnet_vnd_get_mux(rmnet_dev);
-       ep = rmnet_get_endpoint(d->port, mux_id);
-       if (ep) {
-               hlist_del_init_rcu(&ep->hlnode);
-               rmnet_vnd_dellink(mux_id, d->port, ep);
-               kfree(ep);
-       }
-       netdev_upper_dev_unlink(rmnet_dev, d->real_dev);
-       unregister_netdevice_queue(rmnet_dev, d->head);
-
-       return 0;
-}
-
 static void rmnet_force_unassociate_device(struct net_device *dev)
 {
        struct net_device *real_dev = dev;
-       struct rmnet_walk_data d;
+       struct hlist_node *tmp_ep;
+       struct rmnet_endpoint *ep;
        struct rmnet_port *port;
+       unsigned long bkt_ep;
        LIST_HEAD(list);
 
        if (!rmnet_is_real_dev_registered(real_dev))
@@ -264,16 +227,19 @@ static void rmnet_force_unassociate_device(struct net_device *dev)
 
        ASSERT_RTNL();
 
-       d.real_dev = real_dev;
-       d.head = &list;
-
        port = rmnet_get_port_rtnl(dev);
-       d.port = port;
 
        rcu_read_lock();
        rmnet_unregister_bridge(dev, port);
 
-       netdev_walk_all_lower_dev_rcu(real_dev, rmnet_dev_walk_unreg, &d);
+       hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
+               unregister_netdevice_queue(ep->egress_dev, &list);
+               rmnet_vnd_dellink(ep->mux_id, port, ep);
+
+               hlist_del_init_rcu(&ep->hlnode);
+               kfree(ep);
+       }
+
        rcu_read_unlock();
        unregister_netdevice_many(&list);
 
@@ -422,11 +388,6 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
        if (err)
                return -EBUSY;
 
-       err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL,
-                                          extack);
-       if (err)
-               return -EINVAL;
-
        slave_port = rmnet_get_port(slave_dev);
        slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE;
        slave_port->bridge_ep = real_dev;
@@ -449,7 +410,6 @@ int rmnet_del_bridge(struct net_device *rmnet_dev,
        port->rmnet_mode = RMNET_EPMODE_VND;
        port->bridge_ep = NULL;
 
-       netdev_upper_dev_unlink(slave_dev, rmnet_dev);
        slave_port = rmnet_get_port(slave_dev);
        rmnet_unregister_real_device(slave_dev, slave_port);
 
index 6bc328fb88e13fba1f635ae9e7337e1395ce56d4..b0dbca070c008d386699824ce72a6d6f4c0e2d73 100644 (file)
@@ -38,6 +38,11 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
        }
 
        ep = rmnet_get_endpoint(port, mux_id);
+       if (!ep) {
+               kfree_skb(skb);
+               return RX_HANDLER_CONSUMED;
+       }
+
        vnd = ep->egress_dev;
 
        ip_family = cmd->flow_control.ip_family;
index 570a227acdd8073ebe50649a8e3dd8a7d32a38a8..346d310914df17791018108d5ddd67715b04ce92 100644 (file)
@@ -121,7 +121,7 @@ static void rmnet_get_stats64(struct net_device *dev,
        memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));
 
        for_each_possible_cpu(cpu) {
-               pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
+               pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
 
                do {
                        start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
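
The one-character fix above matters because this_cpu_ptr() always dereferences the calling CPU's instance, so the loop summed the local counters once per possible CPU instead of visiting each CPU's copy; per_cpu_ptr(ptr, cpu) selects the right one. A user-space analogue with an array indexed by CPU id shows the difference:

#include <stdio.h>

#define NR_CPUS 4

struct stats { unsigned long rx_packets; };

static struct stats percpu_stats[NR_CPUS] = {
        { 10 }, { 20 }, { 30 }, { 40 },
};

static int current_cpu;         /* pretend we always run on CPU 0 */

int main(void)
{
        unsigned long wrong = 0, right = 0;

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                /* Bug: like this_cpu_ptr(), always reads the local CPU. */
                wrong += percpu_stats[current_cpu].rx_packets;
                /* Fix: like per_cpu_ptr(ptr, cpu), reads each CPU's copy. */
                right += percpu_stats[cpu].rx_packets;
        }

        printf("wrong sum: %lu, correct sum: %lu\n", wrong, right);
        return 0;
}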
index c87f57ca44371586d30f56985010a115a38c8aa1..a95fbd5510d92e7b0beec7f72042ba5796caeee7 100644 (file)
@@ -2255,9 +2255,6 @@ static int ravb_wol_setup(struct net_device *ndev)
        /* Enable MagicPacket */
        ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
 
-       /* Increased clock usage so device won't be suspended */
-       clk_enable(priv->clk);
-
        return enable_irq_wake(priv->emac_irq);
 }
 
@@ -2276,9 +2273,6 @@ static int ravb_wol_restore(struct net_device *ndev)
        if (ret < 0)
                return ret;
 
-       /* Restore clock usage count */
-       clk_disable(priv->clk);
-
        return disable_irq_wake(priv->emac_irq);
 }
 
index a197e11f3a564470168dd64244f736b49f52c749..92dcf8717fc6e7bcf81195cc7ef8c61964a31540 100644 (file)
@@ -40,7 +40,6 @@
 #include <linux/slab.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
-#include <linux/clk.h>
 #include <linux/sh_eth.h>
 #include <linux/of_mdio.h>
 
@@ -2304,7 +2303,7 @@ static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
        wol->supported = 0;
        wol->wolopts = 0;
 
-       if (mdp->cd->magic && mdp->clk) {
+       if (mdp->cd->magic) {
                wol->supported = WAKE_MAGIC;
                wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0;
        }
@@ -2314,7 +2313,7 @@ static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
 
-       if (!mdp->cd->magic || !mdp->clk || wol->wolopts & ~WAKE_MAGIC)
+       if (!mdp->cd->magic || wol->wolopts & ~WAKE_MAGIC)
                return -EOPNOTSUPP;
 
        mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
@@ -3153,11 +3152,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
                goto out_release;
        }
 
-       /* Get clock, if not found that's OK but Wake-On-Lan is unavailable */
-       mdp->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(mdp->clk))
-               mdp->clk = NULL;
-
        ndev->base_addr = res->start;
 
        spin_lock_init(&mdp->lock);
@@ -3278,7 +3272,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
        if (ret)
                goto out_napi_del;
 
-       if (mdp->cd->magic && mdp->clk)
+       if (mdp->cd->magic)
                device_set_wakeup_capable(&pdev->dev, 1);
 
        /* print device information */
@@ -3331,9 +3325,6 @@ static int sh_eth_wol_setup(struct net_device *ndev)
        /* Enable MagicPacket */
        sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
 
-       /* Increased clock usage so device won't be suspended */
-       clk_enable(mdp->clk);
-
        return enable_irq_wake(ndev->irq);
 }
 
@@ -3359,9 +3350,6 @@ static int sh_eth_wol_restore(struct net_device *ndev)
        if (ret < 0)
                return ret;
 
-       /* Restore clock usage count */
-       clk_disable(mdp->clk);
-
        return disable_irq_wake(ndev->irq);
 }
 
index 63aca9f847e12b869cdbdc6ec159c606cdd7e4b2..4c2f612e4414d6dcddeb0303f7358ded9dbc75a3 100644 (file)
@@ -20,7 +20,7 @@ if NET_VENDOR_SMSC
 
 config SMC9194
        tristate "SMC 9194 support"
-       depends on (ISA || MAC && BROKEN)
+       depends on ISA
        select CRC32
        ---help---
          This is support for the SMC9xxx based Ethernet cards. Choose this
index a0f2be81d52e4a4fd24585d8957b181a3cf9a103..8fc02d9db3d011ee1c193b9cdfb8c26e042e6f3e 100644 (file)
@@ -1451,7 +1451,7 @@ destroy_macvlan_port:
        /* the macvlan port may be freed by macvlan_uninit when it fails to register,
         * so we destroy the macvlan port only when it's valid.
         */
-       if (create && macvlan_port_get_rtnl(dev))
+       if (create && macvlan_port_get_rtnl(lowerdev))
                macvlan_port_destroy(port->dev);
        return err;
 }
index b13eed21c87dac6fcad78c4240cf76200a1c4288..d39ae77707ef0ac49e7df50cb0b84ab222ed6f67 100644 (file)
@@ -1382,7 +1382,7 @@ int genphy_setup_forced(struct phy_device *phydev)
                ctl |= BMCR_FULLDPLX;
 
        return phy_modify(phydev, MII_BMCR,
-                         BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN, ctl);
+                         ~(BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN), ctl);
 }
 EXPORT_SYMBOL(genphy_setup_forced);
 
index ca5e375de27c131534ce1bd2768b787be2fb908f..e0d6760f321951da99e38dd6dc8eb3749848e115 100644 (file)
@@ -166,6 +166,8 @@ struct tbnet_ring {
  * @connected_work: Worker that finalizes the ThunderboltIP connection
  *                 setup and enables DMA paths for high speed data
  *                 transfers
+ * @disconnect_work: Worker that handles tearing down the ThunderboltIP
+ *                  connection
  * @rx_hdr: Copy of the currently processed Rx frame. Used when a
  *         network packet consists of multiple Thunderbolt frames.
  *         In host byte order.
@@ -190,6 +192,7 @@ struct tbnet {
        int login_retries;
        struct delayed_work login_work;
        struct work_struct connected_work;
+       struct work_struct disconnect_work;
        struct thunderbolt_ip_frame_header rx_hdr;
        struct tbnet_ring rx_ring;
        atomic_t frame_id;
@@ -445,7 +448,7 @@ static int tbnet_handle_packet(const void *buf, size_t size, void *data)
        case TBIP_LOGOUT:
                ret = tbnet_logout_response(net, route, sequence, command_id);
                if (!ret)
-                       tbnet_tear_down(net, false);
+                       queue_work(system_long_wq, &net->disconnect_work);
                break;
 
        default:
@@ -659,6 +662,13 @@ static void tbnet_login_work(struct work_struct *work)
        }
 }
 
+static void tbnet_disconnect_work(struct work_struct *work)
+{
+       struct tbnet *net = container_of(work, typeof(*net), disconnect_work);
+
+       tbnet_tear_down(net, false);
+}
+
 static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf,
                              const struct thunderbolt_ip_frame_header *hdr)
 {
@@ -881,6 +891,7 @@ static int tbnet_stop(struct net_device *dev)
 
        napi_disable(&net->napi);
 
+       cancel_work_sync(&net->disconnect_work);
        tbnet_tear_down(net, true);
 
        tb_ring_free(net->rx_ring.ring);
@@ -1195,6 +1206,7 @@ static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id)
        net = netdev_priv(dev);
        INIT_DELAYED_WORK(&net->login_work, tbnet_login_work);
        INIT_WORK(&net->connected_work, tbnet_connected_work);
+       INIT_WORK(&net->disconnect_work, tbnet_disconnect_work);
        mutex_init(&net->connection_lock);
        atomic_set(&net->command_id, 0);
        atomic_set(&net->frame_id, 0);
@@ -1270,10 +1282,7 @@ static int __maybe_unused tbnet_suspend(struct device *dev)
        stop_login(net);
        if (netif_running(net->dev)) {
                netif_device_detach(net->dev);
-               tb_ring_stop(net->rx_ring.ring);
-               tb_ring_stop(net->tx_ring.ring);
-               tbnet_free_buffers(&net->rx_ring);
-               tbnet_free_buffers(&net->tx_ring);
+               tbnet_tear_down(net, true);
        }
 
        return 0;
index 81e6cc951e7fc7c983919365c34842c34bcaedcf..b52258c327d2e1d7c7476de345e49f082909c246 100644 (file)
@@ -1489,27 +1489,23 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
        skb->truesize += skb->data_len;
 
        for (i = 1; i < it->nr_segs; i++) {
+               struct page_frag *pfrag = &current->task_frag;
                size_t fragsz = it->iov[i].iov_len;
-               unsigned long offset;
-               struct page *page;
-               void *data;
 
                if (fragsz == 0 || fragsz > PAGE_SIZE) {
                        err = -EINVAL;
                        goto free;
                }
 
-               local_bh_disable();
-               data = napi_alloc_frag(fragsz);
-               local_bh_enable();
-               if (!data) {
+               if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) {
                        err = -ENOMEM;
                        goto free;
                }
 
-               page = virt_to_head_page(data);
-               offset = data - page_address(page);
-               skb_fill_page_desc(skb, i - 1, page, offset, fragsz);
+               skb_fill_page_desc(skb, i - 1, pfrag->page,
+                                  pfrag->offset, fragsz);
+               page_ref_inc(pfrag->page);
+               pfrag->offset += fragsz;
        }
 
        return skb;
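
The tun change allocates fragments from the task's page_frag instead of napi_alloc_frag(): skb_page_frag_refill() guarantees room in the current page, the fragment is attached to the skb, page_ref_inc() takes a reference for it, and the offset advances. A simplified user-space model of such a bump allocator with per-fragment reference counting (hypothetical types; real page lifetime is of course handled by the kernel):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

struct page_frag {
        char *page;             /* backing "page"                      */
        unsigned int offset;    /* next free byte within the page      */
        unsigned int refcount;  /* one reference per outstanding user  */
};

/* Hypothetical analogue of skb_page_frag_refill(): make sure the current
 * page has room for fragsz, grabbing a fresh one otherwise. */
static int frag_refill(struct page_frag *pf, unsigned int fragsz)
{
        if (pf->page && pf->offset + fragsz <= PAGE_SIZE)
                return 0;
        pf->page = malloc(PAGE_SIZE);
        if (!pf->page)
                return -1;
        pf->offset = 0;
        pf->refcount = 1;       /* allocator's own reference */
        return 0;
}

static char *frag_alloc(struct page_frag *pf, unsigned int fragsz)
{
        char *p;

        if (frag_refill(pf, fragsz))
                return NULL;
        p = pf->page + pf->offset;
        pf->refcount++;         /* like page_ref_inc() per fragment */
        pf->offset += fragsz;   /* like pfrag->offset += fragsz     */
        return p;
}

int main(void)
{
        struct page_frag pf = { 0 };

        for (int i = 0; i < 3; i++)
                if (frag_alloc(&pf, 1200))
                        printf("frag %d at offset %u\n", i, pf.offset - 1200);
        printf("page refcount now %u\n", pf.refcount);
        return 0;
}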
index d0a113743195acae86931c51eea50b94ddadd487..7a6a1fe793090b8e28f5ef075f5ebc2ad385b5eb 100644 (file)
@@ -954,10 +954,11 @@ static int smsc75xx_set_features(struct net_device *netdev,
        /* it's racing here! */
 
        ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
-       if (ret < 0)
+       if (ret < 0) {
                netdev_warn(dev->net, "Error writing RFE_CTL\n");
-
-       return ret;
+               return ret;
+       }
+       return 0;
 }
 
 static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm)
index 626c27352ae243fe6364d59d5274a718475a7587..9bb9e562b8934d33f65592db8077e5fa720f6cda 100644 (file)
@@ -443,12 +443,8 @@ static bool __virtnet_xdp_xmit(struct virtnet_info *vi,
        sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
 
        err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC);
-       if (unlikely(err)) {
-               struct page *page = virt_to_head_page(xdp->data);
-
-               put_page(page);
-               return false;
-       }
+       if (unlikely(err))
+               return false; /* Caller handles free/refcnt */
 
        return true;
 }
@@ -456,8 +452,18 @@ static bool __virtnet_xdp_xmit(struct virtnet_info *vi,
 static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
 {
        struct virtnet_info *vi = netdev_priv(dev);
-       bool sent = __virtnet_xdp_xmit(vi, xdp);
+       struct receive_queue *rq = vi->rq;
+       struct bpf_prog *xdp_prog;
+       bool sent;
 
+       /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
+        * indicates XDP resources have been successfully allocated.
+        */
+       xdp_prog = rcu_dereference(rq->xdp_prog);
+       if (!xdp_prog)
+               return -ENXIO;
+
+       sent = __virtnet_xdp_xmit(vi, xdp);
        if (!sent)
                return -ENOSPC;
        return 0;
@@ -546,8 +552,11 @@ static struct sk_buff *receive_small(struct net_device *dev,
        unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
                              SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        struct page *page = virt_to_head_page(buf);
-       unsigned int delta = 0, err;
+       unsigned int delta = 0;
        struct page *xdp_page;
+       bool sent;
+       int err;
+
        len -= vi->hdr_len;
 
        rcu_read_lock();
@@ -558,7 +567,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
                void *orig_data;
                u32 act;
 
-               if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
+               if (unlikely(hdr->hdr.gso_type))
                        goto err_xdp;
 
                if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
@@ -596,16 +605,19 @@ static struct sk_buff *receive_small(struct net_device *dev,
                        delta = orig_data - xdp.data;
                        break;
                case XDP_TX:
-                       if (unlikely(!__virtnet_xdp_xmit(vi, &xdp)))
+                       sent = __virtnet_xdp_xmit(vi, &xdp);
+                       if (unlikely(!sent)) {
                                trace_xdp_exception(vi->dev, xdp_prog, act);
-                       else
-                               *xdp_xmit = true;
+                               goto err_xdp;
+                       }
+                       *xdp_xmit = true;
                        rcu_read_unlock();
                        goto xdp_xmit;
                case XDP_REDIRECT:
                        err = xdp_do_redirect(dev, &xdp, xdp_prog);
-                       if (!err)
-                               *xdp_xmit = true;
+                       if (err)
+                               goto err_xdp;
+                       *xdp_xmit = true;
                        rcu_read_unlock();
                        goto xdp_xmit;
                default:
@@ -677,7 +689,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
        struct bpf_prog *xdp_prog;
        unsigned int truesize;
        unsigned int headroom = mergeable_ctx_to_headroom(ctx);
-       int err;
+       bool sent;
 
        head_skb = NULL;
 
@@ -746,20 +758,18 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                        }
                        break;
                case XDP_TX:
-                       if (unlikely(!__virtnet_xdp_xmit(vi, &xdp)))
+                       sent = __virtnet_xdp_xmit(vi, &xdp);
+                       if (unlikely(!sent)) {
                                trace_xdp_exception(vi->dev, xdp_prog, act);
-                       else
-                               *xdp_xmit = true;
+                               if (unlikely(xdp_page != page))
+                                       put_page(xdp_page);
+                               goto err_xdp;
+                       }
+                       *xdp_xmit = true;
                        if (unlikely(xdp_page != page))
                                goto err_xdp;
                        rcu_read_unlock();
                        goto xdp_xmit;
-               case XDP_REDIRECT:
-                       err = xdp_do_redirect(dev, &xdp, xdp_prog);
-                       if (!err)
-                               *xdp_xmit = true;
-                       rcu_read_unlock();
-                       goto xdp_xmit;
                default:
                        bpf_warn_invalid_xdp_action(act);
                case XDP_ABORTED:
index 1cf22e62e3dddafcbf3136f8ba07aa397b9bb088..6e0af815f25ec78eacafc61e286cff5fe85170bf 100644 (file)
@@ -3516,7 +3516,7 @@ static int __init init_mac80211_hwsim(void)
 
        spin_lock_init(&hwsim_radio_lock);
 
-       hwsim_wq = alloc_workqueue("hwsim_wq",WQ_MEM_RECLAIM,0);
+       hwsim_wq = alloc_workqueue("hwsim_wq", 0, 0);
        if (!hwsim_wq)
                return -ENOMEM;
        rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params);
index 8328d395e332919b5f5736dc2fc1a519b8951896..3127bc8633ca511889e8098d1a996c30c6d28b3f 100644 (file)
@@ -2005,7 +2005,10 @@ static void netback_changed(struct xenbus_device *dev,
        case XenbusStateInitialised:
        case XenbusStateReconfiguring:
        case XenbusStateReconfigured:
+               break;
+
        case XenbusStateUnknown:
+               wake_up_all(&module_unload_q);
                break;
 
        case XenbusStateInitWait:
@@ -2136,7 +2139,9 @@ static int xennet_remove(struct xenbus_device *dev)
                xenbus_switch_state(dev, XenbusStateClosing);
                wait_event(module_unload_q,
                           xenbus_read_driver_state(dev->otherend) ==
-                          XenbusStateClosing);
+                          XenbusStateClosing ||
+                          xenbus_read_driver_state(dev->otherend) ==
+                          XenbusStateUnknown);
 
                xenbus_switch_state(dev, XenbusStateClosed);
                wait_event(module_unload_q,
index 0fe7ea35c2217406af6f8c071a417e08e98dab6a..817e5e2766da36b7312e6992952b073ae2e0111a 100644 (file)
@@ -2844,7 +2844,7 @@ out:
 }
 
 static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
-               struct nvme_id_ns *id, bool *new)
+               struct nvme_id_ns *id)
 {
        struct nvme_ctrl *ctrl = ns->ctrl;
        bool is_shared = id->nmic & (1 << 0);
@@ -2860,8 +2860,6 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
                        ret = PTR_ERR(head);
                        goto out_unlock;
                }
-
-               *new = true;
        } else {
                struct nvme_ns_ids ids;
 
@@ -2873,8 +2871,6 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
                        ret = -EINVAL;
                        goto out_unlock;
                }
-
-               *new = false;
        }
 
        list_add_tail(&ns->siblings, &head->list);
@@ -2945,7 +2941,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
        struct nvme_id_ns *id;
        char disk_name[DISK_NAME_LEN];
        int node = dev_to_node(ctrl->dev), flags = GENHD_FL_EXT_DEVT;
-       bool new = true;
 
        ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
        if (!ns)
@@ -2971,7 +2966,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
        if (id->ncap == 0)
                goto out_free_id;
 
-       if (nvme_init_ns_head(ns, nsid, id, &new))
+       if (nvme_init_ns_head(ns, nsid, id))
                goto out_free_id;
        nvme_setup_streams_ns(ctrl, ns);
        
@@ -3037,8 +3032,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
                pr_warn("%s: failed to register lightnvm sysfs group for identification\n",
                        ns->disk->disk_name);
 
-       if (new)
-               nvme_mpath_add_disk(ns->head);
+       nvme_mpath_add_disk(ns->head);
        nvme_mpath_add_disk_links(ns);
        return;
  out_unlink_ns:
index 5dd4ceefed8fe0d0897aa8dadb1d266174b2eb02..a1c58e35075e9e180cae240d6951c807bb7a4a82 100644 (file)
@@ -493,7 +493,7 @@ EXPORT_SYMBOL_GPL(nvmf_should_reconnect);
  */
 int nvmf_register_transport(struct nvmf_transport_ops *ops)
 {
-       if (!ops->create_ctrl || !ops->module)
+       if (!ops->create_ctrl)
                return -EINVAL;
 
        down_write(&nvmf_transports_rwsem);
index 3b211d9e58b8419855b29a87f1019b0d687714d4..b7e5c6db4d92fe61313fdb9120cef307204bfd70 100644 (file)
@@ -198,11 +198,16 @@ void nvme_mpath_add_disk(struct nvme_ns_head *head)
 {
        if (!head->disk)
                return;
-       device_add_disk(&head->subsys->dev, head->disk);
-       if (sysfs_create_group(&disk_to_dev(head->disk)->kobj,
-                       &nvme_ns_id_attr_group))
-               pr_warn("%s: failed to create sysfs group for identification\n",
-                       head->disk->disk_name);
+
+       mutex_lock(&head->subsys->lock);
+       if (!(head->disk->flags & GENHD_FL_UP)) {
+               device_add_disk(&head->subsys->dev, head->disk);
+               if (sysfs_create_group(&disk_to_dev(head->disk)->kobj,
+                               &nvme_ns_id_attr_group))
+                       pr_warn("%s: failed to create sysfs group for identification\n",
+                               head->disk->disk_name);
+       }
+       mutex_unlock(&head->subsys->lock);
 }
 
 void nvme_mpath_add_disk_links(struct nvme_ns *ns)
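
Because the multipath disk can now be reached from more than one scan path, nvme_mpath_add_disk() is made idempotent: it takes the subsystem lock and registers the disk only if GENHD_FL_UP is not yet set. That is the familiar register-once-under-a-lock pattern; a pthread-based sketch with a hypothetical register_disk() stand-in:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool disk_up;            /* plays the role of GENHD_FL_UP */

/* Hypothetical stand-in for device_add_disk() + sysfs group creation. */
static void register_disk(void)
{
        printf("disk registered\n");
}

static void mpath_add_disk(void)
{
        pthread_mutex_lock(&lock);
        if (!disk_up) {
                register_disk();
                disk_up = true;
        }
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        mpath_add_disk();       /* registers               */
        mpath_add_disk();       /* second call is a no-op  */
        return 0;
}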
index 73036d2fbbd58da19fbcd880accafdcb3f8165e7..5933a5c732e8349731766eeb7b7c0b0b9b6bea6c 100644 (file)
@@ -1459,7 +1459,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
        nvmeq->cq_vector = qid - 1;
        result = adapter_alloc_cq(dev, qid, nvmeq);
        if (result < 0)
-               return result;
+               goto release_vector;
 
        result = adapter_alloc_sq(dev, qid, nvmeq);
        if (result < 0)
@@ -1473,9 +1473,12 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
        return result;
 
  release_sq:
+       dev->online_queues--;
        adapter_delete_sq(dev, qid);
  release_cq:
        adapter_delete_cq(dev, qid);
+ release_vector:
+       nvmeq->cq_vector = -1;
        return result;
 }
 
index 3a51ed50eff24a4c2541b6051a2918f41920467d..4d84a73ee12d06907ea94ef77e3e654d6e2b2033 100644 (file)
@@ -1051,7 +1051,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
        struct nvme_rdma_device *dev = queue->device;
        struct ib_device *ibdev = dev->dev;
 
-       if (!blk_rq_bytes(rq))
+       if (!blk_rq_payload_bytes(rq))
                return;
 
        if (req->mr) {
@@ -1166,7 +1166,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 
        c->common.flags |= NVME_CMD_SGL_METABUF;
 
-       if (!blk_rq_bytes(rq))
+       if (!blk_rq_payload_bytes(rq))
                return nvme_rdma_set_sg_null(c);
 
        req->sg_table.sgl = req->first_sgl;
index 0bd737117a80a172745aab1868fb8a4ecf6f1e8c..a78029e4e5f481b58bd628537315d3e7e9882f1a 100644 (file)
@@ -520,9 +520,12 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
                goto fail;
        }
 
-       /* either variant of SGLs is fine, as we don't support metadata */
-       if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF &&
-                    (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) {
+       /*
+        * For fabrics, the PSDT field shall describe the metadata pointer (MPTR) that
+        * contains an address of a single contiguous physical buffer that is
+        * byte aligned.
+        */
+       if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
                status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                goto fail;
        }
index 7991ec3a17db9238c4fddaadc2faaa9fb32ce0cb..861d1509b22bf412e2a9bfaee408a4fcb3ad3de4 100644 (file)
@@ -184,7 +184,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                return BLK_STS_OK;
        }
 
-       if (blk_rq_bytes(req)) {
+       if (blk_rq_payload_bytes(req)) {
                iod->sg_table.sgl = iod->first_sgl;
                if (sg_alloc_table_chained(&iod->sg_table,
                                blk_rq_nr_phys_segments(req),
@@ -193,7 +193,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 
                iod->req.sg = iod->sg_table.sgl;
                iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
-               iod->req.transfer_len = blk_rq_bytes(req);
+               iod->req.transfer_len = blk_rq_payload_bytes(req);
        }
 
        blk_mq_start_request(req);
index fc734014206fb0845e9d6549e2b7b554f8845919..8b14bd326d4af32fbdaedd1258e2bb7c3ea489f5 100644 (file)
@@ -3419,22 +3419,29 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,
 
 static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
 {
-       pci_set_vpd_size(dev, 8192);
-}
-
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x20, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x21, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x22, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x23, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x24, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x25, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x26, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x30, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x31, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x32, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x35, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x36, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x37, quirk_chelsio_extend_vpd);
+       int chip = (dev->device & 0xf000) >> 12;
+       int func = (dev->device & 0x0f00) >>  8;
+       int prod = (dev->device & 0x00ff) >>  0;
+
+       /*
+        * If this is a T3-based adapter, there's a 1KB VPD area at offset
+        * 0xc00 which contains the preferred VPD values.  If this is a T4 or
+        * later based adapter, the special VPD is at offset 0x400 for the
+        * Physical Functions (the SR-IOV Virtual Functions have no VPD
+        * Capabilities).  The PCI VPD Access core routines will normally
+        * compute the size of the VPD by parsing the VPD Data Structure at
+        * offset 0x000.  This will result in silent failures when attempting
+        * to access these other VPD areas which are beyond those computed
+        * limits.
+        */
+       if (chip == 0x0 && prod >= 0x20)
+               pci_set_vpd_size(dev, 8192);
+       else if (chip >= 0x4 && func < 0x8)
+               pci_set_vpd_size(dev, 2048);
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
+                       quirk_chelsio_extend_vpd);
 
 #ifdef CONFIG_ACPI
 /*
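
Rather than listing one fixup per device ID, the Chelsio quirk now decodes the fields its comment describes straight out of the 16-bit device ID: bits 15:12 select the chip generation, bits 11:8 the function, bits 7:0 the product. The same decode as a stand-alone snippet (the second ID is purely illustrative; the first is one of the T3 IDs from the old list):

#include <stdint.h>
#include <stdio.h>

static void decode_chelsio_devid(uint16_t device)
{
        int chip = (device & 0xf000) >> 12;
        int func = (device & 0x0f00) >> 8;
        int prod = (device & 0x00ff) >> 0;

        printf("devid 0x%04x -> chip %d, function %d, product 0x%02x\n",
               device, chip, func, prod);
}

int main(void)
{
        decode_chelsio_devid(0x0030);   /* chip 0 (T3-era) ID          */
        decode_chelsio_devid(0x5410);   /* illustrative chip-5 ID only */
        return 0;
}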
index 369d48d6c6f1a53d4bc6ab5dd0b13a33eeccd90b..365447240d95fe2bb2d484bddca71da3572daaa9 100644 (file)
@@ -401,6 +401,10 @@ void pci_release_resource(struct pci_dev *dev, int resno)
        struct resource *res = dev->resource + resno;
 
        pci_info(dev, "BAR %d: releasing %pR\n", resno, res);
+
+       if (!res->parent)
+               return;
+
        release_resource(res);
        res->end = resource_size(res) - 1;
        res->start = 0;
index 7bc5eee96b310756f625499b805385f2ea95689a..0c2ed11c0603015e10c3995f21a768801fe90427 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/perf/arm_pmu.h>
-#include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/sched/clock.h>
 #include <linux/spinlock.h>
@@ -26,6 +25,9 @@
 
 #include <asm/irq_regs.h>
 
+static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
+static DEFINE_PER_CPU(int, cpu_irq);
+
 static int
 armpmu_map_cache_event(const unsigned (*cache_map)
                                      [PERF_COUNT_HW_CACHE_MAX]
@@ -320,17 +322,9 @@ validate_group(struct perf_event *event)
        return 0;
 }
 
-static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu)
-{
-       struct platform_device *pdev = armpmu->plat_device;
-
-       return pdev ? dev_get_platdata(&pdev->dev) : NULL;
-}
-
 static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
 {
        struct arm_pmu *armpmu;
-       struct arm_pmu_platdata *plat;
        int ret;
        u64 start_clock, finish_clock;
 
@@ -341,14 +335,11 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
         * dereference.
         */
        armpmu = *(void **)dev;
-
-       plat = armpmu_get_platdata(armpmu);
+       if (WARN_ON_ONCE(!armpmu))
+               return IRQ_NONE;
 
        start_clock = sched_clock();
-       if (plat && plat->handle_irq)
-               ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
-       else
-               ret = armpmu->handle_irq(irq, armpmu);
+       ret = armpmu->handle_irq(irq, armpmu);
        finish_clock = sched_clock();
 
        perf_sample_event_took(finish_clock - start_clock);
@@ -531,54 +522,41 @@ int perf_num_counters(void)
 }
 EXPORT_SYMBOL_GPL(perf_num_counters);
 
-void armpmu_free_irq(struct arm_pmu *armpmu, int cpu)
+static int armpmu_count_irq_users(const int irq)
 {
-       struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
-       int irq = per_cpu(hw_events->irq, cpu);
+       int cpu, count = 0;
 
-       if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
-               return;
-
-       if (irq_is_percpu_devid(irq)) {
-               free_percpu_irq(irq, &hw_events->percpu_pmu);
-               cpumask_clear(&armpmu->active_irqs);
-               return;
+       for_each_possible_cpu(cpu) {
+               if (per_cpu(cpu_irq, cpu) == irq)
+                       count++;
        }
 
-       free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
+       return count;
 }
 
-void armpmu_free_irqs(struct arm_pmu *armpmu)
+void armpmu_free_irq(int irq, int cpu)
 {
-       int cpu;
+       if (per_cpu(cpu_irq, cpu) == 0)
+               return;
+       if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
+               return;
+
+       if (!irq_is_percpu_devid(irq))
+               free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu));
+       else if (armpmu_count_irq_users(irq) == 1)
+               free_percpu_irq(irq, &cpu_armpmu);
 
-       for_each_cpu(cpu, &armpmu->supported_cpus)
-               armpmu_free_irq(armpmu, cpu);
+       per_cpu(cpu_irq, cpu) = 0;
 }
 
-int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
+int armpmu_request_irq(int irq, int cpu)
 {
        int err = 0;
-       struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
        const irq_handler_t handler = armpmu_dispatch_irq;
-       int irq = per_cpu(hw_events->irq, cpu);
        if (!irq)
                return 0;
 
-       if (irq_is_percpu_devid(irq) && cpumask_empty(&armpmu->active_irqs)) {
-               err = request_percpu_irq(irq, handler, "arm-pmu",
-                                        &hw_events->percpu_pmu);
-       } else if (irq_is_percpu_devid(irq)) {
-               int other_cpu = cpumask_first(&armpmu->active_irqs);
-               int other_irq = per_cpu(hw_events->irq, other_cpu);
-
-               if (irq != other_irq) {
-                       pr_warn("mismatched PPIs detected.\n");
-                       err = -EINVAL;
-                       goto err_out;
-               }
-       } else {
-               struct arm_pmu_platdata *platdata = armpmu_get_platdata(armpmu);
+       if (!irq_is_percpu_devid(irq)) {
                unsigned long irq_flags;
 
                err = irq_force_affinity(irq, cpumask_of(cpu));
@@ -589,22 +567,22 @@ int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
                        goto err_out;
                }
 
-               if (platdata && platdata->irq_flags) {
-                       irq_flags = platdata->irq_flags;
-               } else {
-                       irq_flags = IRQF_PERCPU |
-                                   IRQF_NOBALANCING |
-                                   IRQF_NO_THREAD;
-               }
+               irq_flags = IRQF_PERCPU |
+                           IRQF_NOBALANCING |
+                           IRQF_NO_THREAD;
 
+               irq_set_status_flags(irq, IRQ_NOAUTOEN);
                err = request_irq(irq, handler, irq_flags, "arm-pmu",
-                                 per_cpu_ptr(&hw_events->percpu_pmu, cpu));
+                                 per_cpu_ptr(&cpu_armpmu, cpu));
+       } else if (armpmu_count_irq_users(irq) == 0) {
+               err = request_percpu_irq(irq, handler, "arm-pmu",
+                                        &cpu_armpmu);
        }
 
        if (err)
                goto err_out;
 
-       cpumask_set_cpu(cpu, &armpmu->active_irqs);
+       per_cpu(cpu_irq, cpu) = irq;
        return 0;
 
 err_out:
@@ -612,19 +590,6 @@ err_out:
        return err;
 }
 
-int armpmu_request_irqs(struct arm_pmu *armpmu)
-{
-       int cpu, err;
-
-       for_each_cpu(cpu, &armpmu->supported_cpus) {
-               err = armpmu_request_irq(armpmu, cpu);
-               if (err)
-                       break;
-       }
-
-       return err;
-}
-
 static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
 {
        struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
@@ -647,12 +612,14 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
        if (pmu->reset)
                pmu->reset(pmu);
 
+       per_cpu(cpu_armpmu, cpu) = pmu;
+
        irq = armpmu_get_cpu_irq(pmu, cpu);
        if (irq) {
-               if (irq_is_percpu_devid(irq)) {
+               if (irq_is_percpu_devid(irq))
                        enable_percpu_irq(irq, IRQ_TYPE_NONE);
-                       return 0;
-               }
+               else
+                       enable_irq(irq);
        }
 
        return 0;
@@ -667,8 +634,14 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
                return 0;
 
        irq = armpmu_get_cpu_irq(pmu, cpu);
-       if (irq && irq_is_percpu_devid(irq))
-               disable_percpu_irq(irq);
+       if (irq) {
+               if (irq_is_percpu_devid(irq))
+                       disable_percpu_irq(irq);
+               else
+                       disable_irq(irq);
+       }
+
+       per_cpu(cpu_armpmu, cpu) = NULL;
 
        return 0;
 }
@@ -800,18 +773,18 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
                                            &cpu_pmu->node);
 }
 
-struct arm_pmu *armpmu_alloc(void)
+static struct arm_pmu *__armpmu_alloc(gfp_t flags)
 {
        struct arm_pmu *pmu;
        int cpu;
 
-       pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
+       pmu = kzalloc(sizeof(*pmu), flags);
        if (!pmu) {
                pr_info("failed to allocate PMU device!\n");
                goto out;
        }
 
-       pmu->hw_events = alloc_percpu(struct pmu_hw_events);
+       pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, flags);
        if (!pmu->hw_events) {
                pr_info("failed to allocate per-cpu PMU data.\n");
                goto out_free_pmu;
@@ -857,6 +830,17 @@ out:
        return NULL;
 }
 
+struct arm_pmu *armpmu_alloc(void)
+{
+       return __armpmu_alloc(GFP_KERNEL);
+}
+
+struct arm_pmu *armpmu_alloc_atomic(void)
+{
+       return __armpmu_alloc(GFP_ATOMIC);
+}
+
+
 void armpmu_free(struct arm_pmu *pmu)
 {
        free_percpu(pmu->hw_events);
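
The arm_pmu rework records per CPU which IRQ was requested and, for percpu_devid interrupts, requests the line only for the first user and frees it only when armpmu_count_irq_users() shows the last user is gone. Stripped of the kernel plumbing, that is reference counting of a shared resource keyed by its identifier; a compact sketch with hypothetical request_hw_irq()/release_hw_irq() stand-ins:

#include <stdio.h>

#define NR_CPUS 4

static int cpu_irq[NR_CPUS];    /* IRQ each CPU registered, 0 = none */

static int count_irq_users(int irq)
{
        int cpu, count = 0;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (cpu_irq[cpu] == irq)
                        count++;
        return count;
}

/* Hypothetical stand-ins for request_percpu_irq()/free_percpu_irq(). */
static void request_hw_irq(int irq) { printf("request irq %d\n", irq); }
static void release_hw_irq(int irq) { printf("free irq %d\n", irq); }

static void pmu_request_irq(int irq, int cpu)
{
        if (count_irq_users(irq) == 0)  /* first user does the real request */
                request_hw_irq(irq);
        cpu_irq[cpu] = irq;
}

static void pmu_free_irq(int irq, int cpu)
{
        if (cpu_irq[cpu] != irq)
                return;
        cpu_irq[cpu] = 0;
        if (count_irq_users(irq) == 0)  /* last user does the real free */
                release_hw_irq(irq);
}

int main(void)
{
        pmu_request_irq(23, 0);
        pmu_request_irq(23, 1);         /* shared line: no second request */
        pmu_free_irq(23, 0);
        pmu_free_irq(23, 1);            /* last user: IRQ really freed    */
        return 0;
}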
index 705f1a390e3123a2d305e0468cec8194d972ed39..0f197516d7089cf970926501aca996a9ab79a7f8 100644 (file)
@@ -11,6 +11,8 @@
 #include <linux/acpi.h>
 #include <linux/cpumask.h>
 #include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
 #include <linux/percpu.h>
 #include <linux/perf/arm_pmu.h>
 
@@ -87,7 +89,13 @@ static int arm_pmu_acpi_parse_irqs(void)
                        pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
                }
 
+               /*
+                * Log and request the IRQ so the core arm_pmu code can manage
+                * it. We'll have to sanity-check IRQs later when we associate
+                * them with their PMUs.
+                */
                per_cpu(pmu_irqs, cpu) = irq;
+               armpmu_request_irq(irq, cpu);
        }
 
        return 0;
@@ -127,7 +135,7 @@ static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
                return pmu;
        }
 
-       pmu = armpmu_alloc();
+       pmu = armpmu_alloc_atomic();
        if (!pmu) {
                pr_warn("Unable to allocate PMU for CPU%d\n",
                        smp_processor_id());
@@ -139,6 +147,35 @@ static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
        return pmu;
 }
 
+/*
+ * Check whether the new IRQ is compatible with those already associated with
+ * the PMU (e.g. we don't have mismatched PPIs).
+ */
+static bool pmu_irq_matches(struct arm_pmu *pmu, int irq)
+{
+       struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
+       int cpu;
+
+       if (!irq)
+               return true;
+
+       for_each_cpu(cpu, &pmu->supported_cpus) {
+               int other_irq = per_cpu(hw_events->irq, cpu);
+               if (!other_irq)
+                       continue;
+
+               if (irq == other_irq)
+                       continue;
+               if (!irq_is_percpu_devid(irq) && !irq_is_percpu_devid(other_irq))
+                       continue;
+
+               pr_warn("mismatched PPIs detected\n");
+               return false;
+       }
+
+       return true;
+}
+
 /*
  * This must run before the common arm_pmu hotplug logic, so that we can
  * associate a CPU and its interrupt before the common code tries to manage the
@@ -164,19 +201,14 @@ static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
        if (!pmu)
                return -ENOMEM;
 
-       cpumask_set_cpu(cpu, &pmu->supported_cpus);
-
        per_cpu(probed_pmus, cpu) = pmu;
 
-       /*
-        * Log and request the IRQ so the core arm_pmu code can manage it.  In
-        * some situations (e.g. mismatched PPIs), we may fail to request the
-        * IRQ. However, it may be too late for us to do anything about it.
-        * The common ARM PMU code will log a warning in this case.
-        */
-       hw_events = pmu->hw_events;
-       per_cpu(hw_events->irq, cpu) = irq;
-       armpmu_request_irq(pmu, cpu);
+       if (pmu_irq_matches(pmu, irq)) {
+               hw_events = pmu->hw_events;
+               per_cpu(hw_events->irq, cpu) = irq;
+       }
+
+       cpumask_set_cpu(cpu, &pmu->supported_cpus);
 
        /*
         * Ideally, we'd probe the PMU here when we find the first matching
@@ -247,11 +279,6 @@ static int arm_pmu_acpi_init(void)
        if (acpi_disabled)
                return 0;
 
-       /*
-        * We can't request IRQs yet, since we don't know the cookie value
-        * until we know which CPUs share the same logical PMU. We'll handle
-        * that in arm_pmu_acpi_cpu_starting().
-        */
        ret = arm_pmu_acpi_parse_irqs();
        if (ret)
                return ret;
index 46501cc79fd7c3f3b0ac4557ce4a06811636f338..7729eda5909df180ae686ee8206e35a75b9ca476 100644 (file)
@@ -127,13 +127,6 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
                        pdev->dev.of_node);
        }
 
-       /*
-        * Some platforms have all PMU IRQs OR'd into a single IRQ, with a
-        * special platdata function that attempts to demux them.
-        */
-       if (dev_get_platdata(&pdev->dev))
-               cpumask_setall(&pmu->supported_cpus);
-
        for (i = 0; i < num_irqs; i++) {
                int cpu, irq;
 
@@ -164,6 +157,36 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
        return 0;
 }
 
+static int armpmu_request_irqs(struct arm_pmu *armpmu)
+{
+       struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
+       int cpu, err;
+
+       for_each_cpu(cpu, &armpmu->supported_cpus) {
+               int irq = per_cpu(hw_events->irq, cpu);
+               if (!irq)
+                       continue;
+
+               err = armpmu_request_irq(irq, cpu);
+               if (err)
+                       break;
+       }
+
+       return err;
+}
+
+static void armpmu_free_irqs(struct arm_pmu *armpmu)
+{
+       int cpu;
+       struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
+
+       for_each_cpu(cpu, &armpmu->supported_cpus) {
+               int irq = per_cpu(hw_events->irq, cpu);
+
+               armpmu_free_irq(irq, cpu);
+       }
+}
+
 int arm_pmu_device_probe(struct platform_device *pdev,
                         const struct of_device_id *of_table,
                         const struct pmu_probe_info *probe_table)
index 1fda9d6c7ea3f39b3768f142136b7e11f9e0c870..4b91ff74779bead16a60f96bf2566e947989e179 100644 (file)
@@ -716,7 +716,7 @@ static const char * const uart_b_groups[] = {
        "uart_tx_b_x", "uart_rx_b_x", "uart_cts_b_x", "uart_rts_b_x",
 };
 
-static const char * const uart_ao_b_gpioz_groups[] = {
+static const char * const uart_ao_b_z_groups[] = {
        "uart_ao_tx_b_z", "uart_ao_rx_b_z",
        "uart_ao_cts_b_z", "uart_ao_rts_b_z",
 };
@@ -855,7 +855,7 @@ static struct meson_pmx_func meson_axg_periphs_functions[] = {
        FUNCTION(nand),
        FUNCTION(uart_a),
        FUNCTION(uart_b),
-       FUNCTION(uart_ao_b_gpioz),
+       FUNCTION(uart_ao_b_z),
        FUNCTION(i2c0),
        FUNCTION(i2c1),
        FUNCTION(i2c2),
index d1a01311c1a2937b6863bea6b696566e376f8ac1..5e3df194723e1a1f1757fff08dd3c89069381350 100644 (file)
@@ -376,6 +376,7 @@ static int intel_hid_remove(struct platform_device *device)
 {
        acpi_handle handle = ACPI_HANDLE(&device->dev);
 
+       device_init_wakeup(&device->dev, false);
        acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler);
        intel_hid_set_enable(&device->dev, false);
        intel_button_array_enable(&device->dev, false);
index b703d6f5b099b3d0d74b77d60d2e5401ff3b816e..c13780b8dabbe089c24a2b5c323f1510f589153e 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/acpi.h>
+#include <linux/dmi.h>
 #include <linux/input.h>
 #include <linux/input/sparse-keymap.h>
 #include <linux/kernel.h>
@@ -97,9 +98,35 @@ out_unknown:
        dev_dbg(&device->dev, "unknown event index 0x%x\n", event);
 }
 
-static int intel_vbtn_probe(struct platform_device *device)
+static void detect_tablet_mode(struct platform_device *device)
 {
+       const char *chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
+       struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
+       acpi_handle handle = ACPI_HANDLE(&device->dev);
        struct acpi_buffer vgbs_output = { ACPI_ALLOCATE_BUFFER, NULL };
+       union acpi_object *obj;
+       acpi_status status;
+       int m;
+
+       if (!(chassis_type && strcmp(chassis_type, "31") == 0))
+               goto out;
+
+       status = acpi_evaluate_object(handle, "VGBS", NULL, &vgbs_output);
+       if (ACPI_FAILURE(status))
+               goto out;
+
+       obj = vgbs_output.pointer;
+       if (!(obj && obj->type == ACPI_TYPE_INTEGER))
+               goto out;
+
+       m = !(obj->integer.value & TABLET_MODE_FLAG);
+       input_report_switch(priv->input_dev, SW_TABLET_MODE, m);
+out:
+       kfree(vgbs_output.pointer);
+}
+
+static int intel_vbtn_probe(struct platform_device *device)
+{
        acpi_handle handle = ACPI_HANDLE(&device->dev);
        struct intel_vbtn_priv *priv;
        acpi_status status;
@@ -122,22 +149,7 @@ static int intel_vbtn_probe(struct platform_device *device)
                return err;
        }
 
-       /*
-        * VGBS being present and returning something means we have
-        * a tablet mode switch.
-        */
-       status = acpi_evaluate_object(handle, "VGBS", NULL, &vgbs_output);
-       if (ACPI_SUCCESS(status)) {
-               union acpi_object *obj = vgbs_output.pointer;
-
-               if (obj && obj->type == ACPI_TYPE_INTEGER) {
-                       int m = !(obj->integer.value & TABLET_MODE_FLAG);
-
-                       input_report_switch(priv->input_dev, SW_TABLET_MODE, m);
-               }
-       }
-
-       kfree(vgbs_output.pointer);
+       detect_tablet_mode(device);
 
        status = acpi_install_notify_handler(handle,
                                             ACPI_DEVICE_NOTIFY,
@@ -154,6 +166,7 @@ static int intel_vbtn_remove(struct platform_device *device)
 {
        acpi_handle handle = ACPI_HANDLE(&device->dev);
 
+       device_init_wakeup(&device->dev, false);
        acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler);
 
        /*
index c0c8945603cbbdd6284ff60b5df4d118502305d5..8796211ef24acdab6e20f976716c18577d5a8fed 100644 (file)
@@ -945,7 +945,7 @@ static int wmi_dev_probe(struct device *dev)
                wblock->char_dev.mode = 0444;
                ret = misc_register(&wblock->char_dev);
                if (ret) {
-                       dev_warn(dev, "failed to register char dev: %d", ret);
+                       dev_warn(dev, "failed to register char dev: %d\n", ret);
                        ret = -ENOMEM;
                        goto probe_misc_failure;
                }
@@ -1048,7 +1048,7 @@ static int wmi_create_device(struct device *wmi_bus_dev,
 
        if (result) {
                dev_warn(wmi_bus_dev,
-                        "%s data block query control method not found",
+                        "%s data block query control method not found\n",
                         method);
                return result;
        }
@@ -1198,7 +1198,7 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
 
                retval = device_add(&wblock->dev.dev);
                if (retval) {
-                       dev_err(wmi_bus_dev, "failed to register %pULL\n",
+                       dev_err(wmi_bus_dev, "failed to register %pUL\n",
                                wblock->gblock.guid);
                        if (debug_event)
                                wmi_method_enable(wblock, 0);
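
The last hunk swaps the bogus "%pULL" specifier for "%pUL", printk's little-endian, uppercase GUID format; with the extra 'L' the output would most likely have carried a stray trailing 'L'. A userspace sketch of the byte ordering that %pUL applies (print_guid_le() and the sample GUID are illustrative, not the kernel implementation):

#include <stdint.h>
#include <stdio.h>

/* Format a 16-byte GUID with little-endian first three fields, uppercase hex. */
static void print_guid_le(const uint8_t g[16])
{
	printf("%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-"
	       "%02X%02X%02X%02X%02X%02X\n",
	       g[3], g[2], g[1], g[0],		/* first field, byte-swapped */
	       g[5], g[4],
	       g[7], g[6],
	       g[8], g[9],
	       g[10], g[11], g[12], g[13], g[14], g[15]);
}

int main(void)
{
	const uint8_t guid[16] = {
		0x21, 0x12, 0x90, 0x05, 0x66, 0xd5, 0xd1, 0x11,
		0xb2, 0xf0, 0x00, 0xa0, 0xc9, 0x06, 0x29, 0x10
	};

	print_guid_le(guid);	/* 05901221-D566-11D1-B2F0-00A0C9062910 */
	return 0;
}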
index fcfd28d2884c546de6b26f1daf1d8773518291de..de1b3fce936d5d7d3ee70c2dabc3957e911b438f 100644 (file)
@@ -185,7 +185,6 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
 CFLAGS_ncr53c8xx.o     := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
 zalon7xx-objs  := zalon.o ncr53c8xx.o
 NCR_Q720_mod-objs      := NCR_Q720.o ncr53c8xx.o
-oktagon_esp_mod-objs   := oktagon_esp.o oktagon_io.o
 
 # Files generated that shall be removed upon make clean
 clean-files := 53c700_d.h 53c700_u.h
index b3b931ab77ebdfc2190b113489bb1bcf81edd623..2664ea0df35fa1384db54c3e8f0ff270cba09148 100644 (file)
@@ -1693,8 +1693,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
         *      Map in the registers from the adapter.
         */
        aac->base_size = AAC_MIN_FOOTPRINT_SIZE;
-       if ((*aac_drivers[index].init)(aac))
+       if ((*aac_drivers[index].init)(aac)) {
+               error = -ENODEV;
                goto out_unmap;
+       }
 
        if (aac->sync_mode) {
                if (aac_sync_mode)
diff --git a/drivers/scsi/aic7xxx/aiclib.c b/drivers/scsi/aic7xxx/aiclib.c
deleted file mode 100644 (file)
index 828ae3d..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Implementation of Utility functions for all SCSI device types.
- *
- * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
- * Copyright (c) 1997, 1998 Kenneth D. Merry.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification, immediately at the beginning of the file.
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD: src/sys/cam/scsi/scsi_all.c,v 1.38 2002/09/23 04:56:35 mjacob Exp $
- * $Id$
- */
-
-#include "aiclib.h"
-
index 8e2f767147cb43de2acdcabeab820537fc3422d3..5a645b8b9af170d66a2abd510393c5f5c5725aa8 100644 (file)
@@ -1889,6 +1889,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
                /* we will not receive ABTS response for this IO */
                BNX2FC_IO_DBG(io_req, "Timer context finished processing "
                           "this scsi cmd\n");
+               return;
        }
 
        /* Cancel the timeout_work, as we received IO completion */
index be5ee2d37815510226627261955f75d842711fd6..7dbbbb81a1e7de64fc3f8ad3079b4a5879153f39 100644 (file)
@@ -114,7 +114,7 @@ static enum csio_ln_ev fwevt_to_lnevt[] = {
 static struct csio_lnode *
 csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid)
 {
-       struct csio_lnode *ln = hw->rln;
+       struct csio_lnode *ln;
        struct list_head *tmp;
 
        /* Match siblings lnode with portid */
index 022e421c218513055f0e4033611736523751cbb6..4b44325d1a82868b94909240347c3d6258c5a794 100644 (file)
@@ -876,6 +876,11 @@ static void alua_rtpg_work(struct work_struct *work)
 
 /**
  * alua_rtpg_queue() - cause RTPG to be submitted asynchronously
+ * @pg: ALUA port group associated with @sdev.
+ * @sdev: SCSI device for which to submit an RTPG.
+ * @qdata: Information about the callback to invoke after the RTPG.
+ * @force: Whether or not to submit an RTPG if a work item that will submit an
+ *         RTPG already has been scheduled.
  *
  * Returns true if and only if alua_rtpg_work() will be called asynchronously.
  * That function is responsible for calling @qdata->fn().
index 9a0696f68f37153a5b7647c0e5c22ac69574f583..b81a53c4a9a8b1020a96a85fd5e84cde2adb9900 100644 (file)
@@ -367,7 +367,7 @@ enum ibmvfc_fcp_rsp_info_codes {
 };
 
 struct ibmvfc_fcp_rsp_info {
-       __be16 reserved;
+       u8 reserved[3];
        u8 rsp_code;
        u8 reserved2[4];
 }__attribute__((packed, aligned (2)));
index 13d6e4ec3022cc6c38e5b36496556f5bad998afa..59a87ca328d36d39d2d95a1e6bd39e25573065a9 100644 (file)
@@ -2410,8 +2410,11 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
                                continue;
                        }
 
-                       for_each_cpu(cpu, mask)
+                       for_each_cpu_and(cpu, mask, cpu_online_mask) {
+                               if (cpu >= ioc->cpu_msix_table_sz)
+                                       break;
                                ioc->cpu_msix_table[cpu] = reply_q->msix_index;
+                       }
                }
                return;
        }
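
A userspace sketch of the bounded iteration introduced above: walk only the CPUs present in both masks and stop before indexing past the lookup table, mirroring the for_each_cpu_and() loop guarded by cpu_msix_table_sz. Mask values, NBITS and TABLE_SZ are illustrative.

#include <stdio.h>

#define NBITS    16
#define TABLE_SZ  8

int main(void)
{
	unsigned int affinity = 0xBEEF;		/* CPUs the MSI-X vector may target */
	unsigned int online   = 0x00FF;		/* CPUs currently online */
	unsigned char table[TABLE_SZ] = { 0 };

	for (unsigned int cpu = 0; cpu < NBITS; cpu++) {
		if (!((affinity & online) & (1u << cpu)))
			continue;
		if (cpu >= TABLE_SZ)		/* table is smaller than the mask */
			break;
		table[cpu] = 1;			/* record the vector assignment */
	}

	for (unsigned int cpu = 0; cpu < TABLE_SZ; cpu++)
		printf("cpu %u -> %u\n", cpu, table[cpu]);
	return 0;
}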
index 029e2e69b29f8dedd43b937212a23b81a7aca3e7..f57a94b4f0d91636590a5ea1994e2730f8d00512 100644 (file)
@@ -1724,7 +1724,6 @@ static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf)
 {
        struct qedi_ctx *qedi = data;
        struct nvm_iscsi_initiator *initiator;
-       char *str = buf;
        int rc = 1;
        u32 ipv6_en, dhcp_en, ip_len;
        struct nvm_iscsi_block *block;
@@ -1758,32 +1757,32 @@ static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf)
 
        switch (type) {
        case ISCSI_BOOT_ETH_IP_ADDR:
-               rc = snprintf(str, ip_len, fmt, ip);
+               rc = snprintf(buf, ip_len, fmt, ip);
                break;
        case ISCSI_BOOT_ETH_SUBNET_MASK:
-               rc = snprintf(str, ip_len, fmt, sub);
+               rc = snprintf(buf, ip_len, fmt, sub);
                break;
        case ISCSI_BOOT_ETH_GATEWAY:
-               rc = snprintf(str, ip_len, fmt, gw);
+               rc = snprintf(buf, ip_len, fmt, gw);
                break;
        case ISCSI_BOOT_ETH_FLAGS:
-               rc = snprintf(str, 3, "%hhd\n",
+               rc = snprintf(buf, 3, "%hhd\n",
                              SYSFS_FLAG_FW_SEL_BOOT);
                break;
        case ISCSI_BOOT_ETH_INDEX:
-               rc = snprintf(str, 3, "0\n");
+               rc = snprintf(buf, 3, "0\n");
                break;
        case ISCSI_BOOT_ETH_MAC:
-               rc = sysfs_format_mac(str, qedi->mac, ETH_ALEN);
+               rc = sysfs_format_mac(buf, qedi->mac, ETH_ALEN);
                break;
        case ISCSI_BOOT_ETH_VLAN:
-               rc = snprintf(str, 12, "%d\n",
+               rc = snprintf(buf, 12, "%d\n",
                              GET_FIELD2(initiator->generic_cont0,
                                         NVM_ISCSI_CFG_INITIATOR_VLAN));
                break;
        case ISCSI_BOOT_ETH_ORIGIN:
                if (dhcp_en)
-                       rc = snprintf(str, 3, "3\n");
+                       rc = snprintf(buf, 3, "3\n");
                break;
        default:
                rc = 0;
@@ -1819,7 +1818,6 @@ static ssize_t qedi_show_boot_ini_info(void *data, int type, char *buf)
 {
        struct qedi_ctx *qedi = data;
        struct nvm_iscsi_initiator *initiator;
-       char *str = buf;
        int rc;
        struct nvm_iscsi_block *block;
 
@@ -1831,8 +1829,8 @@ static ssize_t qedi_show_boot_ini_info(void *data, int type, char *buf)
 
        switch (type) {
        case ISCSI_BOOT_INI_INITIATOR_NAME:
-               rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
-                             initiator->initiator_name.byte);
+               rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN,
+                            initiator->initiator_name.byte);
                break;
        default:
                rc = 0;
@@ -1860,7 +1858,6 @@ static ssize_t
 qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
                        char *buf, enum qedi_nvm_tgts idx)
 {
-       char *str = buf;
        int rc = 1;
        u32 ctrl_flags, ipv6_en, chap_en, mchap_en, ip_len;
        struct nvm_iscsi_block *block;
@@ -1899,48 +1896,48 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
 
        switch (type) {
        case ISCSI_BOOT_TGT_NAME:
-               rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
-                             block->target[idx].target_name.byte);
+               rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN,
+                            block->target[idx].target_name.byte);
                break;
        case ISCSI_BOOT_TGT_IP_ADDR:
                if (ipv6_en)
-                       rc = snprintf(str, ip_len, "%pI6\n",
+                       rc = snprintf(buf, ip_len, "%pI6\n",
                                      block->target[idx].ipv6_addr.byte);
                else
-                       rc = snprintf(str, ip_len, "%pI4\n",
+                       rc = snprintf(buf, ip_len, "%pI4\n",
                                      block->target[idx].ipv4_addr.byte);
                break;
        case ISCSI_BOOT_TGT_PORT:
-               rc = snprintf(str, 12, "%d\n",
+               rc = snprintf(buf, 12, "%d\n",
                              GET_FIELD2(block->target[idx].generic_cont0,
                                         NVM_ISCSI_CFG_TARGET_TCP_PORT));
                break;
        case ISCSI_BOOT_TGT_LUN:
-               rc = snprintf(str, 22, "%.*d\n",
+               rc = snprintf(buf, 22, "%.*d\n",
                              block->target[idx].lun.value[1],
                              block->target[idx].lun.value[0]);
                break;
        case ISCSI_BOOT_TGT_CHAP_NAME:
-               rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n",
-                             chap_name);
+               rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+                            chap_name);
                break;
        case ISCSI_BOOT_TGT_CHAP_SECRET:
-               rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n",
-                             chap_secret);
+               rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+                            chap_secret);
                break;
        case ISCSI_BOOT_TGT_REV_CHAP_NAME:
-               rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n",
-                             mchap_name);
+               rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+                            mchap_name);
                break;
        case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
-               rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n",
-                             mchap_secret);
+               rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+                            mchap_secret);
                break;
        case ISCSI_BOOT_TGT_FLAGS:
-               rc = snprintf(str, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT);
+               rc = snprintf(buf, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT);
                break;
        case ISCSI_BOOT_TGT_NIC_ASSOC:
-               rc = snprintf(str, 3, "0\n");
+               rc = snprintf(buf, 3, "0\n");
                break;
        default:
                rc = 0;
index aececf664654df95c04ff005dcd90f69a5fe5622..2dea1129d3967f04e775ef09cd71de8ba9ddce30 100644 (file)
@@ -59,8 +59,6 @@ qla2x00_sp_timeout(struct timer_list *t)
        req->outstanding_cmds[sp->handle] = NULL;
        iocb = &sp->u.iocb_cmd;
        iocb->timeout(sp);
-       if (sp->type != SRB_ELS_DCMD)
-               sp->free(sp);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
 }
 
@@ -102,7 +100,6 @@ qla2x00_async_iocb_timeout(void *data)
        srb_t *sp = data;
        fc_port_t *fcport = sp->fcport;
        struct srb_iocb *lio = &sp->u.iocb_cmd;
-       struct event_arg ea;
 
        if (fcport) {
                ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
@@ -117,25 +114,13 @@ qla2x00_async_iocb_timeout(void *data)
 
        switch (sp->type) {
        case SRB_LOGIN_CMD:
-               if (!fcport)
-                       break;
                /* Retry as needed. */
                lio->u.logio.data[0] = MBS_COMMAND_ERROR;
                lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
                        QLA_LOGIO_LOGIN_RETRIED : 0;
-               memset(&ea, 0, sizeof(ea));
-               ea.event = FCME_PLOGI_DONE;
-               ea.fcport = sp->fcport;
-               ea.data[0] = lio->u.logio.data[0];
-               ea.data[1] = lio->u.logio.data[1];
-               ea.sp = sp;
-               qla24xx_handle_plogi_done_event(fcport->vha, &ea);
+               sp->done(sp, QLA_FUNCTION_TIMEOUT);
                break;
        case SRB_LOGOUT_CMD:
-               if (!fcport)
-                       break;
-               qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
-               break;
        case SRB_CT_PTHRU_CMD:
        case SRB_MB_IOCB:
        case SRB_NACK_PLOGI:
@@ -235,12 +220,10 @@ static void
 qla2x00_async_logout_sp_done(void *ptr, int res)
 {
        srb_t *sp = ptr;
-       struct srb_iocb *lio = &sp->u.iocb_cmd;
 
        sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
-       if (!test_bit(UNLOADING, &sp->vha->dpc_flags))
-               qla2x00_post_async_logout_done_work(sp->vha, sp->fcport,
-                   lio->u.logio.data);
+       sp->fcport->login_gen++;
+       qlt_logo_completion_handler(sp->fcport, res);
        sp->free(sp);
 }
 
index 1b62e943ec49c15dabf3054946619a944980cba5..8d00d559bd2659b13bb2362034dffd855a370d44 100644 (file)
@@ -3275,12 +3275,11 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
        memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
        abt_iocb->entry_type = ABORT_IOCB_TYPE;
        abt_iocb->entry_count = 1;
-       abt_iocb->handle =
-            cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
-                aio->u.abt.cmd_hndl));
+       abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
        abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        abt_iocb->handle_to_abort =
-           cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
+           cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
+                                   aio->u.abt.cmd_hndl));
        abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
        abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
        abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
index 14109d86c3f6a786313bbf83d4c973e60553e33e..89f93ebd819d7590a5ece0dd689f6bd350be7393 100644 (file)
@@ -272,7 +272,8 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
        struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 
        /* Read all mbox registers? */
-       mboxes = (1 << ha->mbx_count) - 1;
+       WARN_ON_ONCE(ha->mbx_count > 32);
+       mboxes = (1ULL << ha->mbx_count) - 1;
        if (!ha->mcp)
                ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
        else
@@ -2880,7 +2881,8 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
        struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
        /* Read all mbox registers? */
-       mboxes = (1 << ha->mbx_count) - 1;
+       WARN_ON_ONCE(ha->mbx_count > 32);
+       mboxes = (1ULL << ha->mbx_count) - 1;
        if (!ha->mcp)
                ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
        else
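
A userspace sketch of the overflow the two hunks above guard against: shifting a 32-bit 1 by mbx_count is undefined behaviour once mbx_count reaches 32, so the mask computation is widened to 1ULL (and WARN_ON_ONCE flags larger counts). Illustrative only.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int mbx_count = 32;

	/* uint32_t mask = (1u << mbx_count) - 1;  undefined for mbx_count >= 32 */
	uint32_t mask = (uint32_t)((1ULL << mbx_count) - 1);

	printf("mask = 0x%08x\n", mask);	/* 0xffffffff as intended */
	return 0;
}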
index 12ee6e02d146d169940ead69af93d42d79d6508b..afcb5567998a51d6b1aae1c4872d0b2d5312ec71 100644 (file)
@@ -3625,6 +3625,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
        }
        qla2x00_wait_for_hba_ready(base_vha);
 
+       qla2x00_wait_for_sess_deletion(base_vha);
+
        /*
         * if UNLOAD flag is already set, then continue unload,
         * where it was set first.
index fc89af8fe2569a65d8b2751e72b74699977f9504..896b2d8bd8035ece6c42e99455bad596553cc594 100644 (file)
@@ -4871,8 +4871,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
                                    sess);
                                qlt_send_term_imm_notif(vha, iocb, 1);
                                res = 0;
-                               spin_lock_irqsave(&tgt->ha->tgt.sess_lock,
-                                   flags);
                                break;
                        }
 
index fc233717355fe22687678c805069478eddd99113..817f312023a999a3561794472c65c31c1ba18605 100644 (file)
 #define DEV_DB_NON_PERSISTENT  0
 #define DEV_DB_PERSISTENT      1
 
+#define QL4_ISP_REG_DISCONNECT 0xffffffffU
+
 #define COPY_ISID(dst_isid, src_isid) {                        \
        int i, j;                                       \
        for (i = 0, j = ISID_SIZE - 1; i < ISID_SIZE;)  \
index 82e889bbe0ed8f1959de20fce2d4af88e493508e..fc2c97d9a0d60de85b6fbf97a2202b3fd0267928 100644 (file)
@@ -262,6 +262,24 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
 
 static struct scsi_transport_template *qla4xxx_scsi_transport;
 
+static int qla4xxx_isp_check_reg(struct scsi_qla_host *ha)
+{
+       u32 reg_val = 0;
+       int rval = QLA_SUCCESS;
+
+       if (is_qla8022(ha))
+               reg_val = readl(&ha->qla4_82xx_reg->host_status);
+       else if (is_qla8032(ha) || is_qla8042(ha))
+               reg_val = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
+       else
+               reg_val = readw(&ha->reg->ctrl_status);
+
+       if (reg_val == QL4_ISP_REG_DISCONNECT)
+               rval = QLA_ERROR;
+
+       return rval;
+}
+
 static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
                             uint32_t iface_type, uint32_t payload_size,
                             uint32_t pid, struct sockaddr *dst_addr)
@@ -9186,10 +9204,17 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
        struct srb *srb = NULL;
        int ret = SUCCESS;
        int wait = 0;
+       int rval;
 
        ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n",
                   ha->host_no, id, lun, cmd, cmd->cmnd[0]);
 
+       rval = qla4xxx_isp_check_reg(ha);
+       if (rval != QLA_SUCCESS) {
+               ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+               return FAILED;
+       }
+
        spin_lock_irqsave(&ha->hardware_lock, flags);
        srb = (struct srb *) CMD_SP(cmd);
        if (!srb) {
@@ -9241,6 +9266,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
        struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
        struct ddb_entry *ddb_entry = cmd->device->hostdata;
        int ret = FAILED, stat;
+       int rval;
 
        if (!ddb_entry)
                return ret;
@@ -9260,6 +9286,12 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
                      cmd, jiffies, cmd->request->timeout / HZ,
                      ha->dpc_flags, cmd->result, cmd->allowed));
 
+       rval = qla4xxx_isp_check_reg(ha);
+       if (rval != QLA_SUCCESS) {
+               ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+               return FAILED;
+       }
+
        /* FIXME: wait for hba to go online */
        stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
        if (stat != QLA_SUCCESS) {
@@ -9303,6 +9335,7 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
        struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
        struct ddb_entry *ddb_entry = cmd->device->hostdata;
        int stat, ret;
+       int rval;
 
        if (!ddb_entry)
                return FAILED;
@@ -9320,6 +9353,12 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
                      ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
                      ha->dpc_flags, cmd->result, cmd->allowed));
 
+       rval = qla4xxx_isp_check_reg(ha);
+       if (rval != QLA_SUCCESS) {
+               ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+               return FAILED;
+       }
+
        stat = qla4xxx_reset_target(ha, ddb_entry);
        if (stat != QLA_SUCCESS) {
                starget_printk(KERN_INFO, scsi_target(cmd->device),
@@ -9374,9 +9413,16 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
 {
        int return_status = FAILED;
        struct scsi_qla_host *ha;
+       int rval;
 
        ha = to_qla_host(cmd->device->host);
 
+       rval = qla4xxx_isp_check_reg(ha);
+       if (rval != QLA_SUCCESS) {
+               ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+               return FAILED;
+       }
+
        if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba)
                qla4_83xx_set_idc_dontreset(ha);
 
index 40fc7a590e81b6f9a6749dd4945a7f134e69364d..6be5ab32c94fef437f552a38f79bdc36825efb98 100644 (file)
@@ -1657,7 +1657,7 @@ static struct scsi_host_template scsi_driver = {
        .eh_timed_out =         storvsc_eh_timed_out,
        .slave_alloc =          storvsc_device_alloc,
        .slave_configure =      storvsc_device_configure,
-       .cmd_per_lun =          255,
+       .cmd_per_lun =          2048,
        .this_id =              -1,
        .use_clustering =       ENABLE_CLUSTERING,
        /* Make sure we dont get a sg segment crosses a page boundary */
index ca360daa6a253c7a9a0f4f29eaeaf5a49a1a8c98..378af306fda1748d8f587f466bb83fa34dbc9b5c 100644 (file)
@@ -536,7 +536,7 @@ sym_getsync(struct sym_hcb *np, u_char dt, u_char sfac, u_char *divp, u_char *fa
         *  Look for the greatest clock divisor that allows an 
         *  input speed faster than the period.
         */
-       while (div-- > 0)
+       while (--div > 0)
                if (kpc >= (div_10M[div] << 2)) break;
 
        /*
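
A small sketch of why the loop condition changed above: with the post-decrement form the loop exits with div == -1 when no divisor matches, and that value later indexes the divisor table out of range; the pre-decrement form leaves div at 0. Table contents and kpc are illustrative.

#include <stdio.h>

#define NDIV 5

static int pick_post(int kpc, const int tbl[NDIV])
{
	int div = NDIV;

	while (div-- > 0)
		if (kpc >= (tbl[div] << 2)) break;
	return div;			/* -1 when nothing matched */
}

static int pick_pre(int kpc, const int tbl[NDIV])
{
	int div = NDIV;

	while (--div > 0)
		if (kpc >= (tbl[div] << 2)) break;
	return div;			/* 0 when nothing matched */
}

int main(void)
{
	const int div_10M[NDIV] = { 2, 4, 8, 16, 32 };
	int kpc = 1;			/* smaller than every entry: no match */

	printf("post-decrement leaves div = %d\n", pick_post(kpc, div_10M));
	printf("pre-decrement  leaves div = %d\n", pick_pre(kpc, div_10M));
	return 0;
}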
index a355d989b414f9059abe8e04bad9bbf6d2459c34..c7da2c185990615f906e847def5b6513a1338801 100644 (file)
@@ -4352,6 +4352,8 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
        /* REPORT SUPPORTED OPERATION CODES is not supported */
        sdev->no_report_opcodes = 1;
 
+       /* WRITE_SAME command is not supported */
+       sdev->no_write_same = 1;
 
        ufshcd_set_queue_depth(sdev);
 
index 53f7275d6cbdb513001582555f898adb35e1cc5b..750f931974119804734945a501e0786ca003977d 100644 (file)
@@ -348,7 +348,7 @@ static int imx_gpc_old_dt_init(struct device *dev, struct regmap *regmap,
                if (i == 1) {
                        domain->supply = devm_regulator_get(dev, "pu");
                        if (IS_ERR(domain->supply))
-                               return PTR_ERR(domain->supply);;
+                               return PTR_ERR(domain->supply);
 
                        ret = imx_pgc_get_clocks(dev, domain);
                        if (ret)
@@ -470,13 +470,21 @@ static int imx_gpc_probe(struct platform_device *pdev)
 
 static int imx_gpc_remove(struct platform_device *pdev)
 {
+       struct device_node *pgc_node;
        int ret;
 
+       pgc_node = of_get_child_by_name(pdev->dev.of_node, "pgc");
+
+       /* bail out if DT too old and doesn't provide the necessary info */
+       if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells") &&
+           !pgc_node)
+               return 0;
+
        /*
         * If the old DT binding is used the toplevel driver needs to
         * de-register the power domains
         */
-       if (!of_get_child_by_name(pdev->dev.of_node, "pgc")) {
+       if (!pgc_node) {
                of_genpd_del_provider(pdev->dev.of_node);
 
                ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_PU].base);
index bbdc53b686dd19503796ed8fdfd97fd01596195f..6dbba5aff19117c6b85c05a4a114be93068a82ab 100644 (file)
@@ -702,30 +702,32 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
        size_t pgstart, pgend;
        int ret = -EINVAL;
 
+       mutex_lock(&ashmem_mutex);
+
        if (unlikely(!asma->file))
-               return -EINVAL;
+               goto out_unlock;
 
-       if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
-               return -EFAULT;
+       if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) {
+               ret = -EFAULT;
+               goto out_unlock;
+       }
 
        /* per custom, you can pass zero for len to mean "everything onward" */
        if (!pin.len)
                pin.len = PAGE_ALIGN(asma->size) - pin.offset;
 
        if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
-               return -EINVAL;
+               goto out_unlock;
 
        if (unlikely(((__u32)-1) - pin.offset < pin.len))
-               return -EINVAL;
+               goto out_unlock;
 
        if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
-               return -EINVAL;
+               goto out_unlock;
 
        pgstart = pin.offset / PAGE_SIZE;
        pgend = pgstart + (pin.len / PAGE_SIZE) - 1;
 
-       mutex_lock(&ashmem_mutex);
-
        switch (cmd) {
        case ASHMEM_PIN:
                ret = ashmem_pin(asma, pgstart, pgend);
@@ -738,6 +740,7 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
                break;
        }
 
+out_unlock:
        mutex_unlock(&ashmem_mutex);
 
        return ret;
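
A userspace analogue of the reordering above: take the lock before validating the request so a concurrent caller cannot change the object between the checks and the pin/unpin, and route every early exit through a single unlock label. pin_range() and the pthread mutex are stand-ins for the ashmem internals.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int object_len = 4096;

static int pin_range(int offset, int len)
{
	int ret = -1;

	pthread_mutex_lock(&lock);

	if (offset < 0 || len <= 0)
		goto out_unlock;	/* validate only while holding the lock */
	if (offset + len > object_len)
		goto out_unlock;

	ret = 0;			/* ... perform the pin here ... */

out_unlock:
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	printf("valid:   %d\n", pin_range(0, 4096));
	printf("invalid: %d\n", pin_range(1024, 8192));
	return 0;
}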
index 94e06925c712b5e52bb06d240ee33057077f8b0a..49718c96bf9ee3de6734ec9acfde9aa8fbc738d8 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/err.h>
 #include <linux/cma.h>
 #include <linux/scatterlist.h>
+#include <linux/highmem.h>
 
 #include "ion.h"
 
@@ -42,6 +43,22 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
        if (!pages)
                return -ENOMEM;
 
+       if (PageHighMem(pages)) {
+               unsigned long nr_clear_pages = nr_pages;
+               struct page *page = pages;
+
+               while (nr_clear_pages > 0) {
+                       void *vaddr = kmap_atomic(page);
+
+                       memset(vaddr, 0, PAGE_SIZE);
+                       kunmap_atomic(vaddr);
+                       page++;
+                       nr_clear_pages--;
+               }
+       } else {
+               memset(page_address(pages), 0, size);
+       }
+
        table = kmalloc(sizeof(*table), GFP_KERNEL);
        if (!table)
                goto err;
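
A userspace sketch of the zeroing strategy added above: buffers reachable through one linear mapping are cleared with a single memset, while highmem allocations are mapped and cleared one page at a time. map_page()/unmap_page() stand in for kmap_atomic()/kunmap_atomic() and are illustrative only.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

static void *map_page(char *base, size_t idx)	{ return base + idx * PAGE_SIZE; }
static void  unmap_page(void *vaddr)		{ (void)vaddr; }

static void clear_buffer(char *buf, size_t nr_pages, int highmem)
{
	if (!highmem) {
		memset(buf, 0, nr_pages * PAGE_SIZE);	/* one linear clear */
		return;
	}
	for (size_t i = 0; i < nr_pages; i++) {		/* page-by-page clear */
		void *vaddr = map_page(buf, i);

		memset(vaddr, 0, PAGE_SIZE);
		unmap_page(vaddr);
	}
}

int main(void)
{
	size_t nr_pages = 4;
	char *buf = malloc(nr_pages * PAGE_SIZE);

	if (!buf)
		return 1;
	clear_buffer(buf, nr_pages, 1);
	printf("first byte after clear: %d\n", buf[0]);
	free(buf);
	return 0;
}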
index 1f91000491767a9c049a27b3f27edf4c3c39b4fe..b35ef7ee690146fe257bcd81516b4a5e9aa264ab 100644 (file)
@@ -7,7 +7,7 @@
 
 config FSL_MC_BUS
        bool "QorIQ DPAA2 fsl-mc bus driver"
-       depends on OF && (ARCH_LAYERSCAPE || (COMPILE_TEST && (ARM || ARM64 || X86 || PPC)))
+       depends on OF && (ARCH_LAYERSCAPE || (COMPILE_TEST && (ARM || ARM64 || X86_LOCAL_APIC || PPC)))
        select GENERIC_MSI_IRQ_DOMAIN
        help
          Driver to enable the bus infrastructure for the QorIQ DPAA2
index f01595593ce2d25558a51d47ec2eece33767f002..425e8b82533bb3e637e52ef3c1f48a24f3cc3afa 100644 (file)
 #define AD7192_GPOCON_P1DAT    BIT(1) /* P1 state */
 #define AD7192_GPOCON_P0DAT    BIT(0) /* P0 state */
 
+#define AD7192_EXT_FREQ_MHZ_MIN        2457600
+#define AD7192_EXT_FREQ_MHZ_MAX        5120000
 #define AD7192_INT_FREQ_MHZ    4915200
 
 /* NOTE:
@@ -218,6 +220,12 @@ static int ad7192_calibrate_all(struct ad7192_state *st)
                                ARRAY_SIZE(ad7192_calib_arr));
 }
 
+static inline bool ad7192_valid_external_frequency(u32 freq)
+{
+       return (freq >= AD7192_EXT_FREQ_MHZ_MIN &&
+               freq <= AD7192_EXT_FREQ_MHZ_MAX);
+}
+
 static int ad7192_setup(struct ad7192_state *st,
                        const struct ad7192_platform_data *pdata)
 {
@@ -243,17 +251,20 @@ static int ad7192_setup(struct ad7192_state *st,
                         id);
 
        switch (pdata->clock_source_sel) {
-       case AD7192_CLK_EXT_MCLK1_2:
-       case AD7192_CLK_EXT_MCLK2:
-               st->mclk = AD7192_INT_FREQ_MHZ;
-               break;
        case AD7192_CLK_INT:
        case AD7192_CLK_INT_CO:
-               if (pdata->ext_clk_hz)
-                       st->mclk = pdata->ext_clk_hz;
-               else
-                       st->mclk = AD7192_INT_FREQ_MHZ;
+               st->mclk = AD7192_INT_FREQ_MHZ;
                break;
+       case AD7192_CLK_EXT_MCLK1_2:
+       case AD7192_CLK_EXT_MCLK2:
+               if (ad7192_valid_external_frequency(pdata->ext_clk_hz)) {
+                       st->mclk = pdata->ext_clk_hz;
+                       break;
+               }
+               dev_err(&st->sd.spi->dev, "Invalid frequency setting %u\n",
+                       pdata->ext_clk_hz);
+               ret = -EINVAL;
+               goto out;
        default:
                ret = -EINVAL;
                goto out;
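
A userspace sketch of the range check introduced above: an external MCLK outside the part's supported window is rejected with an error instead of being silently replaced by the internal clock. The 2.4576 MHz and 5.12 MHz limits come from the hunk; everything else is illustrative.

#include <stdbool.h>
#include <stdio.h>

#define EXT_FREQ_MIN 2457600u
#define EXT_FREQ_MAX 5120000u

static bool valid_external_frequency(unsigned int freq)
{
	return freq >= EXT_FREQ_MIN && freq <= EXT_FREQ_MAX;
}

int main(void)
{
	printf("4.9152 MHz: %s\n", valid_external_frequency(4915200) ? "ok" : "reject");
	printf("0 Hz:       %s\n", valid_external_frequency(0)       ? "ok" : "reject");
	return 0;
}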
index 2b28fb9c0048e6c56e4b2c80c67f7e51c997c07b..3bcf494663617b73883241d73613f46324749e4b 100644 (file)
@@ -648,8 +648,6 @@ static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
        /* Ring buffer functions - here trigger setup related */
        indio_dev->setup_ops = &ad5933_ring_setup_ops;
 
-       indio_dev->modes |= INDIO_BUFFER_HARDWARE;
-
        return 0;
 }
 
@@ -762,7 +760,7 @@ static int ad5933_probe(struct i2c_client *client,
        indio_dev->dev.parent = &client->dev;
        indio_dev->info = &ad5933_info;
        indio_dev->name = id->name;
-       indio_dev->modes = INDIO_DIRECT_MODE;
+       indio_dev->modes = (INDIO_BUFFER_SOFTWARE | INDIO_DIRECT_MODE);
        indio_dev->channels = ad5933_channels;
        indio_dev->num_channels = ARRAY_SIZE(ad5933_channels);
 
index 06b3b54a0e6804494c64c89a9f9181981a1f9c61..7b366a6c0b493f2eb8bec4959830d222223f3cb2 100644 (file)
@@ -174,6 +174,7 @@ static int acm_wb_alloc(struct acm *acm)
                wb = &acm->wb[wbn];
                if (!wb->use) {
                        wb->use = 1;
+                       wb->len = 0;
                        return wbn;
                }
                wbn = (wbn + 1) % ACM_NW;
@@ -805,16 +806,18 @@ static int acm_tty_write(struct tty_struct *tty,
 static void acm_tty_flush_chars(struct tty_struct *tty)
 {
        struct acm *acm = tty->driver_data;
-       struct acm_wb *cur = acm->putbuffer;
+       struct acm_wb *cur;
        int err;
        unsigned long flags;
 
+       spin_lock_irqsave(&acm->write_lock, flags);
+
+       cur = acm->putbuffer;
        if (!cur) /* nothing to do */
-               return;
+               goto out;
 
        acm->putbuffer = NULL;
        err = usb_autopm_get_interface_async(acm->control);
-       spin_lock_irqsave(&acm->write_lock, flags);
        if (err < 0) {
                cur->use = 0;
                acm->putbuffer = cur;
index 4024926c1d68c93e97e40f822c4a690a4c113987..f4a548471f0fabb09ed78688cad39e0e085a3ca9 100644 (file)
@@ -226,6 +226,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
                        USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
 
+       /* Corsair K70 RGB */
+       { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
+
        /* Corsair Strafe RGB */
        { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
 
index e4c3ce0de5de11ba5532fb34c8b8d72e1fd98511..5bcad1d869b5070c628bf3b074ed078a5a9ed0c1 100644 (file)
@@ -1917,7 +1917,9 @@ static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg,
                /* Not specific buffer needed for ep0 ZLP */
                dma_addr_t dma = hs_ep->desc_list_dma;
 
-               dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep);
+               if (!index)
+                       dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep);
+
                dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0);
        } else {
                dwc2_writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
@@ -2974,9 +2976,13 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
        if (ints & DXEPINT_STSPHSERCVD) {
                dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__);
 
-               /* Move to STATUS IN for DDMA */
-               if (using_desc_dma(hsotg))
-                       dwc2_hsotg_ep0_zlp(hsotg, true);
+               /* Safety check EP0 state when STSPHSERCVD asserted */
+               if (hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
+                       /* Move to STATUS IN for DDMA */
+                       if (using_desc_dma(hsotg))
+                               dwc2_hsotg_ep0_zlp(hsotg, true);
+               }
+
        }
 
        if (ints & DXEPINT_BACK2BACKSETUP)
@@ -3375,12 +3381,6 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
        dwc2_writel(dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
               DXEPCTL_USBACTEP, hsotg->regs + DIEPCTL0);
 
-       dwc2_hsotg_enqueue_setup(hsotg);
-
-       dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
-               dwc2_readl(hsotg->regs + DIEPCTL0),
-               dwc2_readl(hsotg->regs + DOEPCTL0));
-
        /* clear global NAKs */
        val = DCTL_CGOUTNAK | DCTL_CGNPINNAK;
        if (!is_usb_reset)
@@ -3391,6 +3391,12 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
        mdelay(3);
 
        hsotg->lx_state = DWC2_L0;
+
+       dwc2_hsotg_enqueue_setup(hsotg);
+
+       dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
+               dwc2_readl(hsotg->regs + DIEPCTL0),
+               dwc2_readl(hsotg->regs + DOEPCTL0));
 }
 
 static void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg)
index ade2ab00d37ab1255a168b5f5e8d0911c1051a2f..f1d838a4acd61532823883a0debe63b630bcec8e 100644 (file)
@@ -100,6 +100,8 @@ static void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
        reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));
        reg |= DWC3_GCTL_PRTCAPDIR(mode);
        dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+
+       dwc->current_dr_role = mode;
 }
 
 static void __dwc3_set_mode(struct work_struct *work)
@@ -133,8 +135,6 @@ static void __dwc3_set_mode(struct work_struct *work)
 
        dwc3_set_prtcap(dwc, dwc->desired_dr_role);
 
-       dwc->current_dr_role = dwc->desired_dr_role;
-
        spin_unlock_irqrestore(&dwc->lock, flags);
 
        switch (dwc->desired_dr_role) {
@@ -219,7 +219,7 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
         * XHCI driver will reset the host block. If dwc3 was configured for
         * host-only mode, then we can return early.
         */
-       if (dwc->dr_mode == USB_DR_MODE_HOST)
+       if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
                return 0;
 
        reg = dwc3_readl(dwc->regs, DWC3_DCTL);
@@ -234,6 +234,9 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
                udelay(1);
        } while (--retries);
 
+       phy_exit(dwc->usb3_generic_phy);
+       phy_exit(dwc->usb2_generic_phy);
+
        return -ETIMEDOUT;
 }
 
@@ -483,6 +486,22 @@ static void dwc3_cache_hwparams(struct dwc3 *dwc)
        parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8);
 }
 
+static int dwc3_core_ulpi_init(struct dwc3 *dwc)
+{
+       int intf;
+       int ret = 0;
+
+       intf = DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3);
+
+       if (intf == DWC3_GHWPARAMS3_HSPHY_IFC_ULPI ||
+           (intf == DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI &&
+            dwc->hsphy_interface &&
+            !strncmp(dwc->hsphy_interface, "ulpi", 4)))
+               ret = dwc3_ulpi_init(dwc);
+
+       return ret;
+}
+
 /**
  * dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core
  * @dwc: Pointer to our controller context structure
@@ -494,7 +513,6 @@ static void dwc3_cache_hwparams(struct dwc3 *dwc)
 static int dwc3_phy_setup(struct dwc3 *dwc)
 {
        u32 reg;
-       int ret;
 
        reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
 
@@ -565,9 +583,6 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
                }
                /* FALLTHROUGH */
        case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI:
-               ret = dwc3_ulpi_init(dwc);
-               if (ret)
-                       return ret;
                /* FALLTHROUGH */
        default:
                break;
@@ -724,6 +739,7 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc)
 }
 
 static int dwc3_core_get_phy(struct dwc3 *dwc);
+static int dwc3_core_ulpi_init(struct dwc3 *dwc);
 
 /**
  * dwc3_core_init - Low-level initialization of DWC3 Core
@@ -755,17 +771,27 @@ static int dwc3_core_init(struct dwc3 *dwc)
                        dwc->maximum_speed = USB_SPEED_HIGH;
        }
 
-       ret = dwc3_core_get_phy(dwc);
+       ret = dwc3_phy_setup(dwc);
        if (ret)
                goto err0;
 
-       ret = dwc3_core_soft_reset(dwc);
-       if (ret)
-               goto err0;
+       if (!dwc->ulpi_ready) {
+               ret = dwc3_core_ulpi_init(dwc);
+               if (ret)
+                       goto err0;
+               dwc->ulpi_ready = true;
+       }
 
-       ret = dwc3_phy_setup(dwc);
+       if (!dwc->phys_ready) {
+               ret = dwc3_core_get_phy(dwc);
+               if (ret)
+                       goto err0a;
+               dwc->phys_ready = true;
+       }
+
+       ret = dwc3_core_soft_reset(dwc);
        if (ret)
-               goto err0;
+               goto err0a;
 
        dwc3_core_setup_global_control(dwc);
        dwc3_core_num_eps(dwc);
@@ -838,6 +864,9 @@ err1:
        phy_exit(dwc->usb2_generic_phy);
        phy_exit(dwc->usb3_generic_phy);
 
+err0a:
+       dwc3_ulpi_exit(dwc);
+
 err0:
        return ret;
 }
@@ -916,7 +945,6 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
 
        switch (dwc->dr_mode) {
        case USB_DR_MODE_PERIPHERAL:
-               dwc->current_dr_role = DWC3_GCTL_PRTCAP_DEVICE;
                dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
 
                if (dwc->usb2_phy)
@@ -932,7 +960,6 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
                }
                break;
        case USB_DR_MODE_HOST:
-               dwc->current_dr_role = DWC3_GCTL_PRTCAP_HOST;
                dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
 
                if (dwc->usb2_phy)
@@ -1234,7 +1261,6 @@ err4:
 
 err3:
        dwc3_free_event_buffers(dwc);
-       dwc3_ulpi_exit(dwc);
 
 err2:
        pm_runtime_allow(&pdev->dev);
@@ -1284,7 +1310,7 @@ static int dwc3_remove(struct platform_device *pdev)
 }
 
 #ifdef CONFIG_PM
-static int dwc3_suspend_common(struct dwc3 *dwc)
+static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
 {
        unsigned long   flags;
 
@@ -1296,6 +1322,10 @@ static int dwc3_suspend_common(struct dwc3 *dwc)
                dwc3_core_exit(dwc);
                break;
        case DWC3_GCTL_PRTCAP_HOST:
+               /* do nothing during host runtime_suspend */
+               if (!PMSG_IS_AUTO(msg))
+                       dwc3_core_exit(dwc);
+               break;
        default:
                /* do nothing */
                break;
@@ -1304,7 +1334,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc)
        return 0;
 }
 
-static int dwc3_resume_common(struct dwc3 *dwc)
+static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
 {
        unsigned long   flags;
        int             ret;
@@ -1320,6 +1350,13 @@ static int dwc3_resume_common(struct dwc3 *dwc)
                spin_unlock_irqrestore(&dwc->lock, flags);
                break;
        case DWC3_GCTL_PRTCAP_HOST:
+               /* nothing to do on host runtime_resume */
+               if (!PMSG_IS_AUTO(msg)) {
+                       ret = dwc3_core_init(dwc);
+                       if (ret)
+                               return ret;
+               }
+               break;
        default:
                /* do nothing */
                break;
@@ -1331,12 +1368,11 @@ static int dwc3_resume_common(struct dwc3 *dwc)
 static int dwc3_runtime_checks(struct dwc3 *dwc)
 {
        switch (dwc->current_dr_role) {
-       case USB_DR_MODE_PERIPHERAL:
-       case USB_DR_MODE_OTG:
+       case DWC3_GCTL_PRTCAP_DEVICE:
                if (dwc->connected)
                        return -EBUSY;
                break;
-       case USB_DR_MODE_HOST:
+       case DWC3_GCTL_PRTCAP_HOST:
        default:
                /* do nothing */
                break;
@@ -1353,7 +1389,7 @@ static int dwc3_runtime_suspend(struct device *dev)
        if (dwc3_runtime_checks(dwc))
                return -EBUSY;
 
-       ret = dwc3_suspend_common(dwc);
+       ret = dwc3_suspend_common(dwc, PMSG_AUTO_SUSPEND);
        if (ret)
                return ret;
 
@@ -1369,7 +1405,7 @@ static int dwc3_runtime_resume(struct device *dev)
 
        device_init_wakeup(dev, false);
 
-       ret = dwc3_resume_common(dwc);
+       ret = dwc3_resume_common(dwc, PMSG_AUTO_RESUME);
        if (ret)
                return ret;
 
@@ -1416,7 +1452,7 @@ static int dwc3_suspend(struct device *dev)
        struct dwc3     *dwc = dev_get_drvdata(dev);
        int             ret;
 
-       ret = dwc3_suspend_common(dwc);
+       ret = dwc3_suspend_common(dwc, PMSG_SUSPEND);
        if (ret)
                return ret;
 
@@ -1432,7 +1468,7 @@ static int dwc3_resume(struct device *dev)
 
        pinctrl_pm_select_default_state(dev);
 
-       ret = dwc3_resume_common(dwc);
+       ret = dwc3_resume_common(dwc, PMSG_RESUME);
        if (ret)
                return ret;
 
index 03c7aaaac9268dbc61fa7fdfc47a1b37dd7b7f9c..860d2bc184d1c4a29e0a17ea648152c014cb50fc 100644 (file)
 #define DWC3_GDBGFIFOSPACE_TYPE(n)     (((n) << 5) & 0x1e0)
 #define DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(n) (((n) >> 16) & 0xffff)
 
-#define DWC3_TXFIFOQ           1
-#define DWC3_RXFIFOQ           3
-#define DWC3_TXREQQ            5
-#define DWC3_RXREQQ            7
-#define DWC3_RXINFOQ           9
-#define DWC3_DESCFETCHQ                13
-#define DWC3_EVENTQ            15
+#define DWC3_TXFIFOQ           0
+#define DWC3_RXFIFOQ           1
+#define DWC3_TXREQQ            2
+#define DWC3_RXREQQ            3
+#define DWC3_RXINFOQ           4
+#define DWC3_PSTATQ            5
+#define DWC3_DESCFETCHQ                6
+#define DWC3_EVENTQ            7
+#define DWC3_AUXEVENTQ         8
 
 /* Global RX Threshold Configuration Register */
 #define DWC3_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 19)
@@ -795,7 +797,9 @@ struct dwc3_scratchpad_array {
  * @usb3_phy: pointer to USB3 PHY
  * @usb2_generic_phy: pointer to USB2 PHY
  * @usb3_generic_phy: pointer to USB3 PHY
+ * @phys_ready: flag to indicate that PHYs are ready
  * @ulpi: pointer to ulpi interface
+ * @ulpi_ready: flag to indicate that ULPI is initialized
  * @u2sel: parameter from Set SEL request.
  * @u2pel: parameter from Set SEL request.
  * @u1sel: parameter from Set SEL request.
@@ -893,7 +897,10 @@ struct dwc3 {
        struct phy              *usb2_generic_phy;
        struct phy              *usb3_generic_phy;
 
+       bool                    phys_ready;
+
        struct ulpi             *ulpi;
+       bool                    ulpi_ready;
 
        void __iomem            *regs;
        size_t                  regs_size;
index 7ae0eefc7cc7daf0382c7aeaa56c14659f025ffb..e54c3622eb28a909aa5beb1d61ec09ec1324d082 100644 (file)
@@ -143,6 +143,7 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
                clk_disable_unprepare(simple->clks[i]);
                clk_put(simple->clks[i]);
        }
+       simple->num_clocks = 0;
 
        reset_control_assert(simple->resets);
        reset_control_put(simple->resets);
index a4719e853b85a96c185c6a011e10a353bc7d8d94..ed8b865176758fdeef0fc4d7c754793aad580ac0 100644 (file)
@@ -582,9 +582,25 @@ static int dwc3_omap_resume(struct device *dev)
        return 0;
 }
 
+static void dwc3_omap_complete(struct device *dev)
+{
+       struct dwc3_omap        *omap = dev_get_drvdata(dev);
+
+       if (extcon_get_state(omap->edev, EXTCON_USB))
+               dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID);
+       else
+               dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF);
+
+       if (extcon_get_state(omap->edev, EXTCON_USB_HOST))
+               dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND);
+       else
+               dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT);
+}
+
 static const struct dev_pm_ops dwc3_omap_dev_pm_ops = {
 
        SET_SYSTEM_SLEEP_PM_OPS(dwc3_omap_suspend, dwc3_omap_resume)
+       .complete = dwc3_omap_complete,
 };
 
 #define DEV_PM_OPS     (&dwc3_omap_dev_pm_ops)
index 9c2e4a17918e4c98fbd6d383a91d02388168e2b3..18be31d5743a1e64528c1567612f26be95106878 100644 (file)
@@ -854,7 +854,12 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
                trb++;
                trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
                trace_dwc3_complete_trb(ep0, trb);
-               ep0->trb_enqueue = 0;
+
+               if (r->direction)
+                       dwc->eps[1]->trb_enqueue = 0;
+               else
+                       dwc->eps[0]->trb_enqueue = 0;
+
                dwc->ep0_bounced = false;
        }
 
index 616ef49ccb49e106c6a454c1351806bb18b1a257..2bda4eb1e9ac175858189c498af875ed8315df80 100644 (file)
@@ -2745,6 +2745,8 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
                break;
        }
 
+       dwc->eps[1]->endpoint.maxpacket = dwc->gadget.ep0->maxpacket;
+
        /* Enable USB2 LPM Capability */
 
        if ((dwc->revision > DWC3_REVISION_194A) &&
index 8f2cf3baa19c10676915acbf4406a474e2e3e2a2..c2592d883f67c45f980c21cfa4181f77fac14760 100644 (file)
@@ -1855,44 +1855,20 @@ static int ffs_func_eps_enable(struct ffs_function *func)
 
        spin_lock_irqsave(&func->ffs->eps_lock, flags);
        while(count--) {
-               struct usb_endpoint_descriptor *ds;
-               struct usb_ss_ep_comp_descriptor *comp_desc = NULL;
-               int needs_comp_desc = false;
-               int desc_idx;
-
-               if (ffs->gadget->speed == USB_SPEED_SUPER) {
-                       desc_idx = 2;
-                       needs_comp_desc = true;
-               } else if (ffs->gadget->speed == USB_SPEED_HIGH)
-                       desc_idx = 1;
-               else
-                       desc_idx = 0;
-
-               /* fall-back to lower speed if desc missing for current speed */
-               do {
-                       ds = ep->descs[desc_idx];
-               } while (!ds && --desc_idx >= 0);
-
-               if (!ds) {
-                       ret = -EINVAL;
-                       break;
-               }
-
                ep->ep->driver_data = ep;
-               ep->ep->desc = ds;
 
-               if (needs_comp_desc) {
-                       comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds +
-                                       USB_DT_ENDPOINT_SIZE);
-                       ep->ep->maxburst = comp_desc->bMaxBurst + 1;
-                       ep->ep->comp_desc = comp_desc;
+               ret = config_ep_by_speed(func->gadget, &func->function, ep->ep);
+               if (ret) {
+                       pr_err("%s: config_ep_by_speed(%s) returned %d\n",
+                                       __func__, ep->ep->name, ret);
+                       break;
                }
 
                ret = usb_ep_enable(ep->ep);
                if (likely(!ret)) {
                        epfile->ep = ep;
-                       epfile->in = usb_endpoint_dir_in(ds);
-                       epfile->isoc = usb_endpoint_xfer_isoc(ds);
+                       epfile->in = usb_endpoint_dir_in(ep->ep->desc);
+                       epfile->isoc = usb_endpoint_xfer_isoc(ep->ep->desc);
                } else {
                        break;
                }
@@ -2979,10 +2955,8 @@ static int _ffs_func_bind(struct usb_configuration *c,
        struct ffs_data *ffs = func->ffs;
 
        const int full = !!func->ffs->fs_descs_count;
-       const int high = gadget_is_dualspeed(func->gadget) &&
-               func->ffs->hs_descs_count;
-       const int super = gadget_is_superspeed(func->gadget) &&
-               func->ffs->ss_descs_count;
+       const int high = !!func->ffs->hs_descs_count;
+       const int super = !!func->ffs->ss_descs_count;
 
        int fs_len, hs_len, ss_len, ret, i;
        struct ffs_ep *eps_ptr;
index 11fe788b430879d3559b5a6fda7a4a563cd22c34..d2dc1f00180b7869201afc1dbd82dcd4b126ab16 100644 (file)
@@ -524,6 +524,8 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
                dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
                return ret;
        }
+       iad_desc.bFirstInterface = ret;
+
        std_ac_if_desc.bInterfaceNumber = ret;
        uac2->ac_intf = ret;
        uac2->ac_alt = 0;
index 1e9567091d86073f6e77f85021531de142c055a2..0875d38476ee9395fe33b1bb0d9b865e28322b24 100644 (file)
@@ -274,7 +274,6 @@ config USB_SNP_UDC_PLAT
        tristate "Synopsys USB 2.0 Device controller"
        depends on USB_GADGET && OF && HAS_DMA
        depends on EXTCON || EXTCON=n
-       select USB_GADGET_DUALSPEED
        select USB_SNP_CORE
        default ARCH_BCM_IPROC
        help
index 1e940f054cb8c41cb2150cd621eae1580f256bec..6dbc489513cdbbc5564598b0be10b2fce668c1e2 100644 (file)
@@ -77,6 +77,7 @@ static int bdc_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
        if (ret) {
                dev_err(&pci->dev,
                        "couldn't add resources to bdc device\n");
+               platform_device_put(bdc);
                return ret;
        }
 
index 859d5b11ba4c45353ce4174b1e0436db67189b90..1f8b19d9cf97bdd2224102a3e2a1684460c6a46f 100644 (file)
@@ -180,8 +180,8 @@ EXPORT_SYMBOL_GPL(usb_ep_alloc_request);
 void usb_ep_free_request(struct usb_ep *ep,
                                       struct usb_request *req)
 {
-       ep->ops->free_request(ep, req);
        trace_usb_ep_free_request(ep, req, 0);
+       ep->ops->free_request(ep, req);
 }
 EXPORT_SYMBOL_GPL(usb_ep_free_request);
 
index e5b4ee96c4bf6817e8e84f2af81644732b008ff3..56b517a38865aace8b212a75044d8ab29bf575ec 100644 (file)
@@ -1305,7 +1305,7 @@ static void udc_reset_ep_queue(struct fsl_udc *udc, u8 pipe)
 {
        struct fsl_ep *ep = get_ep_by_pipe(udc, pipe);
 
-       if (ep->name)
+       if (ep->ep.name)
                nuke(ep, -ESHUTDOWN);
 }
 
@@ -1693,7 +1693,7 @@ static void dtd_complete_irq(struct fsl_udc *udc)
                curr_ep = get_ep_by_pipe(udc, i);
 
                /* If the ep is configured */
-               if (curr_ep->name == NULL) {
+               if (!curr_ep->ep.name) {
                        WARNING("Invalid EP?");
                        continue;
                }
index 6e87af2483679aac59f23e942dde9367a30d35e4..409cde4e6a516495da098e6648d70069db3a738d 100644 (file)
@@ -2410,7 +2410,7 @@ static int renesas_usb3_remove(struct platform_device *pdev)
        __renesas_usb3_ep_free_request(usb3->ep0_req);
        if (usb3->phy)
                phy_put(usb3->phy);
-       pm_runtime_disable(usb3_to_dev(usb3));
+       pm_runtime_disable(&pdev->dev);
 
        return 0;
 }
index facafdf8fb95dbc5104124b9d580f6c68e09b59a..d7641cbdee43d64e5bfcaff7c0628988f3c370fc 100644 (file)
@@ -774,12 +774,12 @@ static struct urb *request_single_step_set_feature_urb(
        atomic_inc(&urb->use_count);
        atomic_inc(&urb->dev->urbnum);
        urb->setup_dma = dma_map_single(
-                       hcd->self.controller,
+                       hcd->self.sysdev,
                        urb->setup_packet,
                        sizeof(struct usb_ctrlrequest),
                        DMA_TO_DEVICE);
        urb->transfer_dma = dma_map_single(
-                       hcd->self.controller,
+                       hcd->self.sysdev,
                        urb->transfer_buffer,
                        urb->transfer_buffer_length,
                        DMA_FROM_DEVICE);
index 88158324dcae24ce8e19a881639c86dd324101b2..3276304056952b5d377ffb55971e62140c414760 100644 (file)
@@ -1188,10 +1188,10 @@ static int submit_single_step_set_feature(
         * 15 secs after the setup
         */
        if (is_setup) {
-               /* SETUP pid */
+               /* SETUP pid, and interrupt after SETUP completion */
                qtd_fill(ehci, qtd, urb->setup_dma,
                                sizeof(struct usb_ctrlrequest),
-                               token | (2 /* "setup" */ << 8), 8);
+                               QTD_IOC | token | (2 /* "setup" */ << 8), 8);
 
                submit_async(ehci, urb, &qtd_list, GFP_ATOMIC);
                return 0; /*Return now; we shall come back after 15 seconds*/
@@ -1228,12 +1228,8 @@ static int submit_single_step_set_feature(
        qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
        list_add_tail(&qtd->qtd_list, head);
 
-       /* dont fill any data in such packets */
-       qtd_fill(ehci, qtd, 0, 0, token, 0);
-
-       /* by default, enable interrupt on urb completion */
-       if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT)))
-               qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
+       /* Interrupt after STATUS completion */
+       qtd_fill(ehci, qtd, 0, 0, token | QTD_IOC, 0);
 
        submit_async(ehci, urb, &qtd_list, GFP_KERNEL);
 
index ee96763493332458da2397bc85a24e8dfa5b70e6..84f88fa411cde338981dd9c7686d916dc22fd6ed 100644 (file)
@@ -74,6 +74,7 @@ static const char     hcd_name [] = "ohci_hcd";
 
 #define        STATECHANGE_DELAY       msecs_to_jiffies(300)
 #define        IO_WATCHDOG_DELAY       msecs_to_jiffies(275)
+#define        IO_WATCHDOG_OFF         0xffffff00
 
 #include "ohci.h"
 #include "pci-quirks.h"
@@ -231,7 +232,7 @@ static int ohci_urb_enqueue (
                }
 
                /* Start up the I/O watchdog timer, if it's not running */
-               if (!timer_pending(&ohci->io_watchdog) &&
+               if (ohci->prev_frame_no == IO_WATCHDOG_OFF &&
                                list_empty(&ohci->eds_in_use) &&
                                !(ohci->flags & OHCI_QUIRK_QEMU)) {
                        ohci->prev_frame_no = ohci_frame_no(ohci);
@@ -501,6 +502,7 @@ static int ohci_init (struct ohci_hcd *ohci)
                return 0;
 
        timer_setup(&ohci->io_watchdog, io_watchdog_func, 0);
+       ohci->prev_frame_no = IO_WATCHDOG_OFF;
 
        ohci->hcca = dma_alloc_coherent (hcd->self.controller,
                        sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL);
@@ -730,7 +732,7 @@ static void io_watchdog_func(struct timer_list *t)
        u32             head;
        struct ed       *ed;
        struct td       *td, *td_start, *td_next;
-       unsigned        frame_no;
+       unsigned        frame_no, prev_frame_no = IO_WATCHDOG_OFF;
        unsigned long   flags;
 
        spin_lock_irqsave(&ohci->lock, flags);
@@ -835,7 +837,7 @@ static void io_watchdog_func(struct timer_list *t)
                        }
                }
                if (!list_empty(&ohci->eds_in_use)) {
-                       ohci->prev_frame_no = frame_no;
+                       prev_frame_no = frame_no;
                        ohci->prev_wdh_cnt = ohci->wdh_cnt;
                        ohci->prev_donehead = ohci_readl(ohci,
                                        &ohci->regs->donehead);
@@ -845,6 +847,7 @@ static void io_watchdog_func(struct timer_list *t)
        }
 
  done:
+       ohci->prev_frame_no = prev_frame_no;
        spin_unlock_irqrestore(&ohci->lock, flags);
 }
 
@@ -973,6 +976,7 @@ static void ohci_stop (struct usb_hcd *hcd)
        if (quirk_nec(ohci))
                flush_work(&ohci->nec_work);
        del_timer_sync(&ohci->io_watchdog);
+       ohci->prev_frame_no = IO_WATCHDOG_OFF;
 
        ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
        ohci_usb_reset(ohci);
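
The ohci-hcd changes above stop relying on timer_pending() and instead track whether the I/O watchdog is armed with a sentinel frame number, IO_WATCHDOG_OFF, updated only under the driver's own lock. A simplified, lock-free sketch of the sentinel idea (invented struct and values, not the HCD's real state):

#include <stdio.h>

#define WATCHDOG_OFF 0xffffff00u	/* impossible frame number = "not armed" */

struct hc_state {
	unsigned int prev_frame_no;	/* WATCHDOG_OFF while the watchdog is idle */
};

static void watchdog_arm(struct hc_state *hc, unsigned int frame_no)
{
	if (hc->prev_frame_no != WATCHDOG_OFF)
		return;				/* already armed, nothing to do */
	hc->prev_frame_no = frame_no;		/* remember where monitoring started */
	printf("watchdog armed at frame %u\n", frame_no);
}

static void watchdog_disarm(struct hc_state *hc)
{
	hc->prev_frame_no = WATCHDOG_OFF;
	printf("watchdog disarmed\n");
}

int main(void)
{
	struct hc_state hc = { .prev_frame_no = WATCHDOG_OFF };

	watchdog_arm(&hc, 100);
	watchdog_arm(&hc, 200);		/* ignored: the sentinel says it is running */
	watchdog_disarm(&hc);
	return 0;
}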
index fb7aaa3b9d067956b7bd6089217af6b5e2180cb8..634f3c7bf7748085758635ffe86108c948e0d08e 100644 (file)
@@ -311,8 +311,10 @@ static int ohci_bus_suspend (struct usb_hcd *hcd)
                rc = ohci_rh_suspend (ohci, 0);
        spin_unlock_irq (&ohci->lock);
 
-       if (rc == 0)
+       if (rc == 0) {
                del_timer_sync(&ohci->io_watchdog);
+               ohci->prev_frame_no = IO_WATCHDOG_OFF;
+       }
        return rc;
 }
 
index b2ec8c399363b12fd430f10667303c88ecf5bc1a..4ccb85a67bb3c65bcf3d12e393e0838d4e30ada1 100644 (file)
@@ -1019,6 +1019,8 @@ skip_ed:
                 * have modified this list.  normally it's just prepending
                 * entries (which we'd ignore), but paranoia won't hurt.
                 */
+               *last = ed->ed_next;
+               ed->ed_next = NULL;
                modified = 0;
 
                /* unlink urbs as requested, but rescan the list after
@@ -1077,21 +1079,22 @@ rescan_this:
                        goto rescan_this;
 
                /*
-                * If no TDs are queued, take ED off the ed_rm_list.
+                * If no TDs are queued, ED is now idle.
                 * Otherwise, if the HC is running, reschedule.
-                * If not, leave it on the list for further dequeues.
+                * If the HC isn't running, add ED back to the
+                * start of the list for later processing.
                 */
                if (list_empty(&ed->td_list)) {
-                       *last = ed->ed_next;
-                       ed->ed_next = NULL;
                        ed->state = ED_IDLE;
                        list_del(&ed->in_use_list);
                } else if (ohci->rh_state == OHCI_RH_RUNNING) {
-                       *last = ed->ed_next;
-                       ed->ed_next = NULL;
                        ed_schedule(ohci, ed);
                } else {
-                       last = &ed->ed_next;
+                       ed->ed_next = ohci->ed_rm_list;
+                       ohci->ed_rm_list = ed;
+                       /* Don't loop on the same ED */
+                       if (last == &ohci->ed_rm_list)
+                               last = &ed->ed_next;
                }
 
                if (modified)
index 1615367170251a2c33b3db8bef9d9cf9ab4d80f7..67ad4bb6919a22b107630bf976fc4c5c66b148e1 100644 (file)
 #define        AX_INDXC                0x30
 #define        AX_DATAC                0x34
 
+#define PT_ADDR_INDX           0xE8
+#define PT_READ_INDX           0xE4
+#define PT_SIG_1_ADDR          0xA520
+#define PT_SIG_2_ADDR          0xA521
+#define PT_SIG_3_ADDR          0xA522
+#define PT_SIG_4_ADDR          0xA523
+#define PT_SIG_1_DATA          0x78
+#define PT_SIG_2_DATA          0x56
+#define PT_SIG_3_DATA          0x34
+#define PT_SIG_4_DATA          0x12
+#define PT4_P1_REG             0xB521
+#define PT4_P2_REG             0xB522
+#define PT2_P1_REG             0xD520
+#define PT2_P2_REG             0xD521
+#define PT1_P1_REG             0xD522
+#define PT1_P2_REG             0xD523
+
 #define        NB_PCIE_INDX_ADDR       0xe0
 #define        NB_PCIE_INDX_DATA       0xe4
 #define        PCIE_P_CNTL             0x10040
@@ -512,6 +529,98 @@ void usb_amd_dev_put(void)
 }
 EXPORT_SYMBOL_GPL(usb_amd_dev_put);
 
+/*
+ * Check if port is disabled in BIOS on AMD Promontory host.
+ * BIOS Disabled ports may wake on connect/disconnect and need
+ * driver workaround to keep them disabled.
+ * Returns true if port is marked disabled.
+ */
+bool usb_amd_pt_check_port(struct device *device, int port)
+{
+       unsigned char value, port_shift;
+       struct pci_dev *pdev;
+       u16 reg;
+
+       pdev = to_pci_dev(device);
+       pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_1_ADDR);
+
+       pci_read_config_byte(pdev, PT_READ_INDX, &value);
+       if (value != PT_SIG_1_DATA)
+               return false;
+
+       pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_2_ADDR);
+
+       pci_read_config_byte(pdev, PT_READ_INDX, &value);
+       if (value != PT_SIG_2_DATA)
+               return false;
+
+       pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_3_ADDR);
+
+       pci_read_config_byte(pdev, PT_READ_INDX, &value);
+       if (value != PT_SIG_3_DATA)
+               return false;
+
+       pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_4_ADDR);
+
+       pci_read_config_byte(pdev, PT_READ_INDX, &value);
+       if (value != PT_SIG_4_DATA)
+               return false;
+
+       /* Check the disabled-port setting; if the bit is set, the port is enabled */
+       switch (pdev->device) {
+       case 0x43b9:
+       case 0x43ba:
+       /*
+        * device is AMD_PROMONTORYA_4(0x43b9) or PROMONTORYA_3(0x43ba)
+        * PT4_P1_REG bits[7..1] represent USB2.0 ports 6 to 0
+        * PT4_P2_REG bits[6..0] represent ports 13 to 7
+        */
+               if (port > 6) {
+                       reg = PT4_P2_REG;
+                       port_shift = port - 7;
+               } else {
+                       reg = PT4_P1_REG;
+                       port_shift = port + 1;
+               }
+               break;
+       case 0x43bb:
+       /*
+        * device is AMD_PROMONTORYA_2(0x43bb)
+        * PT2_P1_REG bits[7..5] represent USB2.0 ports 2 to 0
+        * PT2_P2_REG bits[5..0] represent ports 9 to 3
+        */
+               if (port > 2) {
+                       reg = PT2_P2_REG;
+                       port_shift = port - 3;
+               } else {
+                       reg = PT2_P1_REG;
+                       port_shift = port + 5;
+               }
+               break;
+       case 0x43bc:
+       /*
+        * device is AMD_PROMONTORYA_1(0x43bc)
+        * PT1_P1_REG bits[7..4] represent USB2.0 ports 3 to 0
+        * PT1_P2_REG bits[5..0] represent ports 9 to 4
+        */
+               if (port > 3) {
+                       reg = PT1_P2_REG;
+                       port_shift = port - 4;
+               } else {
+                       reg = PT1_P1_REG;
+                       port_shift = port + 4;
+               }
+               break;
+       default:
+               return false;
+       }
+       pci_write_config_word(pdev, PT_ADDR_INDX, reg);
+       pci_read_config_byte(pdev, PT_READ_INDX, &value);
+
+       return !(value & BIT(port_shift));
+}
+EXPORT_SYMBOL_GPL(usb_amd_pt_check_port);
+
 /*
  * Make sure the controller is completely inactive, unable to
  * generate interrupts or do DMA.
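
usb_amd_pt_check_port() above reads the Promontory registers through an index/data pair in PCI config space: the register address goes into PT_ADDR_INDX and the byte behind it comes back through PT_READ_INDX. A toy sketch of the same index/data idiom against an in-memory register file (the accessors and backing array are made up for illustration; the real code uses pci_write_config_word()/pci_read_config_byte()):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t regfile[0x10000];	/* fake register space */
static uint16_t latched_addr;		/* what the "index" register currently selects */

static void index_write(uint16_t addr)
{
	latched_addr = addr;			/* like writing PT_ADDR_INDX */
}

static uint8_t data_read(void)
{
	return regfile[latched_addr];		/* like reading PT_READ_INDX */
}

/* A port counts as BIOS-disabled when its bit reads back as 0. */
static bool port_disabled(uint16_t reg, unsigned int bit)
{
	index_write(reg);
	return !(data_read() & (1u << bit));
}

int main(void)
{
	regfile[0xB521] = 0x02;		/* only bit 1 set: that port stays enabled */

	printf("bit 0 disabled: %d\n", port_disabled(0xB521, 0));
	printf("bit 1 disabled: %d\n", port_disabled(0xB521, 1));
	return 0;
}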
index b68dcb5dd0fdb0d69b2e2e48cc9f984e58bff56d..4ca0d9b7e463c54766e7881d239f8e6319d89c13 100644 (file)
@@ -17,6 +17,7 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev);
 void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
 void sb800_prefetch(struct device *dev, int on);
 bool usb_xhci_needs_pci_reset(struct pci_dev *pdev);
+bool usb_amd_pt_check_port(struct device *device, int port);
 #else
 struct pci_dev;
 static inline void usb_amd_quirk_pll_disable(void) {}
@@ -25,6 +26,10 @@ static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {}
 static inline void usb_amd_dev_put(void) {}
 static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {}
 static inline void sb800_prefetch(struct device *dev, int on) {}
+static inline bool usb_amd_pt_check_port(struct device *device, int port)
+{
+       return false;
+}
 #endif  /* CONFIG_USB_PCI */
 
 #endif  /*  __LINUX_USB_PCI_QUIRKS_H  */
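
The header change pairs the new usb_amd_pt_check_port() prototype with a static inline stub that returns false when CONFIG_USB_PCI is off, so callers compile unchanged either way. A generic sketch of that configuration-stub pattern (macro name and function invented for the example):

#include <stdbool.h>
#include <stdio.h>

#define HAVE_QUIRKS 0	/* flip to 1 where a real implementation is linked in */

#if HAVE_QUIRKS
bool quirk_port_disabled(int port);		/* real version lives elsewhere */
#else
static inline bool quirk_port_disabled(int port)
{
	(void)port;
	return false;	/* no quirk support: ports are never "disabled" */
}
#endif

int main(void)
{
	printf("port 3 disabled: %d\n", quirk_port_disabled(3));
	return 0;
}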
index e26e685d8a578fddffaa7ff220a0c4442ba4f5ce..5851052d4668a64f9ab816de84f1381100650ea6 100644 (file)
@@ -211,7 +211,7 @@ static void xhci_ring_dump_segment(struct seq_file *s,
 static int xhci_ring_trb_show(struct seq_file *s, void *unused)
 {
        int                     i;
-       struct xhci_ring        *ring = s->private;
+       struct xhci_ring        *ring = *(struct xhci_ring **)s->private;
        struct xhci_segment     *seg = ring->first_seg;
 
        for (i = 0; i < ring->num_segs; i++) {
@@ -387,7 +387,7 @@ void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci,
 
        snprintf(epriv->name, sizeof(epriv->name), "ep%02d", ep_index);
        epriv->root = xhci_debugfs_create_ring_dir(xhci,
-                                                  &dev->eps[ep_index].new_ring,
+                                                  &dev->eps[ep_index].ring,
                                                   epriv->name,
                                                   spriv->root);
        spriv->eps[ep_index] = epriv;
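
The xhci-debugfs fix above registers the address of the endpoint's ring pointer and dereferences it at read time, so a ring that is reallocated after the file was created is still the one dumped. A small sketch of that pointer-to-pointer indirection (generic show() callback, not the seq_file API):

#include <stdio.h>
#include <stdlib.h>

struct ring {
	int num_segs;
};

/* The "file" stores a struct ring **, so it always follows the live pointer. */
static void show(void *private)
{
	struct ring *ring = *(struct ring **)private;

	printf("ring has %d segment(s)\n", ring->num_segs);
}

int main(void)
{
	struct ring *ep_ring = malloc(sizeof(*ep_ring));
	void *private = &ep_ring;	/* registered once, when the file is created */

	ep_ring->num_segs = 1;
	show(private);

	/* The endpoint is reconfigured and its ring replaced... */
	free(ep_ring);
	ep_ring = malloc(sizeof(*ep_ring));
	ep_ring->num_segs = 4;

	show(private);			/* ...and the reader still sees the new ring */
	free(ep_ring);
	return 0;
}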
index 46d5e08f05f154361a0adef325d56cc732f4c172..72ebbc908e19f7ea9701ca0e831ee703874d1908 100644 (file)
@@ -1224,17 +1224,17 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                                temp = readl(port_array[wIndex]);
                                break;
                        }
-
-                       /* Software should not attempt to set
-                        * port link state above '3' (U3) and the port
-                        * must be enabled.
-                        */
-                       if ((temp & PORT_PE) == 0 ||
-                               (link_state > USB_SS_PORT_LS_U3)) {
-                               xhci_warn(xhci, "Cannot set link state.\n");
+                       /* Port must be enabled */
+                       if (!(temp & PORT_PE)) {
+                               retval = -ENODEV;
+                               break;
+                       }
+                       /* Can't set port link state above '3' (U3) */
+                       if (link_state > USB_SS_PORT_LS_U3) {
+                               xhci_warn(xhci, "Cannot set port %d link state %d\n",
+                                        wIndex, link_state);
                                goto error;
                        }
-
                        if (link_state == USB_SS_PORT_LS_U3) {
                                slot_id = xhci_find_slot_id_by_port(hcd, xhci,
                                                wIndex + 1);
@@ -1522,6 +1522,13 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
                                t2 |= PORT_WKOC_E | PORT_WKCONN_E;
                                t2 &= ~PORT_WKDISC_E;
                        }
+
+                       if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) &&
+                           (hcd->speed < HCD_USB3)) {
+                               if (usb_amd_pt_check_port(hcd->self.controller,
+                                                         port_index))
+                                       t2 &= ~PORT_WAKE_BITS;
+                       }
                } else
                        t2 &= ~PORT_WAKE_BITS;
 
index 6c79037876db0523ed24c47561fbe1c7e2e2a1ba..5262fa571a5dabb3d84437798b033c5a9af91347 100644 (file)
 #define PCI_DEVICE_ID_INTEL_APL_XHCI                   0x5aa8
 #define PCI_DEVICE_ID_INTEL_DNV_XHCI                   0x19d0
 
+#define PCI_DEVICE_ID_AMD_PROMONTORYA_4                        0x43b9
+#define PCI_DEVICE_ID_AMD_PROMONTORYA_3                        0x43ba
+#define PCI_DEVICE_ID_AMD_PROMONTORYA_2                        0x43bb
+#define PCI_DEVICE_ID_AMD_PROMONTORYA_1                        0x43bc
 #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI               0x1142
 
 static const char hcd_name[] = "xhci_hcd";
@@ -125,6 +129,13 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
        if (pdev->vendor == PCI_VENDOR_ID_AMD)
                xhci->quirks |= XHCI_TRUST_TX_LENGTH;
 
+       if ((pdev->vendor == PCI_VENDOR_ID_AMD) &&
+               ((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) ||
+               (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_3) ||
+               (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2) ||
+               (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1)))
+               xhci->quirks |= XHCI_U2_DISABLE_WAKE;
+
        if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
                xhci->quirks |= XHCI_LPM_SUPPORT;
                xhci->quirks |= XHCI_INTEL_HOST;
index 1eeb3396300f245fd1741f319cc62a5482487ed9..25d4b748a56f3f4a5e3d29a55e853571195fc014 100644 (file)
@@ -646,8 +646,6 @@ static void xhci_stop(struct usb_hcd *hcd)
                return;
        }
 
-       xhci_debugfs_exit(xhci);
-
        xhci_dbc_exit(xhci);
 
        spin_lock_irq(&xhci->lock);
@@ -680,6 +678,7 @@ static void xhci_stop(struct usb_hcd *hcd)
 
        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
        xhci_mem_cleanup(xhci);
+       xhci_debugfs_exit(xhci);
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "xhci_stop completed - status = %x",
                        readl(&xhci->op_regs->status));
@@ -1014,6 +1013,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 
                xhci_dbg(xhci, "cleaning up memory\n");
                xhci_mem_cleanup(xhci);
+               xhci_debugfs_exit(xhci);
                xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
                            readl(&xhci->op_regs->status));
 
@@ -3544,12 +3544,10 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
                virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
                del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
        }
-
+       xhci_debugfs_remove_slot(xhci, udev->slot_id);
        ret = xhci_disable_slot(xhci, udev->slot_id);
-       if (ret) {
-               xhci_debugfs_remove_slot(xhci, udev->slot_id);
+       if (ret)
                xhci_free_virt_device(xhci, udev->slot_id);
-       }
 }
 
 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
index 96099a245c69eecdd84feae2c3924ac7e9ca0a84..e4d7d3d06a759489b0c4aeb8690b58ca72f9c056 100644 (file)
@@ -1822,7 +1822,7 @@ struct xhci_hcd {
 /* For controller with a broken Port Disable implementation */
 #define XHCI_BROKEN_PORT_PED   (1 << 25)
 #define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26)
-/* Reserved. It was XHCI_U2_DISABLE_WAKE */
+#define XHCI_U2_DISABLE_WAKE   (1 << 27)
 #define XHCI_ASMEDIA_MODIFY_FLOWCONTROL        (1 << 28)
 #define XHCI_HW_LPM_DISABLE    (1 << 29)
 
index 63b9e85dc0e9365254bc8752a833fd6fd1ba24dd..236a60f53099e042faa9d804f4c470b1d5b90ef1 100644 (file)
@@ -42,6 +42,9 @@
 #define USB_DEVICE_ID_LD_MICROCASSYTIME                0x1033  /* USB Product ID of Micro-CASSY Time (reserved) */
 #define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035  /* USB Product ID of Micro-CASSY Temperature */
 #define USB_DEVICE_ID_LD_MICROCASSYPH          0x1038  /* USB Product ID of Micro-CASSY pH */
+#define USB_DEVICE_ID_LD_POWERANALYSERCASSY    0x1040  /* USB Product ID of Power Analyser CASSY */
+#define USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY      0x1042  /* USB Product ID of Converter Controller CASSY */
+#define USB_DEVICE_ID_LD_MACHINETESTCASSY      0x1043  /* USB Product ID of Machine Test CASSY */
 #define USB_DEVICE_ID_LD_JWM           0x1080  /* USB Product ID of Joule and Wattmeter */
 #define USB_DEVICE_ID_LD_DMMP          0x1081  /* USB Product ID of Digital Multimeter P (reserved) */
 #define USB_DEVICE_ID_LD_UMIP          0x1090  /* USB Product ID of UMI P */
@@ -84,6 +87,9 @@ static const struct usb_device_id ld_usb_table[] = {
        { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },
        { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },
        { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) },
+       { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERANALYSERCASSY) },
+       { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY) },
+       { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETESTCASSY) },
        { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },
        { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },
        { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) },
index 968bf1e8b0fed683f76563eddbe8d841a920e395..eef4ad578b31da28641cd2fe5889827781fdc6ab 100644 (file)
@@ -2708,7 +2708,8 @@ static int musb_resume(struct device *dev)
        if ((devctl & mask) != (musb->context.devctl & mask))
                musb->port1_status = 0;
 
-       musb_start(musb);
+       musb_enable_interrupts(musb);
+       musb_platform_enable(musb);
 
        spin_lock_irqsave(&musb->lock, flags);
        error = musb_run_resume_work(musb);
index 394b4ac8616173ba0f2a60a870ae70ff98dd40d0..45ed32c2cba949dd7babb4b9e4879a89aa251027 100644 (file)
@@ -391,13 +391,7 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
                }
        }
 
-       /*
-        * The pipe must be broken if current urb->status is set, so don't
-        * start next urb.
-        * TODO: to minimize the risk of regression, only check urb->status
-        * for RX, until we have a test case to understand the behavior of TX.
-        */
-       if ((!status || !is_in) && qh && qh->is_ready) {
+       if (qh != NULL && qh->is_ready) {
                musb_dbg(musb, "... next ep%d %cX urb %p",
                    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
                musb_start_urb(musb, is_in, qh);
index da031c45395abd2576e144b409b4231972001a37..fbec863350f67cb914aef30a8036de2cf5819f25 100644 (file)
@@ -602,6 +602,9 @@ static enum usb_charger_type mxs_phy_charger_detect(struct usb_phy *phy)
        void __iomem *base = phy->io_priv;
        enum usb_charger_type chgr_type = UNKNOWN_TYPE;
 
+       if (!regmap)
+               return UNKNOWN_TYPE;
+
        if (mxs_charger_data_contact_detect(mxs_phy))
                return chgr_type;
 
index 5925d111bd4743e2378b2d91e0489d111738767d..39fa2fc1b8b767e10d37b6bde54a696d00de9f0c 100644 (file)
@@ -982,6 +982,10 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
        if ((uintptr_t)pkt->buf & (USBHS_USB_DMAC_XFER_SIZE - 1))
                goto usbhsf_pio_prepare_pop;
 
+       /* return at this time if the pipe is running */
+       if (usbhs_pipe_is_running(pipe))
+               return 0;
+
        usbhs_pipe_config_change_bfre(pipe, 1);
 
        ret = usbhsf_fifo_select(pipe, fifo, 0);
@@ -1172,6 +1176,7 @@ static int usbhsf_dma_pop_done_with_usb_dmac(struct usbhs_pkt *pkt,
        usbhsf_fifo_clear(pipe, fifo);
        pkt->actual = usbhs_dma_calc_received_size(pkt, chan, rcv_len);
 
+       usbhs_pipe_running(pipe, 0);
        usbhsf_dma_stop(pipe, fifo);
        usbhsf_dma_unmap(pkt);
        usbhsf_fifo_unselect(pipe, pipe->fifo);
index 5db8ed517e0e137de511192bb9c79e63618afe0b..2d8d9150da0cc70abd4cc440fad2618f38b558fb 100644 (file)
@@ -241,6 +241,7 @@ static void option_instat_callback(struct urb *urb);
 #define QUECTEL_PRODUCT_EC21                   0x0121
 #define QUECTEL_PRODUCT_EC25                   0x0125
 #define QUECTEL_PRODUCT_BG96                   0x0296
+#define QUECTEL_PRODUCT_EP06                   0x0306
 
 #define CMOTECH_VENDOR_ID                      0x16d8
 #define CMOTECH_PRODUCT_6001                   0x6001
@@ -689,6 +690,10 @@ static const struct option_blacklist_info yuga_clm920_nc5_blacklist = {
        .reserved = BIT(1) | BIT(4),
 };
 
+static const struct option_blacklist_info quectel_ep06_blacklist = {
+       .reserved = BIT(4) | BIT(5),
+};
+
 static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
        { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -1203,6 +1208,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
          .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+       { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06),
+         .driver_info = (kernel_ulong_t)&quectel_ep06_blacklist },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
index 49e552472c3f32ac651287f3f6e620acb7690931..dd8ef36ab10ec7d612bdb2358017f142a43906d8 100644 (file)
@@ -73,6 +73,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
                        goto err;
 
                sdev->ud.tcp_socket = socket;
+               sdev->ud.sockfd = sockfd;
 
                spin_unlock_irq(&sdev->ud.lock);
 
@@ -172,6 +173,7 @@ static void stub_shutdown_connection(struct usbip_device *ud)
        if (ud->tcp_socket) {
                sockfd_put(ud->tcp_socket);
                ud->tcp_socket = NULL;
+               ud->sockfd = -1;
        }
 
        /* 3. free used data */
@@ -266,6 +268,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev)
        sdev->ud.status         = SDEV_ST_AVAILABLE;
        spin_lock_init(&sdev->ud.lock);
        sdev->ud.tcp_socket     = NULL;
+       sdev->ud.sockfd         = -1;
 
        INIT_LIST_HEAD(&sdev->priv_init);
        INIT_LIST_HEAD(&sdev->priv_tx);
index c3e1008aa491ee45071adab6215f7440aa6dfa13..20e3d4609583848f8a14271d5c4bf3ffb8776123 100644 (file)
@@ -984,6 +984,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
        if (vdev->ud.tcp_socket) {
                sockfd_put(vdev->ud.tcp_socket);
                vdev->ud.tcp_socket = NULL;
+               vdev->ud.sockfd = -1;
        }
        pr_info("release socket\n");
 
@@ -1030,6 +1031,7 @@ static void vhci_device_reset(struct usbip_device *ud)
        if (ud->tcp_socket) {
                sockfd_put(ud->tcp_socket);
                ud->tcp_socket = NULL;
+               ud->sockfd = -1;
        }
        ud->status = VDEV_ST_NULL;
 
index aff773bcebdb59f14a88a3c92b779f5c03de3ba5..37460cd6cabb806ecdf536f5a44019ff845164b9 100644 (file)
@@ -226,6 +226,7 @@ config ZIIRAVE_WATCHDOG
 config RAVE_SP_WATCHDOG
        tristate "RAVE SP Watchdog timer"
        depends on RAVE_SP_CORE
+       depends on NVMEM || !NVMEM
        select WATCHDOG_CORE
        help
          Support for the watchdog on RAVE SP device.
@@ -903,6 +904,7 @@ config F71808E_WDT
 config SP5100_TCO
        tristate "AMD/ATI SP5100 TCO Timer/Watchdog"
        depends on X86 && PCI
+       select WATCHDOG_CORE
        ---help---
          Hardware watchdog driver for the AMD/ATI SP5100 chipset. The TCO
          (Total Cost of Ownership) timer is a watchdog timer that will reboot
@@ -1008,6 +1010,7 @@ config WAFER_WDT
 config I6300ESB_WDT
        tristate "Intel 6300ESB Timer/Watchdog"
        depends on PCI
+       select WATCHDOG_CORE
        ---help---
          Hardware driver for the watchdog timer built into the Intel
          6300ESB controller hub.
@@ -1837,6 +1840,7 @@ config WATCHDOG_SUN4V
 config XEN_WDT
        tristate "Xen Watchdog support"
        depends on XEN
+       select WATCHDOG_CORE
        help
          Say Y here to support the hypervisor watchdog capability provided
          by Xen 4.0 and newer.  The watchdog timeout period is normally one
index 1ab4bd11f5f3f01f34b1a0055f836c9a0ce755ba..762378f1811cc9069dc6171edb55aaa3610b82fa 100644 (file)
@@ -755,8 +755,8 @@ out:
        mutex_unlock(&irq_mapping_update_lock);
        return irq;
 error_irq:
-       for (; i >= 0; i--)
-               __unbind_from_irq(irq + i);
+       while (nvec--)
+               __unbind_from_irq(irq + nvec);
        mutex_unlock(&irq_mapping_update_lock);
        return ret;
 }
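
The events_base.c error path above walks backwards over the vectors instead of reusing a loop index that may no longer point at the last successful one. A stripped-down sketch of the unwind-what-you-set-up idiom (this variant tears down only fully initialized entries, slightly stricter than the patch, where __unbind_from_irq() tolerates a partially bound vector):

#include <stdio.h>

#define NVEC 4

static int setup_one(int i)
{
	return (i == 2) ? -1 : 0;	/* pretend the third vector fails */
}

static void teardown_one(int i)
{
	printf("tearing down vector %d\n", i);
}

int main(void)
{
	int i;

	for (i = 0; i < NVEC; i++) {
		if (setup_one(i) < 0)
			goto error;
	}
	return 0;

error:
	while (i--)			/* unwind exactly the entries in [0, i) */
		teardown_one(i);
	return 1;
}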
index 156e5aea36db964dfaac58784044ca1cd0cf57c7..b1092fbefa6309d2535b17b78979b6f3fa9b2b42 100644 (file)
@@ -416,7 +416,7 @@ static int pvcalls_back_connect(struct xenbus_device *dev,
                                        sock);
        if (!map) {
                ret = -EFAULT;
-               sock_release(map->sock);
+               sock_release(sock);
        }
 
 out:
index aedbee3b28386a1cc1625334fe431875bd787277..2f11ca72a281410122ef0b4f0dc4f173ef7c6697 100644 (file)
@@ -73,20 +73,25 @@ struct sock_mapping {
                        wait_queue_head_t inflight_conn_req;
                } active;
                struct {
-               /* Socket status */
+               /*
+                * Socket status; it needs to be 64-bit aligned due to the
+                * test_and_* functions, which have this requirement on arm64.
+                */
 #define PVCALLS_STATUS_UNINITALIZED  0
 #define PVCALLS_STATUS_BIND          1
 #define PVCALLS_STATUS_LISTEN        2
-                       uint8_t status;
+                       uint8_t status __attribute__((aligned(8)));
                /*
                 * Internal state-machine flags.
                 * Only one accept operation can be inflight for a socket.
                 * Only one poll operation can be inflight for a given socket.
+                * flags needs to be 64-bit aligned due to the test_and_*
+                * functions which have this requirement on arm64.
                 */
 #define PVCALLS_FLAG_ACCEPT_INFLIGHT 0
 #define PVCALLS_FLAG_POLL_INFLIGHT   1
 #define PVCALLS_FLAG_POLL_RET        2
-                       uint8_t flags;
+                       uint8_t flags __attribute__((aligned(8)));
                        uint32_t inflight_req_id;
                        struct sock_mapping *accept_map;
                        wait_queue_head_t inflight_accept_req;
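
The pvcalls-front change forces the byte-sized status and flags fields onto 8-byte boundaries because the test_and_* bit operations act on full unsigned long words, which must be naturally aligned on arm64. A quick way to see what the attribute does to the layout (standalone C11, field names reused only for illustration):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct mapping {
	uint8_t status __attribute__((aligned(8)));
	uint8_t flags  __attribute__((aligned(8)));
};

int main(void)
{
	printf("offsetof(status) = %zu\n", offsetof(struct mapping, status));
	printf("offsetof(flags)  = %zu\n", offsetof(struct mapping, flags));
	printf("sizeof(struct)   = %zu\n", sizeof(struct mapping));
	printf("alignof(struct)  = %zu\n", _Alignof(struct mapping));
	return 0;
}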
index bf13d1ec51f3bee0c92e9f9c968c21c13bd72173..04e7b3b29bac898ca66baac7aa96eadba2e3dc35 100644 (file)
@@ -284,6 +284,10 @@ static int tmem_frontswap_store(unsigned type, pgoff_t offset,
        int pool = tmem_frontswap_poolid;
        int ret;
 
+       /* THP isn't supported */
+       if (PageTransHuge(page))
+               return -1;
+
        if (pool < 0)
                return -1;
        if (ind64 != ind)
index 4a181fcb51751dc2cbc8fda10930a47bc883380e..fe09ef9c21f349a55d31261b5dd416b1bde2a3aa 100644 (file)
@@ -1058,6 +1058,27 @@ retry:
        return 0;
 }
 
+static struct gendisk *bdev_get_gendisk(struct block_device *bdev, int *partno)
+{
+       struct gendisk *disk = get_gendisk(bdev->bd_dev, partno);
+
+       if (!disk)
+               return NULL;
+       /*
+        * Now that we hold a gendisk reference, make sure the bdev we looked
+        * up is not stale. If it is, the device was removed and re-created
+        * before we looked up the gendisk, and we fail the open in that case.
+        * Associating an unhashed bdev with a newly created gendisk could
+        * lead to two bdevs (and thus two independent caches) being
+        * associated with one device, which is bad.
+        */
+       if (inode_unhashed(bdev->bd_inode)) {
+               put_disk_and_module(disk);
+               return NULL;
+       }
+       return disk;
+}
+
 /**
  * bd_start_claiming - start claiming a block device
  * @bdev: block device of interest
@@ -1094,7 +1115,7 @@ static struct block_device *bd_start_claiming(struct block_device *bdev,
         * @bdev might not have been initialized properly yet, look up
         * and grab the outer block device the hard way.
         */
-       disk = get_gendisk(bdev->bd_dev, &partno);
+       disk = bdev_get_gendisk(bdev, &partno);
        if (!disk)
                return ERR_PTR(-ENXIO);
 
@@ -1111,8 +1132,7 @@ static struct block_device *bd_start_claiming(struct block_device *bdev,
        else
                whole = bdgrab(bdev);
 
-       module_put(disk->fops->owner);
-       put_disk(disk);
+       put_disk_and_module(disk);
        if (!whole)
                return ERR_PTR(-ENOMEM);
 
@@ -1407,10 +1427,10 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
 static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 {
        struct gendisk *disk;
-       struct module *owner;
        int ret;
        int partno;
        int perm = 0;
+       bool first_open = false;
 
        if (mode & FMODE_READ)
                perm |= MAY_READ;
@@ -1430,14 +1450,14 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
  restart:
 
        ret = -ENXIO;
-       disk = get_gendisk(bdev->bd_dev, &partno);
+       disk = bdev_get_gendisk(bdev, &partno);
        if (!disk)
                goto out;
-       owner = disk->fops->owner;
 
        disk_block_events(disk);
        mutex_lock_nested(&bdev->bd_mutex, for_part);
        if (!bdev->bd_openers) {
+               first_open = true;
                bdev->bd_disk = disk;
                bdev->bd_queue = disk->queue;
                bdev->bd_contains = bdev;
@@ -1463,8 +1483,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                                        bdev->bd_queue = NULL;
                                        mutex_unlock(&bdev->bd_mutex);
                                        disk_unblock_events(disk);
-                                       put_disk(disk);
-                                       module_put(owner);
+                                       put_disk_and_module(disk);
                                        goto restart;
                                }
                        }
@@ -1524,15 +1543,15 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                        if (ret)
                                goto out_unlock_bdev;
                }
-               /* only one opener holds refs to the module and disk */
-               put_disk(disk);
-               module_put(owner);
        }
        bdev->bd_openers++;
        if (for_part)
                bdev->bd_part_count++;
        mutex_unlock(&bdev->bd_mutex);
        disk_unblock_events(disk);
+       /* only one opener holds refs to the module and disk */
+       if (!first_open)
+               put_disk_and_module(disk);
        return 0;
 
  out_clear:
@@ -1546,8 +1565,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
  out_unlock_bdev:
        mutex_unlock(&bdev->bd_mutex);
        disk_unblock_events(disk);
-       put_disk(disk);
-       module_put(owner);
+       put_disk_and_module(disk);
  out:
        bdput(bdev);
 
@@ -1770,8 +1788,6 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
                        disk->fops->release(disk, mode);
        }
        if (!bdev->bd_openers) {
-               struct module *owner = disk->fops->owner;
-
                disk_put_part(bdev->bd_part);
                bdev->bd_part = NULL;
                bdev->bd_disk = NULL;
@@ -1779,8 +1795,7 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
                        victim = bdev->bd_contains;
                bdev->bd_contains = NULL;
 
-               put_disk(disk);
-               module_put(owner);
+               put_disk_and_module(disk);
        }
        mutex_unlock(&bdev->bd_mutex);
        bdput(bdev);
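
The block_dev.c rework above keeps the gendisk/module reference pair only for the first opener and drops it through a single put_disk_and_module() helper on every exit path. A toy model of the "only the first opener holds the lookup reference" rule (plain counters, no locking, invented names):

#include <stdbool.h>
#include <stdio.h>

struct disk {
	int refcount;
};

static void get_disk_ref(struct disk *d) { d->refcount++; }
static void put_disk_ref(struct disk *d) { d->refcount--; }

struct bdev {
	int openers;
	struct disk *disk;
};

static void bdev_open(struct bdev *b, struct disk *d)
{
	bool first_open = false;

	get_disk_ref(d);		/* reference taken by the lookup */
	if (!b->openers) {
		first_open = true;
		b->disk = d;
	}
	b->openers++;
	if (!first_open)
		put_disk_ref(d);	/* later openers give the extra ref back */
}

static void bdev_close(struct bdev *b)
{
	if (!--b->openers)
		put_disk_ref(b->disk);	/* last close drops the held reference */
}

int main(void)
{
	struct disk d = { 0 };
	struct bdev b = { 0, NULL };

	bdev_open(&b, &d);
	bdev_open(&b, &d);
	printf("refcount with two openers: %d\n", d.refcount);
	bdev_close(&b);
	bdev_close(&b);
	printf("refcount after both close: %d\n", d.refcount);
	return 0;
}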
index 6582c4507e6c9d1fdf2876c13c424f0c5830495c..0e5bd3e3344e7983e6bdf38dba1adfba17500eba 100644 (file)
@@ -3964,6 +3964,32 @@ void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
                ceph_check_caps(ci, 0, NULL);
 }
 
+/*
+ * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps. If it
+ * looks like the link count will hit 0, drop any other caps (other
+ * than PIN) we don't specifically want (due to the file still being
+ * open).
+ */
+int ceph_drop_caps_for_unlink(struct inode *inode)
+{
+       struct ceph_inode_info *ci = ceph_inode(inode);
+       int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
+
+       spin_lock(&ci->i_ceph_lock);
+       if (inode->i_nlink == 1) {
+               drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
+
+               ci->i_ceph_flags |= CEPH_I_NODELAY;
+               if (__ceph_caps_dirty(ci)) {
+                       struct ceph_mds_client *mdsc =
+                               ceph_inode_to_client(inode)->mdsc;
+                       __cap_delay_requeue_front(mdsc, ci);
+               }
+       }
+       spin_unlock(&ci->i_ceph_lock);
+       return drop;
+}
+
 /*
  * Helpers for embedding cap and dentry lease releases into mds
  * requests.
index 0c4346806e17a6f9a70e35fdafab13440c426d65..f1d9c6cc0491d7f9f33e4073db9785afac669cc8 100644 (file)
@@ -1002,26 +1002,6 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir,
        return err;
 }
 
-/*
- * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
- * looks like the link count will hit 0, drop any other caps (other
- * than PIN) we don't specifically want (due to the file still being
- * open).
- */
-static int drop_caps_for_unlink(struct inode *inode)
-{
-       struct ceph_inode_info *ci = ceph_inode(inode);
-       int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
-
-       spin_lock(&ci->i_ceph_lock);
-       if (inode->i_nlink == 1) {
-               drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
-               ci->i_ceph_flags |= CEPH_I_NODELAY;
-       }
-       spin_unlock(&ci->i_ceph_lock);
-       return drop;
-}
-
 /*
  * rmdir and unlink are differ only by the metadata op code
  */
@@ -1056,7 +1036,7 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry)
        set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
-       req->r_inode_drop = drop_caps_for_unlink(inode);
+       req->r_inode_drop = ceph_drop_caps_for_unlink(inode);
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                d_delete(dentry);
@@ -1104,8 +1084,10 @@ static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        /* release LINK_RDCACHE on source inode (mds will lock it) */
        req->r_old_inode_drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
-       if (d_really_is_positive(new_dentry))
-               req->r_inode_drop = drop_caps_for_unlink(d_inode(new_dentry));
+       if (d_really_is_positive(new_dentry)) {
+               req->r_inode_drop =
+                       ceph_drop_caps_for_unlink(d_inode(new_dentry));
+       }
        err = ceph_mdsc_do_request(mdsc, old_dir, req);
        if (!err && !req->r_reply_info.head->is_dentry) {
                /*
index a62d2a9841dc2b0487181155373c03eac60f8a02..fb2bc9c15a2378ceb5712269ab93e677c360f5d0 100644 (file)
@@ -225,6 +225,7 @@ static int parse_fsopt_token(char *c, void *private)
                        return -ENOMEM;
                break;
        case Opt_mds_namespace:
+               kfree(fsopt->mds_namespace);
                fsopt->mds_namespace = kstrndup(argstr[0].from,
                                                argstr[0].to-argstr[0].from,
                                                GFP_KERNEL);
@@ -232,6 +233,7 @@ static int parse_fsopt_token(char *c, void *private)
                        return -ENOMEM;
                break;
        case Opt_fscache_uniq:
+               kfree(fsopt->fscache_uniq);
                fsopt->fscache_uniq = kstrndup(argstr[0].from,
                                               argstr[0].to-argstr[0].from,
                                               GFP_KERNEL);
@@ -711,14 +713,17 @@ static int __init init_caches(void)
                goto bad_dentry;
 
        ceph_file_cachep = KMEM_CACHE(ceph_file_info, SLAB_MEM_SPREAD);
-
        if (!ceph_file_cachep)
                goto bad_file;
 
-       if ((error = ceph_fscache_register()))
-               goto bad_file;
+       error = ceph_fscache_register();
+       if (error)
+               goto bad_fscache;
 
        return 0;
+
+bad_fscache:
+       kmem_cache_destroy(ceph_file_cachep);
 bad_file:
        kmem_cache_destroy(ceph_dentry_cachep);
 bad_dentry:
@@ -836,7 +841,6 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
        int err;
        unsigned long started = jiffies;  /* note the start time */
        struct dentry *root;
-       int first = 0;   /* first vfsmount for this super_block */
 
        dout("mount start %p\n", fsc);
        mutex_lock(&fsc->client->mount_mutex);
@@ -861,17 +865,17 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
                        path = fsc->mount_options->server_path + 1;
                        dout("mount opening path %s\n", path);
                }
+
+               err = ceph_fs_debugfs_init(fsc);
+               if (err < 0)
+                       goto out;
+
                root = open_root_dentry(fsc, path, started);
                if (IS_ERR(root)) {
                        err = PTR_ERR(root);
                        goto out;
                }
                fsc->sb->s_root = dget(root);
-               first = 1;
-
-               err = ceph_fs_debugfs_init(fsc);
-               if (err < 0)
-                       goto fail;
        } else {
                root = dget(fsc->sb->s_root);
        }
@@ -881,11 +885,6 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
        mutex_unlock(&fsc->client->mount_mutex);
        return root;
 
-fail:
-       if (first) {
-               dput(fsc->sb->s_root);
-               fsc->sb->s_root = NULL;
-       }
 out:
        mutex_unlock(&fsc->client->mount_mutex);
        return ERR_PTR(err);
index 21b2e5b004eb72ba10057df37c6907766aed5993..1c2086e0fec27a60577c7a676b00fac67fb11102 100644 (file)
@@ -987,7 +987,7 @@ extern void ceph_check_caps(struct ceph_inode_info *ci, int flags,
                            struct ceph_mds_session *session);
 extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc);
 extern void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc);
-
+extern int  ceph_drop_caps_for_unlink(struct inode *inode);
 extern int ceph_encode_inode_release(void **p, struct inode *inode,
                                     int mds, int drop, int unless, int force);
 extern int ceph_encode_dentry_release(void **p, struct dentry *dn,
index a0ca9e48e9937da671739e24d6d7dc2a4867d7ac..1357ef563893a1a8f0d2967eeb4b6e7b0ee6444a 100644 (file)
@@ -1274,8 +1274,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
         */
        if (dio->is_async && iov_iter_rw(iter) == WRITE) {
                retval = 0;
-               if ((iocb->ki_filp->f_flags & O_DSYNC) ||
-                   IS_SYNC(iocb->ki_filp->f_mapping->host))
+               if (iocb->ki_flags & IOCB_DSYNC)
                        retval = dio_set_defer_completion(dio);
                else if (!dio->inode->i_sb->s_dio_done_wq) {
                        /*
index 5f22e74bbadea0822e3f6f5378b732ab3c1e333d..8e568428c88be059299b5ada7eb48e7d289a04fe 100644 (file)
@@ -8,6 +8,7 @@
  */
 
 #include <linux/efi.h>
+#include <linux/delay.h>
 #include <linux/fs.h>
 #include <linux/slab.h>
 #include <linux/mount.h>
@@ -74,6 +75,11 @@ static ssize_t efivarfs_file_read(struct file *file, char __user *userbuf,
        ssize_t size = 0;
        int err;
 
+       while (!__ratelimit(&file->f_cred->user->ratelimit)) {
+               if (!msleep_interruptible(50))
+                       return -EINTR;
+       }
+
        err = efivar_entry_size(var, &datasize);
 
        /*
index 2435af56b87e4472a725f7ef1c6e3b252dd82ae9..a50d7813e3ea8dd03dd7c5e02b36574be815ad8b 100644 (file)
@@ -572,7 +572,7 @@ out:
 }
 
 static bool
-validate_bitmap_values(unsigned long mask)
+validate_bitmap_values(unsigned int mask)
 {
        return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
 }
@@ -596,17 +596,15 @@ __be32 nfs4_callback_recallany(void *argp, void *resp,
                goto out;
 
        status = cpu_to_be32(NFS4_OK);
-       if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *)
-                    &args->craa_type_mask))
+       if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_RDATA_DLG))
                flags = FMODE_READ;
-       if (test_bit(RCA4_TYPE_MASK_WDATA_DLG, (const unsigned long *)
-                    &args->craa_type_mask))
+       if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_WDATA_DLG))
                flags |= FMODE_WRITE;
-       if (test_bit(RCA4_TYPE_MASK_FILE_LAYOUT, (const unsigned long *)
-                    &args->craa_type_mask))
-               pnfs_recall_all_layouts(cps->clp);
        if (flags)
                nfs_expire_unused_delegation_types(cps->clp, flags);
+
+       if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_FILE_LAYOUT))
+               pnfs_recall_all_layouts(cps->clp);
 out:
        dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
        return status;
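
The callback_proc.c hunk stops feeding a 32-bit mask to test_bit(), which expects an unsigned long bitmap and so misreads the field on 64-bit big-endian machines, and checks plain BIT() masks instead. The same check in miniature (mask bit numbers invented for the example):

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

#define MASK_RDATA_DLG    0
#define MASK_WDATA_DLG    1
#define MASK_FILE_LAYOUT 12

int main(void)
{
	uint32_t craa_type_mask = BIT(MASK_WDATA_DLG) | BIT(MASK_FILE_LAYOUT);

	if (craa_type_mask & BIT(MASK_RDATA_DLG))
		printf("recall read delegations\n");
	if (craa_type_mask & BIT(MASK_WDATA_DLG))
		printf("recall write delegations\n");
	if (craa_type_mask & BIT(MASK_FILE_LAYOUT))
		printf("recall file layouts\n");
	return 0;
}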
index 49f848fd1f04779108d86a5eee618e21d11304e6..7327930ad970ab98338e8f21a4fb363559205522 100644 (file)
@@ -873,7 +873,7 @@ static void nfs3_nlm_release_call(void *data)
        }
 }
 
-const struct nlmclnt_operations nlmclnt_fl_close_lock_ops = {
+static const struct nlmclnt_operations nlmclnt_fl_close_lock_ops = {
        .nlmclnt_alloc_call = nfs3_nlm_alloc_call,
        .nlmclnt_unlock_prepare = nfs3_nlm_unlock_prepare,
        .nlmclnt_release_call = nfs3_nlm_release_call,
index 04612c24d39431b15ff58027a7ebfe7b5450d774..979631411a0e4ea5a7eedc3c668d36a57c20ced6 100644 (file)
@@ -868,8 +868,10 @@ static int nfs4_set_client(struct nfs_server *server,
        if (IS_ERR(clp))
                return PTR_ERR(clp);
 
-       if (server->nfs_client == clp)
+       if (server->nfs_client == clp) {
+               nfs_put_client(clp);
                return -ELOOP;
+       }
 
        /*
         * Query for the lease time on clientid setup or renewal
@@ -1244,11 +1246,11 @@ int nfs4_update_server(struct nfs_server *server, const char *hostname,
                                clp->cl_proto, clnt->cl_timeout,
                                clp->cl_minorversion, net);
        clear_bit(NFS_MIG_TSM_POSSIBLE, &server->mig_status);
-       nfs_put_client(clp);
        if (error != 0) {
                nfs_server_insert_lists(server);
                return error;
        }
+       nfs_put_client(clp);
 
        if (server->nfs_client->cl_hostname == NULL)
                server->nfs_client->cl_hostname = kstrdup(hostname, GFP_KERNEL);
index 9990957264e3cc9460f287e7b1ef4f1dc94a1ea4..76bf9cc620742c1b7cd1622ff1f859fb7b606d51 100644 (file)
@@ -118,13 +118,22 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
                err |= __put_user(kinfo->si_trapno, &uinfo->ssi_trapno);
 #endif
 #ifdef BUS_MCEERR_AO
-               /* 
+               /*
+                * Other callers might not initialize the si_lsb field,
+                * so check explicitly for the right codes here.
+                */
+               if (kinfo->si_signo == SIGBUS &&
+                    kinfo->si_code == BUS_MCEERR_AO)
+                       err |= __put_user((short) kinfo->si_addr_lsb,
+                                         &uinfo->ssi_addr_lsb);
+#endif
+#ifdef BUS_MCEERR_AR
+               /*
                 * Other callers might not initialize the si_lsb field,
                 * so check explicitly for the right codes here.
                 */
                if (kinfo->si_signo == SIGBUS &&
-                   (kinfo->si_code == BUS_MCEERR_AR ||
-                    kinfo->si_code == BUS_MCEERR_AO))
+                   kinfo->si_code == BUS_MCEERR_AR)
                        err |= __put_user((short) kinfo->si_addr_lsb,
                                          &uinfo->ssi_addr_lsb);
 #endif
index fd975524f4603387e28078d13b175b5df2443021..05c66e05ae20f0bbe08c713de9feddf13c813982 100644 (file)
@@ -767,7 +767,7 @@ int
 xfs_scrub_agfl(
        struct xfs_scrub_context        *sc)
 {
-       struct xfs_scrub_agfl_info      sai = { 0 };
+       struct xfs_scrub_agfl_info      sai;
        struct xfs_agf                  *agf;
        xfs_agnumber_t                  agno;
        unsigned int                    agflcount;
@@ -795,6 +795,7 @@ xfs_scrub_agfl(
                xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
                goto out;
        }
+       memset(&sai, 0, sizeof(sai));
        sai.sz_entries = agflcount;
        sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount, KM_NOFS);
        if (!sai.entries) {
index 3a55d6fc271b1e6d50aa6ce96c9e84b7c5886e43..7a39f40645f7dddbd41740bce4404dbf36fd635b 100644 (file)
@@ -23,6 +23,7 @@
 #include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
 #include "xfs_bit.h"
+#include "xfs_shared.h"
 #include "xfs_mount.h"
 #include "xfs_defer.h"
 #include "xfs_trans.h"
@@ -456,10 +457,12 @@ xfs_cui_recover(
         * transaction.  Normally, any work that needs to be deferred
         * gets attached to the same defer_ops that scheduled the
         * refcount update.  However, we're in log recovery here, so we
-        * we create our own defer_ops and use that to finish up any
-        * work that doesn't fit.
+        * use the passed-in defer_ops to finish up any work that
+        * doesn't fit.  We need to reserve enough blocks to handle a
+        * full btree split on either end of the refcount range.
         */
-       error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
+       error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
+                       mp->m_refc_maxlevels * 2, 0, XFS_TRANS_RESERVE, &tp);
        if (error)
                return error;
        cudp = xfs_trans_get_cud(tp, cuip);
index f3b139c9aa1674a3f652ec622286e9aba616d5c3..49d3124863a81f719efd1774b0b6ac7f651b0a26 100644 (file)
@@ -23,6 +23,7 @@
 #include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
 #include "xfs_bit.h"
+#include "xfs_shared.h"
 #include "xfs_mount.h"
 #include "xfs_defer.h"
 #include "xfs_trans.h"
@@ -470,7 +471,8 @@ xfs_rui_recover(
                }
        }
 
-       error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
+       error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
+                       mp->m_rmap_maxlevels, 0, XFS_TRANS_RESERVE, &tp);
        if (error)
                return error;
        rudp = xfs_trans_get_rud(tp, ruip);
index 7aba628dc5279858a7608a78839b0e32753bbee6..93588ea3d3d2ca2d664edb4e1b853f5d0dad6a73 100644 (file)
@@ -250,6 +250,7 @@ xfs_parseargs(
                                return -EINVAL;
                        break;
                case Opt_logdev:
+                       kfree(mp->m_logname);
                        mp->m_logname = match_strdup(args);
                        if (!mp->m_logname)
                                return -ENOMEM;
@@ -258,6 +259,7 @@ xfs_parseargs(
                        xfs_warn(mp, "%s option not allowed on this system", p);
                        return -EINVAL;
                case Opt_rtdev:
+                       kfree(mp->m_rtname);
                        mp->m_rtname = match_strdup(args);
                        if (!mp->m_rtname)
                                return -ENOMEM;
index 963b755d19b0329e9e15b238db5f62074d615c51..a7613e1b0c87a4a38635b6fe99333afde18238b6 100644 (file)
@@ -52,6 +52,7 @@ struct bug_entry {
 #ifndef HAVE_ARCH_BUG
 #define BUG() do { \
        printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
+       barrier_before_unreachable(); \
        panic("BUG!"); \
 } while (0)
 #endif
index 1c27526c499eed6f50b46771aaea27282567deb9..cf13842a6dbd1fa8808f4e9c0d49a5f9b012c931 100644 (file)
@@ -134,6 +134,15 @@ struct drm_crtc_commit {
         * &drm_pending_vblank_event pointer to clean up private events.
         */
        struct drm_pending_vblank_event *event;
+
+       /**
+        * @abort_completion:
+        *
+        * A flag that's set after drm_atomic_helper_setup_commit takes a second
+        * reference for the completion of $drm_crtc_state.event. It's used by
+        * the free code to remove the second reference if commit fails.
+        */
+       bool abort_completion;
 };
 
 struct __drm_planes_state {
index 76e237bd989bef2f81142ef143960d9720651827..6914633037a5359f1355a15e94163dbde7d4ebe2 100644 (file)
@@ -77,5 +77,6 @@ void drm_kms_helper_hotplug_event(struct drm_device *dev);
 
 void drm_kms_helper_poll_disable(struct drm_device *dev);
 void drm_kms_helper_poll_enable(struct drm_device *dev);
+bool drm_kms_helper_is_poll_worker(void);
 
 #endif
index d32b688eb346c9252eb66d73be49fdd477e290de..d23dcdd1bd95a20ba98e4ae65b57d6f225cad7c3 100644 (file)
@@ -56,6 +56,7 @@ struct drm_printer;
 #define DRIVER_ATOMIC                  0x10000
 #define DRIVER_KMS_LEGACY_CONTEXT      0x20000
 #define DRIVER_SYNCOBJ                  0x40000
+#define DRIVER_PREFER_XBGR_30BPP        0x80000
 
 /**
  * struct drm_driver - DRM driver structure
index d0eb659fa733eb91b57a135932f45b1eea8d9975..ce547a25e8aed5d61efd5c0ce9b5c450af4fabcc 100644 (file)
@@ -511,6 +511,7 @@ void zero_fill_bio(struct bio *bio);
 extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
 extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
 extern unsigned int bvec_nr_vecs(unsigned short idx);
+extern const char *bio_devname(struct bio *bio, char *buffer);
 
 #define bio_set_dev(bio, bdev)                         \
 do {                                           \
@@ -529,9 +530,6 @@ do {                                                \
 #define bio_dev(bio) \
        disk_devt((bio)->bi_disk)
 
-#define bio_devname(bio, buf) \
-       __bdevname(bio_dev(bio), (buf))
-
 #ifdef CONFIG_BLK_CGROUP
 int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
 void bio_disassociate_task(struct bio *bio);
index d02a4df3f4737a2ae98cce7928c3dcf4c4167164..d3f264a5b04d9c999a94273c18272416576b17c1 100644 (file)
@@ -27,3 +27,8 @@
 #if __has_feature(address_sanitizer)
 #define __SANITIZE_ADDRESS__
 #endif
+
+/* Clang doesn't have a way to turn off retpolines per-function yet. */
+#ifdef __noretpoline
+#undef __noretpoline
+#endif
index 73bc63e0a1c4b664f233f176c7694fa8e54e34aa..e2c7f4369effdbcf9cb46b1904c3cbe84debd2ca 100644 (file)
 #define __weak         __attribute__((weak))
 #define __alias(symbol)        __attribute__((alias(#symbol)))
 
+#ifdef RETPOLINE
+#define __noretpoline __attribute__((indirect_branch("keep")))
+#endif
+
 /*
  * it doesn't make sense on ARM (currently the only user of __naked)
  * to trace naked functions because then mcount is called without
 #endif
 #endif
 
+/*
+ * calling noreturn functions, __builtin_unreachable() and __builtin_trap()
+ * confuse the stack allocation in gcc, leading to overly large stack
+ * frames, see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82365
+ *
+ * Adding an empty inline assembly before it works around the problem
+ */
+#define barrier_before_unreachable() asm volatile("")
+
 /*
  * Mark a position in code as unreachable.  This can be used to
  * suppress control flow warnings after asm blocks that transfer
  * unreleased.  Really, we need to have autoconf for the kernel.
  */
 #define unreachable() \
-       do { annotate_unreachable(); __builtin_unreachable(); } while (0)
+       do {                                    \
+               annotate_unreachable();         \
+               barrier_before_unreachable();   \
+               __builtin_unreachable();        \
+       } while (0)
 
 /* Mark a function definition as prohibited from being cloned. */
 #define __noclone      __attribute__((__noclone__, __optimize__("no-tracer")))
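
The compiler-gcc.h hunks insert an empty asm statement before __builtin_unreachable() because GCC otherwise over-allocates stack for the surrounding function (gcc PR82365). A freestanding sketch of how the two macros compose (die() is an invented helper; the kernel wires this into BUG() and unreachable()):

#include <stdio.h>
#include <stdlib.h>

/* Empty asm acts as an optimization barrier: the PR82365 workaround. */
#define barrier_before_unreachable() asm volatile("")

#define unreachable()				\
	do {					\
		barrier_before_unreachable();	\
		__builtin_unreachable();	\
	} while (0)

static void die(const char *msg)
{
	fprintf(stderr, "fatal: %s\n", msg);
	exit(1);
	unreachable();	/* tells the compiler control never gets here */
}

int main(void)
{
	die("example");
	return 0;
}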
index e835fc0423eccf7f66cca38c9dcbfa4f6d065ce3..ab4711c63601495347eee07382a692a4b1cee3ab 100644 (file)
@@ -86,6 +86,11 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 # define barrier_data(ptr) barrier()
 #endif
 
+/* workaround for GCC PR82365 if needed */
+#ifndef barrier_before_unreachable
+# define barrier_before_unreachable() do { } while (0)
+#endif
+
 /* Unreachable code */
 #ifdef CONFIG_STACK_VALIDATION
 /*
index 5e3531027b5113cc7a9fde0ec0ffc0cdf3389b97..c826b0b5232aff63877bb441fb62e43128b4e976 100644 (file)
@@ -198,6 +198,7 @@ struct gendisk {
        void *private_data;
 
        int flags;
+       struct rw_semaphore lookup_sem;
        struct kobject *slave_dir;
 
        struct timer_rand_state *random;
@@ -600,8 +601,9 @@ extern void delete_partition(struct gendisk *, int);
 extern void printk_all_partitions(void);
 
 extern struct gendisk *__alloc_disk_node(int minors, int node_id);
-extern struct kobject *get_disk(struct gendisk *disk);
+extern struct kobject *get_disk_and_module(struct gendisk *disk);
 extern void put_disk(struct gendisk *disk);
+extern void put_disk_and_module(struct gendisk *disk);
 extern void blk_register_region(dev_t devt, unsigned long range,
                        struct module *module,
                        struct kobject *(*probe)(dev_t, int *, void *),
index 506a9815113159651da4b98ec5d72700dbe6271f..bc27cf03c41ea5a4b19d87bb82da0f3d36e9390e 100644 (file)
@@ -6,10 +6,10 @@
 #include <linux/types.h>
 
 /* Built-in __init functions needn't be compiled with retpoline */
-#if defined(RETPOLINE) && !defined(MODULE)
-#define __noretpoline __attribute__((indirect_branch("keep")))
+#if defined(__noretpoline) && !defined(MODULE)
+#define __noinitretpoline __noretpoline
 #else
-#define __noretpoline
+#define __noinitretpoline
 #endif
 
 /* These macros are used to mark some functions or 
@@ -47,7 +47,7 @@
 
 /* These are for everybody (although not all archs will actually
    discard it in modules) */
-#define __init         __section(.init.text) __cold  __latent_entropy __noretpoline
+#define __init         __section(.init.text) __cold  __latent_entropy __noinitretpoline
 #define __initdata     __section(.init.data)
 #define __initconst    __section(.init.rodata)
 #define __exitdata     __section(.exit.data)
index b6a29c126cc49285f62995b59dc517f17cdc3af6..2168cc6b8b301dffb8a8333fc699a516c6ab51c7 100644 (file)
@@ -151,6 +151,7 @@ extern struct jump_entry __start___jump_table[];
 extern struct jump_entry __stop___jump_table[];
 
 extern void jump_label_init(void);
+extern void jump_label_invalidate_init(void);
 extern void jump_label_lock(void);
 extern void jump_label_unlock(void);
 extern void arch_jump_label_transform(struct jump_entry *entry,
@@ -198,6 +199,8 @@ static __always_inline void jump_label_init(void)
        static_key_initialized = true;
 }
 
+static inline void jump_label_invalidate_init(void) {}
+
 static __always_inline bool static_key_false(struct static_key *key)
 {
        if (unlikely(static_key_count(key) > 0))
index fec5076eda91d2fd6e401b43463cb43855aa6c57..dcde9471897d5c5119145fd01dddd8e7f118f13c 100644 (file)
@@ -4,6 +4,12 @@
 
 #include <generated/autoconf.h>
 
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define __BIG_ENDIAN 4321
+#else
+#define __LITTLE_ENDIAN 1234
+#endif
+
 #define __ARG_PLACEHOLDER_1 0,
 #define __take_second_arg(__ignored, val, ...) val
 
@@ -64,4 +70,7 @@
  */
 #define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option))
 
+/* Make sure we always have all types and struct attributes defined. */
+#include <linux/compiler_types.h>
+
 #endif /* __LINUX_KCONFIG_H */
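
With kconfig.h now defining exactly one of __BIG_ENDIAN / __LITTLE_ENDIAN from CONFIG_CPU_BIG_ENDIAN (and always pulling in compiler_types.h), generic code can do compile-time byte-order dispatch without including asm/byteorder.h. A hedged sketch; cpu_to_wire32() is a hypothetical helper, not from the patch:

#include <linux/kconfig.h>
#include <linux/types.h>

/* return v in big-endian ("wire") order regardless of the build's endianness */
static inline u32 cpu_to_wire32(u32 v)
{
#ifdef __BIG_ENDIAN
	return v;			/* CONFIG_CPU_BIG_ENDIAN builds: already ordered */
#else
	return __builtin_bswap32(v);	/* little-endian builds: byte-swap */
#endif
}
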
index ce51455e2adf631229d21b43e6afb76b2496790c..3fd291503576f1407e633851bec2b785152e6576 100644 (file)
@@ -472,6 +472,7 @@ extern bool parse_option_str(const char *str, const char *option);
 extern char *next_arg(char *args, char **param, char **val);
 
 extern int core_kernel_text(unsigned long addr);
+extern int init_kernel_text(unsigned long addr);
 extern int core_kernel_data(unsigned long addr);
 extern int __kernel_text_address(unsigned long addr);
 extern int kernel_text_address(unsigned long addr);
index ac0062b74aed048923c9f08b4a9d74d176ff6c96..6930c63126c78a9ef665b5b5653a60a8773b4d4c 100644 (file)
@@ -1105,7 +1105,6 @@ static inline void kvm_irq_routing_update(struct kvm *kvm)
 {
 }
 #endif
-void kvm_arch_irq_routing_update(struct kvm *kvm);
 
 static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 {
@@ -1114,6 +1113,8 @@ static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 
 #endif /* CONFIG_HAVE_KVM_EVENTFD */
 
+void kvm_arch_irq_routing_update(struct kvm *kvm);
+
 static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
 {
        /*
@@ -1272,4 +1273,7 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
 }
 #endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
 
+void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+               unsigned long start, unsigned long end);
+
 #endif
index 8820468635810a3d801a5dd52beca94a7a14b035..c46016bb25ebe2ba3919987876ae058dd0a7b01c 100644 (file)
@@ -523,9 +523,11 @@ static inline void __mod_memcg_state(struct mem_cgroup *memcg,
 static inline void mod_memcg_state(struct mem_cgroup *memcg,
                                   int idx, int val)
 {
-       preempt_disable();
+       unsigned long flags;
+
+       local_irq_save(flags);
        __mod_memcg_state(memcg, idx, val);
-       preempt_enable();
+       local_irq_restore(flags);
 }
 
 /**
@@ -606,9 +608,11 @@ static inline void __mod_lruvec_state(struct lruvec *lruvec,
 static inline void mod_lruvec_state(struct lruvec *lruvec,
                                    enum node_stat_item idx, int val)
 {
-       preempt_disable();
+       unsigned long flags;
+
+       local_irq_save(flags);
        __mod_lruvec_state(lruvec, idx, val);
-       preempt_enable();
+       local_irq_restore(flags);
 }
 
 static inline void __mod_lruvec_page_state(struct page *page,
@@ -630,9 +634,11 @@ static inline void __mod_lruvec_page_state(struct page *page,
 static inline void mod_lruvec_page_state(struct page *page,
                                         enum node_stat_item idx, int val)
 {
-       preempt_disable();
+       unsigned long flags;
+
+       local_irq_save(flags);
        __mod_lruvec_page_state(page, idx, val);
-       preempt_enable();
+       local_irq_restore(flags);
 }
 
 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
@@ -659,9 +665,11 @@ static inline void __count_memcg_events(struct mem_cgroup *memcg,
 static inline void count_memcg_events(struct mem_cgroup *memcg,
                                      int idx, unsigned long count)
 {
-       preempt_disable();
+       unsigned long flags;
+
+       local_irq_save(flags);
        __count_memcg_events(memcg, idx, count);
-       preempt_enable();
+       local_irq_restore(flags);
 }
 
 /* idx can be of type enum memcg_event_item or vm_event_item */
index f25c13423bd4774d3d602d2ec7afb561a39614d5..cb3bbed4e6339cfd89586c15a1d65b1d7af61106 100644 (file)
@@ -66,6 +66,11 @@ struct mutex {
 #endif
 };
 
+/*
+ * Internal helper function; C doesn't allow us to hide it :/
+ *
+ * DO NOT USE (outside of mutex code).
+ */
 static inline struct task_struct *__mutex_owner(struct mutex *lock)
 {
        return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x07);
index fbc98e2c8228d0b8ca17e4af9be1cb1cddaccd27..e791ebc65c9c0776325cdc5e72de5d995038d7e0 100644 (file)
@@ -5,6 +5,7 @@
 
 #ifndef _LINUX_NOSPEC_H
 #define _LINUX_NOSPEC_H
+#include <asm/barrier.h>
 
 /**
  * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
@@ -29,26 +30,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
 }
 #endif
 
-/*
- * Warn developers about inappropriate array_index_nospec() usage.
- *
- * Even if the CPU speculates past the WARN_ONCE branch, the
- * sign bit of @index is taken into account when generating the
- * mask.
- *
- * This warning is compiled out when the compiler can infer that
- * @index and @size are less than LONG_MAX.
- */
-#define array_index_mask_nospec_check(index, size)                             \
-({                                                                             \
-       if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX,                      \
-           "array_index_nospec() limited to range of [0, LONG_MAX]\n"))        \
-               _mask = 0;                                                      \
-       else                                                                    \
-               _mask = array_index_mask_nospec(index, size);                   \
-       _mask;                                                                  \
-})
-
 /*
  * array_index_nospec - sanitize an array index after a bounds check
  *
@@ -67,12 +48,11 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
 ({                                                                     \
        typeof(index) _i = (index);                                     \
        typeof(size) _s = (size);                                       \
-       unsigned long _mask = array_index_mask_nospec_check(_i, _s);    \
+       unsigned long _mask = array_index_mask_nospec(_i, _s);          \
                                                                        \
        BUILD_BUG_ON(sizeof(_i) > sizeof(long));                        \
        BUILD_BUG_ON(sizeof(_s) > sizeof(long));                        \
                                                                        \
-       _i &= _mask;                                                    \
-       _i;                                                             \
+       (typeof(_i)) (_i & _mask);                                      \
 })
 #endif /* _LINUX_NOSPEC_H */
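
The simplified array_index_nospec() now just masks and returns the index; the calling pattern is unchanged. A hedged sketch of the canonical caller; handlers and nr_handlers are hypothetical:

#include <linux/nospec.h>

static void *lookup_handler(void **handlers, unsigned long nr_handlers,
			    unsigned long idx)
{
	if (idx >= nr_handlers)
		return NULL;
	/* clamp idx under speculation so a mispredicted bounds check
	 * cannot feed an out-of-bounds load */
	idx = array_index_nospec(idx, nr_handlers);
	return handlers[idx];
}
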
index af0f44effd44abc067d7f31e498c433fd061725e..40036a57d072f28dbdd9de54e7d1972d37072d64 100644 (file)
 
 #include <linux/interrupt.h>
 #include <linux/perf_event.h>
+#include <linux/platform_device.h>
 #include <linux/sysfs.h>
 #include <asm/cputype.h>
 
-/*
- * struct arm_pmu_platdata - ARM PMU platform data
- *
- * @handle_irq: an optional handler which will be called from the
- *     interrupt and passed the address of the low level handler,
- *     and can be used to implement any platform specific handling
- *     before or after calling it.
- *
- * @irq_flags: if non-zero, these flags will be passed to request_irq
- *             when requesting interrupts for this PMU device.
- */
-struct arm_pmu_platdata {
-       irqreturn_t (*handle_irq)(int irq, void *dev,
-                                 irq_handler_t pmu_handler);
-       unsigned long irq_flags;
-};
-
 #ifdef CONFIG_ARM_PMU
 
 /*
@@ -92,7 +76,6 @@ enum armpmu_attr_groups {
 
 struct arm_pmu {
        struct pmu      pmu;
-       cpumask_t       active_irqs;
        cpumask_t       supported_cpus;
        char            *name;
        irqreturn_t     (*handle_irq)(int irq_num, void *dev);
@@ -174,12 +157,11 @@ static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
 
 /* Internal functions only for core arm_pmu code */
 struct arm_pmu *armpmu_alloc(void);
+struct arm_pmu *armpmu_alloc_atomic(void);
 void armpmu_free(struct arm_pmu *pmu);
 int armpmu_register(struct arm_pmu *pmu);
-int armpmu_request_irqs(struct arm_pmu *armpmu);
-void armpmu_free_irqs(struct arm_pmu *armpmu);
-int armpmu_request_irq(struct arm_pmu *armpmu, int cpu);
-void armpmu_free_irq(struct arm_pmu *armpmu, int cpu);
+int armpmu_request_irq(int irq, int cpu);
+void armpmu_free_irq(int irq, int cpu);
 
 #define ARMV8_PMU_PDEV_NAME "armv8-pmu"
 
index b884b7794187ee4c007b512107c41b7fe5055516..e6335227b84482c9598f1d73206024bf3a8c7646 100644 (file)
@@ -469,7 +469,7 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
  */
 static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
 {
-       if (size * sizeof(void *) > KMALLOC_MAX_SIZE)
+       if (size > KMALLOC_MAX_SIZE / sizeof(void *))
                return NULL;
        return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO);
 }
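
The rewritten check avoids the multiplication that can wrap before the comparison. A hedged illustration of the arithmetic (example values, not from the patch):

/*
 * 32-bit example with a user-controlled size of 0x40000000:
 *
 *   old: size * sizeof(void *) == 0x1'0000'0000  -> wraps to 0,
 *        so "0 > KMALLOC_MAX_SIZE" is false and the bogus size is accepted
 *   new: size > KMALLOC_MAX_SIZE / sizeof(void *) is true,
 *        so the request is rejected before kvmalloc_array() is reached
 */
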
index 1149533aa2fa2838d28b4351522314cb302bc0a4..9806184bb3d54eb5160db40f747574868837e787 100644 (file)
@@ -36,7 +36,18 @@ static inline void mmgrab(struct mm_struct *mm)
        atomic_inc(&mm->mm_count);
 }
 
-extern void mmdrop(struct mm_struct *mm);
+extern void __mmdrop(struct mm_struct *mm);
+
+static inline void mmdrop(struct mm_struct *mm)
+{
+       /*
+        * The implicit full barrier implied by atomic_dec_and_test() is
+        * required by the membarrier system call before returning to
+        * user-space, after storing to rq->curr.
+        */
+       if (unlikely(atomic_dec_and_test(&mm->mm_count)))
+               __mmdrop(mm);
+}
 
 /**
  * mmget() - Pin the address space associated with a &struct mm_struct.
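
mmdrop() becomes inline so the membarrier-related barrier comment travels with every caller; the grab/drop pairing itself is unchanged. A hedged sketch of that pairing; struct my_ctx is hypothetical:

#include <linux/sched/mm.h>

struct my_ctx {
	struct mm_struct *mm;
};

static void ctx_take_mm(struct my_ctx *ctx, struct mm_struct *mm)
{
	mmgrab(mm);		/* pin mm_count; the mm_struct stays allocated */
	ctx->mm = mm;
}

static void ctx_release_mm(struct my_ctx *ctx)
{
	mmdrop(ctx->mm);	/* last reference frees it via __mmdrop() */
	ctx->mm = NULL;
}
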
index 0dcf4e480ef73345174bfb7d1446e0e1a89daa5c..96fe289c4c6e496fefd005d3100c84bf58f81698 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <linux/uidgid.h>
 #include <linux/atomic.h>
+#include <linux/ratelimit.h>
 
 struct key;
 
@@ -41,6 +42,9 @@ struct user_struct {
     defined(CONFIG_NET)
        atomic_long_t locked_vm;
 #endif
+
+       /* Miscellaneous per-user rate limit */
+       struct ratelimit_state ratelimit;
 };
 
 extern int uids_sysfs_init(void);
index 5ebc0f869720a35d2790a6befc9f33fb54a4f9a1..c1e66bdcf5837047fc48cd3bbe15e1e1bc0d3a92 100644 (file)
@@ -3646,7 +3646,7 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
        return true;
 }
 
-/* For small packets <= CHECKSUM_BREAK peform checksum complete directly
+/* For small packets <= CHECKSUM_BREAK perform checksum complete directly
  * in checksum_init.
  */
 #define CHECKSUM_BREAK 76
index 7b6a59f722a39eda9db1b8de902001c434b0be80..a1a3f4ed94cea88c43ecc31d8f70aed80ef7618f 100644 (file)
@@ -337,8 +337,6 @@ extern void deactivate_file_page(struct page *page);
 extern void mark_page_lazyfree(struct page *page);
 extern void swap_setup(void);
 
-extern void add_page_to_unevictable_list(struct page *page);
-
 extern void lru_cache_add_active_or_unevictable(struct page *page,
                                                struct vm_area_struct *vma);
 
index 4a54ef96aff5b0ba8e9bf53cc754d09c0dd4dd55..bc0cda180c8b701a154182b5dbc6a7f5b9da840a 100644 (file)
@@ -465,6 +465,7 @@ extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
 
 extern void workqueue_set_max_active(struct workqueue_struct *wq,
                                     int max_active);
+extern struct work_struct *current_work(void);
 extern bool current_is_workqueue_rescuer(void);
 extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
 extern unsigned int work_busy(struct work_struct *work);
index c4df6cee48e6ab41520d73d4a9e12a7be76b0f07..bf00a5a41a90643437e3a750456ff534e9fa7945 100644 (file)
@@ -117,7 +117,7 @@ struct dmx_ts_feed {
  *               specified by @filter_value that will be used on the filter
  *               match logic.
  * @filter_mode:  Contains a 16 bytes (128 bits) filter mode.
- * @parent:      Pointer to struct dmx_section_feed.
+ * @parent:      Back-pointer to struct dmx_section_feed.
  * @priv:        Pointer to private data of the API client.
  *
  *
@@ -130,8 +130,9 @@ struct dmx_section_filter {
        u8 filter_value[DMX_MAX_FILTER_SIZE];
        u8 filter_mask[DMX_MAX_FILTER_SIZE];
        u8 filter_mode[DMX_MAX_FILTER_SIZE];
-       struct dmx_section_feed *parent; /* Back-pointer */
-       void *priv; /* Pointer to private data of the API client */
+       struct dmx_section_feed *parent;
+
+       void *priv;
 };
 
 /**
@@ -193,6 +194,10 @@ struct dmx_section_feed {
  * @buffer2:           Pointer to the tail of the filtered TS packets, or NULL.
  * @buffer2_length:    Length of the TS data in buffer2.
  * @source:            Indicates which TS feed is the source of the callback.
+ * @buffer_flags:      Address where buffer flags are stored. Those are
+ *                     used to report discontinuities to users via the DVB
+ *                     memory-mapped API, as defined by
+ *                     &enum dmx_buffer_flags.
  *
  * This function callback prototype, provided by the client of the demux API,
  * is called from the demux code. The function is only called when filtering
@@ -245,7 +250,8 @@ typedef int (*dmx_ts_cb)(const u8 *buffer1,
                         size_t buffer1_length,
                         const u8 *buffer2,
                         size_t buffer2_length,
-                        struct dmx_ts_feed *source);
+                        struct dmx_ts_feed *source,
+                        u32 *buffer_flags);
 
 /**
  * typedef dmx_section_cb - DVB demux TS filter callback function prototype
@@ -261,6 +267,10 @@ typedef int (*dmx_ts_cb)(const u8 *buffer1,
  *                     including headers and CRC.
  * @source:            Indicates which section feed is the source of the
  *                     callback.
+ * @buffer_flags:      Address where buffer flags are stored. Those are
+ *                     used to report discontinuities to users via the DVB
+ *                     memory-mapped API, as defined by
+ *                     &enum dmx_buffer_flags.
  *
  * This function callback prototype, provided by the client of the demux API,
  * is called from the demux code. The function is only called when
@@ -286,7 +296,8 @@ typedef int (*dmx_section_cb)(const u8 *buffer1,
                              size_t buffer1_len,
                              const u8 *buffer2,
                              size_t buffer2_len,
-                             struct dmx_section_filter *source);
+                             struct dmx_section_filter *source,
+                             u32 *buffer_flags);
 
 /*
  * DVB Front-End
index 2f5cb2c7b6a75dcf582d9f8fb6e97985c8b47888..baafa3b8aca4140ec3fe426138bada16e0a1da6f 100644 (file)
@@ -163,6 +163,7 @@ struct dmxdev_filter {
  * @demux:             pointer to &struct dmx_demux.
  * @filternum:         number of filters.
  * @capabilities:      demux capabilities as defined by &enum dmx_demux_caps.
+ * @may_do_mmap:       flag used to indicate if the device may do mmap.
  * @exit:              flag to indicate that the demux is being released.
  * @dvr_orig_fe:       pointer to &struct dmx_frontend.
  * @dvr_buffer:                embedded &struct dvb_ringbuffer for DVB output.
@@ -180,6 +181,7 @@ struct dmxdev {
        int filternum;
        int capabilities;
 
+       unsigned int may_do_mmap:1;
        unsigned int exit:1;
 #define DMXDEV_CAP_DUPLEX 1
        struct dmx_frontend *dvr_orig_fe;
index b07092038f4bde977cd50a40c16e263d2f4293ed..3b6aeca7a49e44f5dd2a602842d14362951404ae 100644 (file)
@@ -115,6 +115,8 @@ struct dvb_demux_filter {
  * @pid:       PID to be filtered.
  * @timeout:   feed timeout.
  * @filter:    pointer to &struct dvb_demux_filter.
+ * @buffer_flags: Buffer flags used to report discontinuities to users via the
+ *               DVB memory-mapped API, as defined by &enum dmx_buffer_flags.
  * @ts_type:   type of TS, as defined by &enum ts_filter_type.
  * @pes_type:  type of PES, as defined by &enum dmx_ts_pes.
  * @cc:                MPEG-TS packet continuity counter
@@ -145,6 +147,8 @@ struct dvb_demux_feed {
        ktime_t timeout;
        struct dvb_demux_filter *filter;
 
+       u32 buffer_flags;
+
        enum ts_filter_type ts_type;
        enum dmx_ts_pes pes_type;
 
index 01d1202d1a55b8ed30d92e4ad46b20cbd17409a0..8cb88452cd6c287ced6cea133419096ef1a22955 100644 (file)
@@ -85,6 +85,12 @@ struct dvb_buffer {
  * @nonblocking:
  *             If different than zero, device is operating on non-blocking
  *             mode.
+ * @flags:     buffer flags as defined by &enum dmx_buffer_flags.
+ *             Filled only at &DMX_DQBUF. &DMX_QBUF should zero this field.
+ * @count:     monotonic counter for filled buffers. Helps to identify
+ *             data stream losses. Filled only at &DMX_DQBUF. &DMX_QBUF should
+ *             zero this field.
+ *
  * @name:      name of the device type. Currently, it can either be
  *             "dvr" or "demux_filter".
  */
@@ -100,10 +106,14 @@ struct dvb_vb2_ctx {
        int     buf_siz;
        int     buf_cnt;
        int     nonblocking;
+
+       enum dmx_buffer_flags flags;
+       u32     count;
+
        char    name[DVB_VB2_NAME_MAX + 1];
 };
 
-#ifndef DVB_MMAP
+#ifndef CONFIG_DVB_MMAP
 static inline int dvb_vb2_init(struct dvb_vb2_ctx *ctx,
                               const char *name, int non_blocking)
 {
@@ -114,7 +124,7 @@ static inline int dvb_vb2_release(struct dvb_vb2_ctx *ctx)
        return 0;
 };
 #define dvb_vb2_is_streaming(ctx) (0)
-#define dvb_vb2_fill_buffer(ctx, file, wait) (0)
+#define dvb_vb2_fill_buffer(ctx, file, wait, flags) (0)
 
 static inline __poll_t dvb_vb2_poll(struct dvb_vb2_ctx *ctx,
                                    struct file *file,
@@ -153,9 +163,13 @@ int dvb_vb2_is_streaming(struct dvb_vb2_ctx *ctx);
  * @ctx:       control struct for VB2 handler
  * @src:       place where the data is stored
  * @len:       number of bytes to be copied from @src
+ * @buffer_flags:
+ *             pointer to buffer flags as defined by &enum dmx_buffer_flags.
+ *             can be NULL.
  */
 int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx,
-                       const unsigned char *src, int len);
+                       const unsigned char *src, int len,
+                       enum dmx_buffer_flags *buffer_flags);
 
 /**
  * dvb_vb2_poll - Wrapper to vb2_core_streamon() for Digital TV
index 906e902230662562c97f03a7ccd37e6339747eba..c96511fa9198e98175f691cb5335841ac27f8126 100644 (file)
@@ -4149,7 +4149,7 @@ void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *sta, u8 tid);
  * The TX headroom reserved by mac80211 for its own tx_status functions.
  * This is enough for the radiotap header.
  */
-#define IEEE80211_TX_STATUS_HEADROOM   14
+#define IEEE80211_TX_STATUS_HEADROOM   ALIGN(14, 4)
 
 /**
  * ieee80211_sta_set_buffered - inform mac80211 about driver-buffered frames
index ebc5a2ed86317dfdab8939d6259ed7ea7b0199fb..f83cacce33085e3922cd23cd51ccca393f85114b 100644 (file)
@@ -78,7 +78,7 @@ struct regulatory_request {
        int wiphy_idx;
        enum nl80211_reg_initiator initiator;
        enum nl80211_user_reg_hint_type user_reg_hint_type;
-       char alpha2[2];
+       char alpha2[3];
        enum nl80211_dfs_regions dfs_region;
        bool intersect;
        bool processed;
index 81bdbf97319b216ad1d7955aa13c7cae2c8ac044..9185e45b997ff67ce2999c6b0665c8d693b44338 100644 (file)
@@ -64,6 +64,7 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
                UDP_SKB_CB(skb)->cscov = cscov;
                if (skb->ip_summed == CHECKSUM_COMPLETE)
                        skb->ip_summed = CHECKSUM_NONE;
+               skb->csum_valid = 0;
         }
 
        return 0;
index c2d81167c8585ff68c6caf5127013c22b1bb8c11..2cdf8dcf4bdcbc70538768619098ac2c5a2e9c48 100644 (file)
@@ -28,10 +28,6 @@ enum rdma_restrack_type {
         * @RDMA_RESTRACK_QP: Queue pair (QP)
         */
        RDMA_RESTRACK_QP,
-       /**
-        * @RDMA_RESTRACK_XRCD: XRC domain (XRCD)
-        */
-       RDMA_RESTRACK_XRCD,
        /**
         * @RDMA_RESTRACK_MAX: Last entry, used for array declarations
         */
index 6da44079aa58961cccde88f8366944f2c96166c0..38287d9d23a1f90b4e03f023db0fa2d5fa2175fd 100644 (file)
@@ -276,10 +276,7 @@ struct uverbs_object_tree_def {
  */
 
 struct uverbs_ptr_attr {
-       union {
-               u64             data;
-               void    __user *ptr;
-       };
+       u64             data;
        u16             len;
        /* Combination of bits from enum UVERBS_ATTR_F_XXXX */
        u16             flags;
@@ -351,38 +348,60 @@ static inline const struct uverbs_attr *uverbs_attr_get(const struct uverbs_attr
 }
 
 static inline int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle,
-                                size_t idx, const void *from)
+                                size_t idx, const void *from, size_t size)
 {
        const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx);
        u16 flags;
+       size_t min_size;
 
        if (IS_ERR(attr))
                return PTR_ERR(attr);
 
+       min_size = min_t(size_t, attr->ptr_attr.len, size);
+       if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size))
+               return -EFAULT;
+
        flags = attr->ptr_attr.flags | UVERBS_ATTR_F_VALID_OUTPUT;
-       return (!copy_to_user(attr->ptr_attr.ptr, from, attr->ptr_attr.len) &&
-               !put_user(flags, &attr->uattr->flags)) ? 0 : -EFAULT;
+       if (put_user(flags, &attr->uattr->flags))
+               return -EFAULT;
+
+       return 0;
 }
 
-static inline int _uverbs_copy_from(void *to, size_t to_size,
+static inline bool uverbs_attr_ptr_is_inline(const struct uverbs_attr *attr)
+{
+       return attr->ptr_attr.len <= sizeof(attr->ptr_attr.data);
+}
+
+static inline int _uverbs_copy_from(void *to,
                                    const struct uverbs_attr_bundle *attrs_bundle,
-                                   size_t idx)
+                                   size_t idx,
+                                   size_t size)
 {
        const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx);
 
        if (IS_ERR(attr))
                return PTR_ERR(attr);
 
-       if (to_size <= sizeof(((struct ib_uverbs_attr *)0)->data))
+       /*
+        * Validation ensures attr->ptr_attr.len >= size. If the caller is
+        * using UVERBS_ATTR_SPEC_F_MIN_SZ then it must call copy_from with
+        * the right size.
+        */
+       if (unlikely(size < attr->ptr_attr.len))
+               return -EINVAL;
+
+       if (uverbs_attr_ptr_is_inline(attr))
                memcpy(to, &attr->ptr_attr.data, attr->ptr_attr.len);
-       else if (copy_from_user(to, attr->ptr_attr.ptr, attr->ptr_attr.len))
+       else if (copy_from_user(to, u64_to_user_ptr(attr->ptr_attr.data),
+                               attr->ptr_attr.len))
                return -EFAULT;
 
        return 0;
 }
 
 #define uverbs_copy_from(to, attrs_bundle, idx)                                      \
-       _uverbs_copy_from(to, sizeof(*(to)), attrs_bundle, idx)
+       _uverbs_copy_from(to, attrs_bundle, idx, sizeof(*to))
 
 /* =================================================
  *      Definitions -> Specs infrastructure
index c2d1b15da136e9aa9273a90471ac8be7a1b6b413..a91f25151a5b96ce43d61e2f58841aefa4190baa 100644 (file)
@@ -15,6 +15,7 @@
 
 #define ARC_REG_MCIP_BCR       0x0d0
 #define ARC_REG_MCIP_IDU_BCR   0x0D5
+#define ARC_REG_GFRC_BUILD     0x0D6
 #define ARC_REG_MCIP_CMD       0x600
 #define ARC_REG_MCIP_WDATA     0x601
 #define ARC_REG_MCIP_READBACK  0x602
@@ -36,10 +37,14 @@ struct mcip_cmd {
 #define CMD_SEMA_RELEASE               0x12
 
 #define CMD_DEBUG_SET_MASK             0x34
+#define CMD_DEBUG_READ_MASK            0x35
 #define CMD_DEBUG_SET_SELECT           0x36
+#define CMD_DEBUG_READ_SELECT          0x37
 
 #define CMD_GFRC_READ_LO               0x42
 #define CMD_GFRC_READ_HI               0x43
+#define CMD_GFRC_SET_CORE              0x47
+#define CMD_GFRC_READ_CORE             0x48
 
 #define CMD_IDU_ENABLE                 0x71
 #define CMD_IDU_DISABLE                        0x72
index 91a31ffed828ddfbad55967022d3a1df32db5340..9a781f0611df0280be7d952258f486f805f27528 100644 (file)
@@ -63,6 +63,7 @@ struct drm_virtgpu_execbuffer {
 };
 
 #define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
+#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
 
 struct drm_virtgpu_getparam {
        __u64 param;
index 20d1490d63773288d1d91130e96478ee7708bf26..3c50e07ee833116a726c19402f26cf63e91491e9 100644 (file)
@@ -131,7 +131,7 @@ enum {
 #define BLKTRACE_BDEV_SIZE     32
 
 /*
- * User setup structure passed with BLKTRACESTART
+ * User setup structure passed with BLKTRACESETUP
  */
 struct blk_user_trace_setup {
        char name[BLKTRACE_BDEV_SIZE];  /* output */
index 5f3c5a918f00d5ed3cb2c82b8803edeae456cad9..b4112f0b6dd36c33299930f4ffc16fc5230e465f 100644 (file)
@@ -211,6 +211,32 @@ struct dmx_stc {
        __u64 stc;
 };
 
+/**
+ * enum dmx_buffer_flags - DMX memory-mapped buffer flags
+ *
+ * @DMX_BUFFER_FLAG_HAD_CRC32_DISCARD:
+ *     Indicates that the Kernel discarded one or more frames due to wrong
+ *     CRC32 checksum.
+ * @DMX_BUFFER_FLAG_TEI:
+ *     Indicates that the Kernel has detected a Transport Error indicator
+ *     (TEI) on a filtered pid.
+ * @DMX_BUFFER_PKT_COUNTER_MISMATCH:
+ *     Indicates that the Kernel has detected a packet counter mismatch
+ *     on a filtered pid.
+ * @DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED:
+ *     Indicates that the Kernel has detected one or more frame discontinuities.
+ * @DMX_BUFFER_FLAG_DISCONTINUITY_INDICATOR:
+ *     Received at least one packet with a frame discontinuity indicator.
+ */
+
+enum dmx_buffer_flags {
+       DMX_BUFFER_FLAG_HAD_CRC32_DISCARD               = 1 << 0,
+       DMX_BUFFER_FLAG_TEI                             = 1 << 1,
+       DMX_BUFFER_PKT_COUNTER_MISMATCH                 = 1 << 2,
+       DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED          = 1 << 3,
+       DMX_BUFFER_FLAG_DISCONTINUITY_INDICATOR         = 1 << 4,
+};
+
 /**
  * struct dmx_buffer - dmx buffer info
  *
@@ -220,15 +246,24 @@ struct dmx_stc {
  *             offset from the start of the device memory for this plane,
  *             (or a "cookie" that should be passed to mmap() as offset)
  * @length:    size in bytes of the buffer
+ * @flags:     bit array of buffer flags as defined by &enum dmx_buffer_flags.
+ *             Filled only at &DMX_DQBUF.
+ * @count:     monotonic counter for filled buffers. Helps to identify
+ *             data stream losses. Filled only at &DMX_DQBUF.
  *
  * Contains data exchanged by application and driver using one of the streaming
  * I/O methods.
+ *
+ * Please notice that, for &DMX_QBUF, only @index should be filled.
+ * On &DMX_DQBUF calls, all fields will be filled by the Kernel.
  */
 struct dmx_buffer {
        __u32                   index;
        __u32                   bytesused;
        __u32                   offset;
        __u32                   length;
+       __u32                   flags;
+       __u32                   count;
 };
 
 /**
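
Userspace sees the new flags/count fields after a DMX_DQBUF. A hedged userspace sketch (error handling elided; fd is assumed to be an already-configured demux descriptor, not from the patch):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/dvb/dmx.h>

static void drain_one(int fd)
{
	struct dmx_buffer b = { 0 };

	if (ioctl(fd, DMX_DQBUF, &b) == 0) {
		if (b.flags & DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED)
			fprintf(stderr, "discontinuity, buffer count %u\n", b.count);
		ioctl(fd, DMX_QBUF, &b);	/* re-queue: only .index must be valid */
	}
}
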
index f8cb5760ea4fb3182f49b411e422d996ef467d60..8bbbcb5cd94b447069e725c0473567300cc4ed57 100644 (file)
@@ -23,7 +23,6 @@
 #define _UAPI_LINUX_IF_ETHER_H
 
 #include <linux/types.h>
-#include <linux/libc-compat.h>
 
 /*
  *     IEEE 802.3 Ethernet magic constants.  The frame sizes omit the preamble
  *     This is an Ethernet frame header.
  */
 
+/* allow libcs like musl to deactivate this, glibc does not implement this. */
+#ifndef __UAPI_DEF_ETHHDR
+#define __UAPI_DEF_ETHHDR              1
+#endif
+
 #if __UAPI_DEF_ETHHDR
 struct ethhdr {
        unsigned char   h_dest[ETH_ALEN];       /* destination eth addr */
index 0fb5ef939732517293f222f2c85d88f2b4c1e973..7b26d4b0b0529649816ec1523d225eec7fa8ee26 100644 (file)
@@ -761,6 +761,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_TRACE_PAUSE           __KVM_DEPRECATED_MAIN_0x07
 #define KVM_TRACE_DISABLE         __KVM_DEPRECATED_MAIN_0x08
 #define KVM_GET_EMULATED_CPUID   _IOWR(KVMIO, 0x09, struct kvm_cpuid2)
+#define KVM_GET_MSR_FEATURE_INDEX_LIST    _IOWR(KVMIO, 0x0a, struct kvm_msr_list)
 
 /*
  * Extension capability list.
@@ -934,6 +935,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_S390_AIS_MIGRATION 150
 #define KVM_CAP_PPC_GET_CPU_CHAR 151
 #define KVM_CAP_S390_BPB 152
+#define KVM_CAP_GET_MSR_FEATURES 153
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
index fc29efaa918cb76fab92a65890cca1aab3ecf9ba..8254c937c9f45507cd14d400a6dd4b0f8dcecd43 100644 (file)
 
 #endif /* __GLIBC__ */
 
-/* Definitions for if_ether.h */
-/* allow libcs like musl to deactivate this, glibc does not implement this. */
-#ifndef __UAPI_DEF_ETHHDR
-#define __UAPI_DEF_ETHHDR              1
-#endif
-
 #endif /* _UAPI_LIBC_COMPAT_H */
index 3d77fe91239a802367634d3cc29fd3903c1cee19..9008f31c7eb65c90a487d99d3f371a00ba526f3f 100644 (file)
@@ -42,7 +42,7 @@ typedef enum {
        SEV_RET_INVALID_PLATFORM_STATE,
        SEV_RET_INVALID_GUEST_STATE,
        SEV_RET_INAVLID_CONFIG,
-       SEV_RET_INVALID_len,
+       SEV_RET_INVALID_LEN,
        SEV_RET_ALREADY_OWNED,
        SEV_RET_INVALID_CERTIFICATE,
        SEV_RET_POLICY_FAILURE,
index e46d82b911669700662e5e4ec321b26db625f150..d5a1b8a492b93ddb6dd41cf23d759c4059bc8660 100644 (file)
@@ -69,8 +69,8 @@ struct ptrace_peeksiginfo_args {
 #define PTRACE_SECCOMP_GET_METADATA    0x420d
 
 struct seccomp_metadata {
-       unsigned long filter_off;       /* Input: which filter */
-       unsigned int flags;             /* Output: filter's flags */
+       __u64 filter_off;       /* Input: which filter */
+       __u64 flags;            /* Output: filter's flags */
 };
 
 /* Read signals from a shared (process wide) queue */
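
Switching seccomp_metadata to __u64 fields gives the structure a fixed layout across 32-bit and 64-bit tracers. A hedged userspace sketch of the matching ptrace request; pid is assumed to be already attached and stopped:

#include <sys/types.h>
#include <sys/ptrace.h>
#include <linux/ptrace.h>

static __u64 first_filter_flags(pid_t pid)
{
	struct seccomp_metadata md = { .filter_off = 0 };	/* query filter at offset 0 */

	if (ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md) < 0)
		return 0;
	return md.flags;	/* e.g. SECCOMP_FILTER_FLAG_LOG */
}
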
index 03557b5f9aa6b80031470ba0d317782ffa60f29c..46de0885e8001d3b3e1b8bbef0d1a8bad713e584 100644 (file)
@@ -65,7 +65,7 @@ struct ib_uverbs_attr {
        __u16 len;              /* only for pointers */
        __u16 flags;            /* combination of UVERBS_ATTR_F_XXXX */
        __u16 reserved;
-       __u64 data;             /* ptr to command, inline data or idr/fd */
+       __aligned_u64 data;     /* ptr to command, inline data or idr/fd */
 };
 
 struct ib_uverbs_ioctl_hdr {
@@ -73,7 +73,7 @@ struct ib_uverbs_ioctl_hdr {
        __u16 object_id;
        __u16 method_id;
        __u16 num_attrs;
-       __u64 reserved;
+       __aligned_u64 reserved;
        struct ib_uverbs_attr  attrs[0];
 };
 
index a8100b9548398e8b102052f2c1418b21ea423825..969eaf140ef0a5d356e2c9a085b5d2a86c215eba 100644 (file)
@@ -89,6 +89,7 @@
 #include <linux/io.h>
 #include <linux/cache.h>
 #include <linux/rodata_test.h>
+#include <linux/jump_label.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -1000,6 +1001,7 @@ static int __ref kernel_init(void *unused)
        /* need to finish all async __init code before freeing the memory */
        async_synchronize_full();
        ftrace_free_init_mem();
+       jump_label_invalidate_init();
        free_initmem();
        mark_readonly();
        system_state = SYSTEM_RUNNING;
index b1f66480135b3d8e21fedef41e71e0dfb3ba01c1..14750e7c5ee4872e4a7426e960bea7ae001e6623 100644 (file)
@@ -26,8 +26,10 @@ static void bpf_array_free_percpu(struct bpf_array *array)
 {
        int i;
 
-       for (i = 0; i < array->map.max_entries; i++)
+       for (i = 0; i < array->map.max_entries; i++) {
                free_percpu(array->pptrs[i]);
+               cond_resched();
+       }
 }
 
 static int bpf_array_alloc_percpu(struct bpf_array *array)
@@ -43,6 +45,7 @@ static int bpf_array_alloc_percpu(struct bpf_array *array)
                        return -ENOMEM;
                }
                array->pptrs[i] = ptr;
+               cond_resched();
        }
 
        return 0;
@@ -73,11 +76,11 @@ static int array_map_alloc_check(union bpf_attr *attr)
 static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 {
        bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
-       int numa_node = bpf_map_attr_numa_node(attr);
+       int ret, numa_node = bpf_map_attr_numa_node(attr);
        u32 elem_size, index_mask, max_entries;
        bool unpriv = !capable(CAP_SYS_ADMIN);
+       u64 cost, array_size, mask64;
        struct bpf_array *array;
-       u64 array_size, mask64;
 
        elem_size = round_up(attr->value_size, 8);
 
@@ -109,8 +112,19 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
                array_size += (u64) max_entries * elem_size;
 
        /* make sure there is no u32 overflow later in round_up() */
-       if (array_size >= U32_MAX - PAGE_SIZE)
+       cost = array_size;
+       if (cost >= U32_MAX - PAGE_SIZE)
                return ERR_PTR(-ENOMEM);
+       if (percpu) {
+               cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
+               if (cost >= U32_MAX - PAGE_SIZE)
+                       return ERR_PTR(-ENOMEM);
+       }
+       cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+
+       ret = bpf_map_precharge_memlock(cost);
+       if (ret < 0)
+               return ERR_PTR(ret);
 
        /* allocate all map elements and zero-initialize them */
        array = bpf_map_area_alloc(array_size, numa_node);
@@ -121,20 +135,13 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 
        /* copy mandatory map attributes */
        bpf_map_init_from_attr(&array->map, attr);
+       array->map.pages = cost;
        array->elem_size = elem_size;
 
-       if (!percpu)
-               goto out;
-
-       array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
-
-       if (array_size >= U32_MAX - PAGE_SIZE ||
-           bpf_array_alloc_percpu(array)) {
+       if (percpu && bpf_array_alloc_percpu(array)) {
                bpf_map_area_free(array);
                return ERR_PTR(-ENOMEM);
        }
-out:
-       array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
 
        return &array->map;
 }
index 29ca9208dcfadded0633c35387ddb3cd35be6b8b..d315b393abdd0f7dfa67abd706e0f973c3ef2c5a 100644 (file)
@@ -1590,7 +1590,7 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
         * so always copy 'cnt' prog_ids to the user.
         * In a rare race the user will see zero prog_ids
         */
-       ids = kcalloc(cnt, sizeof(u32), GFP_USER);
+       ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
        if (!ids)
                return -ENOMEM;
        rcu_read_lock();
index fbfdada6caeefa14b65e46e57f965c150a817fba..a4bb0b34375a6c652f49d7719c1bd7e0e02c89e0 100644 (file)
@@ -334,7 +334,7 @@ static int cpu_map_kthread_run(void *data)
 static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
                                                       int map_id)
 {
-       gfp_t gfp = GFP_ATOMIC|__GFP_NOWARN;
+       gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
        struct bpf_cpu_map_entry *rcpu;
        int numa, err;
 
index 7b469d10d0e93a3e6b37fdce57f1e78082c6c9e4..b4b5b81e7251e6fab9530d9d0dd2f343d2045c0e 100644 (file)
@@ -555,7 +555,10 @@ static void trie_free(struct bpf_map *map)
        struct lpm_trie_node __rcu **slot;
        struct lpm_trie_node *node;
 
-       raw_spin_lock(&trie->lock);
+       /* Wait for outstanding programs to complete
+        * update/lookup/delete/get_next_key and free the trie.
+        */
+       synchronize_rcu();
 
        /* Always start at the root and walk down to a node that has no
         * children. Then free that node, nullify its reference in the parent
@@ -566,10 +569,9 @@ static void trie_free(struct bpf_map *map)
                slot = &trie->root;
 
                for (;;) {
-                       node = rcu_dereference_protected(*slot,
-                                       lockdep_is_held(&trie->lock));
+                       node = rcu_dereference_protected(*slot, 1);
                        if (!node)
-                               goto unlock;
+                               goto out;
 
                        if (rcu_access_pointer(node->child[0])) {
                                slot = &node->child[0];
@@ -587,8 +589,8 @@ static void trie_free(struct bpf_map *map)
                }
        }
 
-unlock:
-       raw_spin_unlock(&trie->lock);
+out:
+       kfree(trie);
 }
 
 static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key)
index 48c33417d13c0ad40154f25aeade0c9b4cafd96a..a927e89dad6e9591066c3a87afc497a196ebd887 100644 (file)
@@ -521,8 +521,8 @@ static struct smap_psock *smap_init_psock(struct sock *sock,
 static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 {
        struct bpf_stab *stab;
-       int err = -EINVAL;
        u64 cost;
+       int err;
 
        if (!capable(CAP_NET_ADMIN))
                return ERR_PTR(-EPERM);
@@ -547,6 +547,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 
        /* make sure page count doesn't overflow */
        cost = (u64) stab->map.max_entries * sizeof(struct sock *);
+       err = -EINVAL;
        if (cost >= U32_MAX - PAGE_SIZE)
                goto free_stab;
 
index a17fdb63dc3e470955dcfd5e5861920b9db2749f..6a5b61ebc66c956e6eeee6537786f4161d98f2ef 100644 (file)
@@ -64,7 +64,7 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr)
        return e;
 }
 
-static inline int init_kernel_text(unsigned long addr)
+int init_kernel_text(unsigned long addr)
 {
        if (addr >= (unsigned long)_sinittext &&
            addr < (unsigned long)_einittext)
index be8aa5b986662bb2164e508bdb59cf39a81d3f0a..e5d9d405ae4e55ce862318a152609cd80e7e0c6e 100644 (file)
@@ -592,7 +592,7 @@ static void check_mm(struct mm_struct *mm)
  * is dropped: either by a lazy thread or by
  * mmput. Free the page directory and the mm.
  */
-static void __mmdrop(struct mm_struct *mm)
+void __mmdrop(struct mm_struct *mm)
 {
        BUG_ON(mm == &init_mm);
        mm_free_pgd(mm);
@@ -603,18 +603,7 @@ static void __mmdrop(struct mm_struct *mm)
        put_user_ns(mm->user_ns);
        free_mm(mm);
 }
-
-void mmdrop(struct mm_struct *mm)
-{
-       /*
-        * The implicit full barrier implied by atomic_dec_and_test() is
-        * required by the membarrier system call before returning to
-        * user-space, after storing to rq->curr.
-        */
-       if (unlikely(atomic_dec_and_test(&mm->mm_count)))
-               __mmdrop(mm);
-}
-EXPORT_SYMBOL_GPL(mmdrop);
+EXPORT_SYMBOL_GPL(__mmdrop);
 
 static void mmdrop_async_fn(struct work_struct *work)
 {
index 5187dfe809ac46eede7a8163bd86a88e8a186379..4c5770407031f083dae0a6fcbc3f20abb8a30146 100644 (file)
@@ -16,6 +16,7 @@ struct cpumap {
        unsigned int            available;
        unsigned int            allocated;
        unsigned int            managed;
+       bool                    initialized;
        bool                    online;
        unsigned long           alloc_map[IRQ_MATRIX_SIZE];
        unsigned long           managed_map[IRQ_MATRIX_SIZE];
@@ -81,9 +82,11 @@ void irq_matrix_online(struct irq_matrix *m)
 
        BUG_ON(cm->online);
 
-       bitmap_zero(cm->alloc_map, m->matrix_bits);
-       cm->available = m->alloc_size - (cm->managed + m->systembits_inalloc);
-       cm->allocated = 0;
+       if (!cm->initialized) {
+               cm->available = m->alloc_size;
+               cm->available -= cm->managed + m->systembits_inalloc;
+               cm->initialized = true;
+       }
        m->global_available += cm->available;
        cm->online = true;
        m->online_maps++;
@@ -370,14 +373,16 @@ void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
        if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
                return;
 
-       if (cm->online) {
-               clear_bit(bit, cm->alloc_map);
-               cm->allocated--;
+       clear_bit(bit, cm->alloc_map);
+       cm->allocated--;
+
+       if (cm->online)
                m->total_allocated--;
-               if (!managed) {
-                       cm->available++;
+
+       if (!managed) {
+               cm->available++;
+               if (cm->online)
                        m->global_available++;
-               }
        }
        trace_irq_matrix_free(bit, cpu, m, cm);
 }
index b4517095db6af2f5130e6c871a7a9e32884c2a66..52a0a7af8640b6b51dade855b6b18468589dff2c 100644 (file)
@@ -366,12 +366,15 @@ static void __jump_label_update(struct static_key *key,
 {
        for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
                /*
-                * entry->code set to 0 invalidates module init text sections
-                * kernel_text_address() verifies we are not in core kernel
-                * init code, see jump_label_invalidate_module_init().
+                * An entry->code of 0 indicates an entry which has been
+                * disabled because it was in an init text area.
                 */
-               if (entry->code && kernel_text_address(entry->code))
-                       arch_jump_label_transform(entry, jump_label_type(entry));
+               if (entry->code) {
+                       if (kernel_text_address(entry->code))
+                               arch_jump_label_transform(entry, jump_label_type(entry));
+                       else
+                               WARN_ONCE(1, "can't patch jump_label at %pS", (void *)entry->code);
+               }
        }
 }
 
@@ -417,6 +420,19 @@ void __init jump_label_init(void)
        cpus_read_unlock();
 }
 
+/* Disable any jump label entries in __init code */
+void __init jump_label_invalidate_init(void)
+{
+       struct jump_entry *iter_start = __start___jump_table;
+       struct jump_entry *iter_stop = __stop___jump_table;
+       struct jump_entry *iter;
+
+       for (iter = iter_start; iter < iter_stop; iter++) {
+               if (init_kernel_text(iter->code))
+                       iter->code = 0;
+       }
+}
+
 #ifdef CONFIG_MODULES
 
 static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
@@ -633,6 +649,7 @@ static void jump_label_del_module(struct module *mod)
        }
 }
 
+/* Disable any jump label entries in module init code */
 static void jump_label_invalidate_module_init(struct module *mod)
 {
        struct jump_entry *iter_start = mod->jump_entries;
index fc1123583fa6edadb19214bb1922f7ce4ccdf1e1..f274fbef821d1bee0c457da42417f4b8153444cf 100644 (file)
@@ -2397,7 +2397,7 @@ skip:
 
                if (console_lock_spinning_disable_and_check()) {
                        printk_safe_exit_irqrestore(flags);
-                       return;
+                       goto out;
                }
 
                printk_safe_exit_irqrestore(flags);
@@ -2430,6 +2430,7 @@ skip:
        if (retry && console_trylock())
                goto again;
 
+out:
        if (wake_klogd)
                wake_up_klogd();
 }
index c3029402f15c3367bdd3d59210eee471b6bb41c0..c955b10c973c0444ec491c7e6b3779b77fe1fa15 100644 (file)
@@ -163,7 +163,7 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan)
 {
        struct rchan_buf *buf;
 
-       if (chan->n_subbufs > UINT_MAX / sizeof(size_t *))
+       if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t *))
                return NULL;
 
        buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
index 940fa408a288f778fed1e61fa004cbf6e8fdac75..dc77548167ef0993487a61bcf6cbbabb2f6f2434 100644 (file)
@@ -1076,14 +1076,16 @@ long seccomp_get_metadata(struct task_struct *task,
 
        size = min_t(unsigned long, size, sizeof(kmd));
 
-       if (copy_from_user(&kmd, data, size))
+       if (size < sizeof(kmd.filter_off))
+               return -EINVAL;
+
+       if (copy_from_user(&kmd.filter_off, data, sizeof(kmd.filter_off)))
                return -EFAULT;
 
        filter = get_nth_filter(task, kmd.filter_off);
        if (IS_ERR(filter))
                return PTR_ERR(filter);
 
-       memset(&kmd, 0, sizeof(kmd));
        if (filter->log)
                kmd.flags |= SECCOMP_FILTER_FLAG_LOG;
 
index fc2838ac8b7877b1ac16879dbdb178224eb1f0f3..c0a9e310d71501948d60de5f499aa17cb087eaf6 100644 (file)
@@ -872,6 +872,8 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
                return -EINVAL;
        if (copy_from_user(&query, uquery, sizeof(query)))
                return -EFAULT;
+       if (query.ids_len > BPF_TRACE_MAX_PROGS)
+               return -E2BIG;
 
        mutex_lock(&bpf_event_mutex);
        ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
index 9a20acce460d53cf23f7057c7dc122b685ae639d..36288d8406756400f5008ee0631b810301504e0d 100644 (file)
@@ -101,6 +101,7 @@ struct user_struct root_user = {
        .sigpending     = ATOMIC_INIT(0),
        .locked_shm     = 0,
        .uid            = GLOBAL_ROOT_UID,
+       .ratelimit      = RATELIMIT_STATE_INIT(root_user.ratelimit, 0, 0),
 };
 
 /*
@@ -191,6 +192,8 @@ struct user_struct *alloc_uid(kuid_t uid)
 
                new->uid = uid;
                atomic_set(&new->__count, 1);
+               ratelimit_state_init(&new->ratelimit, HZ, 100);
+               ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);
 
                /*
                 * Before adding this, check whether we raced
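
The new per-user ratelimit_state (HZ interval, burst 100, "suppressed" note deferred via RATELIMIT_MSG_ON_RELEASE) is meant to be consumed with __ratelimit(). A hedged sketch of a consumer; warn_user_limit() is hypothetical and not part of this hunk:

static void warn_user_limit(struct user_struct *user)
{
	if (__ratelimit(&user->ratelimit))
		pr_warn("uid %u: per-user limit hit\n",
			from_kuid(&init_user_ns, user->uid));
}
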
index 017044c2623373455274a1d642cecc7e3c4811aa..bb9a519cbf5093ece372daa532121f1511957999 100644 (file)
@@ -4179,6 +4179,22 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 }
 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
 
+/**
+ * current_work - retrieve %current task's work struct
+ *
+ * Determine if %current task is a workqueue worker and what it's working on.
+ * Useful to find out the context that the %current task is running in.
+ *
+ * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
+ */
+struct work_struct *current_work(void)
+{
+       struct worker *worker = current_wq_worker();
+
+       return worker ? worker->current_work : NULL;
+}
+EXPORT_SYMBOL(current_work);
+
 /**
  * current_is_workqueue_rescuer - is %current workqueue rescuer?
  *
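
current_work() lets a driver detect that it is running inside one of its own work items, e.g. to avoid flushing itself. A hedged sketch; struct my_dev and my_stop() are hypothetical:

#include <linux/workqueue.h>

struct my_dev {
	struct work_struct teardown_work;
};

static void my_stop(struct my_dev *dev)
{
	if (current_work() == &dev->teardown_work)
		return;				/* called from the work item itself */
	cancel_work_sync(&dev->teardown_work);	/* safe: we are not that work item */
}
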
index 6088408ef26c5471146ee0c6a149f4ba1086d015..64155e310a9f2119ee7443268e767fc74b32043e 100644 (file)
@@ -1642,6 +1642,7 @@ config DMA_API_DEBUG
 
 menuconfig RUNTIME_TESTING_MENU
        bool "Runtime Testing"
+       def_bool y
 
 if RUNTIME_TESTING_MENU
 
index 1b34d210452c5aba703aea04e648dd6ed56fd576..7f5cdc1e6b298f2c2121ebc33195fbc7c7d71145 100644 (file)
@@ -1491,12 +1491,12 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
        if (unlikely(virt == NULL))
                return;
 
-       entry = dma_entry_alloc();
-       if (!entry)
+       /* handle vmalloc and linear addresses */
+       if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
                return;
 
-       /* handle vmalloc and linear addresses */
-       if (!is_vmalloc_addr(virt) && !virt_to_page(virt))
+       entry = dma_entry_alloc();
+       if (!entry)
                return;
 
        entry->type      = dma_debug_coherent;
@@ -1528,7 +1528,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
        };
 
        /* handle vmalloc and linear addresses */
-       if (!is_vmalloc_addr(virt) && !virt_to_page(virt))
+       if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
                return;
 
        if (is_vmalloc_addr(virt))
index c98d77fcf3934d9e47b33ba4d02b509c25273d31..823b813f08f862b4c80463ad69f91ba9acf62703 100644 (file)
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -36,8 +36,8 @@ int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
 {
        struct radix_tree_iter iter;
        void __rcu **slot;
-       int base = idr->idr_base;
-       int id = *nextid;
+       unsigned int base = idr->idr_base;
+       unsigned int id = *nextid;
 
        if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
                return -EINVAL;
@@ -204,10 +204,11 @@ int idr_for_each(const struct idr *idr,
 
        radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
                int ret;
+               unsigned long id = iter.index + base;
 
-               if (WARN_ON_ONCE(iter.index > INT_MAX))
+               if (WARN_ON_ONCE(id > INT_MAX))
                        break;
-               ret = fn(iter.index + base, rcu_dereference_raw(*slot), data);
+               ret = fn(id, rcu_dereference_raw(*slot), data);
                if (ret)
                        return ret;
        }
@@ -230,8 +231,8 @@ void *idr_get_next(struct idr *idr, int *nextid)
 {
        struct radix_tree_iter iter;
        void __rcu **slot;
-       int base = idr->idr_base;
-       int id = *nextid;
+       unsigned long base = idr->idr_base;
+       unsigned long id = *nextid;
 
        id = (id < base) ? 0 : id - base;
        slot = radix_tree_iter_find(&idr->idr_rt, &iter, id);
@@ -431,7 +432,6 @@ int ida_get_new_above(struct ida *ida, int start, int *id)
                        bitmap = this_cpu_xchg(ida_bitmap, NULL);
                        if (!bitmap)
                                return -EAGAIN;
-                       memset(bitmap, 0, sizeof(*bitmap));
                        bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
                        rcu_assign_pointer(*slot, bitmap);
                }
@@ -464,7 +464,6 @@ int ida_get_new_above(struct ida *ida, int start, int *id)
                        bitmap = this_cpu_xchg(ida_bitmap, NULL);
                        if (!bitmap)
                                return -EAGAIN;
-                       memset(bitmap, 0, sizeof(*bitmap));
                        __set_bit(bit, bitmap->bitmap);
                        radix_tree_iter_replace(root, &iter, slot, bitmap);
                }
index 0a7ae3288a248e239ee8057e66b914289f2aa693..8e00138d593fd3acf09716e7edf6c769c9ceee5b 100644 (file)
@@ -2125,7 +2125,7 @@ int ida_pre_get(struct ida *ida, gfp_t gfp)
                preempt_enable();
 
        if (!this_cpu_read(ida_bitmap)) {
-               struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
+               struct ida_bitmap *bitmap = kzalloc(sizeof(*bitmap), gfp);
                if (!bitmap)
                        return 0;
                if (this_cpu_cmpxchg(ida_bitmap, NULL, bitmap))
index 77ee6ced11b17a411d95f266faeaaf7dba309d4f..d7a708f82559ca38e768cb93fc0febb999711e4c 100644 (file)
@@ -1849,7 +1849,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 {
        const int default_width = 2 * sizeof(void *);
 
-       if (!ptr && *fmt != 'K') {
+       if (!ptr && *fmt != 'K' && *fmt != 'x') {
                /*
                 * Print (null) with the same width as a pointer so it makes
                 * tabular output look nice.
index 79398200e423b033e71a9f2a7210811c627020a0..74e5a6547c3ddb777e1b452e445d82bacbb936c7 100644 (file)
@@ -64,6 +64,12 @@ void clear_page_mlock(struct page *page)
        mod_zone_page_state(page_zone(page), NR_MLOCK,
                            -hpage_nr_pages(page));
        count_vm_event(UNEVICTABLE_PGCLEARED);
+       /*
+        * The previous TestClearPageMlocked() corresponds to the smp_mb()
+        * in __pagevec_lru_add_fn().
+        *
+        * See __pagevec_lru_add_fn for more explanation.
+        */
        if (!isolate_lru_page(page)) {
                putback_lru_page(page);
        } else {
index 81e18ceef579cd7726dfbabc45a5fbfd133a837b..cb416723538fe49810db0cb9b9e0b165cb963a44 100644 (file)
@@ -46,6 +46,7 @@
 #include <linux/stop_machine.h>
 #include <linux/sort.h>
 #include <linux/pfn.h>
+#include <xen/xen.h>
 #include <linux/backing-dev.h>
 #include <linux/fault-inject.h>
 #include <linux/page-isolation.h>
@@ -347,6 +348,9 @@ static inline bool update_defer_init(pg_data_t *pgdat,
        /* Always populate low zones for address-constrained allocations */
        if (zone_end < pgdat_end_pfn(pgdat))
                return true;
+       /* Xen PV domains need page structures early */
+       if (xen_pv_domain())
+               return true;
        (*nr_initialised)++;
        if ((*nr_initialised > pgdat->static_init_pgcnt) &&
            (pfn & (PAGES_PER_SECTION - 1)) == 0) {
index 567a7b96e41d63a06fe9f2332e7141161ee141a0..0f17330dd0e5a8552ecbfeaeb6c7139173cb6d3b 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -445,30 +445,6 @@ void lru_cache_add(struct page *page)
        __lru_cache_add(page);
 }
 
-/**
- * add_page_to_unevictable_list - add a page to the unevictable list
- * @page:  the page to be added to the unevictable list
- *
- * Add page directly to its zone's unevictable list.  To avoid races with
- * tasks that might be making the page evictable, through eg. munlock,
- * munmap or exit, while it's not on the lru, we want to add the page
- * while it's locked or otherwise "invisible" to other tasks.  This is
- * difficult to do when using the pagevec cache, so bypass that.
- */
-void add_page_to_unevictable_list(struct page *page)
-{
-       struct pglist_data *pgdat = page_pgdat(page);
-       struct lruvec *lruvec;
-
-       spin_lock_irq(&pgdat->lru_lock);
-       lruvec = mem_cgroup_page_lruvec(page, pgdat);
-       ClearPageActive(page);
-       SetPageUnevictable(page);
-       SetPageLRU(page);
-       add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
-       spin_unlock_irq(&pgdat->lru_lock);
-}
-
 /**
  * lru_cache_add_active_or_unevictable
  * @page:  the page to be added to LRU
@@ -484,13 +460,9 @@ void lru_cache_add_active_or_unevictable(struct page *page,
 {
        VM_BUG_ON_PAGE(PageLRU(page), page);
 
-       if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
+       if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
                SetPageActive(page);
-               lru_cache_add(page);
-               return;
-       }
-
-       if (!TestSetPageMlocked(page)) {
+       else if (!TestSetPageMlocked(page)) {
                /*
                 * We use the irq-unsafe __mod_zone_page_stat because this
                 * counter is not modified from interrupt context, and the pte
@@ -500,7 +472,7 @@ void lru_cache_add_active_or_unevictable(struct page *page,
                                    hpage_nr_pages(page));
                count_vm_event(UNEVICTABLE_PGMLOCKED);
        }
-       add_page_to_unevictable_list(page);
+       lru_cache_add(page);
 }
 
 /*
@@ -886,15 +858,55 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
                                 void *arg)
 {
-       int file = page_is_file_cache(page);
-       int active = PageActive(page);
-       enum lru_list lru = page_lru(page);
+       enum lru_list lru;
+       int was_unevictable = TestClearPageUnevictable(page);
 
        VM_BUG_ON_PAGE(PageLRU(page), page);
 
        SetPageLRU(page);
+       /*
+        * A page becomes evictable in two ways:
+        * 1) Within the LRU lock [munlock_vma_pages() and __munlock_pagevec()].
+        * 2) Before acquiring the LRU lock to put the page on the correct LRU, and then
+        *   a) doing a PageLRU check with the lock held [check_move_unevictable_pages]
+        *   b) doing a PageLRU check before taking the lock [clear_page_mlock]
+        *
+        * (1) & (2a) are fine as the LRU lock serializes them. For (2b), we need
+        * the following strict ordering:
+        *
+        * #0: __pagevec_lru_add_fn             #1: clear_page_mlock
+        *
+        * SetPageLRU()                         TestClearPageMlocked()
+        * smp_mb() // explicit ordering        // above provides strict
+        *                                      // ordering
+        * PageMlocked()                        PageLRU()
+        *
+        *
+        * If '#1' does not observe the setting of PG_lru by '#0' and fails
+        * isolation, the explicit barrier makes sure that the page_evictable()
+        * check puts the page on the correct LRU. Without smp_mb(), SetPageLRU()
+        * could be reordered after the PageMlocked() check, making '#1' fail
+        * the isolation of a page whose Mlocked bit was just cleared (while #0
+        * is looking at the same page), and the evictable page would be
+        * stranded on an unevictable LRU.
+        */
+       smp_mb();
+
+       if (page_evictable(page)) {
+               lru = page_lru(page);
+               update_page_reclaim_stat(lruvec, page_is_file_cache(page),
+                                        PageActive(page));
+               if (was_unevictable)
+                       count_vm_event(UNEVICTABLE_PGRESCUED);
+       } else {
+               lru = LRU_UNEVICTABLE;
+               ClearPageActive(page);
+               SetPageUnevictable(page);
+               if (!was_unevictable)
+                       count_vm_event(UNEVICTABLE_PGCULLED);
+       }
+
        add_page_to_lru_list(page, lruvec, lru);
-       update_page_reclaim_stat(lruvec, file, active);
        trace_mm_lru_insertion(page, lru);
 }
 
@@ -913,7 +925,7 @@ EXPORT_SYMBOL(__pagevec_lru_add);
  * @pvec:      Where the resulting entries are placed
  * @mapping:   The address_space to search
  * @start:     The starting entry index
- * @nr_pages:  The maximum number of pages
+ * @nr_entries:        The maximum number of entries
  * @indices:   The cache indices corresponding to the entries in @pvec
  *
  * pagevec_lookup_entries() will search for and return a group of up
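[Editor's note] The ordering comment added to __pagevec_lru_add_fn() above pairs SetPageLRU() plus an explicit smp_mb() against the implicit full barrier of TestClearPageMlocked() in clear_page_mlock(). A minimal user-space sketch of that store/fence/load handshake using C11 atomics follows; every name in it is illustrative only and none of it is kernel API.

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool flag_lru;      /* stands in for PG_lru     */
    static atomic_bool flag_mlocked;  /* stands in for PG_mlocked */

    /* Side #0: roughly what __pagevec_lru_add_fn() does */
    static bool side0_add_to_lru(void)
    {
            atomic_store_explicit(&flag_lru, true, memory_order_relaxed);  /* SetPageLRU() */
            atomic_thread_fence(memory_order_seq_cst);                     /* smp_mb()     */
            /* the fence keeps this load from being hoisted above the store */
            return atomic_load_explicit(&flag_mlocked, memory_order_relaxed);
    }

    /* Side #1: roughly what clear_page_mlock() does */
    static bool side1_clear_mlock(void)
    {
            /* an atomic test-and-clear acts as a full barrier by itself */
            atomic_exchange(&flag_mlocked, false);
            return atomic_load_explicit(&flag_lru, memory_order_relaxed);  /* PageLRU() */
    }

With full barriers on both sides, at least one of the two paths is guaranteed to observe the other's update, which is what keeps an evictable page from being stranded on the unevictable LRU.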
index 673942094328a710b059b2b50e149ce7eb3d5f11..ebff729cc9562709500353f30121f2c423296cc8 100644 (file)
@@ -1943,11 +1943,15 @@ void *vmalloc_exec(unsigned long size)
 }
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
-#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
+#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
-#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
+#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
 #else
-#define GFP_VMALLOC32 GFP_KERNEL
+/*
+ * 64-bit systems should always have either DMA or DMA32 zones. For others,
+ * GFP_DMA32 should do the right thing and use the normal zone.
+ */
+#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
 #endif
 
 /**
index 444749669187e189b98fcbbe0050413a9c84b20c..bee53495a829299f766d11ffd316acdb54cfffdf 100644 (file)
@@ -769,64 +769,7 @@ int remove_mapping(struct address_space *mapping, struct page *page)
  */
 void putback_lru_page(struct page *page)
 {
-       bool is_unevictable;
-       int was_unevictable = PageUnevictable(page);
-
-       VM_BUG_ON_PAGE(PageLRU(page), page);
-
-redo:
-       ClearPageUnevictable(page);
-
-       if (page_evictable(page)) {
-               /*
-                * For evictable pages, we can use the cache.
-                * In event of a race, worst case is we end up with an
-                * unevictable page on [in]active list.
-                * We know how to handle that.
-                */
-               is_unevictable = false;
-               lru_cache_add(page);
-       } else {
-               /*
-                * Put unevictable pages directly on zone's unevictable
-                * list.
-                */
-               is_unevictable = true;
-               add_page_to_unevictable_list(page);
-               /*
-                * When racing with an mlock or AS_UNEVICTABLE clearing
-                * (page is unlocked) make sure that if the other thread
-                * does not observe our setting of PG_lru and fails
-                * isolation/check_move_unevictable_pages,
-                * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
-                * the page back to the evictable list.
-                *
-                * The other side is TestClearPageMlocked() or shmem_lock().
-                */
-               smp_mb();
-       }
-
-       /*
-        * page's status can change while we move it among lru. If an evictable
-        * page is on unevictable list, it never be freed. To avoid that,
-        * check after we added it to the list, again.
-        */
-       if (is_unevictable && page_evictable(page)) {
-               if (!isolate_lru_page(page)) {
-                       put_page(page);
-                       goto redo;
-               }
-               /* This means someone else dropped this page from LRU
-                * So, it will be freed or putback to LRU again. There is
-                * nothing to do here.
-                */
-       }
-
-       if (was_unevictable && !is_unevictable)
-               count_vm_event(UNEVICTABLE_PGRESCUED);
-       else if (!was_unevictable && is_unevictable)
-               count_vm_event(UNEVICTABLE_PGCULLED);
-
+       lru_cache_add(page);
        put_page(page);         /* drop ref from isolate */
 }
 
index f8cb83e7699bba6f015fa60fa35bab214c71975e..01a771e304fab17bf2ed884e415a080524393385 100644 (file)
@@ -360,7 +360,7 @@ u64 zpool_get_total_size(struct zpool *zpool)
 
 /**
  * zpool_evictable() - Test if zpool is potentially evictable
- * @pool       The zpool to test
+ * @zpool:     The zpool to test
  *
  * Zpool is only potentially evictable when it's created with struct
  * zpool_ops.evict and its driver implements struct zpool_driver.shrink.
index c004aa4fd3f481e6686ecdfe78f64885637556b9..61a5c41972dba22bc35179a13d40e900dea679b6 100644 (file)
@@ -1007,6 +1007,12 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
        u8 *src, *dst;
        struct zswap_header zhdr = { .swpentry = swp_entry(type, offset) };
 
+       /* THP isn't supported */
+       if (PageTransHuge(page)) {
+               ret = -EINVAL;
+               goto reject;
+       }
+
        if (!zswap_enabled || !tree) {
                ret = -ENODEV;
                goto reject;
index 0254c35b2bf002d0bb4f0b52d7895617ce066e24..126a8ea73c9681dc2772423715f4717e2d1ec409 100644 (file)
@@ -255,6 +255,9 @@ static ssize_t brport_show(struct kobject *kobj,
        struct brport_attribute *brport_attr = to_brport_attr(attr);
        struct net_bridge_port *p = to_brport(kobj);
 
+       if (!brport_attr->show)
+               return -EINVAL;
+
        return brport_attr->show(p, buf);
 }
 
index 279527f8b1fe74f30d75da640da8ef23f789ba3c..ce7152a12bd8652f2941a5e643d2e4200400d756 100644 (file)
@@ -187,17 +187,17 @@ static int ebt_among_mt_check(const struct xt_mtchk_param *par)
        expected_length += ebt_mac_wormhash_size(wh_src);
 
        if (em->match_size != EBT_ALIGN(expected_length)) {
-               pr_info("wrong size: %d against expected %d, rounded to %zd\n",
-                       em->match_size, expected_length,
-                       EBT_ALIGN(expected_length));
+               pr_err_ratelimited("wrong size: %d against expected %d, rounded to %zd\n",
+                                  em->match_size, expected_length,
+                                  EBT_ALIGN(expected_length));
                return -EINVAL;
        }
        if (wh_dst && (err = ebt_mac_wormhash_check_integrity(wh_dst))) {
-               pr_info("dst integrity fail: %x\n", -err);
+               pr_err_ratelimited("dst integrity fail: %x\n", -err);
                return -EINVAL;
        }
        if (wh_src && (err = ebt_mac_wormhash_check_integrity(wh_src))) {
-               pr_info("src integrity fail: %x\n", -err);
+               pr_err_ratelimited("src integrity fail: %x\n", -err);
                return -EINVAL;
        }
        return 0;
index 61a9f1be1263afc95dfbc43cdffe5d0a6e8e7246..165b9d678cf1d3a199142ecd97c7e7995f23b0ac 100644 (file)
@@ -72,8 +72,8 @@ static int ebt_limit_mt_check(const struct xt_mtchk_param *par)
        /* Check for overflow. */
        if (info->burst == 0 ||
            user2credits(info->avg * info->burst) < user2credits(info->avg)) {
-               pr_info("overflow, try lower: %u/%u\n",
-                       info->avg, info->burst);
+               pr_info_ratelimited("overflow, try lower: %u/%u\n",
+                                   info->avg, info->burst);
                return -EINVAL;
        }
 
index 1e492ef2a33d945699a327831640db04c1f158fa..4d4c82229e9e21cea5ab7011aaf03e7a28e2b394 100644 (file)
@@ -418,6 +418,7 @@ ceph_parse_options(char *options, const char *dev_name,
                                opt->flags |= CEPH_OPT_FSID;
                        break;
                case Opt_name:
+                       kfree(opt->name);
                        opt->name = kstrndup(argstr[0].from,
                                              argstr[0].to-argstr[0].from,
                                              GFP_KERNEL);
@@ -427,6 +428,9 @@ ceph_parse_options(char *options, const char *dev_name,
                        }
                        break;
                case Opt_secret:
+                       ceph_crypto_key_destroy(opt->key);
+                       kfree(opt->key);
+
                        opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL);
                        if (!opt->key) {
                                err = -ENOMEM;
@@ -437,6 +441,9 @@ ceph_parse_options(char *options, const char *dev_name,
                                goto out;
                        break;
                case Opt_key:
+                       ceph_crypto_key_destroy(opt->key);
+                       kfree(opt->key);
+
                        opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL);
                        if (!opt->key) {
                                err = -ENOMEM;
index dda9d7b9a840702d13b518507fef7a3723fce017..d4362befe7e26dc4784c1989ad29ab2a00b79642 100644 (file)
@@ -2382,8 +2382,11 @@ EXPORT_SYMBOL(netdev_set_num_tc);
  */
 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
 {
+       bool disabling;
        int rc;
 
+       disabling = txq < dev->real_num_tx_queues;
+
        if (txq < 1 || txq > dev->num_tx_queues)
                return -EINVAL;
 
@@ -2399,15 +2402,19 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
                if (dev->num_tc)
                        netif_setup_tc(dev, txq);
 
-               if (txq < dev->real_num_tx_queues) {
+               dev->real_num_tx_queues = txq;
+
+               if (disabling) {
+                       synchronize_net();
                        qdisc_reset_all_tx_gt(dev, txq);
 #ifdef CONFIG_XPS
                        netif_reset_xps_queues_gt(dev, txq);
 #endif
                }
+       } else {
+               dev->real_num_tx_queues = txq;
        }
 
-       dev->real_num_tx_queues = txq;
        return 0;
 }
 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
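[Editor's note] The net/core/dev.c hunk above makes netif_set_real_num_tx_queues() publish the smaller queue count before synchronize_net() and the qdisc/XPS resets when queues are being disabled, so nothing can start transmitting on a queue that is about to be torn down. A hedged sketch of that ordering; wait_for_readers() and reset_queue() are stand-in callbacks, not kernel functions.

    struct txq_set {
            unsigned int real_num;   /* queue count visible to transmit paths */
    };

    static void shrink_visible_queues(struct txq_set *s, unsigned int new_num,
                                      void (*wait_for_readers)(void),
                                      void (*reset_queue)(unsigned int idx))
    {
            unsigned int old = s->real_num;

            s->real_num = new_num;             /* 1. publish the smaller count first   */
            if (new_num < old) {
                    wait_for_readers();        /* 2. analogue of synchronize_net()     */
                    for (unsigned int i = new_num; i < old; i++)
                            reset_queue(i);    /* 3. only then tear down unused queues */
            }
    }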
index 08ab4c65a998db9458ca92176fbc1e465fde255a..0c121adbdbaaac5ed1f48a8e618cc0a2af2039d6 100644 (file)
@@ -3381,17 +3381,13 @@ BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
        struct sock *sk = bpf_sock->sk;
        int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS;
 
-       if (!sk_fullsock(sk))
+       if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk))
                return -EINVAL;
 
-#ifdef CONFIG_INET
        if (val)
                tcp_sk(sk)->bpf_sock_ops_cb_flags = val;
 
        return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS);
-#else
-       return -EINVAL;
-#endif
 }
 
 static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = {
index 0a3f88f08727f1f1217560407ff539c8a8c17496..98fd12721221e4aa26e4d11be9de6c0305fb6dd9 100644 (file)
@@ -66,6 +66,7 @@ struct net_rate_estimator {
 static void est_fetch_counters(struct net_rate_estimator *e,
                               struct gnet_stats_basic_packed *b)
 {
+       memset(b, 0, sizeof(*b));
        if (e->stats_lock)
                spin_lock(e->stats_lock);
 
index 91dd09f798089e1a1144579f128e5c6dae3a9c0e..791aff68af88537dd8fcab0cbd4d844bb7a25807 100644 (file)
@@ -1338,6 +1338,12 @@ static int dn_setsockopt(struct socket *sock, int level, int optname, char __use
        lock_sock(sk);
        err = __dn_setsockopt(sock, level, optname, optval, optlen, 0);
        release_sock(sk);
+#ifdef CONFIG_NETFILTER
+       /* we need to exclude all possible ENOPROTOOPTs except the default case */
+       if (err == -ENOPROTOOPT && optname != DSO_LINKINFO &&
+           optname != DSO_STREAM && optname != DSO_SEQPACKET)
+               err = nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
+#endif
 
        return err;
 }
@@ -1445,15 +1451,6 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us
                dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation);
                break;
 
-       default:
-#ifdef CONFIG_NETFILTER
-               return nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
-#endif
-       case DSO_LINKINFO:
-       case DSO_STREAM:
-       case DSO_SEQPACKET:
-               return -ENOPROTOOPT;
-
        case DSO_MAXWINDOW:
                if (optlen != sizeof(unsigned long))
                        return -EINVAL;
@@ -1501,6 +1498,12 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us
                        return -EINVAL;
                scp->info_loc = u.info;
                break;
+
+       case DSO_LINKINFO:
+       case DSO_STREAM:
+       case DSO_SEQPACKET:
+       default:
+               return -ENOPROTOOPT;
        }
 
        return 0;
@@ -1514,6 +1517,20 @@ static int dn_getsockopt(struct socket *sock, int level, int optname, char __use
        lock_sock(sk);
        err = __dn_getsockopt(sock, level, optname, optval, optlen, 0);
        release_sock(sk);
+#ifdef CONFIG_NETFILTER
+       if (err == -ENOPROTOOPT && optname != DSO_STREAM &&
+           optname != DSO_SEQPACKET && optname != DSO_CONACCEPT &&
+           optname != DSO_CONREJECT) {
+               int len;
+
+               if (get_user(len, optlen))
+                       return -EFAULT;
+
+               err = nf_getsockopt(sk, PF_DECnet, optname, optval, &len);
+               if (err >= 0)
+                       err = put_user(len, optlen);
+       }
+#endif
 
        return err;
 }
@@ -1579,26 +1596,6 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
                r_data = &link;
                break;
 
-       default:
-#ifdef CONFIG_NETFILTER
-       {
-               int ret, len;
-
-               if (get_user(len, optlen))
-                       return -EFAULT;
-
-               ret = nf_getsockopt(sk, PF_DECnet, optname, optval, &len);
-               if (ret >= 0)
-                       ret = put_user(len, optlen);
-               return ret;
-       }
-#endif
-       case DSO_STREAM:
-       case DSO_SEQPACKET:
-       case DSO_CONACCEPT:
-       case DSO_CONREJECT:
-               return -ENOPROTOOPT;
-
        case DSO_MAXWINDOW:
                if (r_len > sizeof(unsigned long))
                        r_len = sizeof(unsigned long);
@@ -1630,6 +1627,13 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
                        r_len = sizeof(unsigned char);
                r_data = &scp->info_rem;
                break;
+
+       case DSO_STREAM:
+       case DSO_SEQPACKET:
+       case DSO_CONACCEPT:
+       case DSO_CONREJECT:
+       default:
+               return -ENOPROTOOPT;
        }
 
        if (r_data) {
index c586597da20dbb0e46eb0f693fd65bccfc8f3633..7d36a950d9610bed29c789b3c38c8dfa8299d253 100644 (file)
@@ -646,6 +646,11 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi,
                                            fi->fib_nh, cfg, extack))
                                return 1;
                }
+#ifdef CONFIG_IP_ROUTE_CLASSID
+               if (cfg->fc_flow &&
+                   cfg->fc_flow != fi->fib_nh->nh_tclassid)
+                       return 1;
+#endif
                if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
                    (!cfg->fc_gw  || cfg->fc_gw == fi->fib_nh->nh_gw))
                        return 0;
index 008be04ac1cc5e3729ed2265f5cfe3b3b64e20f3..9c41a0cef1a510c8301eaca755d02886e9f389bc 100644 (file)
@@ -1567,10 +1567,7 @@ int ip_getsockopt(struct sock *sk, int level,
                if (get_user(len, optlen))
                        return -EFAULT;
 
-               lock_sock(sk);
-               err = nf_getsockopt(sk, PF_INET, optname, optval,
-                               &len);
-               release_sock(sk);
+               err = nf_getsockopt(sk, PF_INET, optname, optval, &len);
                if (err >= 0)
                        err = put_user(len, optlen);
                return err;
@@ -1602,9 +1599,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
                if (get_user(len, optlen))
                        return -EFAULT;
 
-               lock_sock(sk);
                err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len);
-               release_sock(sk);
                if (err >= 0)
                        err = put_user(len, optlen);
                return err;
index 4ffe302f9b8200f598f32341257aec4e2a4a6f42..e3e420f3ba7b2de96be867912695efb3ae2b193c 100644 (file)
@@ -252,6 +252,10 @@ unsigned int arpt_do_table(struct sk_buff *skb,
                        }
                        if (table_base + v
                            != arpt_next_entry(e)) {
+                               if (unlikely(stackidx >= private->stacksize)) {
+                                       verdict = NF_DROP;
+                                       break;
+                               }
                                jumpstack[stackidx++] = e;
                        }
 
index 9a71f3149507064225d18a4e8ef11780daebe2d2..e38395a8dcf2806677cde272f72b3809c4f404a8 100644 (file)
@@ -330,8 +330,13 @@ ipt_do_table(struct sk_buff *skb,
                                continue;
                        }
                        if (table_base + v != ipt_next_entry(e) &&
-                           !(e->ip.flags & IPT_F_GOTO))
+                           !(e->ip.flags & IPT_F_GOTO)) {
+                               if (unlikely(stackidx >= private->stacksize)) {
+                                       verdict = NF_DROP;
+                                       break;
+                               }
                                jumpstack[stackidx++] = e;
+                       }
 
                        e = get_entry(table_base, v);
                        continue;
index 3a84a60f6b39d4a17007880b3744bde037fca706..4b02ab39ebc54b73f6ec532fa6329fa9df47bb77 100644 (file)
@@ -107,12 +107,6 @@ clusterip_config_entry_put(struct net *net, struct clusterip_config *c)
 
        local_bh_disable();
        if (refcount_dec_and_lock(&c->entries, &cn->lock)) {
-               list_del_rcu(&c->list);
-               spin_unlock(&cn->lock);
-               local_bh_enable();
-
-               unregister_netdevice_notifier(&c->notifier);
-
                /* In case anyone still accesses the file, the open/close
                 * functions are also incrementing the refcount on their own,
                 * so it's safe to remove the entry even if it's in use. */
@@ -120,6 +114,12 @@ clusterip_config_entry_put(struct net *net, struct clusterip_config *c)
                if (cn->procdir)
                        proc_remove(c->pde);
 #endif
+               list_del_rcu(&c->list);
+               spin_unlock(&cn->lock);
+               local_bh_enable();
+
+               unregister_netdevice_notifier(&c->notifier);
+
                return;
        }
        local_bh_enable();
@@ -154,8 +154,12 @@ clusterip_config_find_get(struct net *net, __be32 clusterip, int entry)
 #endif
                if (unlikely(!refcount_inc_not_zero(&c->refcount)))
                        c = NULL;
-               else if (entry)
-                       refcount_inc(&c->entries);
+               else if (entry) {
+                       if (unlikely(!refcount_inc_not_zero(&c->entries))) {
+                               clusterip_config_put(c);
+                               c = NULL;
+                       }
+               }
        }
        rcu_read_unlock_bh();
 
index 270765236f5e8cc9e39c02f9b6fa0836f853f96d..aaaf9a81fbc9730050bde35217f47634fd8aca41 100644 (file)
@@ -98,17 +98,15 @@ static int ecn_tg_check(const struct xt_tgchk_param *par)
        const struct ipt_ECN_info *einfo = par->targinfo;
        const struct ipt_entry *e = par->entryinfo;
 
-       if (einfo->operation & IPT_ECN_OP_MASK) {
-               pr_info("unsupported ECN operation %x\n", einfo->operation);
+       if (einfo->operation & IPT_ECN_OP_MASK)
                return -EINVAL;
-       }
-       if (einfo->ip_ect & ~IPT_ECN_IP_MASK) {
-               pr_info("new ECT codepoint %x out of mask\n", einfo->ip_ect);
+
+       if (einfo->ip_ect & ~IPT_ECN_IP_MASK)
                return -EINVAL;
-       }
+
        if ((einfo->operation & (IPT_ECN_OP_SET_ECE|IPT_ECN_OP_SET_CWR)) &&
            (e->ip.proto != IPPROTO_TCP || (e->ip.invflags & XT_INV_PROTO))) {
-               pr_info("cannot use TCP operations on a non-tcp rule\n");
+               pr_info_ratelimited("cannot use operation on non-tcp rule\n");
                return -EINVAL;
        }
        return 0;
index 8bd0d7b266320ecf51e0782ad6ccc7d7ad276ebe..e8bed3390e58e0c324daf5202849485bcf5d0fe5 100644 (file)
@@ -74,13 +74,13 @@ static int reject_tg_check(const struct xt_tgchk_param *par)
        const struct ipt_entry *e = par->entryinfo;
 
        if (rejinfo->with == IPT_ICMP_ECHOREPLY) {
-               pr_info("ECHOREPLY no longer supported.\n");
+               pr_info_ratelimited("ECHOREPLY no longer supported.\n");
                return -EINVAL;
        } else if (rejinfo->with == IPT_TCP_RESET) {
                /* Must specify that it's a TCP packet */
                if (e->ip.proto != IPPROTO_TCP ||
                    (e->ip.invflags & XT_INV_PROTO)) {
-                       pr_info("TCP_RESET invalid for non-tcp\n");
+                       pr_info_ratelimited("TCP_RESET invalid for non-tcp\n");
                        return -EINVAL;
                }
        }
index 37fb9552e85898d0ee9b311f22af31563c621de7..fd01f13c896a153c6ec54b2df5503da6c311cf4f 100644 (file)
@@ -105,14 +105,14 @@ static int rpfilter_check(const struct xt_mtchk_param *par)
        const struct xt_rpfilter_info *info = par->matchinfo;
        unsigned int options = ~XT_RPFILTER_OPTION_MASK;
        if (info->flags & options) {
-               pr_info("unknown options encountered");
+               pr_info_ratelimited("unknown options\n");
                return -EINVAL;
        }
 
        if (strcmp(par->table, "mangle") != 0 &&
            strcmp(par->table, "raw") != 0) {
-               pr_info("match only valid in the \'raw\' "
-                       "or \'mangle\' tables, not \'%s\'.\n", par->table);
+               pr_info_ratelimited("only valid in \'raw\' or \'mangle\' table, not \'%s\'\n",
+                                   par->table);
                return -EINVAL;
        }
 
index 49cc1c1df1bac0c9046f8d34db02c4bef18079e4..a4f44d815a61a392f578faee11ca6f79f66ba4ce 100644 (file)
@@ -1826,6 +1826,8 @@ int fib_multipath_hash(const struct fib_info *fi, const struct flowi4 *fl4,
                                return skb_get_hash_raw(skb) >> 1;
                        memset(&hash_keys, 0, sizeof(hash_keys));
                        skb_flow_dissect_flow_keys(skb, &keys, flag);
+
+                       hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
                        hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
                        hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
                        hash_keys.ports.src = keys.ports.src;
index e9f985e42405a38fc95980da5debb7ac8b51fbb5..6818042cd8a9a1778f54637861647091afd9a769 100644 (file)
@@ -1730,7 +1730,7 @@ u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
         */
        segs = max_t(u32, bytes / mss_now, min_tso_segs);
 
-       return min_t(u32, segs, sk->sk_gso_max_segs);
+       return segs;
 }
 EXPORT_SYMBOL(tcp_tso_autosize);
 
@@ -1742,9 +1742,10 @@ static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
        const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
        u32 tso_segs = ca_ops->tso_segs_goal ? ca_ops->tso_segs_goal(sk) : 0;
 
-       return tso_segs ? :
-               tcp_tso_autosize(sk, mss_now,
-                                sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
+       if (!tso_segs)
+               tso_segs = tcp_tso_autosize(sk, mss_now,
+                               sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
+       return min_t(u32, tso_segs, sk->sk_gso_max_segs);
 }
 
 /* Returns the portion of skb which can be sent right away */
@@ -2027,6 +2028,24 @@ static inline void tcp_mtu_check_reprobe(struct sock *sk)
        }
 }
 
+static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
+{
+       struct sk_buff *skb, *next;
+
+       skb = tcp_send_head(sk);
+       tcp_for_write_queue_from_safe(skb, next, sk) {
+               if (len <= skb->len)
+                       break;
+
+               if (unlikely(TCP_SKB_CB(skb)->eor))
+                       return false;
+
+               len -= skb->len;
+       }
+
+       return true;
+}
+
 /* Create a new MTU probe if we are ready.
  * MTU probe is regularly attempting to increase the path MTU by
  * deliberately sending larger packets.  This discovers routing
@@ -2099,6 +2118,9 @@ static int tcp_mtu_probe(struct sock *sk)
                        return 0;
        }
 
+       if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
+               return -1;
+
        /* We're allowed to probe.  Build it now. */
        nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
        if (!nskb)
@@ -2134,6 +2156,10 @@ static int tcp_mtu_probe(struct sock *sk)
                        /* We've eaten all the data from this skb.
                         * Throw it away. */
                        TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
+                       /* If this is the last skb we copy and eor is set,
+                        * we need to propagate it to the new skb.
+                        */
+                       TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
                        tcp_unlink_write_queue(skb, sk);
                        sk_wmem_free_skb(sk, skb);
                } else {
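[Editor's note] The tcp_output.c change above moves the sk_gso_max_segs clamp out of tcp_tso_autosize() and into tcp_tso_segs(), so the value suggested by a congestion control's tso_segs_goal() hook is now clamped as well. A compact sketch of the resulting selection logic, with purely illustrative parameter names:

    static unsigned int pick_tso_segs(unsigned int ca_goal,
                                      unsigned int autosized,
                                      unsigned int gso_max_segs)
    {
            unsigned int segs = ca_goal ? ca_goal : autosized;

            /* the clamp now covers both sources, not only the autosized value */
            return segs < gso_max_segs ? segs : gso_max_segs;
    }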
index bfaefe560b5ce32bbf383469d8f77795d1322ed9..e5ef7c38c934c2f9fdbf368e9815eea9701139e4 100644 (file)
@@ -2024,6 +2024,11 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
                err = udplite_checksum_init(skb, uh);
                if (err)
                        return err;
+
+               if (UDP_SKB_CB(skb)->partial_cov) {
+                       skb->csum = inet_compute_pseudo(skb, proto);
+                       return 0;
+               }
        }
 
        /* Note, we are only interested in != 0 or == 0, thus the
index ec43d18b5ff91f360869fd348a23a23633cc8e33..547515e8450a1aae48f51a331353d04465a31fa0 100644 (file)
@@ -73,6 +73,11 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
                err = udplite_checksum_init(skb, uh);
                if (err)
                        return err;
+
+               if (UDP_SKB_CB(skb)->partial_cov) {
+                       skb->csum = ip6_compute_pseudo(skb, proto);
+                       return 0;
+               }
        }
 
        /* To support RFC 6936 (allow zero checksum in UDP/IPV6 for tunnels)
index d78d41fc4b1a46e4e0a743487770ac4fe7d395b4..24535169663dc501700c95e354761dd824e5a962 100644 (file)
@@ -1367,10 +1367,7 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname,
                if (get_user(len, optlen))
                        return -EFAULT;
 
-               lock_sock(sk);
-               err = nf_getsockopt(sk, PF_INET6, optname, optval,
-                               &len);
-               release_sock(sk);
+               err = nf_getsockopt(sk, PF_INET6, optname, optval, &len);
                if (err >= 0)
                        err = put_user(len, optlen);
        }
@@ -1409,10 +1406,7 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
                if (get_user(len, optlen))
                        return -EFAULT;
 
-               lock_sock(sk);
-               err = compat_nf_getsockopt(sk, PF_INET6,
-                                          optname, optval, &len);
-               release_sock(sk);
+               err = compat_nf_getsockopt(sk, PF_INET6, optname, optval, &len);
                if (err >= 0)
                        err = put_user(len, optlen);
        }
index af4c917e083696559e5347060307e32f23fbc4a9..62358b93bbac5250676a067464c11e4e3d649faa 100644 (file)
@@ -352,6 +352,10 @@ ip6t_do_table(struct sk_buff *skb,
                        }
                        if (table_base + v != ip6t_next_entry(e) &&
                            !(e->ipv6.flags & IP6T_F_GOTO)) {
+                               if (unlikely(stackidx >= private->stacksize)) {
+                                       verdict = NF_DROP;
+                                       break;
+                               }
                                jumpstack[stackidx++] = e;
                        }
 
index fa51a205918dbb06731f7300d34921b3c4096737..38dea8ff680fe78e33720147646c5b5e4b557ac5 100644 (file)
@@ -85,14 +85,14 @@ static int reject_tg6_check(const struct xt_tgchk_param *par)
        const struct ip6t_entry *e = par->entryinfo;
 
        if (rejinfo->with == IP6T_ICMP6_ECHOREPLY) {
-               pr_info("ECHOREPLY is not supported.\n");
+               pr_info_ratelimited("ECHOREPLY is not supported\n");
                return -EINVAL;
        } else if (rejinfo->with == IP6T_TCP_RESET) {
                /* Must specify that it's a TCP packet */
                if (!(e->ipv6.flags & IP6T_F_PROTO) ||
                    e->ipv6.proto != IPPROTO_TCP ||
                    (e->ipv6.invflags & XT_INV_PROTO)) {
-                       pr_info("TCP_RESET illegal for non-tcp\n");
+                       pr_info_ratelimited("TCP_RESET illegal for non-tcp\n");
                        return -EINVAL;
                }
        }
index b12e61b7b16ce9f3f98a0906558c98803a48a9a3..94deb69bbbdaaa34ca14dcaab943233d0a58a8a6 100644 (file)
@@ -103,14 +103,14 @@ static int rpfilter_check(const struct xt_mtchk_param *par)
        unsigned int options = ~XT_RPFILTER_OPTION_MASK;
 
        if (info->flags & options) {
-               pr_info("unknown options encountered");
+               pr_info_ratelimited("unknown options\n");
                return -EINVAL;
        }
 
        if (strcmp(par->table, "mangle") != 0 &&
            strcmp(par->table, "raw") != 0) {
-               pr_info("match only valid in the \'raw\' "
-                       "or \'mangle\' tables, not \'%s\'.\n", par->table);
+               pr_info_ratelimited("only valid in \'raw\' or \'mangle\' table, not \'%s\'\n",
+                                   par->table);
                return -EINVAL;
        }
 
index 9642164107ce1d544b4a5563cb1eaf319ef95770..33719d5560c8ab4ee565ede2b03a107c06551e51 100644 (file)
@@ -122,12 +122,14 @@ static int srh_mt6_check(const struct xt_mtchk_param *par)
        const struct ip6t_srh *srhinfo = par->matchinfo;
 
        if (srhinfo->mt_flags & ~IP6T_SRH_MASK) {
-               pr_err("unknown srh match flags  %X\n", srhinfo->mt_flags);
+               pr_info_ratelimited("unknown srh match flags  %X\n",
+                                   srhinfo->mt_flags);
                return -EINVAL;
        }
 
        if (srhinfo->mt_invflags & ~IP6T_SRH_INV_MASK) {
-               pr_err("unknown srh invflags %X\n", srhinfo->mt_invflags);
+               pr_info_ratelimited("unknown srh invflags %X\n",
+                                   srhinfo->mt_invflags);
                return -EINVAL;
        }
 
index 3873d387713575558801b0352227efd4c4ac45f6..3a1775a62973b7e0b1eeb70ba364eb1b068fdde5 100644 (file)
@@ -182,7 +182,7 @@ static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
 #ifdef CONFIG_IPV6_SIT_6RD
        struct ip_tunnel *t = netdev_priv(dev);
 
-       if (t->dev == sitn->fb_tunnel_dev) {
+       if (dev == sitn->fb_tunnel_dev) {
                ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0);
                t->ip6rd.relay_prefix = 0;
                t->ip6rd.prefixlen = 16;
index a8b1616cec418e533574ccdc1718df7a8c7cc0a3..1f3188d0384028ef338e86c291441c20493cbcb5 100644 (file)
@@ -8,6 +8,7 @@
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
  * Copyright(c) 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018        Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -304,9 +305,6 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
                         * driver so reject the timeout update.
                         */
                        status = WLAN_STATUS_REQUEST_DECLINED;
-                       ieee80211_send_addba_resp(sta->sdata, sta->sta.addr,
-                                                 tid, dialog_token, status,
-                                                 1, buf_size, timeout);
                        goto end;
                }
 
index 46028e12e216546be92996c71895a06656451d98..f4195a0f027989c4829b2f84d678373894d2fa1c 100644 (file)
@@ -2892,7 +2892,7 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
        }
        if (beacon->probe_resp_len) {
                new_beacon->probe_resp_len = beacon->probe_resp_len;
-               beacon->probe_resp = pos;
+               new_beacon->probe_resp = pos;
                memcpy(pos, beacon->probe_resp, beacon->probe_resp_len);
                pos += beacon->probe_resp_len;
        }
index 26900025de2f5c3c0d84e146b9827c5073ef53fc..ae9c33cd8adaab4a235e3710026faf437a92fc0a 100644 (file)
@@ -1467,7 +1467,7 @@ struct ieee802_11_elems {
        const struct ieee80211_timeout_interval_ie *timeout_int;
        const u8 *opmode_notif;
        const struct ieee80211_sec_chan_offs_ie *sec_chan_offs;
-       const struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie;
+       struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie;
        const struct ieee80211_bss_max_idle_period_ie *max_idle_period_ie;
 
        /* length of them, respectively */
index 73ac607beb5d704b3c74845baade196d0f66888b..6a381cbe1e33064f71f40110effc8a8fc6ad9ee4 100644 (file)
@@ -1255,13 +1255,12 @@ int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
 }
 
 static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata,
-                              struct ieee80211_mgmt *mgmt, size_t len)
+                              struct ieee80211_mgmt *mgmt, size_t len,
+                              struct ieee802_11_elems *elems)
 {
        struct ieee80211_mgmt *mgmt_fwd;
        struct sk_buff *skb;
        struct ieee80211_local *local = sdata->local;
-       u8 *pos = mgmt->u.action.u.chan_switch.variable;
-       size_t offset_ttl;
 
        skb = dev_alloc_skb(local->tx_headroom + len);
        if (!skb)
@@ -1269,13 +1268,9 @@ static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata,
        skb_reserve(skb, local->tx_headroom);
        mgmt_fwd = skb_put(skb, len);
 
-       /* offset_ttl is based on whether the secondary channel
-        * offset is available or not. Subtract 1 from the mesh TTL
-        * and disable the initiator flag before forwarding.
-        */
-       offset_ttl = (len < 42) ? 7 : 10;
-       *(pos + offset_ttl) -= 1;
-       *(pos + offset_ttl + 1) &= ~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR;
+       elems->mesh_chansw_params_ie->mesh_ttl--;
+       elems->mesh_chansw_params_ie->mesh_flags &=
+               ~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR;
 
        memcpy(mgmt_fwd, mgmt, len);
        eth_broadcast_addr(mgmt_fwd->da);
@@ -1323,7 +1318,7 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata,
 
        /* forward or re-broadcast the CSA frame */
        if (fwd_csa) {
-               if (mesh_fwd_csa_frame(sdata, mgmt, len) < 0)
+               if (mesh_fwd_csa_frame(sdata, mgmt, len, &elems) < 0)
                        mcsa_dbg(sdata, "Failed to forward the CSA frame");
        }
 }
index ee0181778a4297515de9bf0c75a947e7c45924a2..0293348357474ab411d5068218a521e59a54249e 100644 (file)
@@ -8,6 +8,7 @@
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2008, Intel Corporation
  * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ * Copyright (C) 2018        Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -27,7 +28,7 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
                                 u32 sta_flags, u8 *bssid,
                                 struct ieee80211_csa_ie *csa_ie)
 {
-       enum nl80211_band new_band;
+       enum nl80211_band new_band = current_band;
        int new_freq;
        u8 new_chan_no;
        struct ieee80211_channel *new_chan;
@@ -55,15 +56,13 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
                                elems->ext_chansw_ie->new_operating_class,
                                &new_band)) {
                        sdata_info(sdata,
-                                  "cannot understand ECSA IE operating class %d, disconnecting\n",
+                                  "cannot understand ECSA IE operating class, %d, ignoring\n",
                                   elems->ext_chansw_ie->new_operating_class);
-                       return -EINVAL;
                }
                new_chan_no = elems->ext_chansw_ie->new_ch_num;
                csa_ie->count = elems->ext_chansw_ie->count;
                csa_ie->mode = elems->ext_chansw_ie->mode;
        } else if (elems->ch_switch_ie) {
-               new_band = current_band;
                new_chan_no = elems->ch_switch_ie->new_ch_num;
                csa_ie->count = elems->ch_switch_ie->count;
                csa_ie->mode = elems->ch_switch_ie->mode;
index 0c5627f8a104e17fb54f55c09da597ef84af5be3..af0b608ee8ed11bdab91a163e4f8e8036f991185 100644 (file)
@@ -314,7 +314,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
 
        if (ieee80211_hw_check(hw, USES_RSS)) {
                sta->pcpu_rx_stats =
-                       alloc_percpu(struct ieee80211_sta_rx_stats);
+                       alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp);
                if (!sta->pcpu_rx_stats)
                        goto free;
        }
@@ -433,6 +433,7 @@ free_txq:
        if (sta->sta.txq[0])
                kfree(to_txq_info(sta->sta.txq[0]));
 free:
+       free_percpu(sta->pcpu_rx_stats);
 #ifdef CONFIG_MAC80211_MESH
        kfree(sta->mesh);
 #endif
index fbce552a796e14a6249e6e9a5d0d2ea16aaf3f09..7d7466dbf66338f817bb6698b9dbd637de26d3ed 100644 (file)
@@ -41,7 +41,7 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
                                 const struct nf_conn *ct,
                                 u16 *rover)
 {
-       unsigned int range_size, min, i;
+       unsigned int range_size, min, max, i;
        __be16 *portptr;
        u_int16_t off;
 
@@ -71,7 +71,10 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
                }
        } else {
                min = ntohs(range->min_proto.all);
-               range_size = ntohs(range->max_proto.all) - min + 1;
+               max = ntohs(range->max_proto.all);
+               if (unlikely(max < min))
+                       swap(max, min);
+               range_size = max - min + 1;
        }
 
        if (range->flags & NF_NAT_RANGE_PROTO_RANDOM) {
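[Editor's note] The nf_nat_proto_common hunk above defends against a user-supplied protocol range whose minimum is larger than its maximum, which would previously have made range_size wrap around. A small stand-alone sketch of the same defensive computation in plain C (not the kernel helper):

    #include <stdint.h>

    static unsigned int port_range_size(uint16_t lo, uint16_t hi)
    {
            if (hi < lo) {                  /* inverted range supplied by user space */
                    uint16_t tmp = lo;
                    lo = hi;
                    hi = tmp;
            }
            return (unsigned int)(hi - lo) + 1;   /* always >= 1, never wraps */
    }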
index 2f685ee1f9c87dc6cee3b1c333e7d62ad36de806..fa1655aff8d3febbf9983719cd8d5d7eda126891 100644 (file)
@@ -434,36 +434,35 @@ int xt_check_match(struct xt_mtchk_param *par,
                 * ebt_among is exempt from centralized matchsize checking
                 * because it uses a dynamic-size data set.
                 */
-               pr_err("%s_tables: %s.%u match: invalid size "
-                      "%u (kernel) != (user) %u\n",
-                      xt_prefix[par->family], par->match->name,
-                      par->match->revision,
-                      XT_ALIGN(par->match->matchsize), size);
+               pr_err_ratelimited("%s_tables: %s.%u match: invalid size %u (kernel) != (user) %u\n",
+                                  xt_prefix[par->family], par->match->name,
+                                  par->match->revision,
+                                  XT_ALIGN(par->match->matchsize), size);
                return -EINVAL;
        }
        if (par->match->table != NULL &&
            strcmp(par->match->table, par->table) != 0) {
-               pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
-                      xt_prefix[par->family], par->match->name,
-                      par->match->table, par->table);
+               pr_info_ratelimited("%s_tables: %s match: only valid in %s table, not %s\n",
+                                   xt_prefix[par->family], par->match->name,
+                                   par->match->table, par->table);
                return -EINVAL;
        }
        if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
                char used[64], allow[64];
 
-               pr_err("%s_tables: %s match: used from hooks %s, but only "
-                      "valid from %s\n",
-                      xt_prefix[par->family], par->match->name,
-                      textify_hooks(used, sizeof(used), par->hook_mask,
-                                    par->family),
-                      textify_hooks(allow, sizeof(allow), par->match->hooks,
-                                    par->family));
+               pr_info_ratelimited("%s_tables: %s match: used from hooks %s, but only valid from %s\n",
+                                   xt_prefix[par->family], par->match->name,
+                                   textify_hooks(used, sizeof(used),
+                                                 par->hook_mask, par->family),
+                                   textify_hooks(allow, sizeof(allow),
+                                                 par->match->hooks,
+                                                 par->family));
                return -EINVAL;
        }
        if (par->match->proto && (par->match->proto != proto || inv_proto)) {
-               pr_err("%s_tables: %s match: only valid for protocol %u\n",
-                      xt_prefix[par->family], par->match->name,
-                      par->match->proto);
+               pr_info_ratelimited("%s_tables: %s match: only valid for protocol %u\n",
+                                   xt_prefix[par->family], par->match->name,
+                                   par->match->proto);
                return -EINVAL;
        }
        if (par->match->checkentry != NULL) {
@@ -814,36 +813,35 @@ int xt_check_target(struct xt_tgchk_param *par,
        int ret;
 
        if (XT_ALIGN(par->target->targetsize) != size) {
-               pr_err("%s_tables: %s.%u target: invalid size "
-                      "%u (kernel) != (user) %u\n",
-                      xt_prefix[par->family], par->target->name,
-                      par->target->revision,
-                      XT_ALIGN(par->target->targetsize), size);
+               pr_err_ratelimited("%s_tables: %s.%u target: invalid size %u (kernel) != (user) %u\n",
+                                  xt_prefix[par->family], par->target->name,
+                                  par->target->revision,
+                                  XT_ALIGN(par->target->targetsize), size);
                return -EINVAL;
        }
        if (par->target->table != NULL &&
            strcmp(par->target->table, par->table) != 0) {
-               pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
-                      xt_prefix[par->family], par->target->name,
-                      par->target->table, par->table);
+               pr_info_ratelimited("%s_tables: %s target: only valid in %s table, not %s\n",
+                                   xt_prefix[par->family], par->target->name,
+                                   par->target->table, par->table);
                return -EINVAL;
        }
        if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
                char used[64], allow[64];
 
-               pr_err("%s_tables: %s target: used from hooks %s, but only "
-                      "usable from %s\n",
-                      xt_prefix[par->family], par->target->name,
-                      textify_hooks(used, sizeof(used), par->hook_mask,
-                                    par->family),
-                      textify_hooks(allow, sizeof(allow), par->target->hooks,
-                                    par->family));
+               pr_info_ratelimited("%s_tables: %s target: used from hooks %s, but only usable from %s\n",
+                                   xt_prefix[par->family], par->target->name,
+                                   textify_hooks(used, sizeof(used),
+                                                 par->hook_mask, par->family),
+                                   textify_hooks(allow, sizeof(allow),
+                                                 par->target->hooks,
+                                                 par->family));
                return -EINVAL;
        }
        if (par->target->proto && (par->target->proto != proto || inv_proto)) {
-               pr_err("%s_tables: %s target: only valid for protocol %u\n",
-                      xt_prefix[par->family], par->target->name,
-                      par->target->proto);
+               pr_info_ratelimited("%s_tables: %s target: only valid for protocol %u\n",
+                                   xt_prefix[par->family], par->target->name,
+                                   par->target->proto);
                return -EINVAL;
        }
        if (par->target->checkentry != NULL) {
@@ -1004,10 +1002,6 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
        if (sz < sizeof(*info))
                return NULL;
 
-       /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
-       if ((size >> PAGE_SHIFT) + 2 > totalram_pages)
-               return NULL;
-
        /* __GFP_NORETRY is not fully supported by kvmalloc but it should
         * work reasonably well if sz is too large and bail out rather
         * than shoot all processes down before realizing there is nothing
index c502419d63061c7aa5d3205187e7429294b3cc97..f368ee6741db556ec06aa3569b21872acfeb710d 100644 (file)
@@ -120,8 +120,8 @@ static int audit_tg_check(const struct xt_tgchk_param *par)
        const struct xt_audit_info *info = par->targinfo;
 
        if (info->type > XT_AUDIT_TYPE_MAX) {
-               pr_info("Audit type out of range (valid range: 0..%hhu)\n",
-                       XT_AUDIT_TYPE_MAX);
+               pr_info_ratelimited("Audit type out of range (valid range: 0..%hhu)\n",
+                                   XT_AUDIT_TYPE_MAX);
                return -ERANGE;
        }
 
index 0f642ef8cd2669e737dd0cdb5958abcced8be7d2..9f4151ec3e06e73460243f2b0c5c49762b37bb6d 100644 (file)
@@ -36,13 +36,13 @@ static int checksum_tg_check(const struct xt_tgchk_param *par)
        const struct xt_CHECKSUM_info *einfo = par->targinfo;
 
        if (einfo->operation & ~XT_CHECKSUM_OP_FILL) {
-               pr_info("unsupported CHECKSUM operation %x\n", einfo->operation);
+               pr_info_ratelimited("unsupported CHECKSUM operation %x\n",
+                                   einfo->operation);
                return -EINVAL;
        }
-       if (!einfo->operation) {
-               pr_info("no CHECKSUM operation enabled\n");
+       if (!einfo->operation)
                return -EINVAL;
-       }
+
        return 0;
 }
 
index da56c06a443c0bcf0dd1e767b4d4848179a7ce73..f3f1caac949bad44db0fd992b7a74d27f216cbed 100644 (file)
@@ -91,8 +91,8 @@ static int connsecmark_tg_check(const struct xt_tgchk_param *par)
 
        if (strcmp(par->table, "mangle") != 0 &&
            strcmp(par->table, "security") != 0) {
-               pr_info("target only valid in the \'mangle\' "
-                       "or \'security\' tables, not \'%s\'.\n", par->table);
+               pr_info_ratelimited("only valid in \'mangle\' or \'security\' table, not \'%s\'\n",
+                                   par->table);
                return -EINVAL;
        }
 
@@ -102,14 +102,14 @@ static int connsecmark_tg_check(const struct xt_tgchk_param *par)
                break;
 
        default:
-               pr_info("invalid mode: %hu\n", info->mode);
+               pr_info_ratelimited("invalid mode: %hu\n", info->mode);
                return -EINVAL;
        }
 
        ret = nf_ct_netns_get(par->net, par->family);
        if (ret < 0)
-               pr_info("cannot load conntrack support for proto=%u\n",
-                       par->family);
+               pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
+                                   par->family);
        return ret;
 }
 
index 5a152e2acfd5816718981c2db470ce3fa69b1ce6..8790190c6feb3cf1bce5577319fc043a5c55dc2c 100644 (file)
@@ -82,15 +82,14 @@ xt_ct_set_helper(struct nf_conn *ct, const char *helper_name,
 
        proto = xt_ct_find_proto(par);
        if (!proto) {
-               pr_info("You must specify a L4 protocol, and not use "
-                       "inversions on it.\n");
+               pr_info_ratelimited("You must specify a L4 protocol and not use inversions on it\n");
                return -ENOENT;
        }
 
        helper = nf_conntrack_helper_try_module_get(helper_name, par->family,
                                                    proto);
        if (helper == NULL) {
-               pr_info("No such helper \"%s\"\n", helper_name);
+               pr_info_ratelimited("No such helper \"%s\"\n", helper_name);
                return -ENOENT;
        }
 
@@ -124,6 +123,7 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
        const struct nf_conntrack_l4proto *l4proto;
        struct ctnl_timeout *timeout;
        struct nf_conn_timeout *timeout_ext;
+       const char *errmsg = NULL;
        int ret = 0;
        u8 proto;
 
@@ -131,29 +131,29 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
        timeout_find_get = rcu_dereference(nf_ct_timeout_find_get_hook);
        if (timeout_find_get == NULL) {
                ret = -ENOENT;
-               pr_info("Timeout policy base is empty\n");
+               errmsg = "Timeout policy base is empty";
                goto out;
        }
 
        proto = xt_ct_find_proto(par);
        if (!proto) {
                ret = -EINVAL;
-               pr_info("You must specify a L4 protocol, and not use "
-                       "inversions on it.\n");
+               errmsg = "You must specify a L4 protocol and not use inversions on it";
                goto out;
        }
 
        timeout = timeout_find_get(par->net, timeout_name);
        if (timeout == NULL) {
                ret = -ENOENT;
-               pr_info("No such timeout policy \"%s\"\n", timeout_name);
+               pr_info_ratelimited("No such timeout policy \"%s\"\n",
+                                   timeout_name);
                goto out;
        }
 
        if (timeout->l3num != par->family) {
                ret = -EINVAL;
-               pr_info("Timeout policy `%s' can only be used by L3 protocol "
-                       "number %d\n", timeout_name, timeout->l3num);
+               pr_info_ratelimited("Timeout policy `%s' can only be used by L%d protocol number %d\n",
+                                   timeout_name, 3, timeout->l3num);
                goto err_put_timeout;
        }
        /* Make sure the timeout policy matches any existing protocol tracker,
@@ -162,9 +162,8 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
        l4proto = __nf_ct_l4proto_find(par->family, proto);
        if (timeout->l4proto->l4proto != l4proto->l4proto) {
                ret = -EINVAL;
-               pr_info("Timeout policy `%s' can only be used by L4 protocol "
-                       "number %d\n",
-                       timeout_name, timeout->l4proto->l4proto);
+               pr_info_ratelimited("Timeout policy `%s' can only be used by L%d protocol number %d\n",
+                                   timeout_name, 4, timeout->l4proto->l4proto);
                goto err_put_timeout;
        }
        timeout_ext = nf_ct_timeout_ext_add(ct, timeout, GFP_ATOMIC);
@@ -180,6 +179,8 @@ err_put_timeout:
        __xt_ct_tg_timeout_put(timeout);
 out:
        rcu_read_unlock();
+       if (errmsg)
+               pr_info_ratelimited("%s\n", errmsg);
        return ret;
 #else
        return -EOPNOTSUPP;
index 3f83d38c4e5bb975f80a6834965a18c92f254dcc..098ed851b7a782e15caa1c1e7ff52a7c909b340a 100644 (file)
@@ -66,10 +66,8 @@ static int dscp_tg_check(const struct xt_tgchk_param *par)
 {
        const struct xt_DSCP_info *info = par->targinfo;
 
-       if (info->dscp > XT_DSCP_MAX) {
-               pr_info("dscp %x out of range\n", info->dscp);
+       if (info->dscp > XT_DSCP_MAX)
                return -EDOM;
-       }
        return 0;
 }
 
index 1535e87ed9bd4fa47a7f431afab82320cf5e8530..4653b071bed41cc853572a25cc57271f060277e3 100644 (file)
@@ -105,10 +105,8 @@ static int ttl_tg_check(const struct xt_tgchk_param *par)
 {
        const struct ipt_TTL_info *info = par->targinfo;
 
-       if (info->mode > IPT_TTL_MAXMODE) {
-               pr_info("TTL: invalid or unknown mode %u\n", info->mode);
+       if (info->mode > IPT_TTL_MAXMODE)
                return -EINVAL;
-       }
        if (info->mode != IPT_TTL_SET && info->ttl == 0)
                return -EINVAL;
        return 0;
@@ -118,15 +116,10 @@ static int hl_tg6_check(const struct xt_tgchk_param *par)
 {
        const struct ip6t_HL_info *info = par->targinfo;
 
-       if (info->mode > IP6T_HL_MAXMODE) {
-               pr_info("invalid or unknown mode %u\n", info->mode);
+       if (info->mode > IP6T_HL_MAXMODE)
                return -EINVAL;
-       }
-       if (info->mode != IP6T_HL_SET && info->hop_limit == 0) {
-               pr_info("increment/decrement does not "
-                       "make sense with value 0\n");
+       if (info->mode != IP6T_HL_SET && info->hop_limit == 0)
                return -EINVAL;
-       }
        return 0;
 }
 
index 60e6dbe124605569f249f0e15c7304b231fe1500..9c75f419cd80446694e4661ebb676b264f85cd1f 100644 (file)
@@ -9,6 +9,8 @@
  * the Free Software Foundation.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/skbuff.h>
 #include <linux/icmp.h>
@@ -312,29 +314,30 @@ hmark_tg_v4(struct sk_buff *skb, const struct xt_action_param *par)
 static int hmark_tg_check(const struct xt_tgchk_param *par)
 {
        const struct xt_hmark_info *info = par->targinfo;
+       const char *errmsg = "proto mask must be zero with L3 mode";
 
-       if (!info->hmodulus) {
-               pr_info("xt_HMARK: hash modulus can't be zero\n");
+       if (!info->hmodulus)
                return -EINVAL;
-       }
+
        if (info->proto_mask &&
-           (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))) {
-               pr_info("xt_HMARK: proto mask must be zero with L3 mode\n");
-               return -EINVAL;
-       }
+           (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)))
+               goto err;
+
        if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI_MASK) &&
            (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT_MASK) |
-                            XT_HMARK_FLAG(XT_HMARK_DPORT_MASK)))) {
-               pr_info("xt_HMARK: spi-mask and port-mask can't be combined\n");
+                            XT_HMARK_FLAG(XT_HMARK_DPORT_MASK))))
                return -EINVAL;
-       }
+
        if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI) &&
            (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT) |
                             XT_HMARK_FLAG(XT_HMARK_DPORT)))) {
-               pr_info("xt_HMARK: spi-set and port-set can't be combined\n");
-               return -EINVAL;
+               errmsg = "spi-set and port-set can't be combined";
+               goto err;
        }
        return 0;
+err:
+       pr_info_ratelimited("%s\n", errmsg);
+       return -EINVAL;
 }
 
 static struct xt_target hmark_tg_reg[] __read_mostly = {
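The xt_HMARK conversion above (and the xt_addrtype and xt_policy ones further down) collects the distinct error strings into a local errmsg and funnels every failure path through a single err label, so the ratelimited print is emitted from exactly one place. A minimal user-space sketch of that control flow; the names and the log_once() stand-in for pr_info_ratelimited() are illustrative, not kernel API:

#include <stdio.h>

/* stand-in for pr_info_ratelimited(); illustrative only */
#define log_once(msg) fprintf(stderr, "%s\n", msg)

static int check(unsigned int flag_a, unsigned int flag_b, unsigned int modulus)
{
        const char *errmsg = "flag_a and flag_b cannot be combined";

        if (!modulus)
                return -22;             /* -EINVAL, silent like the hmodulus check */

        if (flag_a && flag_b)
                goto err;               /* uses the default errmsg */

        if (!flag_a && !flag_b) {
                errmsg = "one of flag_a or flag_b must be set";
                goto err;
        }
        return 0;
err:
        log_once(errmsg);
        return -22;                     /* -EINVAL */
}

int main(void)
{
        return check(1, 1, 8) ? 0 : 1;  /* expect the default errmsg once */
}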
index 6c2482b709b1eca341926a8393f45dfead358561..1ac6600bfafd60b6b4d5aaf88a41fb57c6ec195b 100644 (file)
@@ -146,11 +146,11 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
        timer_setup(&info->timer->timer, idletimer_tg_expired, 0);
        info->timer->refcnt = 1;
 
+       INIT_WORK(&info->timer->work, idletimer_tg_work);
+
        mod_timer(&info->timer->timer,
                  msecs_to_jiffies(info->timeout * 1000) + jiffies);
 
-       INIT_WORK(&info->timer->work, idletimer_tg_work);
-
        return 0;
 
 out_free_attr:
@@ -191,7 +191,10 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
                pr_debug("timeout value is zero\n");
                return -EINVAL;
        }
-
+       if (info->timeout >= INT_MAX / 1000) {
+               pr_debug("timeout value is too big\n");
+               return -EINVAL;
+       }
        if (info->label[0] == '\0' ||
            strnlen(info->label,
                    MAX_IDLETIMER_LABEL_SIZE) == MAX_IDLETIMER_LABEL_SIZE) {
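The new INT_MAX / 1000 bound in xt_IDLETIMER above protects the msecs_to_jiffies(info->timeout * 1000) call seen earlier in this file: with a 32-bit timeout given in seconds, the multiplication can wrap and arm the timer for a tiny fraction of the requested period. A quick user-space illustration with made-up values:

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

int main(void)
{
        uint32_t timeout = 4294968;             /* seconds; large enough to wrap */
        uint32_t msecs = timeout * 1000;        /* 4294968000 mod 2^32 = 704 */

        printf("requested %u s -> %u ms after the 32-bit wrap\n", timeout, msecs);
        printf("rejected by the new check: %s\n",
               timeout >= (uint32_t)(INT_MAX / 1000) ? "yes" : "no");
        return 0;
}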
index 1dcad893df781395fe2773b58e99e615b84ffcc5..19846445504dcab1ada188224c4de67139df8dc6 100644 (file)
@@ -111,10 +111,8 @@ static int led_tg_check(const struct xt_tgchk_param *par)
        struct xt_led_info_internal *ledinternal;
        int err;
 
-       if (ledinfo->id[0] == '\0') {
-               pr_info("No 'id' parameter given.\n");
+       if (ledinfo->id[0] == '\0')
                return -EINVAL;
-       }
 
        mutex_lock(&xt_led_mutex);
 
@@ -138,13 +136,14 @@ static int led_tg_check(const struct xt_tgchk_param *par)
 
        err = led_trigger_register(&ledinternal->netfilter_led_trigger);
        if (err) {
-               pr_err("Trigger name is already in use.\n");
+               pr_info_ratelimited("Trigger name is already in use.\n");
                goto exit_alloc;
        }
 
-       /* See if we need to set up a timer */
-       if (ledinfo->delay > 0)
-               timer_setup(&ledinternal->timer, led_timeout_callback, 0);
+       /* Since the ledinternal timer can be shared between multiple targets,
+        * always set it up, even if the current target does not need it
+        */
+       timer_setup(&ledinternal->timer, led_timeout_callback, 0);
 
        list_add_tail(&ledinternal->list, &xt_led_triggers);
 
@@ -181,8 +180,7 @@ static void led_tg_destroy(const struct xt_tgdtor_param *par)
 
        list_del(&ledinternal->list);
 
-       if (ledinfo->delay > 0)
-               del_timer_sync(&ledinternal->timer);
+       del_timer_sync(&ledinternal->timer);
 
        led_trigger_unregister(&ledinternal->netfilter_led_trigger);
 
index a360b99a958af224a2aa98fb6fe080a885b733ef..a9aca80a32aeb4c9b92757a20710960b094aa7aa 100644 (file)
@@ -8,6 +8,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/skbuff.h>
 
@@ -67,13 +69,13 @@ static int nfqueue_tg_check(const struct xt_tgchk_param *par)
        init_hashrandom(&jhash_initval);
 
        if (info->queues_total == 0) {
-               pr_err("NFQUEUE: number of total queues is 0\n");
+               pr_info_ratelimited("number of total queues is 0\n");
                return -EINVAL;
        }
        maxid = info->queues_total - 1 + info->queuenum;
        if (maxid > 0xffff) {
-               pr_err("NFQUEUE: number of queues (%u) out of range (got %u)\n",
-                      info->queues_total, maxid);
+               pr_info_ratelimited("number of queues (%u) out of range (got %u)\n",
+                                   info->queues_total, maxid);
                return -ERANGE;
        }
        if (par->target->revision == 2 && info->flags > 1)
index 9faf5e050b796186b3204a02ece181726a26cb1a..4ad5fe27e08bcc6732f8f8a9977530b908430578 100644 (file)
@@ -60,18 +60,20 @@ static int checkentry_lsm(struct xt_secmark_target_info *info)
                                       &info->secid);
        if (err) {
                if (err == -EINVAL)
-                       pr_info("invalid security context \'%s\'\n", info->secctx);
+                       pr_info_ratelimited("invalid security context \'%s\'\n",
+                                           info->secctx);
                return err;
        }
 
        if (!info->secid) {
-               pr_info("unable to map security context \'%s\'\n", info->secctx);
+               pr_info_ratelimited("unable to map security context \'%s\'\n",
+                                   info->secctx);
                return -ENOENT;
        }
 
        err = security_secmark_relabel_packet(info->secid);
        if (err) {
-               pr_info("unable to obtain relabeling permission\n");
+               pr_info_ratelimited("unable to obtain relabeling permission\n");
                return err;
        }
 
@@ -86,14 +88,14 @@ static int secmark_tg_check(const struct xt_tgchk_param *par)
 
        if (strcmp(par->table, "mangle") != 0 &&
            strcmp(par->table, "security") != 0) {
-               pr_info("target only valid in the \'mangle\' "
-                       "or \'security\' tables, not \'%s\'.\n", par->table);
+               pr_info_ratelimited("only valid in \'mangle\' or \'security\' table, not \'%s\'\n",
+                                   par->table);
                return -EINVAL;
        }
 
        if (mode && mode != info->mode) {
-               pr_info("mode already set to %hu cannot mix with "
-                       "rules for mode %hu\n", mode, info->mode);
+               pr_info_ratelimited("mode already set to %hu cannot mix with rules for mode %hu\n",
+                                   mode, info->mode);
                return -EINVAL;
        }
 
@@ -101,7 +103,7 @@ static int secmark_tg_check(const struct xt_tgchk_param *par)
        case SECMARK_MODE_SEL:
                break;
        default:
-               pr_info("invalid mode: %hu\n", info->mode);
+               pr_info_ratelimited("invalid mode: %hu\n", info->mode);
                return -EINVAL;
        }
 
index 99bb8e410f229e3a8233ce60192c2ca0a4483b84..98efb202f8b4a14d329df1cc4614ac4088fbf151 100644 (file)
@@ -273,8 +273,7 @@ static int tcpmss_tg4_check(const struct xt_tgchk_param *par)
            (par->hook_mask & ~((1 << NF_INET_FORWARD) |
                           (1 << NF_INET_LOCAL_OUT) |
                           (1 << NF_INET_POST_ROUTING))) != 0) {
-               pr_info("path-MTU clamping only supported in "
-                       "FORWARD, OUTPUT and POSTROUTING hooks\n");
+               pr_info_ratelimited("path-MTU clamping only supported in FORWARD, OUTPUT and POSTROUTING hooks\n");
                return -EINVAL;
        }
        if (par->nft_compat)
@@ -283,7 +282,7 @@ static int tcpmss_tg4_check(const struct xt_tgchk_param *par)
        xt_ematch_foreach(ematch, e)
                if (find_syn_match(ematch))
                        return 0;
-       pr_info("Only works on TCP SYN packets\n");
+       pr_info_ratelimited("Only works on TCP SYN packets\n");
        return -EINVAL;
 }
 
@@ -298,8 +297,7 @@ static int tcpmss_tg6_check(const struct xt_tgchk_param *par)
            (par->hook_mask & ~((1 << NF_INET_FORWARD) |
                           (1 << NF_INET_LOCAL_OUT) |
                           (1 << NF_INET_POST_ROUTING))) != 0) {
-               pr_info("path-MTU clamping only supported in "
-                       "FORWARD, OUTPUT and POSTROUTING hooks\n");
+               pr_info_ratelimited("path-MTU clamping only supported in FORWARD, OUTPUT and POSTROUTING hooks\n");
                return -EINVAL;
        }
        if (par->nft_compat)
@@ -308,7 +306,7 @@ static int tcpmss_tg6_check(const struct xt_tgchk_param *par)
        xt_ematch_foreach(ematch, e)
                if (find_syn_match(ematch))
                        return 0;
-       pr_info("Only works on TCP SYN packets\n");
+       pr_info_ratelimited("Only works on TCP SYN packets\n");
        return -EINVAL;
 }
 #endif
index 17d7705e3bd41d5bbaf0ff6aba98a56f472a8388..8c89323c06afed8232e6e227683d8faa3dabc3a7 100644 (file)
@@ -540,8 +540,7 @@ static int tproxy_tg6_check(const struct xt_tgchk_param *par)
            !(i->invflags & IP6T_INV_PROTO))
                return 0;
 
-       pr_info("Can be used only in combination with "
-               "either -p tcp or -p udp\n");
+       pr_info_ratelimited("Can be used only with -p tcp or -p udp\n");
        return -EINVAL;
 }
 #endif
@@ -559,8 +558,7 @@ static int tproxy_tg4_check(const struct xt_tgchk_param *par)
            && !(i->invflags & IPT_INV_PROTO))
                return 0;
 
-       pr_info("Can be used only in combination with "
-               "either -p tcp or -p udp\n");
+       pr_info_ratelimited("Can be used only with -p tcp or -p udp\n");
        return -EINVAL;
 }
 
index 911a7c0da5040c6aa0ea30e117ac7571b9129ce5..89e281b3bfc24eece9e485375fc0a59825089298 100644 (file)
@@ -164,48 +164,47 @@ addrtype_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
 
 static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par)
 {
+       const char *errmsg = "both incoming and outgoing interface limitation cannot be selected";
        struct xt_addrtype_info_v1 *info = par->matchinfo;
 
        if (info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN &&
-           info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT) {
-               pr_info("both incoming and outgoing "
-                       "interface limitation cannot be selected\n");
-               return -EINVAL;
-       }
+           info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT)
+               goto err;
 
        if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) |
            (1 << NF_INET_LOCAL_IN)) &&
            info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT) {
-               pr_info("output interface limitation "
-                       "not valid in PREROUTING and INPUT\n");
-               return -EINVAL;
+               errmsg = "output interface limitation not valid in PREROUTING and INPUT";
+               goto err;
        }
 
        if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) |
            (1 << NF_INET_LOCAL_OUT)) &&
            info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN) {
-               pr_info("input interface limitation "
-                       "not valid in POSTROUTING and OUTPUT\n");
-               return -EINVAL;
+               errmsg = "input interface limitation not valid in POSTROUTING and OUTPUT";
+               goto err;
        }
 
 #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        if (par->family == NFPROTO_IPV6) {
                if ((info->source | info->dest) & XT_ADDRTYPE_BLACKHOLE) {
-                       pr_err("ipv6 BLACKHOLE matching not supported\n");
-                       return -EINVAL;
+                       errmsg = "ipv6 BLACKHOLE matching not supported";
+                       goto err;
                }
                if ((info->source | info->dest) >= XT_ADDRTYPE_PROHIBIT) {
-                       pr_err("ipv6 PROHIBIT (THROW, NAT ..) matching not supported\n");
-                       return -EINVAL;
+                       errmsg = "ipv6 PROHIBIT (THROW, NAT ..) matching not supported";
+                       goto err;
                }
                if ((info->source | info->dest) & XT_ADDRTYPE_BROADCAST) {
-                       pr_err("ipv6 does not support BROADCAST matching\n");
-                       return -EINVAL;
+                       errmsg = "ipv6 does not support BROADCAST matching";
+                       goto err;
                }
        }
 #endif
        return 0;
+err:
+       pr_info_ratelimited("%s\n", errmsg);
+       return -EINVAL;
 }
 
 static struct xt_match addrtype_mt_reg[] __read_mostly = {
index 06b090d8e9014d6bf6adfb742b5c2620197b98fb..a2cf8a6236d63deb1516dd6aea88161ac12b7022 100644 (file)
@@ -7,6 +7,8 @@
  * published by the Free Software Foundation.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/syscalls.h>
 #include <linux/skbuff.h>
@@ -34,7 +36,7 @@ static int __bpf_mt_check_bytecode(struct sock_filter *insns, __u16 len,
        program.filter = insns;
 
        if (bpf_prog_create(ret, &program)) {
-               pr_info("bpf: check failed: parse error\n");
+               pr_info_ratelimited("check failed: parse error\n");
                return -EINVAL;
        }
 
index 891f4e7e8ea7f507eebf1a22fa5dd818fc3ab49c..7df2dece57d30f6c4e921cf3eeff40f5319b672a 100644 (file)
@@ -12,6 +12,8 @@
  * published by the Free Software Foundation.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/skbuff.h>
 #include <linux/module.h>
 #include <linux/netfilter/x_tables.h>
@@ -48,7 +50,7 @@ static int cgroup_mt_check_v1(const struct xt_mtchk_param *par)
        }
 
        if (info->has_path && info->has_classid) {
-               pr_info("xt_cgroup: both path and classid specified\n");
+               pr_info_ratelimited("path and classid specified\n");
                return -EINVAL;
        }
 
@@ -56,8 +58,8 @@ static int cgroup_mt_check_v1(const struct xt_mtchk_param *par)
        if (info->has_path) {
                cgrp = cgroup_get_from_path(info->path);
                if (IS_ERR(cgrp)) {
-                       pr_info("xt_cgroup: invalid path, errno=%ld\n",
-                               PTR_ERR(cgrp));
+                       pr_info_ratelimited("invalid path, errno=%ld\n",
+                                           PTR_ERR(cgrp));
                        return -EINVAL;
                }
                info->priv = cgrp;
index 57ef175dfbfaa86db8368c10fbbd51ba3b2a691c..0068688995c82def03435c153d77cdb687517685 100644 (file)
@@ -135,14 +135,12 @@ static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
        struct xt_cluster_match_info *info = par->matchinfo;
 
        if (info->total_nodes > XT_CLUSTER_NODES_MAX) {
-               pr_info("you have exceeded the maximum "
-                       "number of cluster nodes (%u > %u)\n",
-                       info->total_nodes, XT_CLUSTER_NODES_MAX);
+               pr_info_ratelimited("you have exceeded the maximum number of cluster nodes (%u > %u)\n",
+                                   info->total_nodes, XT_CLUSTER_NODES_MAX);
                return -EINVAL;
        }
        if (info->node_mask >= (1ULL << info->total_nodes)) {
-               pr_info("this node mask cannot be "
-                       "higher than the total number of nodes\n");
+               pr_info_ratelimited("node mask cannot exceed total number of nodes\n");
                return -EDOM;
        }
        return 0;
index cad0b7b5eb35654d066072d1b7cf31510e98aa45..93cb018c3055f8fb660a2558fd86cac07285d47b 100644 (file)
@@ -112,8 +112,8 @@ static int connbytes_mt_check(const struct xt_mtchk_param *par)
 
        ret = nf_ct_netns_get(par->net, par->family);
        if (ret < 0)
-               pr_info("cannot load conntrack support for proto=%u\n",
-                       par->family);
+               pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
+                                   par->family);
 
        /*
         * This filter cannot function correctly unless connection tracking
index 23372879e6e3004398454f6c96944422db8e1cc7..4fa4efd243532a5507cf9eba4a8a08cb7e63e01b 100644 (file)
@@ -57,14 +57,15 @@ static int connlabel_mt_check(const struct xt_mtchk_param *par)
        int ret;
 
        if (info->options & ~options) {
-               pr_err("Unknown options in mask %x\n", info->options);
+               pr_info_ratelimited("Unknown options in mask %x\n",
+                                   info->options);
                return -EINVAL;
        }
 
        ret = nf_ct_netns_get(par->net, par->family);
        if (ret < 0) {
-               pr_info("cannot load conntrack support for proto=%u\n",
-                                                       par->family);
+               pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
+                                   par->family);
                return ret;
        }
 
index ec377cc6a369c71025136fc4b8b3cf3fce177eea..809639ce6f5a480c92718026cd91e6933d890817 100644 (file)
@@ -79,8 +79,8 @@ static int connmark_tg_check(const struct xt_tgchk_param *par)
 
        ret = nf_ct_netns_get(par->net, par->family);
        if (ret < 0)
-               pr_info("cannot load conntrack support for proto=%u\n",
-                       par->family);
+               pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
+                                   par->family);
        return ret;
 }
 
@@ -109,8 +109,8 @@ static int connmark_mt_check(const struct xt_mtchk_param *par)
 
        ret = nf_ct_netns_get(par->net, par->family);
        if (ret < 0)
-               pr_info("cannot load conntrack support for proto=%u\n",
-                       par->family);
+               pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
+                                   par->family);
        return ret;
 }
 
index 39cf1d019240e61f504bc8ced96a63c41c9bd839..df80fe7d391c0651ec3d874807d4c8985bdcedc5 100644 (file)
@@ -272,8 +272,8 @@ static int conntrack_mt_check(const struct xt_mtchk_param *par)
 
        ret = nf_ct_netns_get(par->net, par->family);
        if (ret < 0)
-               pr_info("cannot load conntrack support for proto=%u\n",
-                       par->family);
+               pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
+                                   par->family);
        return ret;
 }
 
index 236ac8008909d3abdf1d236406fc0bb0bb48dffd..a4c2b862f820af17ae9e36bab0e6cede0dc58cfc 100644 (file)
@@ -46,10 +46,8 @@ static int dscp_mt_check(const struct xt_mtchk_param *par)
 {
        const struct xt_dscp_info *info = par->matchinfo;
 
-       if (info->dscp > XT_DSCP_MAX) {
-               pr_info("dscp %x out of range\n", info->dscp);
+       if (info->dscp > XT_DSCP_MAX)
                return -EDOM;
-       }
 
        return 0;
 }
index 3c831a8efebc65840af55347c880cd188a465435..c7ad4afa5fb8cc8e455d47b50193e8f81930cdfb 100644 (file)
@@ -97,7 +97,7 @@ static int ecn_mt_check4(const struct xt_mtchk_param *par)
 
        if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) &&
            (ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) {
-               pr_info("cannot match TCP bits in rule for non-tcp packets\n");
+               pr_info_ratelimited("cannot match TCP bits for non-tcp packets\n");
                return -EINVAL;
        }
 
@@ -139,7 +139,7 @@ static int ecn_mt_check6(const struct xt_mtchk_param *par)
 
        if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) &&
            (ip->proto != IPPROTO_TCP || ip->invflags & IP6T_INV_PROTO)) {
-               pr_info("cannot match TCP bits in rule for non-tcp packets\n");
+               pr_info_ratelimited("cannot match TCP bits for non-tcp packets\n");
                return -EINVAL;
        }
 
index ca6847403ca218c478d208080aab3e3c92a0a615..66f5aca62a087ee816b6c18e0c385af535cf4ae7 100644 (file)
@@ -523,7 +523,8 @@ static u64 user2rate(u64 user)
        if (user != 0) {
                return div64_u64(XT_HASHLIMIT_SCALE_v2, user);
        } else {
-               pr_warn("invalid rate from userspace: %llu\n", user);
+               pr_info_ratelimited("invalid rate from userspace: %llu\n",
+                                   user);
                return 0;
        }
 }
@@ -774,7 +775,7 @@ hashlimit_mt_common(const struct sk_buff *skb, struct xt_action_param *par,
                if (!dh->rateinfo.prev_window &&
                    (dh->rateinfo.current_rate <= dh->rateinfo.burst)) {
                        spin_unlock(&dh->lock);
-                       rcu_read_unlock_bh();
+                       local_bh_enable();
                        return !(cfg->mode & XT_HASHLIMIT_INVERT);
                } else {
                        goto overlimit;
@@ -865,33 +866,34 @@ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
        }
 
        if (cfg->mode & ~XT_HASHLIMIT_ALL) {
-               pr_info("Unknown mode mask %X, kernel too old?\n",
-                                               cfg->mode);
+               pr_info_ratelimited("Unknown mode mask %X, kernel too old?\n",
+                                   cfg->mode);
                return -EINVAL;
        }
 
        /* Check for overflow. */
        if (revision >= 3 && cfg->mode & XT_HASHLIMIT_RATE_MATCH) {
                if (cfg->avg == 0 || cfg->avg > U32_MAX) {
-                       pr_info("hashlimit invalid rate\n");
+                       pr_info_ratelimited("invalid rate\n");
                        return -ERANGE;
                }
 
                if (cfg->interval == 0) {
-                       pr_info("hashlimit invalid interval\n");
+                       pr_info_ratelimited("invalid interval\n");
                        return -EINVAL;
                }
        } else if (cfg->mode & XT_HASHLIMIT_BYTES) {
                if (user2credits_byte(cfg->avg) == 0) {
-                       pr_info("overflow, rate too high: %llu\n", cfg->avg);
+                       pr_info_ratelimited("overflow, rate too high: %llu\n",
+                                           cfg->avg);
                        return -EINVAL;
                }
        } else if (cfg->burst == 0 ||
-                   user2credits(cfg->avg * cfg->burst, revision) <
-                   user2credits(cfg->avg, revision)) {
-                       pr_info("overflow, try lower: %llu/%llu\n",
-                               cfg->avg, cfg->burst);
-                       return -ERANGE;
+                  user2credits(cfg->avg * cfg->burst, revision) <
+                  user2credits(cfg->avg, revision)) {
+               pr_info_ratelimited("overflow, try lower: %llu/%llu\n",
+                                   cfg->avg, cfg->burst);
+               return -ERANGE;
        }
 
        mutex_lock(&hashlimit_mutex);
index 38a78151c0e99124b3071374b99897a1eefffdd5..fd077aeaaed951497d6a40b701efc734bf026b27 100644 (file)
@@ -61,8 +61,8 @@ static int helper_mt_check(const struct xt_mtchk_param *par)
 
        ret = nf_ct_netns_get(par->net, par->family);
        if (ret < 0) {
-               pr_info("cannot load conntrack support for proto=%u\n",
-                       par->family);
+               pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
+                                   par->family);
                return ret;
        }
        info->name[sizeof(info->name) - 1] = '\0';
index 7ca64a50db04d0144b8f38f9c5bedb9c30958f6a..57f1df5757011d227fc6914f82b7a8dff7469e11 100644 (file)
@@ -72,7 +72,7 @@ static int comp_mt_check(const struct xt_mtchk_param *par)
 
        /* Must specify no unknown invflags */
        if (compinfo->invflags & ~XT_IPCOMP_INV_MASK) {
-               pr_err("unknown flags %X\n", compinfo->invflags);
+               pr_info_ratelimited("unknown flags %X\n", compinfo->invflags);
                return -EINVAL;
        }
        return 0;
index 42540d26c2b8ec34ab62422fe5894588df2306b6..1d950a6100af16b3d248d63201bd5a1874532272 100644 (file)
@@ -158,7 +158,8 @@ static int ipvs_mt_check(const struct xt_mtchk_param *par)
            && par->family != NFPROTO_IPV6
 #endif
                ) {
-               pr_info("protocol family %u not supported\n", par->family);
+               pr_info_ratelimited("protocol family %u not supported\n",
+                                   par->family);
                return -EINVAL;
        }
 
index 8aee572771f2b698a2f8e12a226be7af71adbd04..c43482bf48e687aba3a0c9d8d7b5c5f74d924485 100644 (file)
@@ -216,7 +216,7 @@ static int l2tp_mt_check(const struct xt_mtchk_param *par)
        /* Check for invalid flags */
        if (info->flags & ~(XT_L2TP_TID | XT_L2TP_SID | XT_L2TP_VERSION |
                            XT_L2TP_TYPE)) {
-               pr_info("unknown flags: %x\n", info->flags);
+               pr_info_ratelimited("unknown flags: %x\n", info->flags);
                return -EINVAL;
        }
 
@@ -225,7 +225,8 @@ static int l2tp_mt_check(const struct xt_mtchk_param *par)
            (!(info->flags & XT_L2TP_SID)) &&
            ((!(info->flags & XT_L2TP_TYPE)) ||
             (info->type != XT_L2TP_TYPE_CONTROL))) {
-               pr_info("invalid flags combination: %x\n", info->flags);
+               pr_info_ratelimited("invalid flags combination: %x\n",
+                                   info->flags);
                return -EINVAL;
        }
 
@@ -234,19 +235,22 @@ static int l2tp_mt_check(const struct xt_mtchk_param *par)
         */
        if (info->flags & XT_L2TP_VERSION) {
                if ((info->version < 2) || (info->version > 3)) {
-                       pr_info("wrong L2TP version: %u\n", info->version);
+                       pr_info_ratelimited("wrong L2TP version: %u\n",
+                                           info->version);
                        return -EINVAL;
                }
 
                if (info->version == 2) {
                        if ((info->flags & XT_L2TP_TID) &&
                            (info->tid > 0xffff)) {
-                               pr_info("v2 tid > 0xffff: %u\n", info->tid);
+                               pr_info_ratelimited("v2 tid > 0xffff: %u\n",
+                                                   info->tid);
                                return -EINVAL;
                        }
                        if ((info->flags & XT_L2TP_SID) &&
                            (info->sid > 0xffff)) {
-                               pr_info("v2 sid > 0xffff: %u\n", info->sid);
+                               pr_info_ratelimited("v2 sid > 0xffff: %u\n",
+                                                   info->sid);
                                return -EINVAL;
                        }
                }
@@ -268,13 +272,13 @@ static int l2tp_mt_check4(const struct xt_mtchk_param *par)
 
        if ((ip->proto != IPPROTO_UDP) &&
            (ip->proto != IPPROTO_L2TP)) {
-               pr_info("missing protocol rule (udp|l2tpip)\n");
+               pr_info_ratelimited("missing protocol rule (udp|l2tpip)\n");
                return -EINVAL;
        }
 
        if ((ip->proto == IPPROTO_L2TP) &&
            (info->version == 2)) {
-               pr_info("v2 doesn't support IP mode\n");
+               pr_info_ratelimited("v2 doesn't support IP mode\n");
                return -EINVAL;
        }
 
@@ -295,13 +299,13 @@ static int l2tp_mt_check6(const struct xt_mtchk_param *par)
 
        if ((ip->proto != IPPROTO_UDP) &&
            (ip->proto != IPPROTO_L2TP)) {
-               pr_info("missing protocol rule (udp|l2tpip)\n");
+               pr_info_ratelimited("missing protocol rule (udp|l2tpip)\n");
                return -EINVAL;
        }
 
        if ((ip->proto == IPPROTO_L2TP) &&
            (info->version == 2)) {
-               pr_info("v2 doesn't support IP mode\n");
+               pr_info_ratelimited("v2 doesn't support IP mode\n");
                return -EINVAL;
        }
 
index 61403b77361cbfdf55db2a6c824a21003fb82b22..55d18cd676356cc98985929c79a9436a091fe226 100644 (file)
@@ -106,8 +106,8 @@ static int limit_mt_check(const struct xt_mtchk_param *par)
        /* Check for overflow. */
        if (r->burst == 0
            || user2credits(r->avg * r->burst) < user2credits(r->avg)) {
-               pr_info("Overflow, try lower: %u/%u\n",
-                       r->avg, r->burst);
+               pr_info_ratelimited("Overflow, try lower: %u/%u\n",
+                                   r->avg, r->burst);
                return -ERANGE;
        }
 
index 0fd14d1eb09d14ba3c1797c33b022e3e0c50c97b..bdb689cdc829df85372dbdaa0acf47c6772e47d8 100644 (file)
@@ -8,6 +8,8 @@
  * published by the Free Software Foundation.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/skbuff.h>
 #include <linux/netfilter.h>
@@ -19,8 +21,7 @@ static int xt_nat_checkentry_v0(const struct xt_tgchk_param *par)
        const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
 
        if (mr->rangesize != 1) {
-               pr_info("%s: multiple ranges no longer supported\n",
-                       par->target->name);
+               pr_info_ratelimited("multiple ranges no longer supported\n");
                return -EINVAL;
        }
        return nf_ct_netns_get(par->net, par->family);
index 6f92d25590a85d8db7bb6ccec30a298f7206448e..c8674deed4eb43c024438308284f53a270e693ab 100644 (file)
@@ -6,6 +6,8 @@
  * it under the terms of the GNU General Public License version 2 (or any
  * later at your option) as published by the Free Software Foundation.
  */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/skbuff.h>
 
@@ -39,8 +41,8 @@ nfacct_mt_checkentry(const struct xt_mtchk_param *par)
 
        nfacct = nfnl_acct_find_get(par->net, info->name);
        if (nfacct == NULL) {
-               pr_info("xt_nfacct: accounting object with name `%s' "
-                       "does not exists\n", info->name);
+               pr_info_ratelimited("accounting object `%s' does not exist\n",
+                                   info->name);
                return -ENOENT;
        }
        info->nfacct = nfacct;
index bb33598e4530de1b468fa66ff292fb2283c6d517..9d6d67b953ac8f6d6b02c9b428e960f14a7dbcda 100644 (file)
@@ -107,9 +107,7 @@ static int physdev_mt_check(const struct xt_mtchk_param *par)
             info->invert & XT_PHYSDEV_OP_BRIDGED) &&
            par->hook_mask & ((1 << NF_INET_LOCAL_OUT) |
            (1 << NF_INET_FORWARD) | (1 << NF_INET_POST_ROUTING))) {
-               pr_info("using --physdev-out and --physdev-is-out are only "
-                       "supported in the FORWARD and POSTROUTING chains with "
-                       "bridged traffic.\n");
+               pr_info_ratelimited("--physdev-out and --physdev-is-out only supported in the FORWARD and POSTROUTING chains with bridged traffic\n");
                if (par->hook_mask & (1 << NF_INET_LOCAL_OUT))
                        return -EINVAL;
        }
index 5639fb03bdd924812f08179ab88dfeaba2d8ffc9..13f8ccf946d622a21a057381839e65beb5e97c20 100644 (file)
@@ -132,26 +132,29 @@ policy_mt(const struct sk_buff *skb, struct xt_action_param *par)
 static int policy_mt_check(const struct xt_mtchk_param *par)
 {
        const struct xt_policy_info *info = par->matchinfo;
+       const char *errmsg = "neither incoming nor outgoing policy selected";
+
+       if (!(info->flags & (XT_POLICY_MATCH_IN|XT_POLICY_MATCH_OUT)))
+               goto err;
 
-       if (!(info->flags & (XT_POLICY_MATCH_IN|XT_POLICY_MATCH_OUT))) {
-               pr_info("neither incoming nor outgoing policy selected\n");
-               return -EINVAL;
-       }
        if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) |
            (1 << NF_INET_LOCAL_IN)) && info->flags & XT_POLICY_MATCH_OUT) {
-               pr_info("output policy not valid in PREROUTING and INPUT\n");
-               return -EINVAL;
+               errmsg = "output policy not valid in PREROUTING and INPUT";
+               goto err;
        }
        if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) |
            (1 << NF_INET_LOCAL_OUT)) && info->flags & XT_POLICY_MATCH_IN) {
-               pr_info("input policy not valid in POSTROUTING and OUTPUT\n");
-               return -EINVAL;
+               errmsg = "input policy not valid in POSTROUTING and OUTPUT";
+               goto err;
        }
        if (info->len > XT_POLICY_MAX_ELEM) {
-               pr_info("too many policy elements\n");
-               return -EINVAL;
+               errmsg = "too many policy elements";
+               goto err;
        }
        return 0;
+err:
+       pr_info_ratelimited("%s\n", errmsg);
+       return -EINVAL;
 }
 
 static struct xt_match policy_mt_reg[] __read_mostly = {
index 245fa350a7a85390e6767c4a0c5862f4213000fe..6d232d18faff72ec8c26bd82dc7685fec52c5c6a 100644 (file)
@@ -342,8 +342,8 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
        net_get_random_once(&hash_rnd, sizeof(hash_rnd));
 
        if (info->check_set & ~XT_RECENT_VALID_FLAGS) {
-               pr_info("Unsupported user space flags (%08x)\n",
-                       info->check_set);
+               pr_info_ratelimited("Unsupported userspace flags (%08x)\n",
+                                   info->check_set);
                return -EINVAL;
        }
        if (hweight8(info->check_set &
@@ -357,8 +357,8 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
        if ((info->check_set & XT_RECENT_REAP) && !info->seconds)
                return -EINVAL;
        if (info->hit_count >= XT_RECENT_MAX_NSTAMPS) {
-               pr_info("hitcount (%u) is larger than allowed maximum (%u)\n",
-                       info->hit_count, XT_RECENT_MAX_NSTAMPS - 1);
+               pr_info_ratelimited("hitcount (%u) is larger than allowed maximum (%u)\n",
+                                   info->hit_count, XT_RECENT_MAX_NSTAMPS - 1);
                return -EINVAL;
        }
        if (info->name[0] == '\0' ||
@@ -587,7 +587,7 @@ recent_mt_proc_write(struct file *file, const char __user *input,
                add = true;
                break;
        default:
-               pr_info("Need \"+ip\", \"-ip\" or \"/\"\n");
+               pr_info_ratelimited("Need \"+ip\", \"-ip\" or \"/\"\n");
                return -EINVAL;
        }
 
@@ -601,10 +601,8 @@ recent_mt_proc_write(struct file *file, const char __user *input,
                succ   = in4_pton(c, size, (void *)&addr, '\n', NULL);
        }
 
-       if (!succ) {
-               pr_info("illegal address written to procfs\n");
+       if (!succ)
                return -EINVAL;
-       }
 
        spin_lock_bh(&recent_lock);
        e = recent_entry_lookup(t, &addr, family, 0);
index 16b6b11ee83f04aab79d9ff32742ee326711f480..6f4c5217d8358cadb1537b0f4c3a3b4c1ab43175 100644 (file)
@@ -92,12 +92,12 @@ set_match_v0_checkentry(const struct xt_mtchk_param *par)
        index = ip_set_nfnl_get_byindex(par->net, info->match_set.index);
 
        if (index == IPSET_INVALID_ID) {
-               pr_warn("Cannot find set identified by id %u to match\n",
-                       info->match_set.index);
+               pr_info_ratelimited("Cannot find set identified by id %u to match\n",
+                                   info->match_set.index);
                return -ENOENT;
        }
        if (info->match_set.u.flags[IPSET_DIM_MAX - 1] != 0) {
-               pr_warn("Protocol error: set match dimension is over the limit!\n");
+               pr_info_ratelimited("set match dimension is over the limit!\n");
                ip_set_nfnl_put(par->net, info->match_set.index);
                return -ERANGE;
        }
@@ -143,12 +143,12 @@ set_match_v1_checkentry(const struct xt_mtchk_param *par)
        index = ip_set_nfnl_get_byindex(par->net, info->match_set.index);
 
        if (index == IPSET_INVALID_ID) {
-               pr_warn("Cannot find set identified by id %u to match\n",
-                       info->match_set.index);
+               pr_info_ratelimited("Cannot find set identified by id %u to match\n",
+                                   info->match_set.index);
                return -ENOENT;
        }
        if (info->match_set.dim > IPSET_DIM_MAX) {
-               pr_warn("Protocol error: set match dimension is over the limit!\n");
+               pr_info_ratelimited("set match dimension is over the limit!\n");
                ip_set_nfnl_put(par->net, info->match_set.index);
                return -ERANGE;
        }
@@ -241,8 +241,8 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
        if (info->add_set.index != IPSET_INVALID_ID) {
                index = ip_set_nfnl_get_byindex(par->net, info->add_set.index);
                if (index == IPSET_INVALID_ID) {
-                       pr_warn("Cannot find add_set index %u as target\n",
-                               info->add_set.index);
+                       pr_info_ratelimited("Cannot find add_set index %u as target\n",
+                                           info->add_set.index);
                        return -ENOENT;
                }
        }
@@ -250,8 +250,8 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
        if (info->del_set.index != IPSET_INVALID_ID) {
                index = ip_set_nfnl_get_byindex(par->net, info->del_set.index);
                if (index == IPSET_INVALID_ID) {
-                       pr_warn("Cannot find del_set index %u as target\n",
-                               info->del_set.index);
+                       pr_info_ratelimited("Cannot find del_set index %u as target\n",
+                                           info->del_set.index);
                        if (info->add_set.index != IPSET_INVALID_ID)
                                ip_set_nfnl_put(par->net, info->add_set.index);
                        return -ENOENT;
@@ -259,7 +259,7 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
        }
        if (info->add_set.u.flags[IPSET_DIM_MAX - 1] != 0 ||
            info->del_set.u.flags[IPSET_DIM_MAX - 1] != 0) {
-               pr_warn("Protocol error: SET target dimension is over the limit!\n");
+               pr_info_ratelimited("SET target dimension over the limit!\n");
                if (info->add_set.index != IPSET_INVALID_ID)
                        ip_set_nfnl_put(par->net, info->add_set.index);
                if (info->del_set.index != IPSET_INVALID_ID)
@@ -316,8 +316,8 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par)
        if (info->add_set.index != IPSET_INVALID_ID) {
                index = ip_set_nfnl_get_byindex(par->net, info->add_set.index);
                if (index == IPSET_INVALID_ID) {
-                       pr_warn("Cannot find add_set index %u as target\n",
-                               info->add_set.index);
+                       pr_info_ratelimited("Cannot find add_set index %u as target\n",
+                                           info->add_set.index);
                        return -ENOENT;
                }
        }
@@ -325,8 +325,8 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par)
        if (info->del_set.index != IPSET_INVALID_ID) {
                index = ip_set_nfnl_get_byindex(par->net, info->del_set.index);
                if (index == IPSET_INVALID_ID) {
-                       pr_warn("Cannot find del_set index %u as target\n",
-                               info->del_set.index);
+                       pr_info_ratelimited("Cannot find del_set index %u as target\n",
+                                           info->del_set.index);
                        if (info->add_set.index != IPSET_INVALID_ID)
                                ip_set_nfnl_put(par->net, info->add_set.index);
                        return -ENOENT;
@@ -334,7 +334,7 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par)
        }
        if (info->add_set.dim > IPSET_DIM_MAX ||
            info->del_set.dim > IPSET_DIM_MAX) {
-               pr_warn("Protocol error: SET target dimension is over the limit!\n");
+               pr_info_ratelimited("SET target dimension over the limit!\n");
                if (info->add_set.index != IPSET_INVALID_ID)
                        ip_set_nfnl_put(par->net, info->add_set.index);
                if (info->del_set.index != IPSET_INVALID_ID)
@@ -444,8 +444,8 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
                index = ip_set_nfnl_get_byindex(par->net,
                                                info->add_set.index);
                if (index == IPSET_INVALID_ID) {
-                       pr_warn("Cannot find add_set index %u as target\n",
-                               info->add_set.index);
+                       pr_info_ratelimited("Cannot find add_set index %u as target\n",
+                                           info->add_set.index);
                        return -ENOENT;
                }
        }
@@ -454,8 +454,8 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
                index = ip_set_nfnl_get_byindex(par->net,
                                                info->del_set.index);
                if (index == IPSET_INVALID_ID) {
-                       pr_warn("Cannot find del_set index %u as target\n",
-                               info->del_set.index);
+                       pr_info_ratelimited("Cannot find del_set index %u as target\n",
+                                           info->del_set.index);
                        if (info->add_set.index != IPSET_INVALID_ID)
                                ip_set_nfnl_put(par->net,
                                                info->add_set.index);
@@ -465,7 +465,7 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
 
        if (info->map_set.index != IPSET_INVALID_ID) {
                if (strncmp(par->table, "mangle", 7)) {
-                       pr_warn("--map-set only usable from mangle table\n");
+                       pr_info_ratelimited("--map-set only usable from mangle table\n");
                        return -EINVAL;
                }
                if (((info->flags & IPSET_FLAG_MAP_SKBPRIO) |
@@ -473,14 +473,14 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
                     !(par->hook_mask & (1 << NF_INET_FORWARD |
                                         1 << NF_INET_LOCAL_OUT |
                                         1 << NF_INET_POST_ROUTING))) {
-                       pr_warn("mapping of prio or/and queue is allowed only from OUTPUT/FORWARD/POSTROUTING chains\n");
+                       pr_info_ratelimited("mapping of prio or/and queue is allowed only from OUTPUT/FORWARD/POSTROUTING chains\n");
                        return -EINVAL;
                }
                index = ip_set_nfnl_get_byindex(par->net,
                                                info->map_set.index);
                if (index == IPSET_INVALID_ID) {
-                       pr_warn("Cannot find map_set index %u as target\n",
-                               info->map_set.index);
+                       pr_info_ratelimited("Cannot find map_set index %u as target\n",
+                                           info->map_set.index);
                        if (info->add_set.index != IPSET_INVALID_ID)
                                ip_set_nfnl_put(par->net,
                                                info->add_set.index);
@@ -494,7 +494,7 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
        if (info->add_set.dim > IPSET_DIM_MAX ||
            info->del_set.dim > IPSET_DIM_MAX ||
            info->map_set.dim > IPSET_DIM_MAX) {
-               pr_warn("Protocol error: SET target dimension is over the limit!\n");
+               pr_info_ratelimited("SET target dimension over the limit!\n");
                if (info->add_set.index != IPSET_INVALID_ID)
                        ip_set_nfnl_put(par->net, info->add_set.index);
                if (info->del_set.index != IPSET_INVALID_ID)
index 575d2153e3b819f32e9a262abddca95a108eee02..2ac7f674d19b16a2392b5c035fa34f58b9c7b910 100644 (file)
@@ -171,7 +171,8 @@ static int socket_mt_v1_check(const struct xt_mtchk_param *par)
                return err;
 
        if (info->flags & ~XT_SOCKET_FLAGS_V1) {
-               pr_info("unknown flags 0x%x\n", info->flags & ~XT_SOCKET_FLAGS_V1);
+               pr_info_ratelimited("unknown flags 0x%x\n",
+                                   info->flags & ~XT_SOCKET_FLAGS_V1);
                return -EINVAL;
        }
        return 0;
@@ -187,7 +188,8 @@ static int socket_mt_v2_check(const struct xt_mtchk_param *par)
                return err;
 
        if (info->flags & ~XT_SOCKET_FLAGS_V2) {
-               pr_info("unknown flags 0x%x\n", info->flags & ~XT_SOCKET_FLAGS_V2);
+               pr_info_ratelimited("unknown flags 0x%x\n",
+                                   info->flags & ~XT_SOCKET_FLAGS_V2);
                return -EINVAL;
        }
        return 0;
@@ -203,8 +205,8 @@ static int socket_mt_v3_check(const struct xt_mtchk_param *par)
        if (err)
                return err;
        if (info->flags & ~XT_SOCKET_FLAGS_V3) {
-               pr_info("unknown flags 0x%x\n",
-                       info->flags & ~XT_SOCKET_FLAGS_V3);
+               pr_info_ratelimited("unknown flags 0x%x\n",
+                                   info->flags & ~XT_SOCKET_FLAGS_V3);
                return -EINVAL;
        }
        return 0;
index 5fbd79194d21ee1f26fa669162b8de92d965f8c8..0b41c0befe3cf1d499de79202d5a50aaf5bf6905 100644 (file)
@@ -44,8 +44,8 @@ static int state_mt_check(const struct xt_mtchk_param *par)
 
        ret = nf_ct_netns_get(par->net, par->family);
        if (ret < 0)
-               pr_info("cannot load conntrack support for proto=%u\n",
-                       par->family);
+               pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
+                                   par->family);
        return ret;
 }
 
index 1b01eec1fbda5368e31a8b3917286d8b05a4604c..0160f505e337c7370d8f6e4ff6d46ab6d6048bce 100644 (file)
@@ -235,13 +235,13 @@ static int time_mt_check(const struct xt_mtchk_param *par)
 
        if (info->daytime_start > XT_TIME_MAX_DAYTIME ||
            info->daytime_stop > XT_TIME_MAX_DAYTIME) {
-               pr_info("invalid argument - start or "
-                       "stop time greater than 23:59:59\n");
+               pr_info_ratelimited("invalid argument - start or stop time greater than 23:59:59\n");
                return -EDOM;
        }
 
        if (info->flags & ~XT_TIME_ALL_FLAGS) {
-               pr_info("unknown flags 0x%x\n", info->flags & ~XT_TIME_ALL_FLAGS);
+               pr_info_ratelimited("unknown flags 0x%x\n",
+                                   info->flags & ~XT_TIME_ALL_FLAGS);
                return -EINVAL;
        }
 
index 2ad445c1d27ccda471132e035229188b77a579eb..07e8478068f0a1fafc0b41bb939ed508d40e3f21 100644 (file)
@@ -2308,7 +2308,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
        if (cb->start) {
                ret = cb->start(cb);
                if (ret)
-                       goto error_unlock;
+                       goto error_put;
        }
 
        nlk->cb_running = true;
@@ -2328,6 +2328,8 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
         */
        return -EINTR;
 
+error_put:
+       module_put(control->module);
 error_unlock:
        sock_put(sk);
        mutex_unlock(nlk->cb_mutex);
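The netlink fix above adds an error_put label so that, when cb->start() fails, the module reference taken earlier in __netlink_dump_start() is dropped before the existing error_unlock cleanup runs. It is the usual cascading-label unwind; a stand-alone sketch with invented helpers (none of this is the netlink API):

#include <stdio.h>

/* invented resource helpers standing in for the module and socket references */
static int  grab_a(void) { puts("grab a"); return 0; }
static void drop_a(void) { puts("drop a"); }
static int  grab_b(void) { puts("grab b"); return 0; }
static void drop_b(void) { puts("drop b"); }
static int  start(void)  { return -1; }         /* pretend cb->start() failed */

static int dump_start(void)
{
        int ret;

        if (grab_a())
                return -1;
        if (grab_b()) {
                ret = -1;
                goto err_a;
        }
        ret = start();
        if (ret)
                goto err_b;             /* unwind in reverse order of acquisition */
        return 0;

err_b:
        drop_b();
err_a:
        drop_a();
        return ret;
}

int main(void)
{
        return dump_start() ? 0 : 1;    /* expect both drops on the failure path */
}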
index 367d8c02710181f06bf1a422e41239ed12e77d90..2ceefa183ceed6ba3d06f2aae958104a514f2146 100644 (file)
@@ -149,6 +149,10 @@ struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdreq_tlv(u8 tid, char *uri,
 
        pr_debug("uri: %s, len: %zu\n", uri, uri_len);
 
+       /* sdreq->tlv_len is u8, takes uri_len, + 3 for header, + 1 for NULL */
+       if (WARN_ON_ONCE(uri_len > U8_MAX - 4))
+               return NULL;
+
        sdreq = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL);
        if (sdreq == NULL)
                return NULL;
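The WARN_ON_ONCE above enforces the arithmetic spelled out in the comment: sdreq->tlv_len is a u8 and has to hold uri_len plus a 3-byte header plus a terminating NUL, so any URI longer than U8_MAX - 4 (251 bytes) cannot be encoded. The nfc netlink policy change in the next hunk caps userspace-supplied URIs to the same bound. A tiny, purely illustrative check of the numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned int max_uri = UINT8_MAX - 4;   /* 251 */

        /* largest encodable TLV: uri + 3-byte header + 1-byte NUL terminator */
        printf("max uri_len = %u, total = %u, fits in a u8: %s\n",
               max_uri, max_uri + 3 + 1,
               max_uri + 3 + 1 <= UINT8_MAX ? "yes" : "no");
        return 0;
}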
index c0b83dc9d99305850a61b647fe362b6ed52c50aa..f018eafc2a0de57f53a4632c8b924734f25eb8d3 100644 (file)
@@ -61,7 +61,8 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
 };
 
 static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = {
-       [NFC_SDP_ATTR_URI] = { .type = NLA_STRING },
+       [NFC_SDP_ATTR_URI] = { .type = NLA_STRING,
+                              .len = U8_MAX - 4 },
        [NFC_SDP_ATTR_SAP] = { .type = NLA_U8 },
 };
 
index 94e190febfddd0670c0a16b35501acda16df548d..2da3176bf7924d9132647d28a3c3263716ead608 100644 (file)
@@ -224,7 +224,7 @@ static struct rds_connection *__rds_conn_create(struct net *net,
        if (rds_destroy_pending(conn))
                ret = -ENETDOWN;
        else
-               ret = trans->conn_alloc(conn, gfp);
+               ret = trans->conn_alloc(conn, GFP_ATOMIC);
        if (ret) {
                rcu_read_unlock();
                kfree(conn->c_path);
index 42410e910affbdf39691a0cb080c053b1e8aa661..cf73dc006c3bf0d3866461c63a5a7c51c161c90b 100644 (file)
@@ -445,7 +445,7 @@ send_fragmentable:
                                        (char *)&opt, sizeof(opt));
                if (ret == 0) {
                        ret = kernel_sendmsg(conn->params.local->socket, &msg,
-                                            iov, 1, iov[0].iov_len);
+                                            iov, 2, len);
 
                        opt = IPV6_PMTUDISC_DO;
                        kernel_setsockopt(conn->params.local->socket,
index cc21e8db25b0b730bbe66dbee6d358177df9ae09..9d45d8b567447c7eb8e35a997d953ed63197d715 100644 (file)
@@ -517,9 +517,10 @@ try_again:
                        ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
                                       sizeof(unsigned int), &id32);
                } else {
+                       unsigned long idl = call->user_call_ID;
+
                        ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
-                                      sizeof(unsigned long),
-                                      &call->user_call_ID);
+                                      sizeof(unsigned long), &idl);
                }
                if (ret < 0)
                        goto error_unlock_call;
index 2bc1bc23d42ecd85c1387d6408944d8a7a69c7d4..247b7cc20c131b3bc9f97aa5588139d57837ffc7 100644 (file)
@@ -376,17 +376,12 @@ struct tcf_net {
 static unsigned int tcf_net_id;
 
 static int tcf_block_insert(struct tcf_block *block, struct net *net,
-                           u32 block_index, struct netlink_ext_ack *extack)
+                           struct netlink_ext_ack *extack)
 {
        struct tcf_net *tn = net_generic(net, tcf_net_id);
-       int err;
 
-       err = idr_alloc_u32(&tn->idr, block, &block_index, block_index,
-                           GFP_KERNEL);
-       if (err)
-               return err;
-       block->index = block_index;
-       return 0;
+       return idr_alloc_u32(&tn->idr, block, &block->index, block->index,
+                            GFP_KERNEL);
 }
 
 static void tcf_block_remove(struct tcf_block *block, struct net *net)
@@ -397,6 +392,7 @@ static void tcf_block_remove(struct tcf_block *block, struct net *net)
 }
 
 static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
+                                         u32 block_index,
                                          struct netlink_ext_ack *extack)
 {
        struct tcf_block *block;
@@ -419,10 +415,13 @@ static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
                err = -ENOMEM;
                goto err_chain_create;
        }
-       block->net = qdisc_net(q);
        block->refcnt = 1;
        block->net = net;
-       block->q = q;
+       block->index = block_index;
+
+       /* Don't store q pointer for blocks which are shared */
+       if (!tcf_block_shared(block))
+               block->q = q;
        return block;
 
 err_chain_create:
@@ -518,13 +517,12 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
        }
 
        if (!block) {
-               block = tcf_block_create(net, q, extack);
+               block = tcf_block_create(net, q, ei->block_index, extack);
                if (IS_ERR(block))
                        return PTR_ERR(block);
                created = true;
-               if (ei->block_index) {
-                       err = tcf_block_insert(block, net,
-                                              ei->block_index, extack);
+               if (tcf_block_shared(block)) {
+                       err = tcf_block_insert(block, net, extack);
                        if (err)
                                goto err_block_insert;
                }
@@ -1399,13 +1397,18 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
                    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
                        continue;
                if (!tcf_chain_dump(chain, q, parent, skb, cb,
-                                   index_start, &index))
+                                   index_start, &index)) {
+                       err = -EMSGSIZE;
                        break;
+               }
        }
 
        cb->args[0] = index;
 
 out:
+       /* If we made no progress, the error (EMSGSIZE) is real */
+       if (skb->len == 0 && err)
+               return err;
        return skb->len;
 }
 
index 6c7601a530e35489d38de939264a854209399676..ed8b6a24b9e9325cc99f17e6ed00ead73fe0171e 100644 (file)
@@ -96,7 +96,7 @@ struct tc_u_hnode {
 
 struct tc_u_common {
        struct tc_u_hnode __rcu *hlist;
-       struct tcf_block        *block;
+       void                    *ptr;
        int                     refcnt;
        struct idr              handle_idr;
        struct hlist_node       hnode;
@@ -330,9 +330,25 @@ static struct hlist_head *tc_u_common_hash;
 #define U32_HASH_SHIFT 10
 #define U32_HASH_SIZE (1 << U32_HASH_SHIFT)
 
+static void *tc_u_common_ptr(const struct tcf_proto *tp)
+{
+       struct tcf_block *block = tp->chain->block;
+
+       /* The block sharing is currently supported only
+        * for classless qdiscs. In that case we use block
+        * for tc_u_common identification. In case the
+        * block is not shared, block->q is a valid pointer
+        * and we can use that. That works for classful qdiscs.
+        */
+       if (tcf_block_shared(block))
+               return block;
+       else
+               return block->q;
+}
+
 static unsigned int tc_u_hash(const struct tcf_proto *tp)
 {
-       return hash_ptr(tp->chain->block, U32_HASH_SHIFT);
+       return hash_ptr(tc_u_common_ptr(tp), U32_HASH_SHIFT);
 }
 
 static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp)
@@ -342,7 +358,7 @@ static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp)
 
        h = tc_u_hash(tp);
        hlist_for_each_entry(tc, &tc_u_common_hash[h], hnode) {
-               if (tc->block == tp->chain->block)
+               if (tc->ptr == tc_u_common_ptr(tp))
                        return tc;
        }
        return NULL;
@@ -371,7 +387,7 @@ static int u32_init(struct tcf_proto *tp)
                        kfree(root_ht);
                        return -ENOBUFS;
                }
-               tp_c->block = tp->chain->block;
+               tp_c->ptr = tc_u_common_ptr(tp);
                INIT_HLIST_NODE(&tp_c->hnode);
                idr_init(&tp_c->handle_idr);
 
index 291c97b07058218635fcfcd06214aa79d74ec80d..8f6c2e8c0953647868e9223801c5060ce9895760 100644 (file)
@@ -81,6 +81,12 @@ const char *sctp_cname(const union sctp_subtype cid)
        case SCTP_CID_RECONF:
                return "RECONF";
 
+       case SCTP_CID_I_DATA:
+               return "I_DATA";
+
+       case SCTP_CID_I_FWD_TSN:
+               return "I_FWD_TSN";
+
        default:
                break;
        }
index 141c9c466ec172ff67930cf38eb02a49e01a12ab..0247cc432e0293f2881a594367e79c666e9b9f4d 100644 (file)
@@ -897,15 +897,12 @@ int sctp_hash_transport(struct sctp_transport *t)
        rhl_for_each_entry_rcu(transport, tmp, list, node)
                if (transport->asoc->ep == t->asoc->ep) {
                        rcu_read_unlock();
-                       err = -EEXIST;
-                       goto out;
+                       return -EEXIST;
                }
        rcu_read_unlock();
 
        err = rhltable_insert_key(&sctp_transport_hashtable, &arg,
                                  &t->node, sctp_hash_params);
-
-out:
        if (err)
                pr_err_once("insert transport fail, errno %d\n", err);
 
index cedf672487f9d47d76625c3cd32ab6c04676a6e8..f799043abec9a48a26ba152cfa3aaa63b7df47ea 100644 (file)
@@ -6,7 +6,7 @@
  *
  * This file is part of the SCTP kernel implementation
  *
- * These functions manipulate sctp tsn mapping array.
+ * This file contains sctp stream manipulation primitives and helpers.
  *
  * This SCTP implementation is free software;
  * you can redistribute it and/or modify it under the terms of
index 8c7cf8f08711f93a460354faa220d3d564dcbe75..d3764c18129971c22ce37d60285ec06ef5fe60a1 100644 (file)
@@ -3,7 +3,8 @@
  *
  * This file is part of the SCTP kernel implementation
  *
- * These functions manipulate sctp stream queue/scheduling.
+ * These functions implement sctp stream message interleaving, mainly
+ * the processing of I-DATA and I-FORWARD-TSN chunks.
  *
  * This SCTP implementation is free software;
  * you can redistribute it and/or modify it under the terms of
@@ -954,12 +955,8 @@ static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
        __u32 freed = 0;
        __u16 needed;
 
-       if (chunk) {
-               needed = ntohs(chunk->chunk_hdr->length);
-               needed -= sizeof(struct sctp_idata_chunk);
-       } else {
-               needed = SCTP_DEFAULT_MAXWINDOW;
-       }
+       needed = ntohs(chunk->chunk_hdr->length) -
+                sizeof(struct sctp_idata_chunk);
 
        if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
                freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
@@ -971,9 +968,8 @@ static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                                                       needed);
        }
 
-       if (chunk && freed >= needed)
-               if (sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
-                       sctp_intl_start_pd(ulpq, gfp);
+       if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
+               sctp_intl_start_pd(ulpq, gfp);
 
        sk_mem_reclaim(asoc->base.sk);
 }
index c8001471da6c3c53be6c63dde1311302b093f415..3e3dce3d4c63dc1856571583182e9538ae7c9de3 100644 (file)
@@ -813,7 +813,7 @@ err_out:
        return err;
 }
 
-int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
+int __tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
 {
        int err;
        char *name;
@@ -835,20 +835,27 @@ int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
 
        name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
 
-       rtnl_lock();
        bearer = tipc_bearer_find(net, name);
-       if (!bearer) {
-               rtnl_unlock();
+       if (!bearer)
                return -EINVAL;
-       }
 
        bearer_disable(net, bearer);
-       rtnl_unlock();
 
        return 0;
 }
 
-int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
+int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
+{
+       int err;
+
+       rtnl_lock();
+       err = __tipc_nl_bearer_disable(skb, info);
+       rtnl_unlock();
+
+       return err;
+}
+
+int __tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
 {
        int err;
        char *bearer;
@@ -890,15 +897,18 @@ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
                        prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
        }
 
+       return tipc_enable_bearer(net, bearer, domain, prio, attrs);
+}
+
+int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
+{
+       int err;
+
        rtnl_lock();
-       err = tipc_enable_bearer(net, bearer, domain, prio, attrs);
-       if (err) {
-               rtnl_unlock();
-               return err;
-       }
+       err = __tipc_nl_bearer_enable(skb, info);
        rtnl_unlock();
 
-       return 0;
+       return err;
 }
 
 int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
@@ -944,7 +954,7 @@ int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
        return 0;
 }
 
-int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
+int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
 {
        int err;
        char *name;
@@ -965,22 +975,17 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
                return -EINVAL;
        name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
 
-       rtnl_lock();
        b = tipc_bearer_find(net, name);
-       if (!b) {
-               rtnl_unlock();
+       if (!b)
                return -EINVAL;
-       }
 
        if (attrs[TIPC_NLA_BEARER_PROP]) {
                struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
 
                err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_BEARER_PROP],
                                              props);
-               if (err) {
-                       rtnl_unlock();
+               if (err)
                        return err;
-               }
 
                if (props[TIPC_NLA_PROP_TOL])
                        b->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
@@ -989,11 +994,21 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
                if (props[TIPC_NLA_PROP_WIN])
                        b->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
        }
-       rtnl_unlock();
 
        return 0;
 }
 
+int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
+{
+       int err;
+
+       rtnl_lock();
+       err = __tipc_nl_bearer_set(skb, info);
+       rtnl_unlock();
+
+       return err;
+}
+
 static int __tipc_nl_add_media(struct tipc_nl_msg *msg,
                               struct tipc_media *media, int nlflags)
 {
@@ -1115,7 +1130,7 @@ err_out:
        return err;
 }
 
-int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
+int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
 {
        int err;
        char *name;
@@ -1133,22 +1148,17 @@ int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
                return -EINVAL;
        name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]);
 
-       rtnl_lock();
        m = tipc_media_find(name);
-       if (!m) {
-               rtnl_unlock();
+       if (!m)
                return -EINVAL;
-       }
 
        if (attrs[TIPC_NLA_MEDIA_PROP]) {
                struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
 
                err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_MEDIA_PROP],
                                              props);
-               if (err) {
-                       rtnl_unlock();
+               if (err)
                        return err;
-               }
 
                if (props[TIPC_NLA_PROP_TOL])
                        m->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
@@ -1157,7 +1167,17 @@ int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
                if (props[TIPC_NLA_PROP_WIN])
                        m->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
        }
-       rtnl_unlock();
 
        return 0;
 }
+
+int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
+{
+       int err;
+
+       rtnl_lock();
+       err = __tipc_nl_media_set(skb, info);
+       rtnl_unlock();
+
+       return err;
+}
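The bearer changes above all follow one refactoring pattern: the original netlink doit handler body moves into a lock-free __tipc_nl_*() variant, and the exported entry point becomes a thin wrapper that only takes and releases rtnl_lock around it, so the compat path later in this series can invoke the __ variants while it already holds the lock. A generic, hypothetical sketch of the pattern (do_thing()/__do_thing() are invented names, not from the patch):

/* Hedged sketch of the wrapper pattern used above. */
static int __do_thing(struct sk_buff *skb, struct genl_info *info)
{
	/* real work; caller must hold rtnl_lock */
	return 0;
}

static int do_thing(struct sk_buff *skb, struct genl_info *info)
{
	int err;

	rtnl_lock();
	err = __do_thing(skb, info);
	rtnl_unlock();

	return err;
}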
index 42d6eeeb646ddca457aec269de1650b1269cb411..a53613d95bc9fee54bb8762d10b97ce3118d5d29 100644 (file)
@@ -188,15 +188,19 @@ extern struct tipc_media udp_media_info;
 #endif
 
 int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info);
+int __tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info);
+int __tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb);
 int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info);
+int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info);
 
 int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb);
 int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info);
+int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info);
 
 int tipc_media_set_priority(const char *name, u32 new_value);
 int tipc_media_set_window(const char *name, u32 new_value);
index 719c5924b6383e6bf548baf57fdb713283012d87..1a2fde0d6f61398f5552750d782eded94668f56f 100644 (file)
@@ -200,7 +200,7 @@ out:
        return skb->len;
 }
 
-int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
+int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
 {
        struct net *net = sock_net(skb->sk);
        struct tipc_net *tn = net_generic(net, tipc_net_id);
@@ -241,10 +241,19 @@ int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
                if (!tipc_addr_node_valid(addr))
                        return -EINVAL;
 
-               rtnl_lock();
                tipc_net_start(net, addr);
-               rtnl_unlock();
        }
 
        return 0;
 }
+
+int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
+{
+       int err;
+
+       rtnl_lock();
+       err = __tipc_nl_net_set(skb, info);
+       rtnl_unlock();
+
+       return err;
+}
index c7c254902873976797c11fca9f5bcb41a4839e10..c0306aa2374b7c1c845bc6b8182740ae26eea9e4 100644 (file)
@@ -47,5 +47,6 @@ void tipc_net_stop(struct net *net);
 
 int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb);
 int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
+int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
 
 #endif
index e48f0b2c01b962dfa2b3516f9ec2f0c3b461db95..4492cda45566503627ad20a648524fdd59e3cbde 100644 (file)
@@ -285,10 +285,6 @@ static int __tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
        if (!trans_buf)
                return -ENOMEM;
 
-       err = (*cmd->transcode)(cmd, trans_buf, msg);
-       if (err)
-               goto trans_out;
-
        attrbuf = kmalloc((tipc_genl_family.maxattr + 1) *
                        sizeof(struct nlattr *), GFP_KERNEL);
        if (!attrbuf) {
@@ -296,27 +292,34 @@ static int __tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
                goto trans_out;
        }
 
-       err = nla_parse(attrbuf, tipc_genl_family.maxattr,
-                       (const struct nlattr *)trans_buf->data,
-                       trans_buf->len, NULL, NULL);
-       if (err)
-               goto parse_out;
-
        doit_buf = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!doit_buf) {
                err = -ENOMEM;
-               goto parse_out;
+               goto attrbuf_out;
        }
 
-       doit_buf->sk = msg->dst_sk;
-
        memset(&info, 0, sizeof(info));
        info.attrs = attrbuf;
 
+       rtnl_lock();
+       err = (*cmd->transcode)(cmd, trans_buf, msg);
+       if (err)
+               goto doit_out;
+
+       err = nla_parse(attrbuf, tipc_genl_family.maxattr,
+                       (const struct nlattr *)trans_buf->data,
+                       trans_buf->len, NULL, NULL);
+       if (err)
+               goto doit_out;
+
+       doit_buf->sk = msg->dst_sk;
+
        err = (*cmd->doit)(doit_buf, &info);
+doit_out:
+       rtnl_unlock();
 
        kfree_skb(doit_buf);
-parse_out:
+attrbuf_out:
        kfree(attrbuf);
 trans_out:
        kfree_skb(trans_buf);
@@ -722,13 +725,13 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
 
        media = tipc_media_find(lc->name);
        if (media) {
-               cmd->doit = &tipc_nl_media_set;
+               cmd->doit = &__tipc_nl_media_set;
                return tipc_nl_compat_media_set(skb, msg);
        }
 
        bearer = tipc_bearer_find(msg->net, lc->name);
        if (bearer) {
-               cmd->doit = &tipc_nl_bearer_set;
+               cmd->doit = &__tipc_nl_bearer_set;
                return tipc_nl_compat_bearer_set(skb, msg);
        }
 
@@ -1089,12 +1092,12 @@ static int tipc_nl_compat_handle(struct tipc_nl_compat_msg *msg)
                return tipc_nl_compat_dumpit(&dump, msg);
        case TIPC_CMD_ENABLE_BEARER:
                msg->req_type = TIPC_TLV_BEARER_CONFIG;
-               doit.doit = tipc_nl_bearer_enable;
+               doit.doit = __tipc_nl_bearer_enable;
                doit.transcode = tipc_nl_compat_bearer_enable;
                return tipc_nl_compat_doit(&doit, msg);
        case TIPC_CMD_DISABLE_BEARER:
                msg->req_type = TIPC_TLV_BEARER_NAME;
-               doit.doit = tipc_nl_bearer_disable;
+               doit.doit = __tipc_nl_bearer_disable;
                doit.transcode = tipc_nl_compat_bearer_disable;
                return tipc_nl_compat_doit(&doit, msg);
        case TIPC_CMD_SHOW_LINK_STATS:
@@ -1148,12 +1151,12 @@ static int tipc_nl_compat_handle(struct tipc_nl_compat_msg *msg)
                return tipc_nl_compat_dumpit(&dump, msg);
        case TIPC_CMD_SET_NODE_ADDR:
                msg->req_type = TIPC_TLV_NET_ADDR;
-               doit.doit = tipc_nl_net_set;
+               doit.doit = __tipc_nl_net_set;
                doit.transcode = tipc_nl_compat_net_set;
                return tipc_nl_compat_doit(&doit, msg);
        case TIPC_CMD_SET_NETID:
                msg->req_type = TIPC_TLV_UNSIGNED;
-               doit.doit = tipc_nl_net_set;
+               doit.doit = __tipc_nl_net_set;
                doit.transcode = tipc_nl_compat_net_set;
                return tipc_nl_compat_doit(&doit, msg);
        case TIPC_CMD_GET_NETID:
index b0d5fcea47e73488b355b479022f77cb6b97f1c8..e9b4b53ab53e08b2b8ddaf7e57da9d0a9862a4b1 100644 (file)
@@ -308,8 +308,11 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
                        goto out;
                }
                lock_sock(sk);
-               memcpy(crypto_info_aes_gcm_128->iv, ctx->iv,
+               memcpy(crypto_info_aes_gcm_128->iv,
+                      ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
                       TLS_CIPHER_AES_GCM_128_IV_SIZE);
+               memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->rec_seq,
+                      TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
                release_sock(sk);
                if (copy_to_user(optval,
                                 crypto_info_aes_gcm_128,
@@ -375,7 +378,7 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval,
        rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
        if (rc) {
                rc = -EFAULT;
-               goto out;
+               goto err_crypto_info;
        }
 
        /* check version */
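The getsockopt change above depends on the layout of ctx->iv for AES-GCM-128 in kernel TLS: the buffer holds the 4-byte salt followed by the 8-byte per-record IV, so only the part after TLS_CIPHER_AES_GCM_128_SALT_SIZE is what user space expects to read back, and rec_seq must be copied as well. A hedged sketch of that layout and copy, with info standing in for the user-visible crypto_info struct (offsets assumed from the fix itself):

/* Hedged sketch: assumed layout of ctx->iv for AES-GCM-128.
 *
 *   ctx->iv: [ salt (4 bytes) ][ iv (8 bytes) ]
 *
 * Only the iv part is returned to user space; the record sequence number
 * lives separately in ctx->rec_seq.
 */
memcpy(info->iv, ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
       TLS_CIPHER_AES_GCM_128_IV_SIZE);
memcpy(info->rec_seq, ctx->rec_seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);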
index d545e1d0dea22cf15426c73224a8ce38855f202b..2d465bdeccbc7c60288c56c8be24f87fc8facf05 100644 (file)
@@ -1825,7 +1825,7 @@ out:
 }
 
 /* We use paged skbs for stream sockets, and limit occupancy to 32768
- * bytes, and a minimun of a full page.
+ * bytes, and a minimum of a full page.
  */
 #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
 
index 51aa55618ef755f97a4a61c5a24d2eb016cfba42..b12da6ef3c122beced98723f6aa3465abb3daa74 100644 (file)
@@ -170,9 +170,28 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
                enum nl80211_bss_scan_width scan_width;
                struct ieee80211_supported_band *sband =
                                rdev->wiphy.bands[setup->chandef.chan->band];
-               scan_width = cfg80211_chandef_to_scan_width(&setup->chandef);
-               setup->basic_rates = ieee80211_mandatory_rates(sband,
-                                                              scan_width);
+
+               if (setup->chandef.chan->band == NL80211_BAND_2GHZ) {
+                       int i;
+
+                       /*
+                        * Older versions selected the mandatory rates for
+                        * 2.4 GHz as well, but were broken in that only
+                        * 1 Mbps was regarded as a mandatory rate. Keep
+                        * using just 1 Mbps as the default basic rate for
+                        * mesh to be interoperable with older versions.
+                        */
+                       for (i = 0; i < sband->n_bitrates; i++) {
+                               if (sband->bitrates[i].bitrate == 10) {
+                                       setup->basic_rates = BIT(i);
+                                       break;
+                               }
+                       }
+               } else {
+                       scan_width = cfg80211_chandef_to_scan_width(&setup->chandef);
+                       setup->basic_rates = ieee80211_mandatory_rates(sband,
+                                                                      scan_width);
+               }
        }
 
        err = cfg80211_chandef_dfs_required(&rdev->wiphy,
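The 2.4 GHz branch above matches sband->bitrates[i].bitrate against 10 because cfg80211 rate table entries are stored in units of 100 kbps, so 10 corresponds to 1 Mbps; the resulting BIT(i) is that entry's position in the basic-rates bitmap. A hedged sketch of the same scan factored into a small helper (hypothetical name, not part of the patch):

/* Hedged sketch, hypothetical helper: rate-bitmap bit for the 1 Mbps entry.
 * struct ieee80211_rate.bitrate is in units of 100 kbps, so 1 Mbps == 10.
 */
static u32 mesh_1mbps_rate_bit(struct ieee80211_supported_band *sband)
{
	int i;

	for (i = 0; i < sband->n_bitrates; i++)
		if (sband->bitrates[i].bitrate == 10)
			return BIT(i);
	return 0;
}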
index fdb3646274a5673e1e77a832401c1d6b890ff070..701cfd7acc1bc477aa03bdf8ccb30e72755d1e33 100644 (file)
@@ -1032,6 +1032,8 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
        wdev->current_bss = NULL;
        wdev->ssid_len = 0;
        wdev->conn_owner_nlportid = 0;
+       kzfree(wdev->connect_keys);
+       wdev->connect_keys = NULL;
 
        nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap);
 
index 0e349b80686e76a421759b931ec04d194e446768..ba942e3ead890de9f4632d25a5fc1fd000676296 100644 (file)
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
+ifndef CROSS_COMPILE
 hostprogs-$(CONFIG_SAMPLE_SECCOMP) := bpf-fancy dropper bpf-direct
 
 HOSTCFLAGS_bpf-fancy.o += -I$(objtree)/usr/include
@@ -16,7 +17,6 @@ HOSTCFLAGS_bpf-direct.o += -idirafter $(objtree)/include
 bpf-direct-objs := bpf-direct.o
 
 # Try to match the kernel target.
-ifndef CROSS_COMPILE
 ifndef CONFIG_64BIT
 
 # s390 has -m31 flag to build 31 bit binaries
@@ -35,12 +35,4 @@ HOSTLOADLIBES_bpf-fancy += $(MFLAG)
 HOSTLOADLIBES_dropper += $(MFLAG)
 endif
 always := $(hostprogs-m)
-else
-# MIPS system calls are defined based on the -mabi that is passed
-# to the toolchain which may or may not be a valid option
-# for the host toolchain. So disable tests if target architecture
-# is MIPS but the host isn't.
-ifndef CONFIG_MIPS
-always := $(hostprogs-m)
-endif
 endif
index 47cddf32aeba025f2741dbc7f48d596f4f6653fa..4f2b25d43ec9b46923e52ae5d6f2bf789a6465b4 100644 (file)
@@ -256,6 +256,8 @@ __objtool_obj := $(objtree)/tools/objtool/objtool
 
 objtool_args = $(if $(CONFIG_UNWINDER_ORC),orc generate,check)
 
+objtool_args += $(if $(part-of-module), --module,)
+
 ifndef CONFIG_FRAME_POINTER
 objtool_args += --no-fp
 endif
@@ -264,6 +266,12 @@ objtool_args += --no-unreachable
 else
 objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable)
 endif
+ifdef CONFIG_RETPOLINE
+ifneq ($(RETPOLINE_CFLAGS),)
+  objtool_args += --retpoline
+endif
+endif
+
 
 ifdef CONFIG_MODVERSIONS
 objtool_o = $(@D)/.tmp_$(@F)
index 1249b727644b742db1c215eda9691e3d5e69d877..8fd6437beda80778554cd43c47d3e120e0f0c4d2 100644 (file)
@@ -56,10 +56,10 @@ statement S;
 p << r.p;
 @@
 
-coccilib.org.print_todo(p[0], "WARNING opportunity for kmemdep")
+coccilib.org.print_todo(p[0], "WARNING opportunity for kmemdup")
 
 @script:python depends on report@
 p << r.p;
 @@
 
-coccilib.report.print_report(p[0], "WARNING opportunity for kmemdep")
+coccilib.report.print_report(p[0], "WARNING opportunity for kmemdup")
index 9ee9bf7fd1a2113bfb869cf53b6d26d2f0852455..65792650c63057f0ed7edf7223dad30c06ac2391 100644 (file)
@@ -595,7 +595,7 @@ static void optimize_result(void)
                 * original char code */
                if (!best_table_len[i]) {
 
-                       /* find the token with the breates profit value */
+                       /* find the token with the best profit value */
                        best = find_best_token();
                        if (token_profit[best] == 0)
                                break;
index 5c12dc91ef348ea5a64a31edeb98657cc93512b4..df26c7b0fe13b611087ad8fe73fb7b1ae2257971 100644 (file)
@@ -178,7 +178,7 @@ static int conf_set_sym_val(struct symbol *sym, int def, int def_flags, char *p)
        case S_HEX:
        done:
                if (sym_string_valid(sym, p)) {
-                       sym->def[def].val = strdup(p);
+                       sym->def[def].val = xstrdup(p);
                        sym->flags |= def_flags;
                } else {
                        if (def != S_DEF_AUTO)
index 2858738b22d5aeac27154f74af433540839d37a6..240880a89111df06e95da0b44b08fa8aa184fa58 100644 (file)
@@ -101,7 +101,7 @@ static struct message *message__new(const char *msg, char *option,
        if (self->files == NULL)
                goto out_fail;
 
-       self->msg = strdup(msg);
+       self->msg = xstrdup(msg);
        if (self->msg == NULL)
                goto out_fail_msg;
 
index 4e23febbe4b2836451ab91327c634b268555bc53..2d5ec2d0e95293c8293adacb229dc2ecb5e6f170 100644 (file)
@@ -115,6 +115,7 @@ int file_write_dep(const char *name);
 void *xmalloc(size_t size);
 void *xcalloc(size_t nmemb, size_t size);
 void *xrealloc(void *p, size_t size);
+char *xstrdup(const char *s);
 
 struct gstr {
        size_t len;
index a10bd9d6fafd003a57aeb2baf24d4bc6dd04c982..6c0bcd9c472d6bee0b9fdadab18e9d46e5b05cdb 100755 (executable)
@@ -55,7 +55,8 @@ EOF
            echo " *** required header files."                            1>&2
            echo " *** 'make menuconfig' requires the ncurses libraries." 1>&2
            echo " *** "                                                  1>&2
-           echo " *** Install ncurses (ncurses-devel) and try again."    1>&2
+           echo " *** Install ncurses (ncurses-devel or libncurses-dev " 1>&2
+           echo " *** depending on your distribution) and try again."    1>&2
            echo " *** "                                                  1>&2
            exit 1
        fi
index 99222855544c3a7c2f67b3c33b3796eb36e5ff80..36cd3e1f1c28895b1a64740d8bcac17454197d70 100644 (file)
@@ -212,6 +212,7 @@ void menu_add_option(int token, char *arg)
                        sym_defconfig_list = current_entry->sym;
                else if (sym_defconfig_list != current_entry->sym)
                        zconf_error("trying to redefine defconfig symbol");
+               sym_defconfig_list->flags |= SYMBOL_AUTO;
                break;
        case T_OPT_ENV:
                prop_add_env(arg);
index cca9663be5ddd918703d534ef95368ccc44db322..2220bc4b051bd914e34bd20beb351b98da5ecc86 100644 (file)
@@ -183,7 +183,7 @@ static void sym_validate_range(struct symbol *sym)
                sprintf(str, "%lld", val2);
        else
                sprintf(str, "0x%llx", val2);
-       sym->curr.val = strdup(str);
+       sym->curr.val = xstrdup(str);
 }
 
 static void sym_set_changed(struct symbol *sym)
@@ -849,7 +849,7 @@ struct symbol *sym_lookup(const char *name, int flags)
                                   : !(symbol->flags & (SYMBOL_CONST|SYMBOL_CHOICE))))
                                return symbol;
                }
-               new_name = strdup(name);
+               new_name = xstrdup(name);
        } else {
                new_name = NULL;
                hash = 0;
index b98a79e30e04a4017bd33264e85b2c40e46bc3af..c6f6e21b809ffe7a6f60acd2a7f016ee88971d5c 100644 (file)
@@ -154,3 +154,14 @@ void *xrealloc(void *p, size_t size)
        fprintf(stderr, "Out of memory.\n");
        exit(1);
 }
+
+char *xstrdup(const char *s)
+{
+       char *p;
+
+       p = strdup(s);
+       if (p)
+               return p;
+       fprintf(stderr, "Out of memory.\n");
+       exit(1);
+}
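xstrdup() rounds out the xmalloc()/xcalloc()/xrealloc() family: it never returns NULL, printing an error and exiting on allocation failure instead, which is why the strdup() call sites converted elsewhere in this series no longer need their own NULL handling. A hedged before/after sketch of such a conversion (the call site shown is illustrative, not lifted from the patch):

/* Hedged sketch of a strdup() -> xstrdup() conversion. */

/* before: every call site needed its own failure handling */
sym->curr.val = strdup(str);
if (!sym->curr.val) {
	fprintf(stderr, "Out of memory.\n");
	exit(1);
}

/* after: the out-of-memory check lives once inside xstrdup() */
sym->curr.val = xstrdup(str);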
index 02de6fe302a9aec4fc747e5728f5d59f48946174..88b650eb9cc9141233d5da9be8702b9069de65e6 100644 (file)
@@ -332,16 +332,12 @@ void zconf_nextfile(const char *name)
                                "Inclusion path:\n  current file : '%s'\n",
                                zconf_curname(), zconf_lineno(),
                                zconf_curname());
-                       iter = current_file->parent;
-                       while (iter && \
-                              strcmp(iter->name,current_file->name)) {
-                               fprintf(stderr, "  included from: '%s:%d'\n",
-                                       iter->name, iter->lineno-1);
+                       iter = current_file;
+                       do {
                                iter = iter->parent;
-                       }
-                       if (iter)
                                fprintf(stderr, "  included from: '%s:%d'\n",
-                                       iter->name, iter->lineno+1);
+                                       iter->name, iter->lineno - 1);
+                       } while (strcmp(iter->name, current_file->name));
                        exit(1);
                }
        }
index 4be98050b961fe73df6bf6516b1e5a7f8135d149..ad6305b0f40cb962edf922ae73639c75b63c60c0 100644 (file)
@@ -127,7 +127,7 @@ no_mainmenu_stmt: /* empty */
         * later regardless of whether it comes from the 'prompt' in
         * mainmenu_stmt or here
         */
-       menu_add_prompt(P_MENU, strdup("Linux Kernel Configuration"), NULL);
+       menu_add_prompt(P_MENU, xstrdup("Linux Kernel Configuration"), NULL);
 };
 
 
@@ -276,6 +276,7 @@ choice: T_CHOICE word_opt T_EOL
        sym->flags |= SYMBOL_AUTO;
        menu_add_entry(sym);
        menu_add_expr(P_CHOICE, NULL, NULL);
+       free($2);
        printd(DEBUG_PARSE, "%s:%d:choice\n", zconf_curname(), zconf_lineno());
 };
 
index c0d129d7f4304abfac7f2e7d699000df02678f73..be56a1153014af54af0db2bf23c92306c4a8b4fb 100755 (executable)
@@ -246,7 +246,7 @@ else
 fi;
 
 # final build of init/
-${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init GCC_PLUGINS_CFLAGS="${GCC_PLUGINS_CFLAGS}"
+${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init
 
 archive_builtin
 
index 6f9e4ce568cd87fd8b529a2e8c08c615c6f790a5..9bb0a7f2863e3750ced13183d083281d320f9109 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/cred.h>
 #include <linux/key-type.h>
 #include <linux/digsig.h>
+#include <linux/vmalloc.h>
 #include <crypto/public_key.h>
 #include <keys/system_keyring.h>
 
index 929e14978c421b227e592e937d9157ca2235b2f7..fa728f662a6f3e094dccacca44d1ebc099e5a0c6 100644 (file)
 #include <keys/big_key-type.h>
 #include <crypto/aead.h>
 
+struct big_key_buf {
+       unsigned int            nr_pages;
+       void                    *virt;
+       struct scatterlist      *sg;
+       struct page             *pages[];
+};
+
 /*
  * Layout of key payload words.
  */
@@ -91,10 +98,9 @@ static DEFINE_MUTEX(big_key_aead_lock);
 /*
  * Encrypt/decrypt big_key data
  */
-static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key)
+static int big_key_crypt(enum big_key_op op, struct big_key_buf *buf, size_t datalen, u8 *key)
 {
        int ret;
-       struct scatterlist sgio;
        struct aead_request *aead_req;
        /* We always use a zero nonce. The reason we can get away with this is
         * because we're using a different randomly generated key for every
@@ -109,8 +115,7 @@ static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key)
                return -ENOMEM;
 
        memset(zero_nonce, 0, sizeof(zero_nonce));
-       sg_init_one(&sgio, data, datalen + (op == BIG_KEY_ENC ? ENC_AUTHTAG_SIZE : 0));
-       aead_request_set_crypt(aead_req, &sgio, &sgio, datalen, zero_nonce);
+       aead_request_set_crypt(aead_req, buf->sg, buf->sg, datalen, zero_nonce);
        aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
        aead_request_set_ad(aead_req, 0);
 
@@ -129,22 +134,82 @@ error:
        return ret;
 }
 
+/*
+ * Free up the buffer.
+ */
+static void big_key_free_buffer(struct big_key_buf *buf)
+{
+       unsigned int i;
+
+       if (buf->virt) {
+               memset(buf->virt, 0, buf->nr_pages * PAGE_SIZE);
+               vunmap(buf->virt);
+       }
+
+       for (i = 0; i < buf->nr_pages; i++)
+               if (buf->pages[i])
+                       __free_page(buf->pages[i]);
+
+       kfree(buf);
+}
+
+/*
+ * Allocate a buffer consisting of a set of pages with a virtual mapping
+ * applied over them.
+ */
+static void *big_key_alloc_buffer(size_t len)
+{
+       struct big_key_buf *buf;
+       unsigned int npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       unsigned int i, l;
+
+       buf = kzalloc(sizeof(struct big_key_buf) +
+                     sizeof(struct page) * npg +
+                     sizeof(struct scatterlist) * npg,
+                     GFP_KERNEL);
+       if (!buf)
+               return NULL;
+
+       buf->nr_pages = npg;
+       buf->sg = (void *)(buf->pages + npg);
+       sg_init_table(buf->sg, npg);
+
+       for (i = 0; i < buf->nr_pages; i++) {
+               buf->pages[i] = alloc_page(GFP_KERNEL);
+               if (!buf->pages[i])
+                       goto nomem;
+
+               l = min_t(size_t, len, PAGE_SIZE);
+               sg_set_page(&buf->sg[i], buf->pages[i], l, 0);
+               len -= l;
+       }
+
+       buf->virt = vmap(buf->pages, buf->nr_pages, VM_MAP, PAGE_KERNEL);
+       if (!buf->virt)
+               goto nomem;
+
+       return buf;
+
+nomem:
+       big_key_free_buffer(buf);
+       return NULL;
+}
+
 /*
  * Preparse a big key
  */
 int big_key_preparse(struct key_preparsed_payload *prep)
 {
+       struct big_key_buf *buf;
        struct path *path = (struct path *)&prep->payload.data[big_key_path];
        struct file *file;
        u8 *enckey;
-       u8 *data = NULL;
        ssize_t written;
-       size_t datalen = prep->datalen;
+       size_t datalen = prep->datalen, enclen = datalen + ENC_AUTHTAG_SIZE;
        int ret;
 
-       ret = -EINVAL;
        if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data)
-               goto error;
+               return -EINVAL;
 
        /* Set an arbitrary quota */
        prep->quotalen = 16;
@@ -157,13 +222,12 @@ int big_key_preparse(struct key_preparsed_payload *prep)
                 *
                 * File content is stored encrypted with randomly generated key.
                 */
-               size_t enclen = datalen + ENC_AUTHTAG_SIZE;
                loff_t pos = 0;
 
-               data = kmalloc(enclen, GFP_KERNEL);
-               if (!data)
+               buf = big_key_alloc_buffer(enclen);
+               if (!buf)
                        return -ENOMEM;
-               memcpy(data, prep->data, datalen);
+               memcpy(buf->virt, prep->data, datalen);
 
                /* generate random key */
                enckey = kmalloc(ENC_KEY_SIZE, GFP_KERNEL);
@@ -176,7 +240,7 @@ int big_key_preparse(struct key_preparsed_payload *prep)
                        goto err_enckey;
 
                /* encrypt aligned data */
-               ret = big_key_crypt(BIG_KEY_ENC, data, datalen, enckey);
+               ret = big_key_crypt(BIG_KEY_ENC, buf, datalen, enckey);
                if (ret)
                        goto err_enckey;
 
@@ -187,7 +251,7 @@ int big_key_preparse(struct key_preparsed_payload *prep)
                        goto err_enckey;
                }
 
-               written = kernel_write(file, data, enclen, &pos);
+               written = kernel_write(file, buf->virt, enclen, &pos);
                if (written != enclen) {
                        ret = written;
                        if (written >= 0)
@@ -202,7 +266,7 @@ int big_key_preparse(struct key_preparsed_payload *prep)
                *path = file->f_path;
                path_get(path);
                fput(file);
-               kzfree(data);
+               big_key_free_buffer(buf);
        } else {
                /* Just store the data in a buffer */
                void *data = kmalloc(datalen, GFP_KERNEL);
@@ -220,7 +284,7 @@ err_fput:
 err_enckey:
        kzfree(enckey);
 error:
-       kzfree(data);
+       big_key_free_buffer(buf);
        return ret;
 }
 
@@ -298,15 +362,15 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
                return datalen;
 
        if (datalen > BIG_KEY_FILE_THRESHOLD) {
+               struct big_key_buf *buf;
                struct path *path = (struct path *)&key->payload.data[big_key_path];
                struct file *file;
-               u8 *data;
                u8 *enckey = (u8 *)key->payload.data[big_key_data];
                size_t enclen = datalen + ENC_AUTHTAG_SIZE;
                loff_t pos = 0;
 
-               data = kmalloc(enclen, GFP_KERNEL);
-               if (!data)
+               buf = big_key_alloc_buffer(enclen);
+               if (!buf)
                        return -ENOMEM;
 
                file = dentry_open(path, O_RDONLY, current_cred());
@@ -316,26 +380,26 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
                }
 
                /* read file to kernel and decrypt */
-               ret = kernel_read(file, data, enclen, &pos);
+               ret = kernel_read(file, buf->virt, enclen, &pos);
                if (ret >= 0 && ret != enclen) {
                        ret = -EIO;
                        goto err_fput;
                }
 
-               ret = big_key_crypt(BIG_KEY_DEC, data, enclen, enckey);
+               ret = big_key_crypt(BIG_KEY_DEC, buf, enclen, enckey);
                if (ret)
                        goto err_fput;
 
                ret = datalen;
 
                /* copy decrypted data to user */
-               if (copy_to_user(buffer, data, datalen) != 0)
+               if (copy_to_user(buffer, buf->virt, datalen) != 0)
                        ret = -EFAULT;
 
 err_fput:
                fput(file);
 error:
-               kzfree(data);
+               big_key_free_buffer(buf);
        } else {
                ret = datalen;
                if (copy_to_user(buffer, key->payload.data[big_key_data],
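The big_key_buf introduced above replaces the previous single kmalloc'd buffer with page-backed storage: buf->pages holds the individual pages, buf->sg describes them to the AEAD code, and buf->virt is a vmap() of the same pages used for byte-wise access (copying from prep->data, kernel_write()/kernel_read()). A hedged sketch of the allocate/use/free lifecycle, condensed from the preparse path above (error paths trimmed for brevity):

/* Hedged sketch of the buffer lifecycle; error handling omitted. */
struct big_key_buf *buf;

buf = big_key_alloc_buffer(enclen);	/* pages + sg table + vmap'd view */
if (!buf)
	return -ENOMEM;

memcpy(buf->virt, prep->data, datalen);		/* fill via linear mapping */
ret = big_key_crypt(BIG_KEY_ENC, buf, datalen, enckey);	/* crypt via buf->sg */
if (ret == 0)
	written = kernel_write(file, buf->virt, enclen, &pos);

big_key_free_buffer(buf);	/* zero, vunmap() and free the pages */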
index 0b3026d937b101918581edab80aa3b2803ecec5a..8a77620a38548ef8ad36d5b9bf154a14a785f568 100644 (file)
@@ -889,7 +889,7 @@ static int snd_ctl_elem_read(struct snd_card *card,
 
        index_offset = snd_ctl_get_ioff(kctl, &control->id);
        vd = &kctl->vd[index_offset];
-       if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_READ) && kctl->get == NULL)
+       if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_READ) || kctl->get == NULL)
                return -EPERM;
 
        snd_ctl_build_ioff(&control->id, kctl, index_offset);
index c71dcacea807bf0e0d11aa147401acd12280686c..96143df19b2150884fdea3d6eb26ea49fd65a874 100644 (file)
@@ -181,7 +181,7 @@ static const struct kernel_param_ops param_ops_xint = {
 };
 #define param_check_xint param_check_int
 
-static int power_save = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
+static int power_save = -1;
 module_param(power_save, xint, 0644);
 MODULE_PARM_DESC(power_save, "Automatic power-saving timeout "
                 "(in second, 0 = disable).");
@@ -2186,6 +2186,24 @@ out_free:
        return err;
 }
 
+#ifdef CONFIG_PM
+/* On some boards setting power_save to a non-zero value leads to clicking /
+ * popping sounds whenever we enter/leave powersaving mode. Ideally we would
+ * figure out how to avoid these sounds, but that is not always feasible.
+ * So we keep a list of devices where we disable powersaving as it's known
+ * to cause problems on these devices.
+ */
+static struct snd_pci_quirk power_save_blacklist[] = {
+       /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
+       SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0),
+       /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
+       SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
+       /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
+       SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
+       {}
+};
+#endif /* CONFIG_PM */
+
 /* number of codec slots for each chipset: 0 = default slots (i.e. 4) */
 static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = {
        [AZX_DRIVER_NVIDIA] = 8,
@@ -2198,6 +2216,7 @@ static int azx_probe_continue(struct azx *chip)
        struct hdac_bus *bus = azx_bus(chip);
        struct pci_dev *pci = chip->pci;
        int dev = chip->dev_index;
+       int val;
        int err;
 
        hda->probe_continued = 1;
@@ -2278,7 +2297,22 @@ static int azx_probe_continue(struct azx *chip)
 
        chip->running = 1;
        azx_add_card_list(chip);
-       snd_hda_set_power_save(&chip->bus, power_save * 1000);
+
+       val = power_save;
+#ifdef CONFIG_PM
+       if (val == -1) {
+               const struct snd_pci_quirk *q;
+
+               val = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
+               q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
+               if (q && val) {
+                       dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n",
+                                q->subvendor, q->subdevice);
+                       val = 0;
+               }
+       }
+#endif /* CONFIG_PM */
+       snd_hda_set_power_save(&chip->bus, val * 1000);
        if (azx_has_pm_runtime(chip) || hda->use_vga_switcheroo)
                pm_runtime_put_autosuspend(&pci->dev);
 
index ce28f7ce64e63774655a421bf1102b7c354786dc..b9c93fa0a51c6e2ec57a51fae96b4d4f8eaf52b8 100644 (file)
@@ -4997,13 +4997,14 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
 
        if (action == HDA_FIXUP_ACT_PRE_PROBE) {
                spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
+               snd_hda_apply_pincfgs(codec, pincfgs);
+       } else if (action == HDA_FIXUP_ACT_INIT) {
                /* Enable DOCK device */
                snd_hda_codec_write(codec, 0x17, 0,
                            AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
                /* Enable DOCK device */
                snd_hda_codec_write(codec, 0x19, 0,
                            AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
-               snd_hda_apply_pincfgs(codec, pincfgs);
        }
 }
 
index 50252046b01df7cdb376e1c8ae8c59559befdd1f..754e632a27bd2e8c15931e821fa158bf50bfdb78 100644 (file)
@@ -3325,4 +3325,51 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
        }
 },
 
+{
+       /*
+        * Bower's & Wilkins PX headphones only support the 48 kHz sample rate
+        * even though it advertises more. The capture interface doesn't work
+        * even on windows.
+        */
+       USB_DEVICE(0x19b5, 0x0021),
+       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = (const struct snd_usb_audio_quirk[]) {
+                       {
+                               .ifnum = 0,
+                               .type = QUIRK_AUDIO_STANDARD_MIXER,
+                       },
+                       /* Capture */
+                       {
+                               .ifnum = 1,
+                               .type = QUIRK_IGNORE_INTERFACE,
+                       },
+                       /* Playback */
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+                               .data = &(const struct audioformat) {
+                                       .formats = SNDRV_PCM_FMTBIT_S16_LE,
+                                       .channels = 2,
+                                       .iface = 2,
+                                       .altsetting = 1,
+                                       .altset_idx = 1,
+                                       .attributes = UAC_EP_CS_ATTR_FILL_MAX |
+                                               UAC_EP_CS_ATTR_SAMPLE_RATE,
+                                       .endpoint = 0x03,
+                                       .ep_attr = USB_ENDPOINT_XFER_ISOC,
+                                       .rates = SNDRV_PCM_RATE_48000,
+                                       .rate_min = 48000,
+                                       .rate_max = 48000,
+                                       .nr_rates = 1,
+                                       .rate_table = (unsigned int[]) {
+                                               48000
+                                       }
+                               }
+                       },
+               }
+       }
+},
+
 #undef USB_DEVICE_VENDOR_SPEC
index a0951505c7f5b2804c3203fe7eeb0dea7d4935b1..4ed9d0c41843888d6e9c4aa7f79b980c3e0ac74b 100644 (file)
@@ -50,6 +50,7 @@
 /*standard module options for ALSA. This module supports only one card*/
 static int hdmi_card_index = SNDRV_DEFAULT_IDX1;
 static char *hdmi_card_id = SNDRV_DEFAULT_STR1;
+static bool single_port;
 
 module_param_named(index, hdmi_card_index, int, 0444);
 MODULE_PARM_DESC(index,
@@ -57,6 +58,9 @@ MODULE_PARM_DESC(index,
 module_param_named(id, hdmi_card_id, charp, 0444);
 MODULE_PARM_DESC(id,
                "ID string for INTEL Intel HDMI Audio controller.");
+module_param(single_port, bool, 0444);
+MODULE_PARM_DESC(single_port,
+               "Single-port mode (for compatibility)");
 
 /*
  * ELD SA bits in the CEA Speaker Allocation data block
@@ -1579,7 +1583,11 @@ static irqreturn_t display_pipe_interrupt_handler(int irq, void *dev_id)
 static void notify_audio_lpe(struct platform_device *pdev, int port)
 {
        struct snd_intelhad_card *card_ctx = platform_get_drvdata(pdev);
-       struct snd_intelhad *ctx = &card_ctx->pcm_ctx[port];
+       struct snd_intelhad *ctx;
+
+       ctx = &card_ctx->pcm_ctx[single_port ? 0 : port];
+       if (single_port)
+               ctx->port = port;
 
        schedule_work(&ctx->hdmi_audio_wq);
 }
@@ -1743,6 +1751,7 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
 {
        struct snd_card *card;
        struct snd_intelhad_card *card_ctx;
+       struct snd_intelhad *ctx;
        struct snd_pcm *pcm;
        struct intel_hdmi_lpe_audio_pdata *pdata;
        int irq;
@@ -1787,6 +1796,21 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, card_ctx);
 
+       card_ctx->num_pipes = pdata->num_pipes;
+       card_ctx->num_ports = single_port ? 1 : pdata->num_ports;
+
+       for_each_port(card_ctx, port) {
+               ctx = &card_ctx->pcm_ctx[port];
+               ctx->card_ctx = card_ctx;
+               ctx->dev = card_ctx->dev;
+               ctx->port = single_port ? -1 : port;
+               ctx->pipe = -1;
+
+               spin_lock_init(&ctx->had_spinlock);
+               mutex_init(&ctx->mutex);
+               INIT_WORK(&ctx->hdmi_audio_wq, had_audio_wq);
+       }
+
        dev_dbg(&pdev->dev, "%s: mmio_start = 0x%x, mmio_end = 0x%x\n",
                __func__, (unsigned int)res_mmio->start,
                (unsigned int)res_mmio->end);
@@ -1816,19 +1840,12 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
        init_channel_allocations();
 
        card_ctx->num_pipes = pdata->num_pipes;
-       card_ctx->num_ports = pdata->num_ports;
+       card_ctx->num_ports = single_port ? 1 : pdata->num_ports;
 
        for_each_port(card_ctx, port) {
-               struct snd_intelhad *ctx = &card_ctx->pcm_ctx[port];
                int i;
 
-               ctx->card_ctx = card_ctx;
-               ctx->dev = card_ctx->dev;
-               ctx->port = port;
-               ctx->pipe = -1;
-
-               INIT_WORK(&ctx->hdmi_audio_wq, had_audio_wq);
-
+               ctx = &card_ctx->pcm_ctx[port];
                ret = snd_pcm_new(card, INTEL_HAD, port, MAX_PB_STREAMS,
                                  MAX_CAP_STREAMS, &pcm);
                if (ret)
index 3a0396d87c424954ffe3e6c1111dca2a23bd3776..185acfa229b592f0bf0b62d972e90c3746959283 100644 (file)
@@ -244,7 +244,7 @@ static int do_batch(int argc, char **argv)
        }
 
        if (errno && errno != ENOENT) {
-               perror("reading batch file failed");
+               p_err("reading batch file failed: %s", strerror(errno));
                err = -1;
        } else {
                p_info("processed %d lines", lines);
index e8e2baaf93c2665c4a7cf998a85e35eda76b009d..e549e329be8216646dbab3c11a6485d85d02f1c6 100644 (file)
@@ -774,6 +774,9 @@ static int do_dump(int argc, char **argv)
                              n < 0 ? strerror(errno) : "short write");
                        goto err_free;
                }
+
+               if (json_output)
+                       jsonw_null(json_wtr);
        } else {
                if (member_len == &info.jited_prog_len) {
                        const char *name = NULL;
index 860fa151640abd5823e6551fb75e57fd67a0188a..ffca068e4a761ec02635b2383f46e4e876630bd9 100644 (file)
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for cgroup tools
 
-CC = $(CROSS_COMPILE)gcc
 CFLAGS = -Wall -Wextra
 
 all: cgroup_event_listener
index 805a2c0cf4cd37c42eeb19e4c72a2e3bc74ced7f..240eda014b371b4e1c63f607e6853d387f475392 100644 (file)
@@ -12,8 +12,6 @@ endif
 # (this improves performance and avoids hard-to-debug behaviour);
 MAKEFLAGS += -r
 
-CC = $(CROSS_COMPILE)gcc
-LD = $(CROSS_COMPILE)ld
 CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
 
 ALL_TARGETS := lsgpio gpio-hammer gpio-event-mon
index 1139d71fa0cf52aabf15cf2ff82bfe125529fb60..5db5e62cebdaeb3277e927ec396abd3ca412218e 100644 (file)
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for Hyper-V tools
 
-CC = $(CROSS_COMPILE)gcc
 WARNINGS = -Wall -Wextra
 CFLAGS = $(WARNINGS) -g $(shell getconf LFS_CFLAGS)
 
index a08e7a47d6a32e858700d43a1bccdef67583b3ef..332ed2f6c2c2e54ae776fba178551f05fbc38851 100644 (file)
@@ -12,8 +12,6 @@ endif
 # (this improves performance and avoids hard-to-debug behaviour);
 MAKEFLAGS += -r
 
-CC = $(CROSS_COMPILE)gcc
-LD = $(CROSS_COMPILE)ld
 CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
 
 ALL_TARGETS := iio_event_monitor lsiio iio_generic_buffer
index a5684d0968b4fd087905e659c0ce80bd170434c2..5898c22ba310bdf27f626a2dfb7bc45e5cf2a505 100755 (executable)
@@ -33,7 +33,7 @@ import resource
 import struct
 import re
 import subprocess
-from collections import defaultdict
+from collections import defaultdict, namedtuple
 
 VMX_EXIT_REASONS = {
     'EXCEPTION_NMI':        0,
@@ -228,6 +228,7 @@ IOCTL_NUMBERS = {
 }
 
 ENCODING = locale.getpreferredencoding(False)
+TRACE_FILTER = re.compile(r'^[^\(]*$')
 
 
 class Arch(object):
@@ -260,6 +261,11 @@ class Arch(object):
                     return ArchX86(SVM_EXIT_REASONS)
                 return
 
+    def tracepoint_is_child(self, field):
+        if (TRACE_FILTER.match(field)):
+            return None
+        return field.split('(', 1)[0]
+
 
 class ArchX86(Arch):
     def __init__(self, exit_reasons):
@@ -267,6 +273,10 @@ class ArchX86(Arch):
         self.ioctl_numbers = IOCTL_NUMBERS
         self.exit_reasons = exit_reasons
 
+    def debugfs_is_child(self, field):
+        """ Returns name of parent if 'field' is a child, None otherwise """
+        return None
+
 
 class ArchPPC(Arch):
     def __init__(self):
@@ -282,6 +292,10 @@ class ArchPPC(Arch):
         self.ioctl_numbers['SET_FILTER'] = 0x80002406 | char_ptr_size << 16
         self.exit_reasons = {}
 
+    def debugfs_is_child(self, field):
+        """ Returns name of parent if 'field' is a child, None otherwise """
+        return None
+
 
 class ArchA64(Arch):
     def __init__(self):
@@ -289,6 +303,10 @@ class ArchA64(Arch):
         self.ioctl_numbers = IOCTL_NUMBERS
         self.exit_reasons = AARCH64_EXIT_REASONS
 
+    def debugfs_is_child(self, field):
+        """ Returns name of parent if 'field' is a child, None otherwise """
+        return None
+
 
 class ArchS390(Arch):
     def __init__(self):
@@ -296,6 +314,12 @@ class ArchS390(Arch):
         self.ioctl_numbers = IOCTL_NUMBERS
         self.exit_reasons = None
 
+    def debugfs_is_child(self, field):
+        """ Returns name of parent if 'field' is a child, None otherwise """
+        if field.startswith('instruction_'):
+            return 'exit_instruction'
+
+
 ARCH = Arch.get_arch()
 
 
@@ -331,9 +355,6 @@ class perf_event_attr(ctypes.Structure):
 PERF_TYPE_TRACEPOINT = 2
 PERF_FORMAT_GROUP = 1 << 3
 
-PATH_DEBUGFS_TRACING = '/sys/kernel/debug/tracing'
-PATH_DEBUGFS_KVM = '/sys/kernel/debug/kvm'
-
 
 class Group(object):
     """Represents a perf event group."""
@@ -376,8 +397,8 @@ class Event(object):
         self.syscall = self.libc.syscall
         self.name = name
         self.fd = None
-        self.setup_event(group, trace_cpu, trace_pid, trace_point,
-                         trace_filter, trace_set)
+        self._setup_event(group, trace_cpu, trace_pid, trace_point,
+                          trace_filter, trace_set)
 
     def __del__(self):
         """Closes the event's file descriptor.
@@ -390,7 +411,7 @@ class Event(object):
         if self.fd:
             os.close(self.fd)
 
-    def perf_event_open(self, attr, pid, cpu, group_fd, flags):
+    def _perf_event_open(self, attr, pid, cpu, group_fd, flags):
         """Wrapper for the sys_perf_evt_open() syscall.
 
         Used to set up performance events, returns a file descriptor or -1
@@ -409,7 +430,7 @@ class Event(object):
                             ctypes.c_int(pid), ctypes.c_int(cpu),
                             ctypes.c_int(group_fd), ctypes.c_long(flags))
 
-    def setup_event_attribute(self, trace_set, trace_point):
+    def _setup_event_attribute(self, trace_set, trace_point):
         """Returns an initialized ctype perf_event_attr struct."""
 
         id_path = os.path.join(PATH_DEBUGFS_TRACING, 'events', trace_set,
@@ -419,8 +440,8 @@ class Event(object):
         event_attr.config = int(open(id_path).read())
         return event_attr
 
-    def setup_event(self, group, trace_cpu, trace_pid, trace_point,
-                    trace_filter, trace_set):
+    def _setup_event(self, group, trace_cpu, trace_pid, trace_point,
+                     trace_filter, trace_set):
         """Sets up the perf event in Linux.
 
         Issues the syscall to register the event in the kernel and
@@ -428,7 +449,7 @@ class Event(object):
 
         """
 
-        event_attr = self.setup_event_attribute(trace_set, trace_point)
+        event_attr = self._setup_event_attribute(trace_set, trace_point)
 
         # First event will be group leader.
         group_leader = -1
@@ -437,8 +458,8 @@ class Event(object):
         if group.events:
             group_leader = group.events[0].fd
 
-        fd = self.perf_event_open(event_attr, trace_pid,
-                                  trace_cpu, group_leader, 0)
+        fd = self._perf_event_open(event_attr, trace_pid,
+                                   trace_cpu, group_leader, 0)
         if fd == -1:
             err = ctypes.get_errno()
             raise OSError(err, os.strerror(err),
@@ -475,6 +496,10 @@ class Event(object):
 
 class Provider(object):
     """Encapsulates functionalities used by all providers."""
+    def __init__(self, pid):
+        self.child_events = False
+        self.pid = pid
+
     @staticmethod
     def is_field_wanted(fields_filter, field):
         """Indicate whether field is valid according to fields_filter."""
@@ -500,12 +525,12 @@ class TracepointProvider(Provider):
     """
     def __init__(self, pid, fields_filter):
         self.group_leaders = []
-        self.filters = self.get_filters()
+        self.filters = self._get_filters()
         self.update_fields(fields_filter)
-        self.pid = pid
+        super(TracepointProvider, self).__init__(pid)
 
     @staticmethod
-    def get_filters():
+    def _get_filters():
         """Returns a dict of trace events, their filter ids and
         the values that can be filtered.
 
@@ -521,8 +546,8 @@ class TracepointProvider(Provider):
             filters['kvm_exit'] = ('exit_reason', ARCH.exit_reasons)
         return filters
 
-    def get_available_fields(self):
-        """Returns a list of available event's of format 'event name(filter
+    def _get_available_fields(self):
+        """Returns a list of available events of format 'event name(filter
         name)'.
 
         All available events have directories under
@@ -549,11 +574,12 @@ class TracepointProvider(Provider):
 
     def update_fields(self, fields_filter):
         """Refresh fields, applying fields_filter"""
-        self.fields = [field for field in self.get_available_fields()
-                       if self.is_field_wanted(fields_filter, field)]
+        self.fields = [field for field in self._get_available_fields()
+                       if self.is_field_wanted(fields_filter, field) or
+                       ARCH.tracepoint_is_child(field)]
 
     @staticmethod
-    def get_online_cpus():
+    def _get_online_cpus():
         """Returns a list of cpu id integers."""
         def parse_int_list(list_string):
             """Returns an int list from a string of comma separated integers and
@@ -575,17 +601,17 @@ class TracepointProvider(Provider):
             cpu_string = cpu_list.readline()
             return parse_int_list(cpu_string)
 
-    def setup_traces(self):
+    def _setup_traces(self):
         """Creates all event and group objects needed to be able to retrieve
         data."""
-        fields = self.get_available_fields()
+        fields = self._get_available_fields()
         if self._pid > 0:
             # Fetch list of all threads of the monitored pid, as qemu
             # starts a thread for each vcpu.
             path = os.path.join('/proc', str(self._pid), 'task')
             groupids = self.walkdir(path)[1]
         else:
-            groupids = self.get_online_cpus()
+            groupids = self._get_online_cpus()
 
         # The constant is needed as a buffer for python libs, std
         # streams and other files that the script opens.
@@ -663,7 +689,7 @@ class TracepointProvider(Provider):
         # The garbage collector will get rid of all Event/Group
         # objects and open files after removing the references.
         self.group_leaders = []
-        self.setup_traces()
+        self._setup_traces()
         self.fields = self._fields
 
     def read(self, by_guest=0):
@@ -671,8 +697,12 @@ class TracepointProvider(Provider):
         ret = defaultdict(int)
         for group in self.group_leaders:
             for name, val in group.read().items():
-                if name in self._fields:
-                    ret[name] += val
+                if name not in self._fields:
+                    continue
+                parent = ARCH.tracepoint_is_child(name)
+                if parent:
+                    name += ' ' + parent
+                ret[name] += val
         return ret
 
     def reset(self):
@@ -690,11 +720,11 @@ class DebugfsProvider(Provider):
         self._baseline = {}
         self.do_read = True
         self.paths = []
-        self.pid = pid
+        super(DebugfsProvider, self).__init__(pid)
         if include_past:
-            self.restore()
+            self._restore()
 
-    def get_available_fields(self):
+    def _get_available_fields(self):
         """"Returns a list of available fields.
 
         The fields are all available KVM debugfs files
@@ -704,8 +734,9 @@ class DebugfsProvider(Provider):
 
     def update_fields(self, fields_filter):
         """Refresh fields, applying fields_filter"""
-        self._fields = [field for field in self.get_available_fields()
-                        if self.is_field_wanted(fields_filter, field)]
+        self._fields = [field for field in self._get_available_fields()
+                        if self.is_field_wanted(fields_filter, field) or
+                        ARCH.debugfs_is_child(field)]
 
     @property
     def fields(self):
@@ -758,7 +789,7 @@ class DebugfsProvider(Provider):
                     paths.append(dir)
         for path in paths:
             for field in self._fields:
-                value = self.read_field(field, path)
+                value = self._read_field(field, path)
                 key = path + field
                 if reset == 1:
                     self._baseline[key] = value
@@ -766,20 +797,21 @@ class DebugfsProvider(Provider):
                     self._baseline[key] = 0
                 if self._baseline.get(key, -1) == -1:
                     self._baseline[key] = value
-                increment = (results.get(field, 0) + value -
-                             self._baseline.get(key, 0))
-                if by_guest:
-                    pid = key.split('-')[0]
-                    if pid in results:
-                        results[pid] += increment
-                    else:
-                        results[pid] = increment
+                parent = ARCH.debugfs_is_child(field)
+                if parent:
+                    field = field + ' ' + parent
+                else:
+                    if by_guest:
+                        field = key.split('-')[0]    # set 'field' to 'pid'
+                increment = value - self._baseline.get(key, 0)
+                if field in results:
+                    results[field] += increment
                 else:
                     results[field] = increment
 
         return results
 
-    def read_field(self, field, path):
+    def _read_field(self, field, path):
         """Returns the value of a single field from a specific VM."""
         try:
             return int(open(os.path.join(PATH_DEBUGFS_KVM,
@@ -794,12 +826,15 @@ class DebugfsProvider(Provider):
         self._baseline = {}
         self.read(1)
 
-    def restore(self):
+    def _restore(self):
         """Reset field counters"""
         self._baseline = {}
         self.read(2)
 
 
+EventStat = namedtuple('EventStat', ['value', 'delta'])
+
+
 class Stats(object):
     """Manages the data providers and the data they provide.
 
@@ -808,13 +843,13 @@ class Stats(object):
 
     """
     def __init__(self, options):
-        self.providers = self.get_providers(options)
+        self.providers = self._get_providers(options)
         self._pid_filter = options.pid
         self._fields_filter = options.fields
         self.values = {}
+        self._child_events = False
 
-    @staticmethod
-    def get_providers(options):
+    def _get_providers(self, options):
         """Returns a list of data providers depending on the passed options."""
         providers = []
 
@@ -826,7 +861,7 @@ class Stats(object):
 
         return providers
 
-    def update_provider_filters(self):
+    def _update_provider_filters(self):
         """Propagates fields filters to providers."""
         # As we reset the counters when updating the fields we can
         # also clear the cache of old values.
@@ -847,7 +882,7 @@ class Stats(object):
     def fields_filter(self, fields_filter):
         if fields_filter != self._fields_filter:
             self._fields_filter = fields_filter
-            self.update_provider_filters()
+            self._update_provider_filters()
 
     @property
     def pid_filter(self):
@@ -861,16 +896,33 @@ class Stats(object):
             for provider in self.providers:
                 provider.pid = self._pid_filter
 
+    @property
+    def child_events(self):
+        return self._child_events
+
+    @child_events.setter
+    def child_events(self, val):
+        self._child_events = val
+        for provider in self.providers:
+            provider.child_events = val
+
     def get(self, by_guest=0):
         """Returns a dict with field -> (value, delta to last value) of all
-        provider data."""
+        provider data.
+        Key formats:
+          * plain: 'key' is event name
+          * child-parent: 'key' is in format '<child> <parent>'
+          * pid: 'key' is the pid of the guest, and the record contains the
+               aggregated event data
+        These formats are generated by the providers, and handled in class TUI.
+        """
         for provider in self.providers:
             new = provider.read(by_guest=by_guest)
-            for key in new if by_guest else provider.fields:
-                oldval = self.values.get(key, (0, 0))[0]
+            for key in new:
+                oldval = self.values.get(key, EventStat(0, 0)).value
                 newval = new.get(key, 0)
                 newdelta = newval - oldval
-                self.values[key] = (newval, newdelta)
+                self.values[key] = EventStat(newval, newdelta)
         return self.values
 
     def toggle_display_guests(self, to_pid):
@@ -899,10 +951,10 @@ class Stats(object):
         self.get(to_pid)
         return 0
 
+
 DELAY_DEFAULT = 3.0
 MAX_GUEST_NAME_LEN = 48
 MAX_REGEX_LEN = 44
-DEFAULT_REGEX = r'^[^\(]*$'
 SORT_DEFAULT = 0
 
 
@@ -969,7 +1021,7 @@ class Tui(object):
 
         return res
 
-    def print_all_gnames(self, row):
+    def _print_all_gnames(self, row):
         """Print a list of all running guests along with their pids."""
         self.screen.addstr(row, 2, '%8s  %-60s' %
                            ('Pid', 'Guest Name (fuzzy list, might be '
@@ -1032,19 +1084,13 @@ class Tui(object):
 
         return name
 
-    def update_drilldown(self):
-        """Sets or removes a filter that only allows fields without braces."""
-        if not self.stats.fields_filter:
-            self.stats.fields_filter = DEFAULT_REGEX
-
-        elif self.stats.fields_filter == DEFAULT_REGEX:
-            self.stats.fields_filter = None
-
-    def update_pid(self, pid):
+    def _update_pid(self, pid):
         """Propagates pid selection to stats object."""
+        self.screen.addstr(4, 1, 'Updating pid filter...')
+        self.screen.refresh()
         self.stats.pid_filter = pid
 
-    def refresh_header(self, pid=None):
+    def _refresh_header(self, pid=None):
         """Refreshes the header."""
         if pid is None:
             pid = self.stats.pid_filter
@@ -1059,8 +1105,7 @@ class Tui(object):
                                .format(pid, gname), curses.A_BOLD)
         else:
             self.screen.addstr(0, 0, 'kvm statistics - summary', curses.A_BOLD)
-        if self.stats.fields_filter and self.stats.fields_filter \
-           != DEFAULT_REGEX:
+        if self.stats.fields_filter:
             regex = self.stats.fields_filter
             if len(regex) > MAX_REGEX_LEN:
                 regex = regex[:MAX_REGEX_LEN] + '...'
@@ -1075,56 +1120,99 @@ class Tui(object):
         self.screen.addstr(4, 1, 'Collecting data...')
         self.screen.refresh()
 
-    def refresh_body(self, sleeptime):
+    def _refresh_body(self, sleeptime):
+        def is_child_field(field):
+            return field.find('(') != -1
+
+        def insert_child(sorted_items, child, values, parent):
+            num = len(sorted_items)
+            for i in range(0, num):
+                # only add child if parent is present
+                if parent.startswith(sorted_items[i][0]):
+                    sorted_items.insert(i + 1, ('  ' + child, values))
+
+        def get_sorted_events(self, stats):
+            """ separate parent and child events """
+            if self._sorting == SORT_DEFAULT:
+                def sortkey((_k, v)):
+                    # sort by (delta value, overall value)
+                    return (v.delta, v.value)
+            else:
+                def sortkey((_k, v)):
+                    # sort by overall value
+                    return v.value
+
+            childs = []
+            sorted_items = []
+            # we can't rule out child events to appear prior to parents even
+            # when sorted - separate out all children first, and add in later
+            for key, values in sorted(stats.items(), key=sortkey,
+                                      reverse=True):
+                if values == (0, 0):
+                    continue
+                if key.find(' ') != -1:
+                    if not self.stats.child_events:
+                        continue
+                    childs.insert(0, (key, values))
+                else:
+                    sorted_items.append((key, values))
+            if self.stats.child_events:
+                for key, values in childs:
+                    (child, parent) = key.split(' ')
+                    insert_child(sorted_items, child, values, parent)
+
+            return sorted_items
+
         row = 3
         self.screen.move(row, 0)
         self.screen.clrtobot()
         stats = self.stats.get(self._display_guests)
-
-        def sortCurAvg(x):
-            # sort by current events if available
-            if stats[x][1]:
-                return (-stats[x][1], -stats[x][0])
+        total = 0.
+        ctotal = 0.
+        for key, values in stats.items():
+            if self._display_guests:
+                if self.get_gname_from_pid(key):
+                    total += values.value
+                continue
+            if not key.find(' ') != -1:
+                total += values.value
             else:
-                return (0, -stats[x][0])
+                ctotal += values.value
+        if total == 0.:
+            # we don't have any fields, or all non-child events are filtered
+            total = ctotal
 
-        def sortTotal(x):
-            # sort by totals
-            return (0, -stats[x][0])
-        total = 0.
-        for key in stats.keys():
-            if key.find('(') is -1:
-                total += stats[key][0]
-        if self._sorting == SORT_DEFAULT:
-            sortkey = sortCurAvg
-        else:
-            sortkey = sortTotal
+        # print events
         tavg = 0
-        for key in sorted(stats.keys(), key=sortkey):
-            if row >= self.screen.getmaxyx()[0] - 1:
-                break
-            values = stats[key]
-            if not values[0] and not values[1]:
+        tcur = 0
+        for key, values in get_sorted_events(self, stats):
+            if row >= self.screen.getmaxyx()[0] - 1 or values == (0, 0):
                 break
-            if values[0] is not None:
-                cur = int(round(values[1] / sleeptime)) if values[1] else ''
-                if self._display_guests:
-                    key = self.get_gname_from_pid(key)
-                self.screen.addstr(row, 1, '%-40s %10d%7.1f %8s' %
-                                   (key, values[0], values[0] * 100 / total,
-                                    cur))
-                if cur is not '' and key.find('(') is -1:
-                    tavg += cur
+            if self._display_guests:
+                key = self.get_gname_from_pid(key)
+                if not key:
+                    continue
+            cur = int(round(values.delta / sleeptime)) if values.delta else ''
+            if key[0] != ' ':
+                if values.delta:
+                    tcur += values.delta
+                ptotal = values.value
+                ltotal = total
+            else:
+                ltotal = ptotal
+            self.screen.addstr(row, 1, '%-40s %10d%7.1f %8s' % (key,
+                               values.value,
+                               values.value * 100 / float(ltotal), cur))
             row += 1
         if row == 3:
             self.screen.addstr(4, 1, 'No matching events reported yet')
-        else:
+        if row > 4:
+            tavg = int(round(tcur / sleeptime)) if tcur > 0 else ''
             self.screen.addstr(row, 1, '%-40s %10d        %8s' %
-                               ('Total', total, tavg if tavg else ''),
-                               curses.A_BOLD)
+                               ('Total', total, tavg), curses.A_BOLD)
         self.screen.refresh()
 
-    def show_msg(self, text):
+    def _show_msg(self, text):
         """Display message centered text and exit on key press"""
         hint = 'Press any key to continue'
         curses.cbreak()
@@ -1139,16 +1227,16 @@ class Tui(object):
                            curses.A_STANDOUT)
         self.screen.getkey()
 
-    def show_help_interactive(self):
+    def _show_help_interactive(self):
         """Display help with list of interactive commands"""
         msg = ('   b     toggle events by guests (debugfs only, honors'
                ' filters)',
                '   c     clear filter',
                '   f     filter by regular expression',
-               '   g     filter by guest name',
+               '   g     filter by guest name/PID',
                '   h     display interactive commands reference',
                '   o     toggle sorting order (Total vs CurAvg/s)',
-               '   p     filter by PID',
+               '   p     filter by guest name/PID',
                '   q     quit',
                '   r     reset stats',
                '   s     set update interval',
@@ -1165,14 +1253,15 @@ class Tui(object):
             self.screen.addstr(row, 0, line)
             row += 1
         self.screen.getkey()
-        self.refresh_header()
+        self._refresh_header()
 
-    def show_filter_selection(self):
+    def _show_filter_selection(self):
         """Draws filter selection mask.
 
         Asks for a valid regex and sets the fields filter accordingly.
 
         """
+        msg = ''
         while True:
             self.screen.erase()
             self.screen.addstr(0, 0,
@@ -1181,61 +1270,25 @@ class Tui(object):
             self.screen.addstr(2, 0,
                                "Current regex: {0}"
                                .format(self.stats.fields_filter))
+            self.screen.addstr(5, 0, msg)
             self.screen.addstr(3, 0, "New regex: ")
             curses.echo()
             regex = self.screen.getstr().decode(ENCODING)
             curses.noecho()
             if len(regex) == 0:
-                self.stats.fields_filter = DEFAULT_REGEX
-                self.refresh_header()
+                self.stats.fields_filter = ''
+                self._refresh_header()
                 return
             try:
                 re.compile(regex)
                 self.stats.fields_filter = regex
-                self.refresh_header()
+                self._refresh_header()
                 return
             except re.error:
+                msg = '"' + regex + '": Not a valid regular expression'
                 continue
 
-    def show_vm_selection_by_pid(self):
-        """Draws PID selection mask.
-
-        Asks for a pid until a valid pid or 0 has been entered.
-
-        """
-        msg = ''
-        while True:
-            self.screen.erase()
-            self.screen.addstr(0, 0,
-                               'Show statistics for specific pid.',
-                               curses.A_BOLD)
-            self.screen.addstr(1, 0,
-                               'This might limit the shown data to the trace '
-                               'statistics.')
-            self.screen.addstr(5, 0, msg)
-            self.print_all_gnames(7)
-
-            curses.echo()
-            self.screen.addstr(3, 0, "Pid [0 or pid]: ")
-            pid = self.screen.getstr().decode(ENCODING)
-            curses.noecho()
-
-            try:
-                if len(pid) > 0:
-                    pid = int(pid)
-                    if pid != 0 and not os.path.isdir(os.path.join('/proc/',
-                                                                   str(pid))):
-                        msg = '"' + str(pid) + '": Not a running process'
-                        continue
-                else:
-                    pid = 0
-                self.refresh_header(pid)
-                self.update_pid(pid)
-                break
-            except ValueError:
-                msg = '"' + str(pid) + '": Not a valid pid'
-
-    def show_set_update_interval(self):
+    def _show_set_update_interval(self):
         """Draws update interval selection mask."""
         msg = ''
         while True:
@@ -1265,60 +1318,67 @@ class Tui(object):
 
             except ValueError:
                 msg = '"' + str(val) + '": Invalid value'
-        self.refresh_header()
+        self._refresh_header()
 
-    def show_vm_selection_by_guest_name(self):
+    def _show_vm_selection_by_guest(self):
         """Draws guest selection mask.
 
-        Asks for a guest name until a valid guest name or '' is entered.
+        Asks for a guest name or pid until a valid guest name or '' is entered.
 
         """
         msg = ''
         while True:
             self.screen.erase()
             self.screen.addstr(0, 0,
-                               'Show statistics for specific guest.',
+                               'Show statistics for specific guest or pid.',
                                curses.A_BOLD)
             self.screen.addstr(1, 0,
                                'This might limit the shown data to the trace '
                                'statistics.')
             self.screen.addstr(5, 0, msg)
-            self.print_all_gnames(7)
+            self._print_all_gnames(7)
             curses.echo()
-            self.screen.addstr(3, 0, "Guest [ENTER or guest]: ")
-            gname = self.screen.getstr().decode(ENCODING)
+            curses.curs_set(1)
+            self.screen.addstr(3, 0, "Guest or pid [ENTER exits]: ")
+            guest = self.screen.getstr().decode(ENCODING)
             curses.noecho()
 
-            if not gname:
-                self.refresh_header(0)
-                self.update_pid(0)
+            pid = 0
+            if not guest or guest == '0':
                 break
-            else:
-                pids = []
-                try:
-                    pids = self.get_pid_from_gname(gname)
-                except:
-                    msg = '"' + gname + '": Internal error while searching, ' \
-                          'use pid filter instead'
-                    continue
-                if len(pids) == 0:
-                    msg = '"' + gname + '": Not an active guest'
+            if guest.isdigit():
+                if not os.path.isdir(os.path.join('/proc/', guest)):
+                    msg = '"' + guest + '": Not a running process'
                     continue
-                if len(pids) > 1:
-                    msg = '"' + gname + '": Multiple matches found, use pid ' \
-                          'filter instead'
-                    continue
-                self.refresh_header(pids[0])
-                self.update_pid(pids[0])
+                pid = int(guest)
                 break
+            pids = []
+            try:
+                pids = self.get_pid_from_gname(guest)
+            except:
+                msg = '"' + guest + '": Internal error while searching, ' \
+                      'use pid filter instead'
+                continue
+            if len(pids) == 0:
+                msg = '"' + guest + '": Not an active guest'
+                continue
+            if len(pids) > 1:
+                msg = '"' + guest + '": Multiple matches found, use pid ' \
+                      'filter instead'
+                continue
+            pid = pids[0]
+            break
+        curses.curs_set(0)
+        self._refresh_header(pid)
+        self._update_pid(pid)
 
     def show_stats(self):
         """Refreshes the screen and processes user input."""
         sleeptime = self._delay_initial
-        self.refresh_header()
+        self._refresh_header()
         start = 0.0  # result based on init value never appears on screen
         while True:
-            self.refresh_body(time.time() - start)
+            self._refresh_body(time.time() - start)
             curses.halfdelay(int(sleeptime * 10))
             start = time.time()
             sleeptime = self._delay_regular
@@ -1327,47 +1387,39 @@ class Tui(object):
                 if char == 'b':
                     self._display_guests = not self._display_guests
                     if self.stats.toggle_display_guests(self._display_guests):
-                        self.show_msg(['Command not available with tracepoints'
-                                       ' enabled', 'Restart with debugfs only '
-                                       '(see option \'-d\') and try again!'])
+                        self._show_msg(['Command not available with '
+                                        'tracepoints enabled', 'Restart with '
+                                        'debugfs only (see option \'-d\') and '
+                                        'try again!'])
                         self._display_guests = not self._display_guests
-                    self.refresh_header()
+                    self._refresh_header()
                 if char == 'c':
-                    self.stats.fields_filter = DEFAULT_REGEX
-                    self.refresh_header(0)
-                    self.update_pid(0)
+                    self.stats.fields_filter = ''
+                    self._refresh_header(0)
+                    self._update_pid(0)
                 if char == 'f':
                     curses.curs_set(1)
-                    self.show_filter_selection()
+                    self._show_filter_selection()
                     curses.curs_set(0)
                     sleeptime = self._delay_initial
-                if char == 'g':
-                    curses.curs_set(1)
-                    self.show_vm_selection_by_guest_name()
-                    curses.curs_set(0)
+                if char == 'g' or char == 'p':
+                    self._show_vm_selection_by_guest()
                     sleeptime = self._delay_initial
                 if char == 'h':
-                    self.show_help_interactive()
+                    self._show_help_interactive()
                 if char == 'o':
                     self._sorting = not self._sorting
-                if char == 'p':
-                    curses.curs_set(1)
-                    self.show_vm_selection_by_pid()
-                    curses.curs_set(0)
-                    sleeptime = self._delay_initial
                 if char == 'q':
                     break
                 if char == 'r':
                     self.stats.reset()
                 if char == 's':
                     curses.curs_set(1)
-                    self.show_set_update_interval()
+                    self._show_set_update_interval()
                     curses.curs_set(0)
                     sleeptime = self._delay_initial
                 if char == 'x':
-                    self.update_drilldown()
-                    # prevents display of current values on next refresh
-                    self.stats.get(self._display_guests)
+                    self.stats.child_events = not self.stats.child_events
             except KeyboardInterrupt:
                 break
             except curses.error:
@@ -1380,9 +1432,9 @@ def batch(stats):
         s = stats.get()
         time.sleep(1)
         s = stats.get()
-        for key in sorted(s.keys()):
-            values = s[key]
-            print('%-42s%10d%10d' % (key, values[0], values[1]))
+        for key, values in sorted(s.items()):
+            print('%-42s%10d%10d' % (key.split(' ')[0], values.value,
+                  values.delta))
     except KeyboardInterrupt:
         pass
 
@@ -1392,14 +1444,14 @@ def log(stats):
     keys = sorted(stats.get().keys())
 
     def banner():
-        for k in keys:
-            print(k, end=' ')
+        for key in keys:
+            print(key.split(' ')[0], end=' ')
         print()
 
     def statline():
         s = stats.get()
-        for k in keys:
-            print(' %9d' % s[k][1], end=' ')
+        for key in keys:
+            print(' %9d' % s[key].delta, end=' ')
         print()
     line = 0
     banner_repeat = 20
@@ -1504,7 +1556,7 @@ Press any other key to refresh statistics immediately.
                          )
     optparser.add_option('-f', '--fields',
                          action='store',
-                         default=DEFAULT_REGEX,
+                         default='',
                          dest='fields',
                          help='''fields to display (regex)
                                  "-f help" for a list of available events''',
@@ -1539,17 +1591,6 @@ Press any other key to refresh statistics immediately.
 
 def check_access(options):
     """Exits if the current user can't access all needed directories."""
-    if not os.path.exists('/sys/kernel/debug'):
-        sys.stderr.write('Please enable CONFIG_DEBUG_FS in your kernel.')
-        sys.exit(1)
-
-    if not os.path.exists(PATH_DEBUGFS_KVM):
-        sys.stderr.write("Please make sure, that debugfs is mounted and "
-                         "readable by the current user:\n"
-                         "('mount -t debugfs debugfs /sys/kernel/debug')\n"
-                         "Also ensure, that the kvm modules are loaded.\n")
-        sys.exit(1)
-
     if not os.path.exists(PATH_DEBUGFS_TRACING) and (options.tracepoints or
                                                      not options.debugfs):
         sys.stderr.write("Please enable CONFIG_TRACING in your kernel "
@@ -1567,7 +1608,33 @@ def check_access(options):
     return options
 
 
+def assign_globals():
+    global PATH_DEBUGFS_KVM
+    global PATH_DEBUGFS_TRACING
+
+    debugfs = ''
+    for line in file('/proc/mounts'):
+        if line.split(' ')[0] == 'debugfs':
+            debugfs = line.split(' ')[1]
+            break
+    if debugfs == '':
+        sys.stderr.write("Please make sure that CONFIG_DEBUG_FS is enabled in "
+                         "your kernel, mounted and\nreadable by the current "
+                         "user:\n"
+                         "('mount -t debugfs debugfs /sys/kernel/debug')\n")
+        sys.exit(1)
+
+    PATH_DEBUGFS_KVM = os.path.join(debugfs, 'kvm')
+    PATH_DEBUGFS_TRACING = os.path.join(debugfs, 'tracing')
+
+    if not os.path.exists(PATH_DEBUGFS_KVM):
+        sys.stderr.write("Please make sure that CONFIG_KVM is enabled in "
+                         "your kernel and that the modules are loaded.\n")
+        sys.exit(1)
+
+
 def main():
+    assign_globals()
     options = get_options()
     options = check_access(options)
 
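
Two conventions introduced by the kvm_stat changes above are worth illustrating: provider read() results may now carry child events keyed as '<child> <parent>', and Stats.get() caches each counter as an EventStat(value, delta) named tuple instead of a bare pair. The following standalone sketch is not part of the patch; EventStat matches the patch, while the helper name and the event names are made up purely for illustration of how the deltas and keys combine:

    from collections import namedtuple

    # Same shape as in the patch: absolute counter value plus delta since the
    # previous read.
    EventStat = namedtuple('EventStat', ['value', 'delta'])

    def merge_reading(values, new_reading):
        """Fold one provider read() result into the cached values dict.

        Keys follow the formats documented in Stats.get():
          * 'some_event'              - plain event
          * 'some_child some_event'   - child event, '<child> <parent>'
          * '1234'                    - per-guest aggregation keyed by pid
        """
        for key, newval in new_reading.items():
            oldval = values.get(key, EventStat(0, 0)).value
            values[key] = EventStat(newval, newval - oldval)
        return values

    # Hypothetical counter snapshots taken one refresh interval apart
    cache = {}
    merge_reading(cache, {'some_event': 100, 'some_child some_event': 40})
    merge_reading(cache, {'some_event': 160, 'some_child some_event': 70})
    print(cache['some_event'])             # EventStat(value=160, delta=60)
    print(cache['some_child some_event'])  # EventStat(value=70, delta=30)

The TUI later splits the two-part keys, indents the child under its parent, and computes the child percentage against the parent's total rather than the overall total, which is why _refresh_body() tracks ptotal separately from total.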
index b5b3810c9e945d7f3a39568840fbc5b73f84983b..0811d860fe75000de852c569ccaaa3ae1f0aa944 100644 (file)
@@ -35,13 +35,13 @@ INTERACTIVE COMMANDS
 
 *f*::  filter by regular expression
 
-*g*::  filter by guest name
+*g*::  filter by guest name/PID
 
 *h*::  display interactive commands reference
 
 *o*::   toggle sorting order (Total vs CurAvg/s)
 
-*p*::  filter by PID
+*p*::  filter by guest name/PID
 
 *q*::  quit
 
index 5f758c489a208c09fd09274b0b01302d61d56f27..b572d94255f66c518ebd201eab3fb80b8735d0d4 100644 (file)
@@ -2,7 +2,6 @@
 PREFIX ?= /usr
 SBINDIR ?= sbin
 INSTALL ?= install
-CC = $(CROSS_COMPILE)gcc
 
 TARGET = freefall
 
index c379af003807ad6606d432e98e8c2dd413c04e7d..7b6bed13daaaed8b68abcc4b9b2aa8eb96f7974d 100644 (file)
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for LEDs tools
 
-CC = $(CROSS_COMPILE)gcc
 CFLAGS = -Wall -Wextra -g -I../../include/uapi
 
 all: uledmon led_hw_brightness_mon
index 97073d649c1a0add9c98f8cc363aa8debad6cff1..5bbbf285af74a0afb01ae4dcaba9a7cdbb3a3e93 100644 (file)
@@ -1060,11 +1060,12 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
                prog->insns = new_insn;
                prog->main_prog_cnt = prog->insns_cnt;
                prog->insns_cnt = new_cnt;
+               pr_debug("added %zd insn from %s to prog %s\n",
+                        text->insns_cnt, text->section_name,
+                        prog->section_name);
        }
        insn = &prog->insns[relo->insn_idx];
        insn->imm += prog->main_prog_cnt - relo->insn_idx;
-       pr_debug("added %zd insn from %s to prog %s\n",
-                text->insns_cnt, text->section_name, prog->section_name);
        return 0;
 }
 
index 57254f5b2779fb02276f00eb82401a05eaaabeb0..694abc628e9b3060b2252c1e8c86af4d6176e518 100644 (file)
@@ -29,7 +29,7 @@
 #include "builtin.h"
 #include "check.h"
 
-bool no_fp, no_unreachable;
+bool no_fp, no_unreachable, retpoline, module;
 
 static const char * const check_usage[] = {
        "objtool check [<options>] file.o",
@@ -39,6 +39,8 @@ static const char * const check_usage[] = {
 const struct option check_options[] = {
        OPT_BOOLEAN('f', "no-fp", &no_fp, "Skip frame pointer validation"),
        OPT_BOOLEAN('u', "no-unreachable", &no_unreachable, "Skip 'unreachable instruction' warnings"),
+       OPT_BOOLEAN('r', "retpoline", &retpoline, "Validate retpoline assumptions"),
+       OPT_BOOLEAN('m', "module", &module, "Indicates the object will be part of a kernel module"),
        OPT_END(),
 };
 
@@ -53,5 +55,5 @@ int cmd_check(int argc, const char **argv)
 
        objname = argv[0];
 
-       return check(objname, no_fp, no_unreachable, false);
+       return check(objname, false);
 }
index 91e8e19ff5e06193adc55c455b4d97ad947da7ee..77ea2b97117d2fd586e52294593c4d0c23a55ecd 100644 (file)
@@ -25,7 +25,6 @@
  */
 
 #include <string.h>
-#include <subcmd/parse-options.h>
 #include "builtin.h"
 #include "check.h"
 
@@ -36,9 +35,6 @@ static const char *orc_usage[] = {
        NULL,
 };
 
-extern const struct option check_options[];
-extern bool no_fp, no_unreachable;
-
 int cmd_orc(int argc, const char **argv)
 {
        const char *objname;
@@ -54,7 +50,7 @@ int cmd_orc(int argc, const char **argv)
 
                objname = argv[0];
 
-               return check(objname, no_fp, no_unreachable, true);
+               return check(objname, true);
        }
 
        if (!strcmp(argv[0], "dump")) {
index dd526067fed5ebcacbb45b7ce8a686437bd83d1a..28ff40e19a1413823b9b06ae4d1f1b42d922e850 100644 (file)
 #ifndef _BUILTIN_H
 #define _BUILTIN_H
 
+#include <subcmd/parse-options.h>
+
+extern const struct option check_options[];
+extern bool no_fp, no_unreachable, retpoline, module;
+
 extern int cmd_check(int argc, const char **argv);
 extern int cmd_orc(int argc, const char **argv);
 
index a8cb69a2657658ec41c5877f1b4a47526c8079b0..472e64e95891e671b0c13c0d851e6defdc8e5b2c 100644 (file)
@@ -18,6 +18,7 @@
 #include <string.h>
 #include <stdlib.h>
 
+#include "builtin.h"
 #include "check.h"
 #include "elf.h"
 #include "special.h"
@@ -33,7 +34,6 @@ struct alternative {
 };
 
 const char *objname;
-static bool no_fp;
 struct cfi_state initial_func_cfi;
 
 struct instruction *find_insn(struct objtool_file *file,
@@ -497,6 +497,7 @@ static int add_jump_destinations(struct objtool_file *file)
                         * disguise, so convert them accordingly.
                         */
                        insn->type = INSN_JUMP_DYNAMIC;
+                       insn->retpoline_safe = true;
                        continue;
                } else {
                        /* sibling call */
@@ -548,7 +549,8 @@ static int add_call_destinations(struct objtool_file *file)
                        if (!insn->call_dest && !insn->ignore) {
                                WARN_FUNC("unsupported intra-function call",
                                          insn->sec, insn->offset);
-                               WARN("If this is a retpoline, please patch it in with alternatives and annotate it with ANNOTATE_NOSPEC_ALTERNATIVE.");
+                               if (retpoline)
+                                       WARN("If this is a retpoline, please patch it in with alternatives and annotate it with ANNOTATE_NOSPEC_ALTERNATIVE.");
                                return -1;
                        }
 
@@ -1108,6 +1110,54 @@ static int read_unwind_hints(struct objtool_file *file)
        return 0;
 }
 
+static int read_retpoline_hints(struct objtool_file *file)
+{
+       struct section *sec, *relasec;
+       struct instruction *insn;
+       struct rela *rela;
+       int i;
+
+       sec = find_section_by_name(file->elf, ".discard.retpoline_safe");
+       if (!sec)
+               return 0;
+
+       relasec = sec->rela;
+       if (!relasec) {
+               WARN("missing .rela.discard.retpoline_safe section");
+               return -1;
+       }
+
+       if (sec->len % sizeof(unsigned long)) {
+               WARN("retpoline_safe size mismatch: %d %ld", sec->len, sizeof(unsigned long));
+               return -1;
+       }
+
+       for (i = 0; i < sec->len / sizeof(unsigned long); i++) {
+               rela = find_rela_by_dest(sec, i * sizeof(unsigned long));
+               if (!rela) {
+                       WARN("can't find rela for retpoline_safe[%d]", i);
+                       return -1;
+               }
+
+               insn = find_insn(file, rela->sym->sec, rela->addend);
+               if (!insn) {
+                       WARN("can't find insn for retpoline_safe[%d]", i);
+                       return -1;
+               }
+
+               if (insn->type != INSN_JUMP_DYNAMIC &&
+                   insn->type != INSN_CALL_DYNAMIC) {
+                       WARN_FUNC("retpoline_safe hint not a indirect jump/call",
+                                 insn->sec, insn->offset);
+                       return -1;
+               }
+
+               insn->retpoline_safe = true;
+       }
+
+       return 0;
+}
+
 static int decode_sections(struct objtool_file *file)
 {
        int ret;
@@ -1146,6 +1196,10 @@ static int decode_sections(struct objtool_file *file)
        if (ret)
                return ret;
 
+       ret = read_retpoline_hints(file);
+       if (ret)
+               return ret;
+
        return 0;
 }
 
@@ -1891,6 +1945,38 @@ static int validate_unwind_hints(struct objtool_file *file)
        return warnings;
 }
 
+static int validate_retpoline(struct objtool_file *file)
+{
+       struct instruction *insn;
+       int warnings = 0;
+
+       for_each_insn(file, insn) {
+               if (insn->type != INSN_JUMP_DYNAMIC &&
+                   insn->type != INSN_CALL_DYNAMIC)
+                       continue;
+
+               if (insn->retpoline_safe)
+                       continue;
+
+               /*
+                * .init.text code is ran before userspace and thus doesn't
+                * strictly need retpolines, except for modules which are
+                * loaded late, they very much do need retpoline in their
+                * .init.text
+                */
+               if (!strcmp(insn->sec->name, ".init.text") && !module)
+                       continue;
+
+               WARN_FUNC("indirect %s found in RETPOLINE build",
+                         insn->sec, insn->offset,
+                         insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
+
+               warnings++;
+       }
+
+       return warnings;
+}
+
 static bool is_kasan_insn(struct instruction *insn)
 {
        return (insn->type == INSN_CALL &&
@@ -2022,13 +2108,12 @@ static void cleanup(struct objtool_file *file)
        elf_close(file->elf);
 }
 
-int check(const char *_objname, bool _no_fp, bool no_unreachable, bool orc)
+int check(const char *_objname, bool orc)
 {
        struct objtool_file file;
        int ret, warnings = 0;
 
        objname = _objname;
-       no_fp = _no_fp;
 
        file.elf = elf_open(objname, orc ? O_RDWR : O_RDONLY);
        if (!file.elf)
@@ -2052,6 +2137,13 @@ int check(const char *_objname, bool _no_fp, bool no_unreachable, bool orc)
        if (list_empty(&file.insn_list))
                goto out;
 
+       if (retpoline) {
+               ret = validate_retpoline(&file);
+               if (ret < 0)
+                       return ret;
+               warnings += ret;
+       }
+
        ret = validate_functions(&file);
        if (ret < 0)
                goto out;
index 23a1d065cae190c11432bd2c768f38c7f91e470b..c6b68fcb926ff76c6e44675ff636c16f3552a184 100644 (file)
@@ -45,6 +45,7 @@ struct instruction {
        unsigned char type;
        unsigned long immediate;
        bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts;
+       bool retpoline_safe;
        struct symbol *call_dest;
        struct instruction *jump_dest;
        struct instruction *first_jump_src;
@@ -63,7 +64,7 @@ struct objtool_file {
        bool ignore_unreachables, c_file, hints;
 };
 
-int check(const char *objname, bool no_fp, bool no_unreachable, bool orc);
+int check(const char *objname, bool orc);
 
 struct instruction *find_insn(struct objtool_file *file,
                              struct section *sec, unsigned long offset);
index 9b0351d3ce348844ce6d857c1f9158ed0810dd1e..01232803859406b180eed3d19739f88262fc3e81 100644 (file)
@@ -146,12 +146,6 @@ define allow-override
     $(eval $(1) = $(2)))
 endef
 
-# Allow setting CC and AR and LD, or setting CROSS_COMPILE as a prefix.
-$(call allow-override,CC,$(CROSS_COMPILE)gcc)
-$(call allow-override,AR,$(CROSS_COMPILE)ar)
-$(call allow-override,LD,$(CROSS_COMPILE)ld)
-$(call allow-override,CXX,$(CROSS_COMPILE)g++)
-
 LD += $(EXTRA_LDFLAGS)
 
 HOSTCC  ?= gcc
index a1883bbb014478d9239d24fda259c61bde853b23..2cccbba644187fd7815e2a3fb38f91308cf04de4 100644 (file)
@@ -56,9 +56,6 @@ INSTALL_SCRIPT = ${INSTALL_PROGRAM}
 # to compile vs uClibc, that can be done here as well.
 CROSS = #/usr/i386-linux-uclibc/usr/bin/i386-uclibc-
 CROSS_COMPILE ?= $(CROSS)
-CC = $(CROSS_COMPILE)gcc
-LD = $(CROSS_COMPILE)gcc
-STRIP = $(CROSS_COMPILE)strip
 HOSTCC = gcc
 
 # check if compiler option is supported
index fcb3ed0be5f81ea89585af09b814ce3b0a7610c3..dd614463d4d6978d54afc1007c93ce6fdb0e57d1 100644 (file)
@@ -42,6 +42,24 @@ EXTRA_WARNINGS += -Wformat
 
 CC_NO_CLANG := $(shell $(CC) -dM -E -x c /dev/null | grep -Fq "__clang__"; echo $$?)
 
+# Makefiles suck: This macro sets a default value of $(2) for the
+# variable named by $(1), unless the variable has been set by
+# environment or command line. This is necessary for CC and AR
+# because make sets default values, so the simpler ?= approach
+# won't work as expected.
+define allow-override
+  $(if $(or $(findstring environment,$(origin $(1))),\
+            $(findstring command line,$(origin $(1)))),,\
+    $(eval $(1) = $(2)))
+endef
+
+# Allow setting various cross-compile vars or setting CROSS_COMPILE as a prefix.
+$(call allow-override,CC,$(CROSS_COMPILE)gcc)
+$(call allow-override,AR,$(CROSS_COMPILE)ar)
+$(call allow-override,LD,$(CROSS_COMPILE)ld)
+$(call allow-override,CXX,$(CROSS_COMPILE)g++)
+$(call allow-override,STRIP,$(CROSS_COMPILE)strip)
+
 ifeq ($(CC_NO_CLANG), 1)
 EXTRA_WARNINGS += -Wstrict-aliasing=3
 endif
index 90615e10c79af1c766f0d9a629bdac862f7cb507..815d155891779ddddeec284b5c5ce5a0c530cb98 100644 (file)
@@ -11,8 +11,6 @@ endif
 # (this improves performance and avoids hard-to-debug behaviour);
 MAKEFLAGS += -r
 
-CC = $(CROSS_COMPILE)gcc
-LD = $(CROSS_COMPILE)ld
 CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
 
 ALL_TARGETS := spidev_test spidev_fdx
index 44ef9eba5a7a2181004676cfa28f330cfb576b6a..6c645eb77d4218fd102968fb2a24b19350e3edd9 100644 (file)
@@ -178,6 +178,55 @@ void idr_get_next_test(int base)
        idr_destroy(&idr);
 }
 
+int idr_u32_cb(int id, void *ptr, void *data)
+{
+       BUG_ON(id < 0);
+       BUG_ON(ptr != DUMMY_PTR);
+       return 0;
+}
+
+void idr_u32_test1(struct idr *idr, u32 handle)
+{
+       static bool warned = false;
+       u32 id = handle;
+       int sid = 0;
+       void *ptr;
+
+       BUG_ON(idr_alloc_u32(idr, DUMMY_PTR, &id, id, GFP_KERNEL));
+       BUG_ON(id != handle);
+       BUG_ON(idr_alloc_u32(idr, DUMMY_PTR, &id, id, GFP_KERNEL) != -ENOSPC);
+       BUG_ON(id != handle);
+       if (!warned && id > INT_MAX)
+               printk("vvv Ignore these warnings\n");
+       ptr = idr_get_next(idr, &sid);
+       if (id > INT_MAX) {
+               BUG_ON(ptr != NULL);
+               BUG_ON(sid != 0);
+       } else {
+               BUG_ON(ptr != DUMMY_PTR);
+               BUG_ON(sid != id);
+       }
+       idr_for_each(idr, idr_u32_cb, NULL);
+       if (!warned && id > INT_MAX) {
+               printk("^^^ Warnings over\n");
+               warned = true;
+       }
+       BUG_ON(idr_remove(idr, id) != DUMMY_PTR);
+       BUG_ON(!idr_is_empty(idr));
+}
+
+void idr_u32_test(int base)
+{
+       DEFINE_IDR(idr);
+       idr_init_base(&idr, base);
+       idr_u32_test1(&idr, 10);
+       idr_u32_test1(&idr, 0x7fffffff);
+       idr_u32_test1(&idr, 0x80000000);
+       idr_u32_test1(&idr, 0x80000001);
+       idr_u32_test1(&idr, 0xffe00000);
+       idr_u32_test1(&idr, 0xffffffff);
+}
+
 void idr_checks(void)
 {
        unsigned long i;
@@ -248,6 +297,9 @@ void idr_checks(void)
        idr_get_next_test(0);
        idr_get_next_test(1);
        idr_get_next_test(4);
+       idr_u32_test(4);
+       idr_u32_test(1);
+       idr_u32_test(0);
 }
 
 /*
index 6903ccf35595f560350a26d03e3eb4369fbb128b..44a0d1ad4408429d67378bf185773f77b3461ab8 100644 (file)
@@ -29,7 +29,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
 {
        struct radix_tree_node *node;
 
-       if (flags & __GFP_NOWARN)
+       if (!(flags & __GFP_DIRECT_RECLAIM))
                return NULL;
 
        pthread_mutex_lock(&cachep->lock);
@@ -73,10 +73,17 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 
 void *kmalloc(size_t size, gfp_t gfp)
 {
-       void *ret = malloc(size);
+       void *ret;
+
+       if (!(gfp & __GFP_DIRECT_RECLAIM))
+               return NULL;
+
+       ret = malloc(size);
        uatomic_inc(&nr_allocated);
        if (kmalloc_verbose)
                printf("Allocating %p from malloc\n", ret);
+       if (gfp & __GFP_ZERO)
+               memset(ret, 0, size);
        return ret;
 }
 
diff --git a/tools/testing/radix-tree/linux/compiler_types.h b/tools/testing/radix-tree/linux/compiler_types.h
new file mode 100644 (file)
index 0000000..e69de29
index e9fff59dfd8a86cb6022d0dc6e5321a3d9a4f636..e3201ccf54c3c46dc7ffaf9bc6185edd406a6b95 100644 (file)
@@ -11,6 +11,7 @@
 #define __GFP_IO               0x40u
 #define __GFP_FS               0x80u
 #define __GFP_NOWARN           0x200u
+#define __GFP_ZERO             0x8000u
 #define __GFP_ATOMIC           0x80000u
 #define __GFP_ACCOUNT          0x100000u
 #define __GFP_DIRECT_RECLAIM   0x400000u
index 979baeec7e706aee05ef6bdfbf8dd91494284dd2..a037def0dec637ef9a38b034de5ac1fed4d32d8e 100644 (file)
@@ -3,6 +3,7 @@
 #define SLAB_H
 
 #include <linux/types.h>
+#include <linux/gfp.h>
 
 #define SLAB_HWCACHE_ALIGN 1
 #define SLAB_PANIC 2
 void *kmalloc(size_t size, gfp_t);
 void kfree(void *);
 
+static inline void *kzalloc(size_t size, gfp_t gfp)
+{
+        return kmalloc(size, gfp | __GFP_ZERO);
+}
+
 void *kmem_cache_alloc(struct kmem_cache *cachep, int flags);
 void kmem_cache_free(struct kmem_cache *cachep, void *objp);
 
index 1a74922689930ce1829afaa5f91b701f97861b07..f6304d2be90c16e7b818415d867704abda3ecf09 100644 (file)
@@ -11,11 +11,11 @@ all:
                BUILD_TARGET=$(OUTPUT)/$$DIR;   \
                mkdir $$BUILD_TARGET  -p;       \
                make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
-               #SUBDIR test prog name should be in the form: SUBDIR_test.sh
+               #SUBDIR test prog name should be in the form: SUBDIR_test.sh \
                TEST=$$DIR"_test.sh"; \
-               if [ -e $$DIR/$$TEST ]; then
-                       rsync -a $$DIR/$$TEST $$BUILD_TARGET/;
-               fi
+               if [ -e $$DIR/$$TEST ]; then \
+                       rsync -a $$DIR/$$TEST $$BUILD_TARGET/; \
+               fi \
        done
 
 override define RUN_TESTS
index cc15af2e54fe8651e7b88caaccf8481871c983bc..9cf83f895d98873ec81044ce4c4c0ecb89ac5ec3 100644 (file)
@@ -11,3 +11,4 @@ test_progs
 test_tcpbpf_user
 test_verifier_log
 feature
+test_libbpf_open
index 436c4c72414f4b3720be09fc00c88ccea564b1e3..9e03a4c356a496e3f57d18058254df464c206278 100644 (file)
@@ -126,6 +126,8 @@ static void test_hashmap_sizes(int task, void *data)
                        fd = bpf_create_map(BPF_MAP_TYPE_HASH, i, j,
                                            2, map_flags);
                        if (fd < 0) {
+                               if (errno == ENOMEM)
+                                       return;
                                printf("Failed to create hashmap key=%d value=%d '%s'\n",
                                       i, j, strerror(errno));
                                exit(1);
index 57119ad57a3fbf6e1e8b1b35d9c74b9ab201715e..3e645ee41ed5fcc033266645b8e893c143fb79fc 100644 (file)
@@ -5,7 +5,6 @@
 #include <linux/if_ether.h>
 #include <linux/if_packet.h>
 #include <linux/ip.h>
-#include <linux/in6.h>
 #include <linux/types.h>
 #include <linux/socket.h>
 #include <linux/tcp.h>
index c0f16e93f9bd14b3ef98b323c6830ca14645509b..c73592fa3d4174815de60cbc8ecb411c95cffd40 100644 (file)
@@ -2586,6 +2586,32 @@ static struct bpf_test tests[] = {
                .result_unpriv = REJECT,
                .result = ACCEPT,
        },
+       {
+               "runtime/jit: pass negative index to tail_call",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_3, -1),
+                       BPF_LD_MAP_FD(BPF_REG_2, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_tail_call),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_prog = { 1 },
+               .result = ACCEPT,
+       },
+       {
+               "runtime/jit: pass > 32bit index to tail_call",
+               .insns = {
+                       BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
+                       BPF_LD_MAP_FD(BPF_REG_2, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_tail_call),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_prog = { 2 },
+               .result = ACCEPT,
+       },
        {
                "stack pointer arithmetic",
                .insns = {
index cea4adcd42b8877f7e5a05d57a837bcc61c1d97d..a63e8453984d2793eb38b706236f31dc49527e9b 100644 (file)
@@ -12,9 +12,9 @@ all:
                BUILD_TARGET=$(OUTPUT)/$$DIR;   \
                mkdir $$BUILD_TARGET  -p;       \
                make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
-               if [ -e $$DIR/$(TEST_PROGS) ]; then
-                       rsync -a $$DIR/$(TEST_PROGS) $$BUILD_TARGET/;
-               fi
+               if [ -e $$DIR/$(TEST_PROGS) ]; then \
+                       rsync -a $$DIR/$(TEST_PROGS) $$BUILD_TARGET/; \
+               fi \
        done
 
 override define RUN_TESTS
index a5276a91dfbfc91806d737d561acbbfd2bc7f748..0862e6f47a38c8ae56870809c87806cca0e39c95 100644 (file)
@@ -5,6 +5,7 @@ CFLAGS += -I../../../../include/
 CFLAGS += -I../../../../usr/include/
 
 TEST_PROGS := run_tests.sh
+TEST_FILES := run_fuse_test.sh
 TEST_GEN_FILES := memfd_test fuse_mnt fuse_test
 
 fuse_mnt.o: CFLAGS += $(shell pkg-config fuse --cflags)
diff --git a/tools/testing/selftests/memfd/config b/tools/testing/selftests/memfd/config
new file mode 100644 (file)
index 0000000..835c7f4
--- /dev/null
@@ -0,0 +1 @@
+CONFIG_FUSE_FS=m
index 86636d207adf72c12403355cae88d3bfe819fd80..183b46883875e8622bb91f1372f5458ea9aedbaf 100644 (file)
@@ -4,7 +4,7 @@ all:
 include ../lib.mk
 
 TEST_PROGS := mem-on-off-test.sh
-override RUN_TESTS := ./mem-on-off-test.sh -r 2 && echo "selftests: memory-hotplug [PASS]" || echo "selftests: memory-hotplug [FAIL]"
+override RUN_TESTS := @./mem-on-off-test.sh -r 2 && echo "selftests: memory-hotplug [PASS]" || echo "selftests: memory-hotplug [FAIL]"
 override EMIT_TESTS := echo "$(RUN_TESTS)"
 
 run_full_test:
index 6a8e5a9bfc1065a3e860cd5b31e7aea2d508bea8..d148f9f89fb64cf325a546a1855c1e1cc5fc8a21 100644 (file)
@@ -2,3 +2,4 @@ CONFIG_MISC_FILESYSTEMS=y
 CONFIG_PSTORE=y
 CONFIG_PSTORE_PMSG=y
 CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_RAM=m
index 0b457e8e0f0c103281b5b55006729ffac77ad8f0..5df609950a666f8000307653a0b1e8d4368549a2 100644 (file)
@@ -141,6 +141,15 @@ struct seccomp_data {
 #define SECCOMP_FILTER_FLAG_LOG 2
 #endif
 
+#ifndef PTRACE_SECCOMP_GET_METADATA
+#define PTRACE_SECCOMP_GET_METADATA    0x420d
+
+struct seccomp_metadata {
+       __u64 filter_off;       /* Input: which filter */
+       __u64 flags;             /* Output: filter's flags */
+};
+#endif
+
 #ifndef seccomp
 int seccomp(unsigned int op, unsigned int flags, void *args)
 {
@@ -2845,6 +2854,58 @@ TEST(get_action_avail)
        EXPECT_EQ(errno, EOPNOTSUPP);
 }
 
+TEST(get_metadata)
+{
+       pid_t pid;
+       int pipefd[2];
+       char buf;
+       struct seccomp_metadata md;
+
+       ASSERT_EQ(0, pipe(pipefd));
+
+       pid = fork();
+       ASSERT_GE(pid, 0);
+       if (pid == 0) {
+               struct sock_filter filter[] = {
+                       BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
+               };
+               struct sock_fprog prog = {
+                       .len = (unsigned short)ARRAY_SIZE(filter),
+                       .filter = filter,
+               };
+
+               /* one with log, one without */
+               ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER,
+                                    SECCOMP_FILTER_FLAG_LOG, &prog));
+               ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog));
+
+               ASSERT_EQ(0, close(pipefd[0]));
+               ASSERT_EQ(1, write(pipefd[1], "1", 1));
+               ASSERT_EQ(0, close(pipefd[1]));
+
+               while (1)
+                       sleep(100);
+       }
+
+       ASSERT_EQ(0, close(pipefd[1]));
+       ASSERT_EQ(1, read(pipefd[0], &buf, 1));
+
+       ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid));
+       ASSERT_EQ(pid, waitpid(pid, NULL, 0));
+
+       md.filter_off = 0;
+       ASSERT_EQ(sizeof(md), ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md));
+       EXPECT_EQ(md.flags, SECCOMP_FILTER_FLAG_LOG);
+       EXPECT_EQ(md.filter_off, 0);
+
+       md.filter_off = 1;
+       ASSERT_EQ(sizeof(md), ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md));
+       EXPECT_EQ(md.flags, 0);
+       EXPECT_EQ(md.filter_off, 1);
+
+       ASSERT_EQ(0, kill(pid, SIGKILL));
+}
+
 /*
  * TODO:
  * - add microbenchmarks
index b3c8ba3cb66855ff93d6581f7428982be41fca60..d0121a8a3523a948af699e52b6adbf1cde37b030 100644 (file)
@@ -30,7 +30,7 @@ $(TEST_CUSTOM_PROGS): $(TESTS) $(OBJS)
        $(CC) -o $(TEST_CUSTOM_PROGS) $(OBJS) $(TESTS) $(CFLAGS) $(LDFLAGS)
 
 $(OBJS): $(OUTPUT)/%.o: %.c
-       $(CC) -c $^ -o $@
+       $(CC) -c $^ -o $@ $(CFLAGS)
 
 $(TESTS): $(OUTPUT)/%.o: %.c
        $(CC) -c $^ -o $@
index 3d5a62ff7d31ed437fc5457a501ec9b606839f92..f5d7a7851e2177b315111f4e8ae3f0b0a716487d 100644 (file)
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
+include ../lib.mk
+
 ifndef CROSS_COMPILE
 CFLAGS := -std=gnu99
 CFLAGS_vdso_standalone_test_x86 := -nostdlib -fno-asynchronous-unwind-tables -fno-stack-protector
@@ -6,16 +8,14 @@ ifeq ($(CONFIG_X86_32),y)
 LDLIBS += -lgcc_s
 endif
 
-TEST_PROGS := vdso_test vdso_standalone_test_x86
+TEST_PROGS := $(OUTPUT)/vdso_test $(OUTPUT)/vdso_standalone_test_x86
 
 all: $(TEST_PROGS)
-vdso_test: parse_vdso.c vdso_test.c
-vdso_standalone_test_x86: vdso_standalone_test_x86.c parse_vdso.c
+$(OUTPUT)/vdso_test: parse_vdso.c vdso_test.c
+$(OUTPUT)/vdso_standalone_test_x86: vdso_standalone_test_x86.c parse_vdso.c
        $(CC) $(CFLAGS) $(CFLAGS_vdso_standalone_test_x86) \
                vdso_standalone_test_x86.c parse_vdso.c \
-               -o vdso_standalone_test_x86
+               -o $@
 
-include ../lib.mk
-clean:
-       rm -fr $(TEST_PROGS)
+EXTRA_CLEAN := $(TEST_PROGS)
 endif
index 63c94d776e8970de2703b7416a76bffdeb101361..342c7bc9dc8c5d29441e27da5131d5312ea4bca8 100644 (file)
@@ -11,3 +11,4 @@ mlock-intersect-test
 mlock-random-test
 virtual_address_range
 gup_benchmark
+va_128TBswitch
index 4e6506078494fbe160ea511b91dfcddd5768793b..01d758d73b6db1b442d85387871442cdf9c39017 100644 (file)
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for USB tools
 
-CC = $(CROSS_COMPILE)gcc
 PTHREAD_LIBS = -lpthread
 WARNINGS = -Wall -Wextra
 CFLAGS = $(WARNINGS) -g -I../include
index be320b905ea75db150da97dfac999b0f7e90b47b..20f6cf04377f0c257aeb4bacb3133b6a35e9e9b6 100644 (file)
@@ -6,7 +6,6 @@ TARGETS=page-types slabinfo page_owner_sort
 LIB_DIR = ../lib/api
 LIBS = $(LIB_DIR)/libapi.a
 
-CC = $(CROSS_COMPILE)gcc
 CFLAGS = -Wall -Wextra -I../lib/
 LDFLAGS = $(LIBS)
 
index e664f1167388a99a76828981237e38049c6c193f..e0e87239126b5494e358ade6083658a2b7b9ad6a 100644 (file)
@@ -2,7 +2,6 @@ PREFIX ?= /usr
 SBINDIR ?= sbin
 INSTALL ?= install
 CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
-CC = $(CROSS_COMPILE)gcc
 
 TARGET = dell-smbios-example
 
index 70268c0bec799c0ce85c2f27267e0ab514c5f852..70f4c30918eb2682b638c326977b50e3568c7638 100644 (file)
@@ -36,6 +36,8 @@ static struct timecounter *timecounter;
 static unsigned int host_vtimer_irq;
 static u32 host_vtimer_irq_flags;
 
+static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);
+
 static const struct kvm_irq_level default_ptimer_irq = {
        .irq    = 30,
        .level  = 1,
@@ -56,6 +58,12 @@ u64 kvm_phys_timer_read(void)
        return timecounter->cc->read(timecounter->cc);
 }
 
+static inline bool userspace_irqchip(struct kvm *kvm)
+{
+       return static_branch_unlikely(&userspace_irqchip_in_use) &&
+               unlikely(!irqchip_in_kernel(kvm));
+}
+
 static void soft_timer_start(struct hrtimer *hrt, u64 ns)
 {
        hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
@@ -69,25 +77,6 @@ static void soft_timer_cancel(struct hrtimer *hrt, struct work_struct *work)
                cancel_work_sync(work);
 }
 
-static void kvm_vtimer_update_mask_user(struct kvm_vcpu *vcpu)
-{
-       struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
-
-       /*
-        * When using a userspace irqchip with the architected timers, we must
-        * prevent continuously exiting from the guest, and therefore mask the
-        * physical interrupt by disabling it on the host interrupt controller
-        * when the virtual level is high, such that the guest can make
-        * forward progress.  Once we detect the output level being
-        * de-asserted, we unmask the interrupt again so that we exit from the
-        * guest when the timer fires.
-        */
-       if (vtimer->irq.level)
-               disable_percpu_irq(host_vtimer_irq);
-       else
-               enable_percpu_irq(host_vtimer_irq, 0);
-}
-
 static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
 {
        struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
@@ -106,9 +95,9 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
        if (kvm_timer_should_fire(vtimer))
                kvm_timer_update_irq(vcpu, true, vtimer);
 
-       if (static_branch_unlikely(&userspace_irqchip_in_use) &&
-           unlikely(!irqchip_in_kernel(vcpu->kvm)))
-               kvm_vtimer_update_mask_user(vcpu);
+       if (userspace_irqchip(vcpu->kvm) &&
+           !static_branch_unlikely(&has_gic_active_state))
+               disable_percpu_irq(host_vtimer_irq);
 
        return IRQ_HANDLED;
 }
@@ -290,8 +279,7 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
        trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
                                   timer_ctx->irq.level);
 
-       if (!static_branch_unlikely(&userspace_irqchip_in_use) ||
-           likely(irqchip_in_kernel(vcpu->kvm))) {
+       if (!userspace_irqchip(vcpu->kvm)) {
                ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
                                          timer_ctx->irq.irq,
                                          timer_ctx->irq.level,
@@ -350,12 +338,6 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
        phys_timer_emulate(vcpu);
 }
 
-static void __timer_snapshot_state(struct arch_timer_context *timer)
-{
-       timer->cnt_ctl = read_sysreg_el0(cntv_ctl);
-       timer->cnt_cval = read_sysreg_el0(cntv_cval);
-}
-
 static void vtimer_save_state(struct kvm_vcpu *vcpu)
 {
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
@@ -367,8 +349,10 @@ static void vtimer_save_state(struct kvm_vcpu *vcpu)
        if (!vtimer->loaded)
                goto out;
 
-       if (timer->enabled)
-               __timer_snapshot_state(vtimer);
+       if (timer->enabled) {
+               vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
+               vtimer->cnt_cval = read_sysreg_el0(cntv_cval);
+       }
 
        /* Disable the virtual timer */
        write_sysreg_el0(0, cntv_ctl);
@@ -460,23 +444,43 @@ static void set_cntvoff(u64 cntvoff)
        kvm_call_hyp(__kvm_timer_set_cntvoff, low, high);
 }
 
-static void kvm_timer_vcpu_load_vgic(struct kvm_vcpu *vcpu)
+static inline void set_vtimer_irq_phys_active(struct kvm_vcpu *vcpu, bool active)
+{
+       int r;
+       r = irq_set_irqchip_state(host_vtimer_irq, IRQCHIP_STATE_ACTIVE, active);
+       WARN_ON(r);
+}
+
+static void kvm_timer_vcpu_load_gic(struct kvm_vcpu *vcpu)
 {
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        bool phys_active;
-       int ret;
 
-       phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
-
-       ret = irq_set_irqchip_state(host_vtimer_irq,
-                                   IRQCHIP_STATE_ACTIVE,
-                                   phys_active);
-       WARN_ON(ret);
+       if (irqchip_in_kernel(vcpu->kvm))
+               phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
+       else
+               phys_active = vtimer->irq.level;
+       set_vtimer_irq_phys_active(vcpu, phys_active);
 }
 
-static void kvm_timer_vcpu_load_user(struct kvm_vcpu *vcpu)
+static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
 {
-       kvm_vtimer_update_mask_user(vcpu);
+       struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+
+       /*
+        * When using a userspace irqchip with the architected timers and a
+        * host interrupt controller that doesn't support an active state, we
+        * must still prevent continuously exiting from the guest, and
+        * therefore mask the physical interrupt by disabling it on the host
+        * interrupt controller when the virtual level is high, such that the
+        * guest can make forward progress.  Once we detect the output level
+        * being de-asserted, we unmask the interrupt again so that we exit
+        * from the guest when the timer fires.
+        */
+       if (vtimer->irq.level)
+               disable_percpu_irq(host_vtimer_irq);
+       else
+               enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
 }
 
 void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
@@ -487,10 +491,10 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
        if (unlikely(!timer->enabled))
                return;
 
-       if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
-               kvm_timer_vcpu_load_user(vcpu);
+       if (static_branch_likely(&has_gic_active_state))
+               kvm_timer_vcpu_load_gic(vcpu);
        else
-               kvm_timer_vcpu_load_vgic(vcpu);
+               kvm_timer_vcpu_load_nogic(vcpu);
 
        set_cntvoff(vtimer->cntvoff);
 
@@ -555,18 +559,24 @@ static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
 {
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
-       if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
-               __timer_snapshot_state(vtimer);
-               if (!kvm_timer_should_fire(vtimer)) {
-                       kvm_timer_update_irq(vcpu, false, vtimer);
-                       kvm_vtimer_update_mask_user(vcpu);
-               }
+       if (!kvm_timer_should_fire(vtimer)) {
+               kvm_timer_update_irq(vcpu, false, vtimer);
+               if (static_branch_likely(&has_gic_active_state))
+                       set_vtimer_irq_phys_active(vcpu, false);
+               else
+                       enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
        }
 }
 
 void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
 {
-       unmask_vtimer_irq_user(vcpu);
+       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+       if (unlikely(!timer->enabled))
+               return;
+
+       if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
+               unmask_vtimer_irq_user(vcpu);
 }
 
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
@@ -753,6 +763,8 @@ int kvm_timer_hyp_init(bool has_gic)
                        kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
                        goto out_free_irq;
                }
+
+               static_branch_enable(&has_gic_active_state);
        }
 
        kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
index 4501e658e8d6fc97f39b54bceff8f8b1f36cb95a..65dea3ffef68ede4ca8d8e5bb7d9c2d3edd3352a 100644 (file)
@@ -969,8 +969,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
                /* Check for overlaps */
                r = -EEXIST;
                kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
-                       if ((slot->id >= KVM_USER_MEM_SLOTS) ||
-                           (slot->id == id))
+                       if (slot->id == id)
                                continue;
                        if (!((base_gfn + npages <= slot->base_gfn) ||
                              (base_gfn >= slot->base_gfn + slot->npages)))