Merge tag 'for-linus-20180920' of git://git.kernel.dk/linux-block
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 21 Sep 2018 07:41:05 +0000 (09:41 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 21 Sep 2018 07:41:05 +0000 (09:41 +0200)
Jens writes:
  "Storage fixes for 4.19-rc5

  - Fix for leaking kernel pointer in floppy ioctl (Andy Whitcroft)

  - NVMe pull request from Christoph, and a single ANA log page fix
    (Hannes)

  - Regression fix for libata qd32 support, where we trigger an illegal
    active command transition. This fixes a CD-ROM detection issue that
    was reported, but could also trigger premature completion of the
    internal tag (me)"

* tag 'for-linus-20180920' of git://git.kernel.dk/linux-block:
  floppy: Do not copy a kernel pointer to user memory in FDGETPRM ioctl
  libata: mask swap internal and hardware tag
  nvme: count all ANA groups for ANA Log page

1165 files changed:
.mailmap
Documentation/ABI/stable/sysfs-bus-xen-backend
Documentation/ABI/stable/sysfs-devices-system-xen_memory
Documentation/admin-guide/kernel-parameters.txt
Documentation/arm64/sve.txt
Documentation/device-mapper/dm-raid.txt
Documentation/devicetree/bindings/arm/cpu-capacity.txt
Documentation/devicetree/bindings/arm/idle-states.txt
Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt
Documentation/devicetree/bindings/net/cpsw.txt
Documentation/devicetree/bindings/net/macb.txt
Documentation/devicetree/bindings/net/sh_eth.txt
Documentation/devicetree/bindings/reset/amlogic,meson-axg-audio-arb.txt [new file with mode: 0644]
Documentation/devicetree/bindings/watchdog/renesas-wdt.txt
Documentation/early-userspace/README
Documentation/filesystems/ramfs-rootfs-initramfs.txt
Documentation/filesystems/vfs.txt
Documentation/hwmon/ina2xx
Documentation/i2c/DMA-considerations
Documentation/kbuild/kconfig-language.txt
Documentation/kbuild/makefiles.txt
Documentation/process/changes.rst
Documentation/process/code-of-conduct.rst [new file with mode: 0644]
Documentation/process/code-of-conflict.rst [deleted file]
Documentation/process/index.rst
Documentation/scsi/scsi-parameters.txt
Documentation/x86/earlyprintk.txt
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/Makefile
arch/arc/boot/dts/axc003.dtsi
arch/arc/boot/dts/axc003_idu.dtsi
arch/arc/boot/dts/axs10x_mb.dtsi
arch/arc/boot/dts/hsdk.dts
arch/arc/configs/axs101_defconfig
arch/arc/configs/axs103_defconfig
arch/arc/configs/axs103_smp_defconfig
arch/arc/configs/haps_hs_defconfig
arch/arc/configs/haps_hs_smp_defconfig
arch/arc/configs/hsdk_defconfig
arch/arc/configs/nps_defconfig
arch/arc/configs/nsim_700_defconfig
arch/arc/configs/nsim_hs_defconfig
arch/arc/configs/nsim_hs_smp_defconfig
arch/arc/configs/nsimosci_defconfig
arch/arc/configs/nsimosci_hs_defconfig
arch/arc/configs/nsimosci_hs_smp_defconfig
arch/arc/configs/tb10x_defconfig
arch/arc/configs/vdk_hs38_defconfig
arch/arc/configs/vdk_hs38_smp_defconfig
arch/arc/include/asm/atomic.h
arch/arc/include/asm/dma-mapping.h [new file with mode: 0644]
arch/arc/kernel/troubleshoot.c
arch/arc/mm/cache.c
arch/arc/mm/dma.c
arch/arm/Makefile
arch/arm/boot/dts/am335x-osd3358-sm-red.dts [changed mode: 0755->0644]
arch/arm/boot/dts/am4372.dtsi
arch/arm/boot/dts/imx23-evk.dts
arch/arm/boot/dts/imx28-evk.dts
arch/arm/boot/dts/imx7d.dtsi
arch/arm/boot/dts/omap4-droid4-xt894.dts
arch/arm/boot/dts/sama5d3_emac.dtsi
arch/arm/configs/imx_v6_v7_defconfig
arch/arm/configs/mxs_defconfig
arch/arm/configs/versatile_defconfig
arch/arm/include/asm/kvm_host.h
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-rockchip/Kconfig
arch/arm64/Kconfig
arch/arm64/Kconfig.platforms
arch/arm64/Makefile
arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
arch/arm64/boot/dts/arm/juno-r1.dts
arch/arm64/boot/dts/arm/juno-r2.dts
arch/arm64/boot/dts/arm/juno.dts
arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
arch/arm64/boot/dts/mediatek/mt2712e.dtsi
arch/arm64/boot/dts/sprd/sc9860.dtsi
arch/arm64/boot/dts/xilinx/zynqmp.dtsi
arch/arm64/configs/defconfig
arch/arm64/crypto/ghash-ce-glue.c
arch/arm64/crypto/sm4-ce-glue.c
arch/arm64/include/asm/jump_label.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/kernel/Makefile
arch/arm64/kernel/crash_core.c [new file with mode: 0644]
arch/arm64/kernel/machine_kexec.c
arch/arm64/kvm/hyp/switch.c
arch/arm64/mm/mmu.c
arch/c6x/Makefile
arch/h8300/Makefile
arch/hexagon/Makefile
arch/hexagon/include/asm/bitops.h
arch/hexagon/kernel/dma.c
arch/m68k/Makefile
arch/m68k/mac/misc.c
arch/m68k/mm/mcfmmu.c
arch/microblaze/Makefile
arch/mips/Makefile
arch/mips/boot/compressed/Makefile
arch/mips/include/asm/kvm_host.h
arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
arch/mips/kernel/vdso.c
arch/mips/kvm/mmu.c
arch/mips/lantiq/xway/dma.c
arch/mips/lasat/image/Makefile
arch/nds32/Kconfig
arch/nds32/Makefile
arch/nds32/include/asm/elf.h
arch/nds32/include/asm/ftrace.h [new file with mode: 0644]
arch/nds32/include/asm/nds32.h
arch/nds32/include/asm/uaccess.h
arch/nds32/kernel/Makefile
arch/nds32/kernel/atl2c.c
arch/nds32/kernel/ex-entry.S
arch/nds32/kernel/ex-exit.S
arch/nds32/kernel/ftrace.c [new file with mode: 0644]
arch/nds32/kernel/module.c
arch/nds32/kernel/stacktrace.c
arch/nds32/kernel/traps.c
arch/nds32/kernel/vmlinux.lds.S
arch/nios2/Kconfig.debug
arch/powerpc/Kconfig
arch/powerpc/Makefile
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_64_mmu_radix.c
arch/powerpc/mm/mmu_context_book3s64.c
arch/powerpc/platforms/powernv/vas-window.c
arch/riscv/Makefile
arch/riscv/include/asm/tlb.h
arch/riscv/kernel/setup.c
arch/riscv/kernel/sys_riscv.c
arch/s390/Makefile
arch/s390/crypto/paes_s390.c
arch/s390/include/asm/mmu.h
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/priv.c
arch/s390/kvm/vsie.c
arch/sh/Makefile
arch/sparc/Makefile
arch/sparc/kernel/of_device_32.c
arch/sparc/kernel/of_device_64.c
arch/um/Makefile
arch/x86/Kconfig
arch/x86/Makefile
arch/x86/Makefile.um
arch/x86/boot/compressed/Makefile
arch/x86/crypto/aegis128-aesni-glue.c
arch/x86/crypto/aegis128l-aesni-glue.c
arch/x86/crypto/aegis256-aesni-glue.c
arch/x86/crypto/aesni-intel_asm.S
arch/x86/crypto/morus1280-sse2-glue.c
arch/x86/crypto/morus640-sse2-glue.c
arch/x86/entry/vdso/Makefile
arch/x86/events/core.c
arch/x86/events/intel/lbr.c
arch/x86/include/asm/atomic.h
arch/x86/include/asm/atomic64_32.h
arch/x86/include/asm/atomic64_64.h
arch/x86/include/asm/irqflags.h
arch/x86/include/asm/kdebug.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/mce.h
arch/x86/include/asm/pgtable-2level.h
arch/x86/include/asm/pgtable-3level.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_64.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/set_memory.h
arch/x86/include/asm/signal.h
arch/x86/include/asm/stacktrace.h
arch/x86/include/asm/tlbflush.h
arch/x86/include/asm/vgtod.h
arch/x86/kernel/alternative.c
arch/x86/kernel/apic/vector.c
arch/x86/kernel/apm_32.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/mcheck/mce-internal.h
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/microcode/amd.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/dumpstack.c
arch/x86/kernel/eisa.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/topology.c
arch/x86/kernel/tsc.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/lib/usercopy.c
arch/x86/mm/cpu_entry_area.c
arch/x86/mm/fault.c
arch/x86/mm/init.c
arch/x86/mm/mmap.c
arch/x86/mm/pageattr.c
arch/x86/mm/pat.c
arch/x86/mm/pgtable.c
arch/x86/mm/pti.c
arch/x86/mm/tlb.c
arch/x86/platform/efi/efi_32.c
arch/x86/xen/mmu_pv.c
arch/xtensa/Kconfig
arch/xtensa/Makefile
arch/xtensa/boot/boot-elf/Makefile
arch/xtensa/platforms/iss/setup.c
certs/system_certificates.S
drivers/acpi/acpi_lpss.c
drivers/acpi/bus.c
drivers/acpi/nfit/core.c
drivers/acpi/nfit/nfit.h
drivers/android/binder_alloc.c
drivers/ata/libata-core.c
drivers/base/firmware_loader/main.c
drivers/base/memory.c
drivers/base/power/clock_ops.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/rbd.c
drivers/block/rsxx/core.c
drivers/bluetooth/Kconfig
drivers/bluetooth/btmtkuart.c
drivers/bluetooth/hci_ldisc.c
drivers/bus/ti-sysc.c
drivers/char/Kconfig
drivers/char/ipmi/ipmi_bt_sm.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/ipmi/ipmi_ssif.c
drivers/char/ipmi/kcs_bmc.c
drivers/char/random.c
drivers/clk/clk-npcm7xx.c
drivers/clk/x86/clk-pmc-atom.c
drivers/clk/x86/clk-st.c
drivers/cpuidle/governors/menu.c
drivers/crypto/caam/caamalg_qi.c
drivers/crypto/caam/caampkc.c
drivers/crypto/caam/jr.c
drivers/crypto/cavium/nitrox/nitrox_dev.h
drivers/crypto/cavium/nitrox/nitrox_lib.c
drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
drivers/crypto/ccp/psp-dev.c
drivers/crypto/chelsio/chtls/chtls.h
drivers/crypto/chelsio/chtls/chtls_main.c
drivers/crypto/vmx/aes_cbc.c
drivers/crypto/vmx/aes_xts.c
drivers/dax/device.c
drivers/dax/pmem.c
drivers/dax/super.c
drivers/dma/dmaengine.c
drivers/dma/mic_x100_dma.c
drivers/firmware/arm_scmi/perf.c
drivers/fpga/dfl-fme-pr.c
drivers/gpio/gpio-adp5588.c
drivers/gpio/gpio-dwapb.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib-of.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/kv_dpm.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/si_dpm.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/include/kgd_kfd_interface.h
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_debugfs.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/i915/gvt/dmabuf.c
drivers/gpu/drm/i915/gvt/fb_decoder.c
drivers/gpu/drm/i915/gvt/fb_decoder.h
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/mmio.c
drivers/gpu/drm/i915/gvt/mmio_context.c
drivers/gpu/drm/i915/gvt/opregion.c
drivers/gpu/drm/i915/gvt/sched_policy.c
drivers/gpu/drm/i915/gvt/vgpu.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/intel_audio.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_lspcon.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/mediatek/mtk_disp_ovl.c
drivers/gpu/drm/mediatek/mtk_disp_rdma.c
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
drivers/gpu/drm/mediatek/mtk_drm_crtc.h
drivers/gpu/drm/mediatek/mtk_drm_ddp.c
drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
drivers/gpu/drm/mediatek/mtk_drm_drv.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_display.h
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_fbcon.h
drivers/gpu/drm/nouveau/nouveau_vga.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
drivers/gpu/drm/pl111/pl111_vexpress.c
drivers/gpu/drm/sun4i/sun4i_drv.c
drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
drivers/gpu/drm/sun4i/sun8i_mixer.c
drivers/gpu/drm/sun4i/sun8i_tcon_top.c
drivers/gpu/drm/udl/udl_fb.c
drivers/gpu/drm/vc4/vc4_plane.c
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
drivers/gpu/vga/vga_switcheroo.c
drivers/hid/hid-apple.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-saitek.c
drivers/hid/hid-sensor-hub.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/intel-ish-hid/ipc/hw-ish.h
drivers/hid/intel-ish-hid/ipc/pci-ish.c
drivers/hv/vmbus_drv.c
drivers/hwmon/adt7475.c
drivers/hwmon/ina2xx.c
drivers/hwmon/nct6775.c
drivers/hwmon/raspberrypi-hwmon.c
drivers/i2c/algos/i2c-algo-bit.c
drivers/i2c/busses/i2c-designware-master.c
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-imx-lpi2c.c
drivers/i2c/busses/i2c-sh_mobile.c
drivers/i2c/busses/i2c-uniphier-f.c
drivers/i2c/busses/i2c-uniphier.c
drivers/i2c/busses/i2c-xiic.c
drivers/i2c/i2c-core-base.c
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
drivers/iio/temperature/maxim_thermocouple.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/rdma_core.c
drivers/infiniband/core/ucma.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/qplib_fp.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/hfi1/pcie.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/iommu/rockchip-iommu.c
drivers/irqchip/irq-bcm7038-l1.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-s3c24xx.c
drivers/irqchip/irq-stm32-exti.c
drivers/irqchip/irq-tango.c
drivers/md/dm-crypt.c
drivers/md/dm-integrity.c
drivers/md/dm-raid.c
drivers/md/dm-thin-metadata.c
drivers/md/dm-thin.c
drivers/md/dm-verity-target.c
drivers/md/dm-writecache.c
drivers/md/md-cluster.c
drivers/md/raid10.c
drivers/md/raid5-log.h
drivers/md/raid5.c
drivers/media/media-device.c
drivers/memory/ti-aemif.c
drivers/misc/cb710/core.c
drivers/misc/hmc6352.c
drivers/misc/ibmvmc.c
drivers/misc/mei/bus.c
drivers/misc/mei/client.c
drivers/misc/mei/hbm.c
drivers/mmc/core/queue.c
drivers/mmc/core/queue.h
drivers/mmc/host/android-goldfish.c
drivers/mmc/host/atmel-mci.c
drivers/mmc/host/meson-mx-sdio.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/renesas_sdhi_internal_dmac.c
drivers/mtd/devices/m25p80.c
drivers/mtd/mtdpart.c
drivers/mtd/nand/raw/denali.c
drivers/mtd/nand/raw/docg4.c
drivers/mtd/nand/raw/marvell_nand.c
drivers/net/appletalk/ipddp.c
drivers/net/dsa/mv88e6xxx/global1.h
drivers/net/dsa/mv88e6xxx/global1_atu.c
drivers/net/ethernet/amazon/ena/ena_com.c
drivers/net/ethernet/amazon/ena/ena_eth_com.c
drivers/net/ethernet/amazon/ena/ena_eth_com.h
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/amazon/ena/ena_netdev.h
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/hisilicon/hns/hnae.h
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
drivers/net/ethernet/hp/hp100.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
drivers/net/ethernet/intel/ice/ice_common.c
drivers/net/ethernet/intel/ice/ice_controlq.c
drivers/net/ethernet/intel/ice/ice_ethtool.c
drivers/net/ethernet/intel/ice/ice_hw_autogen.h
drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_nvm.c
drivers/net/ethernet/intel/ice/ice_sched.c
drivers/net/ethernet/intel/ice/ice_switch.c
drivers/net/ethernet/intel/ice/ice_switch.h
drivers/net/ethernet/intel/ice/ice_txrx.h
drivers/net/ethernet/intel/ice/ice_type.h
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgb/ixgb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
drivers/net/ethernet/lantiq_etop.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/mellanox/mlx5/core/dev.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/wq.c
drivers/net/ethernet/mellanox/mlx5/core/wq.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/microchip/lan743x_main.c
drivers/net/ethernet/netronome/nfp/flower/action.c
drivers/net/ethernet/netronome/nfp/flower/main.h
drivers/net/ethernet/netronome/nfp/flower/match.c
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/qlogic/qed/qed_init_ops.c
drivers/net/ethernet/qlogic/qed/qed_mcp.c
drivers/net/ethernet/qlogic/qed/qed_mcp.h
drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
drivers/net/ethernet/qlogic/qede/qede_filter.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/qualcomm/qca_7k.c
drivers/net/ethernet/qualcomm/qca_spi.c
drivers/net/ethernet/qualcomm/qca_spi.h
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/Kconfig
drivers/net/ethernet/renesas/Makefile
drivers/net/ethernet/renesas/ravb.h
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/renesas/ravb_ptp.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/renesas/sh_eth.h
drivers/net/ethernet/stmicro/stmmac/Kconfig
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
drivers/net/ethernet/ti/Kconfig
drivers/net/ethernet/ti/cpsw-phy-sel.c
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/phy/sfp.c
drivers/net/ppp/pppoe.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/veth.c
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/xen-netfront.c
drivers/nvdimm/bus.c
drivers/nvdimm/dimm.c
drivers/nvdimm/dimm_devs.c
drivers/nvdimm/namespace_devs.c
drivers/nvdimm/nd-core.h
drivers/nvdimm/nd.h
drivers/nvdimm/pmem.c
drivers/nvdimm/pmem.h
drivers/nvdimm/region_devs.c
drivers/of/base.c
drivers/of/platform.c
drivers/pci/controller/pci-hyperv.c
drivers/pci/hotplug/pciehp_hpc.c
drivers/pci/pci.c
drivers/pci/probe.c
drivers/pci/quirks.c
drivers/pci/switch/switchtec.c
drivers/pinctrl/cirrus/pinctrl-madera-core.c
drivers/pinctrl/pinctrl-ingenic.c
drivers/pinctrl/qcom/pinctrl-msm.c
drivers/platform/x86/alienware-wmi.c
drivers/platform/x86/dell-smbios-wmi.c
drivers/reset/Kconfig
drivers/reset/Makefile
drivers/reset/reset-imx7.c
drivers/reset/reset-meson-audio-arb.c [new file with mode: 0644]
drivers/s390/block/dcssblk.c
drivers/s390/crypto/ap_bus.c
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/Kconfig
drivers/scsi/aacraid/aacraid.h
drivers/scsi/csiostor/csio_hw.c
drivers/scsi/csiostor/csio_hw.h
drivers/scsi/csiostor/csio_mb.c
drivers/scsi/hosts.c
drivers/scsi/hpsa.c
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/osd/osd_uld.c
drivers/scsi/qedi/qedi.h
drivers/scsi/qedi/qedi_main.c
drivers/scsi/qla2xxx/qla_target.h
drivers/scsi/scsi_lib.c
drivers/scsi/sd.c
drivers/spi/spi-fsl-dspi.c
drivers/spi/spi.c
drivers/staging/erofs/Kconfig
drivers/staging/erofs/super.c
drivers/staging/fbtft/TODO
drivers/staging/gasket/TODO
drivers/staging/vboxvideo/vbox_drv.c
drivers/staging/vboxvideo/vbox_mode.c
drivers/staging/wilc1000/Makefile
drivers/staging/wilc1000/linux_wlan.c
drivers/staging/wilc1000/wilc_debugfs.c
drivers/staging/wilc1000/wilc_wlan.c
drivers/staging/wilc1000/wilc_wlan_if.h
drivers/target/iscsi/cxgbit/cxgbit_ddp.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target.h
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_login.h
drivers/thermal/of-thermal.c
drivers/thermal/qoriq_thermal.c
drivers/thermal/rcar_gen3_thermal.c
drivers/thermal/rcar_thermal.c
drivers/tty/hvc/hvc_console.c
drivers/usb/class/cdc-acm.c
drivers/usb/class/cdc-acm.h
drivers/usb/class/cdc-wdm.c
drivers/usb/common/common.c
drivers/usb/core/hcd-pci.c
drivers/usb/core/message.c
drivers/usb/core/of.c
drivers/usb/core/quirks.c
drivers/usb/dwc2/platform.c
drivers/usb/dwc3/dwc3-of-simple.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/udc/fotg210-udc.c
drivers/usb/gadget/udc/net2280.c
drivers/usb/gadget/udc/renesas_usb3.c
drivers/usb/host/u132-hcd.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci.c
drivers/usb/misc/uss720.c
drivers/usb/misc/yurex.c
drivers/usb/mtu3/mtu3_core.c
drivers/usb/mtu3/mtu3_hw_regs.h
drivers/usb/serial/io_ti.h
drivers/usb/serial/ti_usb_3410_5052.c
drivers/usb/storage/scsiglue.c
drivers/usb/storage/uas.c
drivers/usb/storage/unusual_devs.h
drivers/usb/typec/bus.c
drivers/usb/typec/class.c
drivers/vhost/vhost.c
drivers/xen/Kconfig
drivers/xen/cpu_hotplug.c
drivers/xen/events/events_base.c
drivers/xen/gntdev.c
drivers/xen/manage.c
drivers/xen/mem-reservation.c
drivers/xen/xen-balloon.c
drivers/xen/xenbus/xenbus_probe.c
fs/afs/proc.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/qgroup.c
fs/btrfs/tree-log.c
fs/btrfs/tree-log.h
fs/btrfs/volumes.c
fs/buffer.c
fs/ceph/super.c
fs/cifs/Kconfig
fs/cifs/cifs_unicode.c
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/inode.c
fs/cifs/misc.c
fs/cifs/readdir.c
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/dax.c
fs/devpts/inode.c
fs/ext4/dir.c
fs/ext4/ext4.h
fs/ext4/inline.c
fs/ext4/inode.c
fs/ext4/mmp.c
fs/ext4/namei.c
fs/ext4/resize.c
fs/ext4/super.c
fs/isofs/inode.c
fs/namespace.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/nfs4trace.h
fs/nfs/pnfs.c
fs/nilfs2/alloc.c
fs/nilfs2/alloc.h
fs/nilfs2/bmap.c
fs/nilfs2/bmap.h
fs/nilfs2/btnode.c
fs/nilfs2/btnode.h
fs/nilfs2/btree.c
fs/nilfs2/btree.h
fs/nilfs2/cpfile.c
fs/nilfs2/cpfile.h
fs/nilfs2/dat.c
fs/nilfs2/dat.h
fs/nilfs2/dir.c
fs/nilfs2/direct.c
fs/nilfs2/direct.h
fs/nilfs2/file.c
fs/nilfs2/gcinode.c
fs/nilfs2/ifile.c
fs/nilfs2/ifile.h
fs/nilfs2/inode.c
fs/nilfs2/ioctl.c
fs/nilfs2/mdt.c
fs/nilfs2/mdt.h
fs/nilfs2/namei.c
fs/nilfs2/nilfs.h
fs/nilfs2/page.c
fs/nilfs2/page.h
fs/nilfs2/recovery.c
fs/nilfs2/segbuf.c
fs/nilfs2/segbuf.h
fs/nilfs2/segment.c
fs/nilfs2/segment.h
fs/nilfs2/sufile.c
fs/nilfs2/sufile.h
fs/nilfs2/super.c
fs/nilfs2/sysfs.c
fs/nilfs2/sysfs.h
fs/nilfs2/the_nilfs.c
fs/nilfs2/the_nilfs.h
fs/notify/fsnotify.c
fs/notify/mark.c
fs/ocfs2/buffer_head_io.c
fs/overlayfs/file.c
fs/overlayfs/inode.c
fs/overlayfs/super.c
fs/proc/kcore.c
fs/pstore/ram_core.c
fs/quota/quota.c
fs/super.c
fs/udf/super.c
include/asm-generic/io.h
include/asm-generic/vmlinux.lds.h
include/drm/drm_drv.h
include/dt-bindings/reset/amlogic,meson-axg-audio-arb.h [new file with mode: 0644]
include/linux/arm-smccc.h
include/linux/compiler-gcc.h
include/linux/compiler_types.h
include/linux/dax.h
include/linux/export.h
include/linux/fs.h
include/linux/hid.h
include/linux/huge_mm.h
include/linux/i2c.h
include/linux/idr.h
include/linux/kcore.h
include/linux/mlx5/driver.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/mm_types_task.h
include/linux/mod_devicetable.h
include/linux/of.h
include/linux/pci.h
include/linux/pci_ids.h
include/linux/platform_data/ina2xx.h
include/linux/quota.h
include/linux/set_memory.h
include/linux/time32.h
include/linux/timekeeping.h
include/linux/tracepoint.h
include/linux/vga_switcheroo.h
include/linux/vm_event_item.h
include/linux/vmacache.h
include/net/act_api.h
include/net/cfg80211.h
include/net/netfilter/nf_conntrack_timeout.h
include/net/pkt_cls.h
include/net/regulatory.h
include/net/tls.h
include/sound/hdaudio.h
include/sound/soc-dapm.h
include/uapi/linux/keyctl.h
include/uapi/linux/perf_event.h
include/uapi/linux/rds.h
include/uapi/linux/vhost.h
include/uapi/sound/skl-tplg-interface.h
include/xen/mem-reservation.h
init/Kconfig
ipc/shm.c
kernel/bpf/btf.c
kernel/bpf/hashtab.c
kernel/bpf/sockmap.c
kernel/bpf/verifier.c
kernel/cpu.c
kernel/dma/direct.c
kernel/events/core.c
kernel/events/hw_breakpoint.c
kernel/fork.c
kernel/futex.c
kernel/jump_label.c
kernel/kallsyms.c
kernel/locking/lockdep.c
kernel/locking/mutex.c
kernel/locking/test-ww_mutex.c
kernel/memremap.c
kernel/pid.c
kernel/printk/printk.c
kernel/printk/printk_safe.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/topology.c
kernel/sys.c
kernel/time/clocksource.c
kernel/trace/ring_buffer.c
kernel/watchdog.c
kernel/watchdog_hld.c
kernel/workqueue.c
lib/Kconfig.debug
lib/Makefile
lib/idr.c
lib/percpu_counter.c
lib/radix-tree.c
lib/rhashtable.c
lib/test_ida.c [new file with mode: 0644]
mm/Kconfig
mm/Makefile
mm/debug.c
mm/fadvise.c
mm/hmm.c
mm/huge_memory.c
mm/kmemleak.c
mm/madvise.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory_hotplug.c
mm/oom_kill.c
mm/page-writeback.c
mm/page_alloc.c
mm/readahead.c
mm/shmem.c
mm/slub.c
mm/util.c
mm/vmacache.c
mm/vmscan.c
net/bluetooth/smp.c
net/core/dev.c
net/core/filter.c
net/core/neighbour.c
net/core/net_namespace.c
net/core/rtnetlink.c
net/core/skbuff.c
net/dsa/dsa.c
net/dsa/slave.c
net/ipv4/af_inet.c
net/ipv4/igmp.c
net/ipv4/ip_fragment.c
net/ipv4/ip_gre.c
net/ipv4/netfilter/Kconfig
net/ipv4/tcp.c
net/ipv4/tcp_bbr.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/route.c
net/ipv6/udp.c
net/iucv/af_iucv.c
net/iucv/iucv.c
net/mac80211/ibss.c
net/mac80211/main.c
net/mac80211/mesh_hwmp.c
net/mac80211/mlme.c
net/mac80211/rx.c
net/mac80211/tx.c
net/mac80211/util.c
net/ncsi/ncsi-netlink.c
net/netfilter/Kconfig
net/netfilter/nf_conntrack_proto.c
net/netfilter/nf_conntrack_proto_dccp.c
net/netfilter/nf_conntrack_proto_generic.c
net/netfilter/nf_conntrack_proto_gre.c
net/netfilter/nf_conntrack_proto_icmp.c
net/netfilter/nf_conntrack_proto_icmpv6.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_conntrack_proto_udp.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_cttimeout.c
net/netfilter/nfnetlink_queue.c
net/netfilter/nft_ct.c
net/netfilter/xt_CHECKSUM.c
net/netfilter/xt_cluster.c
net/netfilter/xt_hashlimit.c
net/packet/af_packet.c
net/packet/internal.h
net/rds/Kconfig
net/rds/bind.c
net/rds/ib.c
net/rds/tcp.c
net/rfkill/rfkill-gpio.c
net/sched/act_api.c
net/sched/act_bpf.c
net/sched/act_connmark.c
net/sched/act_csum.c
net/sched/act_gact.c
net/sched/act_ife.c
net/sched/act_ipt.c
net/sched/act_mirred.c
net/sched/act_nat.c
net/sched/act_pedit.c
net/sched/act_police.c
net/sched/act_sample.c
net/sched/act_simple.c
net/sched/act_skbedit.c
net/sched/act_skbmod.c
net/sched/act_tunnel_key.c
net/sched/act_vlan.c
net/sched/cls_api.c
net/sched/cls_u32.c
net/sched/sch_cake.c
net/sctp/proc.c
net/sctp/socket.c
net/socket.c
net/tipc/bcast.c
net/tipc/diag.c
net/tipc/name_table.c
net/tipc/name_table.h
net/tipc/netlink.c
net/tipc/netlink_compat.c
net/tipc/socket.c
net/tipc/socket.h
net/tipc/topsrv.c
net/tls/tls_device.c
net/tls/tls_device_fallback.c
net/tls/tls_main.c
net/tls/tls_sw.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/util.c
net/xdp/xdp_umem.c
scripts/Kbuild.include
scripts/Makefile.build
scripts/Makefile.lib
scripts/Makefile.modpost
scripts/checkpatch.pl
scripts/clang-version.sh
scripts/coccinelle/api/alloc/zalloc-simple.cocci
scripts/depmod.sh
scripts/dtc/Makefile
scripts/gcc-plugins/Kconfig
scripts/kconfig/Makefile
scripts/kconfig/check-pkgconfig.sh [deleted file]
scripts/kconfig/conf.c
scripts/kconfig/gconf-cfg.sh
scripts/kconfig/mconf-cfg.sh
scripts/kconfig/mconf.c
scripts/kconfig/nconf-cfg.sh
scripts/kconfig/qconf-cfg.sh
scripts/kconfig/symbol.c
scripts/kconfig/tests/err_recursive_dep/Kconfig [moved from scripts/kconfig/tests/warn_recursive_dep/Kconfig with 93% similarity]
scripts/kconfig/tests/err_recursive_dep/__init__.py [new file with mode: 0644]
scripts/kconfig/tests/err_recursive_dep/expected_stderr [new file with mode: 0644]
scripts/kconfig/tests/warn_recursive_dep/__init__.py [deleted file]
scripts/kconfig/tests/warn_recursive_dep/expected_stderr [deleted file]
scripts/link-vmlinux.sh
scripts/mod/modpost.c
scripts/recordmcount.pl
scripts/setlocalversion
scripts/subarch.include [new file with mode: 0644]
security/Kconfig
security/apparmor/secid.c
security/keys/dh.c
sound/core/rawmidi.c
sound/firewire/bebob/bebob.c
sound/firewire/bebob/bebob_maudio.c
sound/firewire/digi00x/digi00x.c
sound/firewire/fireface/ff-protocol-ff400.c
sound/firewire/fireworks/fireworks.c
sound/firewire/oxfw/oxfw.c
sound/firewire/tascam/tascam.c
sound/hda/ext/hdac_ext_stream.c
sound/hda/hdac_controller.c
sound/pci/emu10k1/emufx.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_intel.h
sound/soc/amd/acp-pcm-dma.c
sound/soc/codecs/cs4265.c
sound/soc/codecs/max98373.c
sound/soc/codecs/rt5514.c
sound/soc/codecs/rt5682.c
sound/soc/codecs/sigmadsp.c
sound/soc/codecs/tas6424.c
sound/soc/codecs/wm8804-i2c.c
sound/soc/codecs/wm9712.c
sound/soc/intel/boards/bytcr_rt5640.c
sound/soc/intel/skylake/skl.c
sound/soc/qcom/qdsp6/q6routing.c
sound/soc/sh/rcar/adg.c
sound/soc/sh/rcar/core.c
sound/soc/sh/rcar/dma.c
sound/soc/sh/rcar/rsnd.h
sound/soc/sh/rcar/ssi.c
sound/soc/soc-core.c
sound/soc/soc-dapm.c
tools/arch/arm/include/uapi/asm/kvm.h
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/s390/include/uapi/asm/kvm.h
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/include/uapi/asm/kvm.h
tools/arch/x86/lib/memcpy_64.S
tools/bpf/bpftool/map.c
tools/bpf/bpftool/map_perf_ring.c
tools/hv/hv_kvp_daemon.c
tools/include/linux/lockdep.h
tools/include/linux/nmi.h [new file with mode: 0644]
tools/include/uapi/asm-generic/unistd.h
tools/include/uapi/drm/drm.h
tools/include/uapi/linux/if_link.h
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/perf_event.h
tools/include/uapi/linux/vhost.h
tools/kvm/kvm_stat/kvm_stat
tools/lib/lockdep/Makefile
tools/lib/traceevent/Makefile
tools/lib/traceevent/event-parse.c
tools/lib/traceevent/event-parse.h
tools/lib/traceevent/event-plugin.c
tools/lib/traceevent/event-utils.h
tools/lib/traceevent/kbuffer-parse.c
tools/lib/traceevent/parse-filter.c
tools/lib/traceevent/parse-utils.c
tools/lib/traceevent/plugin_cfg80211.c
tools/lib/traceevent/plugin_function.c
tools/lib/traceevent/plugin_hrtimer.c
tools/lib/traceevent/plugin_jbd2.c
tools/lib/traceevent/plugin_kmem.c
tools/lib/traceevent/plugin_kvm.c
tools/lib/traceevent/plugin_mac80211.c
tools/lib/traceevent/plugin_sched_switch.c
tools/lib/traceevent/plugin_scsi.c
tools/lib/traceevent/plugin_xen.c
tools/lib/traceevent/trace-seq.c
tools/perf/Documentation/perf-annotate.txt
tools/perf/Documentation/perf-report.txt
tools/perf/Makefile
tools/perf/Makefile.perf
tools/perf/arch/arm64/Makefile
tools/perf/arch/arm64/entry/syscalls/mksyscalltbl
tools/perf/arch/arm64/util/arm-spe.c
tools/perf/arch/powerpc/util/sym-handling.c
tools/perf/arch/s390/util/auxtrace.c
tools/perf/arch/x86/Makefile
tools/perf/arch/x86/include/arch-tests.h
tools/perf/arch/x86/tests/Build
tools/perf/arch/x86/tests/arch-tests.c
tools/perf/arch/x86/tests/bp-modify.c [new file with mode: 0644]
tools/perf/builtin-annotate.c
tools/perf/builtin-kmem.c
tools/perf/builtin-report.c
tools/perf/builtin-script.c
tools/perf/builtin-trace.c
tools/perf/check-headers.sh
tools/perf/examples/bpf/augmented_syscalls.c [new file with mode: 0644]
tools/perf/examples/bpf/hello.c [new file with mode: 0644]
tools/perf/examples/bpf/sys_enter_openat.c [new file with mode: 0644]
tools/perf/include/bpf/bpf.h
tools/perf/include/bpf/stdio.h [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/ampere/emag/core-imp-def.json [new file with mode: 0644]
tools/perf/pmu-events/arch/arm64/mapfile.csv
tools/perf/tests/bitmap.c
tools/perf/tests/code-reading.c
tools/perf/tests/kmod-path.c
tools/perf/tests/mem2node.c
tools/perf/ui/browsers/annotate.c
tools/perf/util/Build
tools/perf/util/annotate.c
tools/perf/util/annotate.h
tools/perf/util/auxtrace.c
tools/perf/util/auxtrace.h
tools/perf/util/bpf-loader.c
tools/perf/util/bpf-loader.h
tools/perf/util/compress.h
tools/perf/util/data-convert-bt.c
tools/perf/util/dso.c
tools/perf/util/dso.h
tools/perf/util/event.c
tools/perf/util/evlist.c
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/header.c
tools/perf/util/llvm-utils.c
tools/perf/util/llvm-utils.h
tools/perf/util/lzma.c
tools/perf/util/machine.c
tools/perf/util/machine.h
tools/perf/util/map.c
tools/perf/util/map.h
tools/perf/util/mmap.c
tools/perf/util/mmap.h
tools/perf/util/namespaces.c
tools/perf/util/parse-events.c
tools/perf/util/python.c
tools/perf/util/s390-cpumsf-kernel.h [new file with mode: 0644]
tools/perf/util/s390-cpumsf.c [new file with mode: 0644]
tools/perf/util/s390-cpumsf.h [new file with mode: 0644]
tools/perf/util/scripting-engines/trace-event-perl.c
tools/perf/util/scripting-engines/trace-event-python.c
tools/perf/util/setup.py
tools/perf/util/sort.c
tools/perf/util/sort.h
tools/perf/util/trace-event-info.c
tools/perf/util/trace-event-parse.c
tools/perf/util/trace-event-read.c
tools/perf/util/trace-event-scripting.c
tools/perf/util/trace-event.c
tools/perf/util/trace-event.h
tools/perf/util/zlib.c
tools/testing/nvdimm/pmem-dax.c
tools/testing/nvdimm/test/nfit.c
tools/testing/radix-tree/Makefile
tools/testing/radix-tree/idr-test.c
tools/testing/radix-tree/linux/xarray.h [new file with mode: 0644]
tools/testing/radix-tree/main.c
tools/testing/radix-tree/test.h
tools/testing/selftests/android/Makefile
tools/testing/selftests/android/config [moved from tools/testing/selftests/android/ion/config with 100% similarity]
tools/testing/selftests/android/ion/Makefile
tools/testing/selftests/cgroup/.gitignore
tools/testing/selftests/cgroup/cgroup_util.c
tools/testing/selftests/cgroup/cgroup_util.h
tools/testing/selftests/cgroup/test_memcontrol.c
tools/testing/selftests/efivarfs/config [new file with mode: 0644]
tools/testing/selftests/futex/functional/Makefile
tools/testing/selftests/gpio/Makefile
tools/testing/selftests/kselftest.h
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/lib.mk
tools/testing/selftests/memory-hotplug/config
tools/testing/selftests/net/Makefile
tools/testing/selftests/net/pmtu.sh
tools/testing/selftests/net/tls.c
tools/testing/selftests/networking/timestamping/Makefile
tools/testing/selftests/tc-testing/tc-tests/actions/police.json
tools/testing/selftests/vm/Makefile
tools/vm/page-types.c
tools/vm/slabinfo.c
usr/Makefile
usr/gen_initramfs_list.sh [moved from scripts/gen_initramfs_list.sh with 99% similarity]
usr/initramfs_data.S
virt/kvm/arm/mmu.c
virt/kvm/arm/trace.h

index 2a6f685bf70633757017a3bd1ff72c9d080cd7fd..285e09645b31485d7bc314fb1fa7d055ec50fb46 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -159,6 +159,7 @@ Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
 Randy Dunlap <rdunlap@infradead.org> <rdunlap@xenotime.net>
 Rémi Denis-Courmont <rdenis@simphalempin.com>
 Ricardo Ribalda Delgado <ricardo.ribalda@gmail.com>
+Ross Zwisler <zwisler@kernel.org> <ross.zwisler@linux.intel.com>
 Rudolf Marek <R.Marek@sh.cvut.cz>
 Rui Saraiva <rmps@joel.ist.utl.pt>
 Sachin P Sant <ssant@in.ibm.com>
index 3d5951c8bf5fe8b27f47b016289c910f90af97e6..e8b60bd766f7649a430a877628e73d8936d66cbb 100644 (file)
@@ -73,3 +73,12 @@ KernelVersion:       3.0
 Contact:       Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 Description:
                 Number of sectors written by the frontend.
+
+What:          /sys/bus/xen-backend/devices/*/state
+Date:          August 2018
+KernelVersion: 4.19
+Contact:       Joe Jin <joe.jin@oracle.com>
+Description:
+                The state of the device. One of: 'Unknown',
+                'Initialising', 'Initialised', 'Connected', 'Closing',
+                'Closed', 'Reconfiguring', 'Reconfigured'.
index caa311d59ac1d24c92643f37b396407a1ab654f0..6d83f95a8a8e131c9a2f0454cd39128ffd3d7723 100644 (file)
@@ -75,3 +75,12 @@ Contact:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 Description:
                Amount (in KiB) of low (or normal) memory in the
                balloon.
+
+What:          /sys/devices/system/xen_memory/xen_memory0/scrub_pages
+Date:          September 2018
+KernelVersion: 4.20
+Contact:       xen-devel@lists.xenproject.org
+Description:
+                Control scrubbing pages before returning them to Xen for other domains'
+               use. Can be set with the xen_scrub_pages cmdline
+               parameter. Default value is controlled by CONFIG_XEN_SCRUB_PAGES_DEFAULT.
index 9871e649ffeffe8798cd20a1450377a4f4777fca..92eb1f42240d7168354dc7129898e2500ef95c1a 100644 (file)
        ramdisk_size=   [RAM] Sizes of RAM disks in kilobytes
                        See Documentation/blockdev/ramdisk.txt.
 
+       random.trust_cpu={on,off}
+                       [KNL] Enable or disable trusting the use of the
+                       CPU's random number generator (if available) to
+                       fully seed the kernel's CRNG. Default is controlled
+                       by CONFIG_RANDOM_TRUST_CPU.
+
        ras=option[,option,...] [KNL] RAS-specific options
 
                cec_disable     [X86]
                        Disables the PV optimizations forcing the HVM guest to
                        run as generic HVM guest with no PV drivers.
 
+       xen_scrub_pages=        [XEN]
+                       Boolean option to control scrubbing pages before giving them back
+                       to Xen, for use by other domains. Can be also changed at runtime
+                       with /sys/devices/system/xen_memory/xen_memory0/scrub_pages.
+                       Default value controlled with CONFIG_XEN_SCRUB_PAGES_DEFAULT.
+
        xirc2ps_cs=     [NET,PCMCIA]
                        Format:
                        <irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
index f128f736b4a5025f6c1964253b05472d6c76dc8b..7169a0ec41d86911ad4a9c7fc34488841dc7c1e6 100644 (file)
@@ -200,7 +200,7 @@ prctl(PR_SVE_SET_VL, unsigned long arg)
       thread.
 
     * Changing the vector length causes all of P0..P15, FFR and all bits of
-      Z0..V31 except for Z0 bits [127:0] .. Z31 bits [127:0] to become
+      Z0..Z31 except for Z0 bits [127:0] .. Z31 bits [127:0] to become
       unspecified.  Calling PR_SVE_SET_VL with vl equal to the thread's current
       vector length, or calling PR_SVE_SET_VL with the PR_SVE_SET_VL_ONEXEC
       flag, does not constitute a change to the vector length for this purpose.
@@ -500,7 +500,7 @@ References
 [2] arch/arm64/include/uapi/asm/ptrace.h
     AArch64 Linux ptrace ABI definitions
 
-[3] linux/Documentation/arm64/cpu-feature-registers.txt
+[3] Documentation/arm64/cpu-feature-registers.txt
 
 [4] ARM IHI0055C
     http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055c/IHI0055C_beta_aapcs64.pdf
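
(For illustration: a minimal userspace sketch of the prctl() interface documented
above, not part of the patch. It assumes <sys/prctl.h> provides the PR_SVE_*
definitions from recent kernel headers; PR_SVE_GET_VL and PR_SVE_VL_LEN_MASK are
taken from the same interface, and the 32-byte request is an arbitrary example.)

/* Illustrative sketch only: query and set the SVE vector length via prctl(). */
#include <stdio.h>
#include <stdlib.h>
#include <sys/prctl.h>

int main(void)
{
	/* Read the current vector length configuration for this thread. */
	int ret = prctl(PR_SVE_GET_VL);

	if (ret < 0) {
		perror("PR_SVE_GET_VL");	/* no SVE support, or headers too old */
		return EXIT_FAILURE;
	}
	printf("current VL: %d bytes\n", ret & PR_SVE_VL_LEN_MASK);

	/* Request a 32-byte (256-bit) vector length for this thread only.
	 * As described above, changing VL leaves P0..P15, FFR and the upper
	 * bits of Z0..Z31 unspecified, so do this before touching SVE state. */
	ret = prctl(PR_SVE_SET_VL, 32);
	if (ret < 0)
		perror("PR_SVE_SET_VL");
	else
		printf("new VL: %d bytes\n", ret & PR_SVE_VL_LEN_MASK);

	return 0;
}
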
index 390c145f01d717fc9bc9499fe4f0fc5d8dbc1257..52a719b49afd4fdbbb1dc5adfb86972b458ed79b 100644 (file)
@@ -348,3 +348,7 @@ Version History
 1.13.1  Fix deadlock caused by early md_stop_writes().  Also fix size and
        state races.
 1.13.2  Fix raid redundancy validation and avoid keeping raid set frozen
+1.14.0  Fix reshape race on small devices.  Fix stripe adding reshape
+       deadlock/potential data corruption.  Update superblock when
+       specific devices are requested via rebuild.  Fix RAID leg
+       rebuild errors.
index 7809fbe0cdb7d65e71640e605068eba9f151234f..9b5685a1d15d9821efb9dd6d34a29ae1f788e31f 100644 (file)
@@ -94,7 +94,7 @@ cpus {
        };
 
        idle-states {
-               entry-method = "arm,psci";
+               entry-method = "psci";
 
                CPU_SLEEP_0: cpu-sleep-0 {
                        compatible = "arm,idle-state";
index 7a591333f2b199d8b9faf92f606bdf0d13060495..2c73847499abc8d73cfdd64c987d6896f804b5c7 100644 (file)
@@ -237,8 +237,8 @@ processor idle states, defined as device tree nodes, are listed.
                Value type: <stringlist>
                Usage and definition depend on ARM architecture version.
                        # On ARM v8 64-bit this property is required and must
-                         be one of:
-                          - "psci" (see bindings in [2])
+                         be:
+                          - "psci"
                        # On ARM 32-bit systems this property is optional
 
 The nodes describing the idle states (state) can only be defined within the
index 00e4365d720685679824b8d6659fd59a58845ec2..091c8dfd322910e14712d4a818e9879538abf3d9 100644 (file)
@@ -3,7 +3,6 @@
 Required properties:
 - compatible :
   - "fsl,imx7ulp-lpi2c" for LPI2C compatible with the one integrated on i.MX7ULP soc
-  - "fsl,imx8dv-lpi2c" for LPI2C compatible with the one integrated on i.MX8DV soc
 - reg : address and length of the lpi2c master registers
 - interrupts : lpi2c interrupt
 - clocks : lpi2c clock specifier
@@ -11,7 +10,7 @@ Required properties:
 Examples:
 
 lpi2c7: lpi2c7@40a50000 {
-       compatible = "fsl,imx8dv-lpi2c";
+       compatible = "fsl,imx7ulp-lpi2c";
        reg = <0x40A50000 0x10000>;
        interrupt-parent = <&intc>;
        interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
index 697ca2f26d1b5d8b0d649709234958a2d0f3ada4..a046ed374d808d668195082afc950c5b14beff62 100644 (file)
@@ -13,6 +13,7 @@ Required properties:
     - "renesas,irqc-r8a7792" (R-Car V2H)
     - "renesas,irqc-r8a7793" (R-Car M2-N)
     - "renesas,irqc-r8a7794" (R-Car E2)
+    - "renesas,intc-ex-r8a774a1" (RZ/G2M)
     - "renesas,intc-ex-r8a7795" (R-Car H3)
     - "renesas,intc-ex-r8a7796" (R-Car M3-W)
     - "renesas,intc-ex-r8a77965" (R-Car M3-N)
index b0a8af51c388c51f544b4b4cc391563a08e82ca9..265b223cd9780158a8415e96ce7dfb16c192237a 100644 (file)
@@ -11,7 +11,7 @@ The RISC-V supervisor ISA manual specifies three interrupt sources that are
 attached to every HLIC: software interrupts, the timer interrupt, and external
 interrupts.  Software interrupts are used to send IPIs between cores.  The
 timer interrupt comes from an architecturally mandated real-time timer that is
-controller via Supervisor Binary Interface (SBI) calls and CSR reads.  External
+controlled via Supervisor Binary Interface (SBI) calls and CSR reads.  External
 interrupts connect all other device interrupts to the HLIC, which are routed
 via the platform-level interrupt controller (PLIC).
 
@@ -25,7 +25,15 @@ in the system.
 
 Required properties:
 - compatible : "riscv,cpu-intc"
-- #interrupt-cells : should be <1>
+- #interrupt-cells : should be <1>.  The interrupt sources are defined by the
+  RISC-V supervisor ISA manual, with only the following three interrupts being
+  defined for supervisor mode:
+    - Source 1 is the supervisor software interrupt, which can be sent by an SBI
+      call and is reserved for use by software.
+    - Source 5 is the supervisor timer interrupt, which can be configured by
+      SBI calls and implements a one-shot timer.
+    - Source 9 is the supervisor external interrupt, which chains to all other
+      device interrupts.
 - interrupt-controller : Identifies the node as an interrupt controller
 
 Furthermore, this interrupt-controller MUST be embedded inside the cpu
@@ -38,7 +46,7 @@ An example device tree entry for a HLIC is show below.
                ...
                cpu1-intc: interrupt-controller {
                        #interrupt-cells = <1>;
-                       compatible = "riscv,cpu-intc", "sifive,fu540-c000-cpu-intc";
+                       compatible = "sifive,fu540-c000-cpu-intc", "riscv,cpu-intc";
                        interrupt-controller;
                };
        };
index 41089369f89134b3fb42c98766a46986f1806cd4..b3acebe08eb0a9d4e3d4ac315cd513e340c65f16 100644 (file)
@@ -19,6 +19,10 @@ Required properties:
 - slaves               : Specifies number for slaves
 - active_slave         : Specifies the slave to use for time stamping,
                          ethtool and SIOCGMIIPHY
+- cpsw-phy-sel         : Specifies the phandle to the CPSW phy mode selection
+                         device. See also cpsw-phy-sel.txt for its binding.
+                         Note that in legacy cases cpsw-phy-sel may be
+                         a child device instead of a phandle.
 
 Optional properties:
 - ti,hwmods            : Must be "cpgmac0"
@@ -75,6 +79,7 @@ Examples:
                cpts_clock_mult = <0x80000000>;
                cpts_clock_shift = <29>;
                syscon = <&cm>;
+               cpsw-phy-sel = <&phy_sel>;
                cpsw_emac0: slave@0 {
                        phy_id = <&davinci_mdio>, <0>;
                        phy-mode = "rgmii-txid";
@@ -103,6 +108,7 @@ Examples:
                cpts_clock_mult = <0x80000000>;
                cpts_clock_shift = <29>;
                syscon = <&cm>;
+               cpsw-phy-sel = <&phy_sel>;
                cpsw_emac0: slave@0 {
                        phy_id = <&davinci_mdio>, <0>;
                        phy-mode = "rgmii-txid";
index 457d5ae16f23490b21bbcd6171734f8aa8858b94..3e17ac1d5d58caa2c89dae53648c5b1d33efe0dd 100644 (file)
@@ -10,6 +10,7 @@ Required properties:
   Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on
   the Cadence GEM, or the generic form: "cdns,gem".
   Use "atmel,sama5d2-gem" for the GEM IP (10/100) available on Atmel sama5d2 SoCs.
+  Use "atmel,sama5d3-macb" for the 10/100Mbit IP available on Atmel sama5d3 SoCs.
   Use "atmel,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs.
   Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs.
   Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC.
index 76db9f13ad96c08b77f5dae0af4ea616254ee811..abc36274227c7299bf5ec088e21ebdeed25a3950 100644 (file)
@@ -16,6 +16,7 @@ Required properties:
              "renesas,ether-r8a7794"  if the device is a part of R8A7794 SoC.
              "renesas,gether-r8a77980" if the device is a part of R8A77980 SoC.
              "renesas,ether-r7s72100" if the device is a part of R7S72100 SoC.
+             "renesas,ether-r7s9210" if the device is a part of R7S9210 SoC.
              "renesas,rcar-gen1-ether" for a generic R-Car Gen1 device.
              "renesas,rcar-gen2-ether" for a generic R-Car Gen2 or RZ/G1
                                        device.
diff --git a/Documentation/devicetree/bindings/reset/amlogic,meson-axg-audio-arb.txt b/Documentation/devicetree/bindings/reset/amlogic,meson-axg-audio-arb.txt
new file mode 100644 (file)
index 0000000..26e542e
--- /dev/null
@@ -0,0 +1,21 @@
+* Amlogic audio memory arbiter controller
+
+The Amlogic Audio ARB is a simple device which enables or
+disables the access of Audio FIFOs to DDR on AXG based SoC.
+
+Required properties:
+- compatible: 'amlogic,meson-axg-audio-arb'
+- reg: physical base address of the controller and length of memory
+       mapped region.
+- clocks: phandle to the fifo peripheral clock provided by the audio
+         clock controller.
+- #reset-cells: must be 1.
+
+Example on the A113 SoC:
+
+arb: reset-controller@280 {
+       compatible = "amlogic,meson-axg-audio-arb";
+       reg = <0x0 0x280 0x0 0x4>;
+       #reset-cells = <1>;
+       clocks = <&clkc_audio AUD_CLKID_DDR_ARB>;
+};
index 5d47a262474cfe0e4b32d1d814c6ac8bd3a20d45..9407212a85a8cac6918c56a27380bdb101e7af31 100644 (file)
@@ -7,6 +7,7 @@ Required properties:
               Examples with soctypes are:
                 - "renesas,r8a7743-wdt" (RZ/G1M)
                 - "renesas,r8a7745-wdt" (RZ/G1E)
+                - "renesas,r8a774a1-wdt" (RZ/G2M)
                 - "renesas,r8a7790-wdt" (R-Car H2)
                 - "renesas,r8a7791-wdt" (R-Car M2-W)
                 - "renesas,r8a7792-wdt" (R-Car V2H)
@@ -21,8 +22,8 @@ Required properties:
                 - "renesas,r7s72100-wdt" (RZ/A1)
                The generic compatible string must be:
                 - "renesas,rza-wdt" for RZ/A
-                - "renesas,rcar-gen2-wdt" for R-Car Gen2 and RZ/G
-                - "renesas,rcar-gen3-wdt" for R-Car Gen3
+                - "renesas,rcar-gen2-wdt" for R-Car Gen2 and RZ/G1
+                - "renesas,rcar-gen3-wdt" for R-Car Gen3 and RZ/G2
 
 - reg : Should contain WDT registers location and length
 - clocks : the clock feeding the watchdog timer.
index 2c00b072a4c8ca382f84af09ebea29150645380d..1e1057958dd3e68722df575e90d5f1b76ef1f29a 100644 (file)
@@ -66,17 +66,17 @@ early userspace image can be built by an unprivileged user.
 
 As a technical note, when directories and files are specified, the
 entire CONFIG_INITRAMFS_SOURCE is passed to
-scripts/gen_initramfs_list.sh.  This means that CONFIG_INITRAMFS_SOURCE
+usr/gen_initramfs_list.sh.  This means that CONFIG_INITRAMFS_SOURCE
 can really be interpreted as any legal argument to
 gen_initramfs_list.sh.  If a directory is specified as an argument then
 the contents are scanned, uid/gid translation is performed, and
 usr/gen_init_cpio file directives are output.  If a directory is
-specified as an argument to scripts/gen_initramfs_list.sh then the
+specified as an argument to usr/gen_initramfs_list.sh then the
 contents of the file are simply copied to the output.  All of the output
 directives from directory scanning and file contents copying are
 processed by usr/gen_init_cpio.
 
-See also 'scripts/gen_initramfs_list.sh -h'.
+See also 'usr/gen_initramfs_list.sh -h'.
 
 Where's this all leading?
 =========================
index b176928e69631f5efe3cb37506b3eb375cc3e23a..79637d227e856ceb1d1c9fa9e306ff11679154d6 100644 (file)
@@ -164,7 +164,7 @@ Documentation/early-userspace/README for more details.)
 The kernel does not depend on external cpio tools.  If you specify a
 directory instead of a configuration file, the kernel's build infrastructure
 creates a configuration file from that directory (usr/Makefile calls
-scripts/gen_initramfs_list.sh), and proceeds to package up that directory
+usr/gen_initramfs_list.sh), and proceeds to package up that directory
 using the config file (by feeding it to usr/gen_init_cpio, which is created
 from usr/gen_init_cpio.c).  The kernel's build-time cpio creation code is
 entirely self-contained, and the kernel's boot-time extractor is also
index 4b2084d0f1fb26c9b86bd7e2e587590bc1c1b31a..a6c6a8af48a296cf9b7197c8f065370814efd90d 100644 (file)
@@ -848,7 +848,7 @@ struct file_operations
 ----------------------
 
 This describes how the VFS can manipulate an open file. As of kernel
-4.1, the following members are defined:
+4.18, the following members are defined:
 
 struct file_operations {
        struct module *owner;
@@ -858,11 +858,11 @@ struct file_operations {
        ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
        ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
        int (*iterate) (struct file *, struct dir_context *);
+       int (*iterate_shared) (struct file *, struct dir_context *);
        __poll_t (*poll) (struct file *, struct poll_table_struct *);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
        long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
        int (*mmap) (struct file *, struct vm_area_struct *);
-       int (*mremap)(struct file *, struct vm_area_struct *);
        int (*open) (struct inode *, struct file *);
        int (*flush) (struct file *, fl_owner_t id);
        int (*release) (struct inode *, struct file *);
@@ -882,6 +882,10 @@ struct file_operations {
 #ifndef CONFIG_MMU
        unsigned (*mmap_capabilities)(struct file *);
 #endif
+       ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int);
+       int (*clone_file_range)(struct file *, loff_t, struct file *, loff_t, u64);
+       int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t, u64);
+       int (*fadvise)(struct file *, loff_t, loff_t, int);
 };
 
 Again, all methods are called without any locks being held, unless
@@ -899,6 +903,9 @@ otherwise noted.
 
   iterate: called when the VFS needs to read the directory contents
 
+  iterate_shared: called when the VFS needs to read the directory contents
+       when filesystem supports concurrent dir iterators
+
   poll: called by the VFS when a process wants to check if there is
        activity on this file and (optionally) go to sleep until there
        is activity. Called by the select(2) and poll(2) system calls
@@ -951,6 +958,16 @@ otherwise noted.
 
   fallocate: called by the VFS to preallocate blocks or punch a hole.
 
+  copy_file_range: called by the copy_file_range(2) system call.
+
+  clone_file_range: called by the ioctl(2) system call for FICLONERANGE and
+       FICLONE commands.
+
+  dedupe_file_range: called by the ioctl(2) system call for FIDEDUPERANGE
+       command.
+
+  fadvise: possibly called by the fadvise64() system call.
+
 Note that the file operations are implemented by the specific
 filesystem in which the inode resides. When opening a device node
 (character or block special) most filesystems will call special
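
(For illustration: a minimal sketch, not part of the patch, of how a filesystem
might wire up the newer members listed above, using the prototypes shown in the
struct. The example_* functions are hypothetical stubs; the generic_file_*
helpers are the stock VFS defaults.)

/* Illustrative sketch, not from the patch above. */
#include <linux/fs.h>
#include <linux/module.h>

static ssize_t example_copy_file_range(struct file *src, loff_t src_off,
				       struct file *dst, loff_t dst_off,
				       size_t len, unsigned int flags)
{
	/* A real implementation would copy 'len' bytes from src to dst and
	 * return the number of bytes copied; this stub declines the request. */
	return -EOPNOTSUPP;
}

static int example_fadvise(struct file *file, loff_t offset, loff_t len,
			   int advice)
{
	/* Accept the posix_fadvise() hint without acting on it. */
	return 0;
}

static const struct file_operations example_fops = {
	.owner		 = THIS_MODULE,
	.read_iter	 = generic_file_read_iter,
	.write_iter	 = generic_file_write_iter,
	.mmap		 = generic_file_mmap,
	.copy_file_range = example_copy_file_range,
	.fadvise	 = example_fadvise,
};
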
index 72d16f08e431c674c62738641d7c28926d6331cb..b8df81f6d6bcf995e098ebb4dce0a57a483bba66 100644 (file)
@@ -32,7 +32,7 @@ Supported chips:
     Datasheet: Publicly available at the Texas Instruments website
                http://www.ti.com/
 
-Author: Lothar Felten <l-felten@ti.com>
+Author: Lothar Felten <lothar.felten@gmail.com>
 
 Description
 -----------
index 966610aa4620fa8ce19e52fffd2d44c5a2128367..203002054120568135a01f7e4ab343a5223279bd 100644 (file)
@@ -50,10 +50,14 @@ bounce buffer. But you don't need to care about that detail, just use the
 returned buffer. If NULL is returned, the threshold was not met or a bounce
 buffer could not be allocated. Fall back to PIO in that case.
 
-In any case, a buffer obtained from above needs to be released. It ensures data
-is copied back to the message and a potentially used bounce buffer is freed::
+In any case, a buffer obtained from above needs to be released. Another helper
+function ensures a potentially used bounce buffer is freed::
 
-       i2c_release_dma_safe_msg_buf(msg, dma_buf);
+       i2c_put_dma_safe_msg_buf(dma_buf, msg, xferred);
+
+The last argument 'xferred' controls whether the buffer is synced back to the
+message. No syncing is needed when setting up DMA had an error and
+there was no data transferred.
 
 The bounce buffer handling from the core is generic and simple. It will always
 allocate a new bounce buffer. If you want a more sophisticated handling (e.g.
index c54cb7cb9ff45fc49189161bc80d3586f0e91586..864e740811dadab3c8f7dafb4a240656a9e9d0bf 100644 (file)
@@ -545,7 +545,7 @@ make KBUILD_KCONFIG=Documentation/kbuild/Kconfig.recursion-issue-02 allnoconfig
 Practical solutions to kconfig recursive issue
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-Developers who run into the recursive Kconfig issue have three options
+Developers who run into the recursive Kconfig issue have two options
 at their disposal. We document them below and also provide a list of
 historical issues resolved through these different solutions.
 
@@ -553,7 +553,6 @@ historical issues resolved through these different solutions.
   b) Match dependency semantics:
        b1) Swap all "select FOO" to "depends on FOO" or,
        b2) Swap all "depends on FOO" to "select FOO"
-  c) Consider the use of "imply" instead of "select"
 
 The resolution to a) can be tested with the sample Kconfig file
 Documentation/kbuild/Kconfig.recursion-issue-01 through the removal
index 766355b1d2212236c2501835aece0b402d7454ae..7b6a2b2bdc98db2e794a261ff7be17dc3df0ae26 100644 (file)
@@ -680,7 +680,7 @@ Both possibilities are described in the following.
 
        Example:
                #scripts/kconfig/Makefile
-               HOSTLOADLIBES_qconf := -L$(QTDIR)/lib
+               HOSTLDLIBS_qconf := -L$(QTDIR)/lib
 
        When linking qconf, it will be passed the extra option
        "-L$(QTDIR)/lib".
index 61f918b10a0c74d7c8369009ce0b6447d31f903a..d1bf143b446f3b60bc374b40dc7b6818d1c21aa8 100644 (file)
@@ -86,7 +86,7 @@ pkg-config
 
 The build system, as of 4.18, requires pkg-config to check for installed
 kconfig tools and to determine flags settings for use in
-'make {menu,n,g,x}config'.  Previously pkg-config was being used but not
+'make {g,x}config'.  Previously pkg-config was being used but not
 verified or documented.
 
 Flex
diff --git a/Documentation/process/code-of-conduct.rst b/Documentation/process/code-of-conduct.rst
new file mode 100644 (file)
index 0000000..ab7c24b
--- /dev/null
@@ -0,0 +1,81 @@
+Contributor Covenant Code of Conduct
+++++++++++++++++++++++++++++++++++++
+
+Our Pledge
+==========
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, sex characteristics, gender identity and
+expression, level of experience, education, socio-economic status, nationality,
+personal appearance, race, religion, or sexual identity and orientation.
+
+Our Standards
+=============
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+  advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others’ private information, such as a physical or electronic
+  address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+  professional setting
+
+
+Our Responsibilities
+====================
+
+Maintainers are responsible for clarifying the standards of acceptable behavior
+and are expected to take appropriate and fair corrective action in response to
+any instances of unacceptable behavior.
+
+Maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, or to ban temporarily or permanently any
+contributor for other behaviors that they deem inappropriate, threatening,
+offensive, or harmful.
+
+Scope
+=====
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+Enforcement
+===========
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the Technical Advisory Board (TAB) at
+<tab@lists.linux-foundation.org>. All complaints will be reviewed and
+investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. The TAB is obligated to maintain
+confidentiality with regard to the reporter of an incident.  Further details of
+specific enforcement policies may be posted separately.
+
+Maintainers who do not follow or enforce the Code of Conduct in good faith may
+face temporary or permanent repercussions as determined by other members of the
+project’s leadership.
+
+Attribution
+===========
+
+This Code of Conduct is adapted from the Contributor Covenant, version 1.4,
+available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
diff --git a/Documentation/process/code-of-conflict.rst b/Documentation/process/code-of-conflict.rst
deleted file mode 100644 (file)
index 47b6de7..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-Code of Conflict
-----------------
-
-The Linux kernel development effort is a very personal process compared
-to "traditional" ways of developing software.  Your code and ideas
-behind it will be carefully reviewed, often resulting in critique and
-criticism.  The review will almost always require improvements to the
-code before it can be included in the kernel.  Know that this happens
-because everyone involved wants to see the best possible solution for
-the overall success of Linux.  This development process has been proven
-to create the most robust operating system kernel ever, and we do not
-want to do anything to cause the quality of submission and eventual
-result to ever decrease.
-
-If however, anyone feels personally abused, threatened, or otherwise
-uncomfortable due to this process, that is not acceptable.  If so,
-please contact the Linux Foundation's Technical Advisory Board at
-<tab@lists.linux-foundation.org>, or the individual members, and they
-will work to resolve the issue to the best of their ability.  For more
-information on who is on the Technical Advisory Board and what their
-role is, please see:
-
-       - http://www.linuxfoundation.org/projects/linux/tab
-
-As a reviewer of code, please strive to keep things civil and focused on
-the technical issues involved.  We are all humans, and frustrations can
-be high on both sides of the process.  Try to keep in mind the immortal
-words of Bill and Ted, "Be excellent to each other."
index 37bd0628b6ee90989a8471cff080c78cc32c69cb..9ae3e317bddf917b1a177d705af15a369508a4bb 100644 (file)
@@ -20,7 +20,7 @@ Below are the essential guides that every developer should read.
    :maxdepth: 1
 
    howto
-   code-of-conflict
+   code-of-conduct
    development-process
    submitting-patches
    coding-style
index 25a4b4cf04a6d462f4b640a396ae178b561984e2..92999d4e0cb800cfad588f413a0a23210fe92861 100644 (file)
@@ -97,6 +97,11 @@ parameters may be changed at runtime by the command
                        allowing boot to proceed.  none ignores them, expecting
                        user space to do the scan.
 
+       scsi_mod.use_blk_mq=
+                       [SCSI] use blk-mq I/O path by default
+                       See SCSI_MQ_DEFAULT in drivers/scsi/Kconfig.
+                       Format: <y/n>
+
        sim710=         [SCSI,HW]
                        See header of drivers/scsi/sim710.c.
 
index 688e3eeed21dced8dcb05ece8586475e54077548..46933e06c972b79fe77b97be97e1600dd014074b 100644 (file)
@@ -35,25 +35,25 @@ and two USB cables, connected like this:
 ( If your system does not list a debug port capability then you probably
   won't be able to use the USB debug key. )
 
- b.) You also need a Netchip USB debug cable/key:
+ b.) You also need a NetChip USB debug cable/key:
 
         http://www.plxtech.com/products/NET2000/NET20DC/default.asp
 
-     This is a small blue plastic connector with two USB connections,
+     This is a small blue plastic connector with two USB connections;
      it draws power from its USB connections.
 
  c.) You need a second client/console system with a high speed USB 2.0
      port.
 
- d.) The Netchip device must be plugged directly into the physical
+ d.) The NetChip device must be plugged directly into the physical
      debug port on the "host/target" system.  You cannot use a USB hub in
      between the physical debug port and the "host/target" system.
 
      The EHCI debug controller is bound to a specific physical USB
-     port and the Netchip device will only work as an early printk
+     port and the NetChip device will only work as an early printk
      device in this port.  The EHCI host controllers are electrically
      wired such that the EHCI debug controller is hooked up to the
-     first physical and there is no way to change this via software.
+     first physical port and there is no way to change this via software.
      You can find the physical port through experimentation by trying
      each physical port on the system and rebooting.  Or you can try
      and use lsusb or look at the kernel info messages emitted by the
@@ -65,9 +65,9 @@ and two USB cables, connected like this:
      to the hardware vendor, because there is no reason not to wire
      this port into one of the physically accessible ports.
 
- e.) It is also important to note, that many versions of the Netchip
+ e.) It is also important to note that many versions of the NetChip
      device require the "client/console" system to be plugged into the
-     right and side of the device (with the product logo facing up and
+     right hand side of the device (with the product logo facing up and
      readable left to right).  The reason being is that the 5 volt
      power supply is taken from only one side of the device and it
      must be the side that does not get rebooted.
@@ -81,13 +81,18 @@ and two USB cables, connected like this:
       CONFIG_EARLY_PRINTK_DBGP=y
 
     And you need to add the boot command line: "earlyprintk=dbgp".
+
     (If you are using Grub, append it to the 'kernel' line in
-     /etc/grub.conf)
+     /etc/grub.conf.  If you are using Grub2 on a BIOS firmware system,
+     append it to the 'linux' line in /boot/grub2/grub.cfg. If you are
+     using Grub2 on an EFI firmware system, append it to the 'linux'
+     or 'linuxefi' line in /boot/grub2/grub.cfg or
+     /boot/efi/EFI/<distro>/grub.cfg.)
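
     For example, the 'linux' line of a Grub2 menu entry would then look
     roughly like this (the kernel image and root device are placeholders,
     not literal values):

        linux /boot/<kernel-image> root=<root-device> ro earlyprintk=dbgp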
 
     On systems with more than one EHCI debug controller you must
     specify the correct EHCI debug controller number.  The ordering
     comes from the PCI bus enumeration of the EHCI controllers.  The
-    default with no number argument is "0" the first EHCI debug
+    default with no number argument is "0" or the first EHCI debug
     controller.  To use the second EHCI debug controller, you would
     use the command line: "earlyprintk=dbgp1"
 
@@ -111,7 +116,7 @@ and two USB cables, connected like this:
     see the raw output.
 
  c.) On Nvidia Southbridge based systems: the kernel will try to probe
-     and find out which port has debug device connected.
+     and find out which port has the debug device connected.
 
 3. Testing that it works fine:
 
index e20e7c42347bd78204c3df4f4fcbb263c4282534..e993064637ca060b878ceec90cade52ec44c84c3 100644 (file)
@@ -2311,6 +2311,7 @@ F:        drivers/clocksource/cadence_ttc_timer.c
 F:     drivers/i2c/busses/i2c-cadence.c
 F:     drivers/mmc/host/sdhci-of-arasan.c
 F:     drivers/edac/synopsys_edac.c
+F:     drivers/i2c/busses/i2c-xiic.c
 
 ARM64 PORT (AARCH64 ARCHITECTURE)
 M:     Catalin Marinas <catalin.marinas@arm.com>
@@ -4364,7 +4365,8 @@ F:        drivers/i2c/busses/i2c-diolan-u2c.c
 
 FILESYSTEM DIRECT ACCESS (DAX)
 M:     Matthew Wilcox <mawilcox@microsoft.com>
-M:     Ross Zwisler <ross.zwisler@linux.intel.com>
+M:     Ross Zwisler <zwisler@kernel.org>
+M:     Jan Kara <jack@suse.cz>
 L:     linux-fsdevel@vger.kernel.org
 S:     Supported
 F:     fs/dax.c
@@ -4374,7 +4376,7 @@ F:        include/trace/events/fs_dax.h
 DEVICE DIRECT ACCESS (DAX)
 M:     Dan Williams <dan.j.williams@intel.com>
 M:     Dave Jiang <dave.jiang@intel.com>
-M:     Ross Zwisler <ross.zwisler@linux.intel.com>
+M:     Ross Zwisler <zwisler@kernel.org>
 M:     Vishal Verma <vishal.l.verma@intel.com>
 L:     linux-nvdimm@lists.01.org
 S:     Supported
@@ -5623,6 +5625,8 @@ F:        lib/fault-inject.c
 
 FBTFT Framebuffer drivers
 M:     Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+L:     dri-devel@lists.freedesktop.org
+L:     linux-fbdev@vger.kernel.org
 S:     Maintained
 F:     drivers/staging/fbtft/
 
@@ -6058,7 +6062,7 @@ F:        Documentation/gcc-plugins.txt
 
 GASKET DRIVER FRAMEWORK
 M:     Rob Springer <rspringer@google.com>
-M:     John Joseph <jnjoseph@google.com>
+M:     Todd Poynor <toddpoynor@google.com>
 M:     Ben Chan <benchan@chromium.org>
 S:     Maintained
 F:     drivers/staging/gasket/
@@ -7014,6 +7018,20 @@ F:       drivers/crypto/vmx/aes*
 F:     drivers/crypto/vmx/ghash*
 F:     drivers/crypto/vmx/ppc-xlate.pl
 
+IBM Power PCI Hotplug Driver for RPA-compliant PPC64 platform
+M:     Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
+L:     linux-pci@vger.kernel.org
+L:     linuxppc-dev@lists.ozlabs.org
+S:     Supported
+F:     drivers/pci/hotplug/rpaphp*
+
+IBM Power IO DLPAR Driver for RPA-compliant PPC64 platform
+M:     Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
+L:     linux-pci@vger.kernel.org
+L:     linuxppc-dev@lists.ozlabs.org
+S:     Supported
+F:     drivers/pci/hotplug/rpadlpar*
+
 IBM ServeRAID RAID DRIVER
 S:     Orphan
 F:     drivers/scsi/ips.*
@@ -8254,9 +8272,9 @@ F:        drivers/ata/pata_arasan_cf.c
 
 LIBATA PATA DRIVERS
 M:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
-M:     Jens Axboe <kernel.dk>
+M:     Jens Axboe <axboe@kernel.dk>
 L:     linux-ide@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 S:     Maintained
 F:     drivers/ata/pata_*.c
 F:     drivers/ata/ata_generic.c
@@ -8274,7 +8292,7 @@ LIBATA SATA AHCI PLATFORM devices support
 M:     Hans de Goede <hdegoede@redhat.com>
 M:     Jens Axboe <axboe@kernel.dk>
 L:     linux-ide@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 S:     Maintained
 F:     drivers/ata/ahci_platform.c
 F:     drivers/ata/libahci_platform.c
@@ -8290,7 +8308,7 @@ F:        drivers/ata/sata_promise.*
 LIBATA SUBSYSTEM (Serial and Parallel ATA drivers)
 M:     Jens Axboe <axboe@kernel.dk>
 L:     linux-ide@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 S:     Maintained
 F:     drivers/ata/
 F:     include/linux/ata.h
@@ -8298,12 +8316,12 @@ F:      include/linux/libata.h
 F:     Documentation/devicetree/bindings/ata/
 
 LIBLOCKDEP
-M:     Sasha Levin <alexander.levin@verizon.com>
+M:     Sasha Levin <alexander.levin@microsoft.com>
 S:     Maintained
 F:     tools/lib/lockdep/
 
 LIBNVDIMM BLK: MMIO-APERTURE DRIVER
-M:     Ross Zwisler <ross.zwisler@linux.intel.com>
+M:     Ross Zwisler <zwisler@kernel.org>
 M:     Dan Williams <dan.j.williams@intel.com>
 M:     Vishal Verma <vishal.l.verma@intel.com>
 M:     Dave Jiang <dave.jiang@intel.com>
@@ -8316,7 +8334,7 @@ F:        drivers/nvdimm/region_devs.c
 LIBNVDIMM BTT: BLOCK TRANSLATION TABLE
 M:     Vishal Verma <vishal.l.verma@intel.com>
 M:     Dan Williams <dan.j.williams@intel.com>
-M:     Ross Zwisler <ross.zwisler@linux.intel.com>
+M:     Ross Zwisler <zwisler@kernel.org>
 M:     Dave Jiang <dave.jiang@intel.com>
 L:     linux-nvdimm@lists.01.org
 Q:     https://patchwork.kernel.org/project/linux-nvdimm/list/
@@ -8324,7 +8342,7 @@ S:        Supported
 F:     drivers/nvdimm/btt*
 
 LIBNVDIMM PMEM: PERSISTENT MEMORY DRIVER
-M:     Ross Zwisler <ross.zwisler@linux.intel.com>
+M:     Ross Zwisler <zwisler@kernel.org>
 M:     Dan Williams <dan.j.williams@intel.com>
 M:     Vishal Verma <vishal.l.verma@intel.com>
 M:     Dave Jiang <dave.jiang@intel.com>
@@ -8343,7 +8361,7 @@ F:        Documentation/devicetree/bindings/pmem/pmem-region.txt
 
 LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM
 M:     Dan Williams <dan.j.williams@intel.com>
-M:     Ross Zwisler <ross.zwisler@linux.intel.com>
+M:     Ross Zwisler <zwisler@kernel.org>
 M:     Vishal Verma <vishal.l.verma@intel.com>
 M:     Dave Jiang <dave.jiang@intel.com>
 L:     linux-nvdimm@lists.01.org
@@ -11152,7 +11170,7 @@ F:      drivers/pci/controller/dwc/pci-exynos.c
 
 PCI DRIVER FOR SYNOPSYS DESIGNWARE
 M:     Jingoo Han <jingoohan1@gmail.com>
-M:     Joao Pinto <Joao.Pinto@synopsys.com>
+M:     Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 L:     linux-pci@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -11344,10 +11362,10 @@ S:    Maintained
 F:     drivers/platform/x86/peaq-wmi.c
 
 PER-CPU MEMORY ALLOCATOR
+M:     Dennis Zhou <dennis@kernel.org>
 M:     Tejun Heo <tj@kernel.org>
 M:     Christoph Lameter <cl@linux.com>
-M:     Dennis Zhou <dennisszhou@gmail.com>
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu.git
 S:     Maintained
 F:     include/linux/percpu*.h
 F:     mm/percpu*.c
@@ -13431,9 +13449,8 @@ F:      drivers/i2c/busses/i2c-synquacer.c
 F:     Documentation/devicetree/bindings/i2c/i2c-synquacer.txt
 
 SOCIONEXT UNIPHIER SOUND DRIVER
-M:     Katsuhiro Suzuki <suzuki.katsuhiro@socionext.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
-S:     Maintained
+S:     Orphan
 F:     sound/soc/uniphier/
 
 SOEKRIS NET48XX LED SUPPORT
index c13f8b85ba6034283e7397bbb332223d38de8f7d..f03a1e0625030027d7b9395d85c2d76b82530d28 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
-PATCHLEVEL = 18
+PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc4
 NAME = Merciless Moray
 
 # *DOCUMENTATION*
@@ -299,19 +299,7 @@ KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
 KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION)
 export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION
 
-# SUBARCH tells the usermode build what the underlying arch is.  That is set
-# first, and if a usermode build is happening, the "ARCH=um" on the command
-# line overrides the setting of ARCH below.  If a native build is happening,
-# then ARCH is assigned, getting whatever value it gets normally, and
-# SUBARCH is subsequently ignored.
-
-SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
-                                 -e s/sun4u/sparc64/ \
-                                 -e s/arm.*/arm/ -e s/sa110/arm/ \
-                                 -e s/s390x/s390/ -e s/parisc64/parisc/ \
-                                 -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
-                                 -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \
-                                 -e s/riscv.*/riscv/)
+include scripts/subarch.include
 
 # Cross compiling and selecting different set of gcc/bin-utils
 # ---------------------------------------------------------------------------
@@ -440,7 +428,7 @@ KBUILD_CFLAGS_KERNEL :=
 KBUILD_AFLAGS_MODULE  := -DMODULE
 KBUILD_CFLAGS_MODULE  := -DMODULE
 KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
-LDFLAGS :=
+KBUILD_LDFLAGS :=
 GCC_PLUGINS_CFLAGS :=
 
 export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC
@@ -448,7 +436,7 @@ export CPP AR NM STRIP OBJCOPY OBJDUMP KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS
 export MAKE LEX YACC AWK GENKSYMS INSTALLKERNEL PERL PYTHON PYTHON2 PYTHON3 UTS_MACHINE
 export HOSTCXX KBUILD_HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
 
-export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
+export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS
 export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
 export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE CFLAGS_UBSAN
 export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
@@ -507,9 +495,13 @@ KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
 endif
 
 RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
+RETPOLINE_VDSO_CFLAGS_GCC := -mindirect-branch=thunk-inline -mindirect-branch-register
 RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
+RETPOLINE_VDSO_CFLAGS_CLANG := -mretpoline
 RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
+RETPOLINE_VDSO_CFLAGS := $(call cc-option,$(RETPOLINE_VDSO_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_VDSO_CFLAGS_CLANG)))
 export RETPOLINE_CFLAGS
+export RETPOLINE_VDSO_CFLAGS
 
 KBUILD_CFLAGS  += $(call cc-option,-fno-PIE)
 KBUILD_AFLAGS  += $(call cc-option,-fno-PIE)
@@ -612,6 +604,11 @@ CFLAGS_GCOV        := -fprofile-arcs -ftest-coverage \
        $(call cc-disable-warning,maybe-uninitialized,)
 export CFLAGS_GCOV
 
+# The arch Makefiles can override CC_FLAGS_FTRACE. We may also append it later.
+ifdef CONFIG_FUNCTION_TRACER
+  CC_FLAGS_FTRACE := -pg
+endif
+
 # The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
 # values of the respective KBUILD_* variables
 ARCH_CPPFLAGS :=
@@ -751,9 +748,6 @@ KBUILD_CFLAGS       += $(call cc-option, -femit-struct-debug-baseonly) \
 endif
 
 ifdef CONFIG_FUNCTION_TRACER
-ifndef CC_FLAGS_FTRACE
-CC_FLAGS_FTRACE := -pg
-endif
 ifdef CONFIG_FTRACE_MCOUNT_RECORD
   # gcc 5 supports generating the mcount tables directly
   ifeq ($(call cc-option-yn,-mrecord-mcount),y)
@@ -790,8 +784,8 @@ KBUILD_CFLAGS += $(call cc-option, -fno-inline-functions-called-once)
 endif
 
 ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
-KBUILD_CFLAGS_KERNEL   += $(call cc-option,-ffunction-sections,)
-KBUILD_CFLAGS_KERNEL   += $(call cc-option,-fdata-sections,)
+KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
+LDFLAGS_vmlinux += --gc-sections
 endif
 
 # arch Makefile may override CC so keep this after arch Makefile is included
@@ -803,6 +797,9 @@ KBUILD_CFLAGS += $(call cc-option,-Wdeclaration-after-statement,)
 # disable pointer signed / unsigned warnings in gcc 4.0
 KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
 
+# disable stringop warnings in gcc 8+
+KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation)
+
 # disable invalid "can't wrap" optimizations for signed / pointers
 KBUILD_CFLAGS  += $(call cc-option,-fno-strict-overflow)
 
@@ -857,10 +854,6 @@ LDFLAGS_BUILD_ID := $(call ld-option, --build-id)
 KBUILD_LDFLAGS_MODULE += $(LDFLAGS_BUILD_ID)
 LDFLAGS_vmlinux += $(LDFLAGS_BUILD_ID)
 
-ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
-LDFLAGS_vmlinux        += $(call ld-option, --gc-sections,)
-endif
-
 ifeq ($(CONFIG_STRIP_ASM_SYMS),y)
 LDFLAGS_vmlinux        += $(call ld-option, -X,)
 endif
@@ -1024,7 +1017,7 @@ ARCH_POSTLINK := $(wildcard $(srctree)/arch/$(SRCARCH)/Makefile.postlink)
 
 # Final link of vmlinux with optional arch pass after final link
 cmd_link-vmlinux =                                                 \
-       $(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) ;    \
+       $(CONFIG_SHELL) $< $(LD) $(KBUILD_LDFLAGS) $(LDFLAGS_vmlinux) ;    \
        $(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true)
 
 vmlinux: scripts/link-vmlinux.sh autoksyms_recursive $(vmlinux-deps) FORCE
@@ -1354,16 +1347,12 @@ distclean: mrproper
 
 # Packaging of the kernel to various formats
 # ---------------------------------------------------------------------------
-# rpm target kept for backward compatibility
 package-dir    := scripts/package
 
 %src-pkg: FORCE
        $(Q)$(MAKE) $(build)=$(package-dir) $@
 %pkg: include/config/kernel.release FORCE
        $(Q)$(MAKE) $(build)=$(package-dir) $@
-rpm: rpm-pkg
-       @echo "  WARNING: \"rpm\" target will be removed after Linux 4.18"
-       @echo "           Please use \"rpm-pkg\" instead."
 
 
 # Brief documentation of the typical targets used
index 6d5eb8267e429fa3eac927b053c31944500a74da..b4441b0764d71aff87b67fba665163b57bdb20b6 100644 (file)
@@ -9,6 +9,7 @@
 config ARC
        def_bool y
        select ARC_TIMERS
+       select ARCH_HAS_PTE_SPECIAL
        select ARCH_HAS_SYNC_DMA_FOR_CPU
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
        select ARCH_HAS_SG_CHAIN
@@ -28,8 +29,12 @@ config ARC
        select GENERIC_SMP_IDLE_THREAD
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_TRACEHOOK
+       select HAVE_DEBUG_STACKOVERFLOW
        select HAVE_FUTEX_CMPXCHG if FUTEX
+       select HAVE_GENERIC_DMA_COHERENT
        select HAVE_IOREMAP_PROT
+       select HAVE_KERNEL_GZIP
+       select HAVE_KERNEL_LZMA
        select HAVE_KPROBES
        select HAVE_KRETPROBES
        select HAVE_MEMBLOCK
@@ -44,11 +49,6 @@ config ARC
        select OF_EARLY_FLATTREE
        select OF_RESERVED_MEM
        select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING
-       select HAVE_DEBUG_STACKOVERFLOW
-       select HAVE_GENERIC_DMA_COHERENT
-       select HAVE_KERNEL_GZIP
-       select HAVE_KERNEL_LZMA
-       select ARCH_HAS_PTE_SPECIAL
 
 config ARCH_HAS_CACHE_LINE_SIZE
        def_bool y
index 6c1b20dd76ad902655d7317eb44580923d98c690..99cce77ab98f2d79c3dbef3130bff70b91ea076d 100644 (file)
@@ -43,10 +43,7 @@ ifdef CONFIG_ARC_CURR_IN_REG
 LINUXINCLUDE   +=  -include ${src}/arch/arc/include/asm/current.h
 endif
 
-upto_gcc44    :=  $(call cc-ifversion, -le, 0404, y)
-atleast_gcc44 :=  $(call cc-ifversion, -ge, 0404, y)
-
-cflags-$(atleast_gcc44)                        += -fsection-anchors
+cflags-y                               += -fsection-anchors
 
 cflags-$(CONFIG_ARC_HAS_LLSC)          += -mlock
 cflags-$(CONFIG_ARC_HAS_SWAPE)         += -mswape
@@ -82,11 +79,6 @@ cflags-$(disable_small_data)         += -mno-sdata -fcall-used-gp
 cflags-$(CONFIG_CPU_BIG_ENDIAN)                += -mbig-endian
 ldflags-$(CONFIG_CPU_BIG_ENDIAN)       += -EB
 
-# STAR 9000518362: (fixed with binutils shipping with gcc 4.8)
-# arc-linux-uclibc-ld (buildroot) or arceb-elf32-ld (EZChip) don't accept
-# --build-id w/o "-marclinux". Default arc-elf32-ld is OK
-ldflags-$(upto_gcc44)                  += -marclinux
-
 LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
 
 # Modules with short calls might break for calls into builtin-kernel
@@ -95,7 +87,7 @@ KBUILD_CFLAGS_MODULE  += -mlong-calls -mno-millicode
 # Finally dump eveything into kernel build system
 KBUILD_CFLAGS  += $(cflags-y)
 KBUILD_AFLAGS  += $(KBUILD_CFLAGS)
-LDFLAGS                += $(ldflags-y)
+KBUILD_LDFLAGS += $(ldflags-y)
 
 head-y         := arch/arc/kernel/head.o
 
index dc91c663bcc02e2cdc116f40ad31757d711ac485..d75d65ddf8e31db78c58fa9882b90c2e6be2ed4b 100644 (file)
                };
        };
 
+       /*
+        * Mark DMA peripherals connected via IOC port as dma-coherent. We do
+        * it via overlay because peripherals defined in axs10x_mb.dtsi are
+        * used for both AXS101 and AXS103 boards and only AXS103 has IOC (so
+        * only AXS103 board has HW-coherent DMA peripherals)
+        * We don't need to mark pgu@17000 as dma-coherent because it uses
+        * external DMA buffer located outside of IOC aperture.
+        */
+       axs10x_mb {
+               ethernet@0x18000 {
+                       dma-coherent;
+               };
+
+               ehci@0x40000 {
+                       dma-coherent;
+               };
+
+               ohci@0x60000 {
+                       dma-coherent;
+               };
+
+               mmc@0x15000 {
+                       dma-coherent;
+               };
+       };
+
        /*
         * The DW APB ICTL intc on MB is connected to CPU intc via a
         * DT "invisible" DW APB GPIO block, configured to simply pass thru
index 69ff4895f2ba4b558f2bdfed547ef0ec27288174..a05bb737ea6392f5e77cd3830dceb8afe620943e 100644 (file)
                };
        };
 
+       /*
+        * Mark DMA peripherals connected via IOC port as dma-coherent. We do
+        * it via overlay because peripherals defined in axs10x_mb.dtsi are
+        * used for both AXS101 and AXS103 boards and only AXS103 has IOC (so
+        * only AXS103 board has HW-coherent DMA peripherals)
+        * We don't need to mark pgu@17000 as dma-coherent because it uses
+        * external DMA buffer located outside of IOC aperture.
+        */
+       axs10x_mb {
+               ethernet@0x18000 {
+                       dma-coherent;
+               };
+
+               ehci@0x40000 {
+                       dma-coherent;
+               };
+
+               ohci@0x60000 {
+                       dma-coherent;
+               };
+
+               mmc@0x15000 {
+                       dma-coherent;
+               };
+       };
+
        /*
         * This INTC is actually connected to DW APB GPIO
         * which acts as a wire between MB INTC and CPU INTC.
index 47b74fbc403c21cc2f493f6f84d6216b7c5ef5c1..37bafd44e36d0fed9b85e80ea356cd78df0c1872 100644 (file)
@@ -9,6 +9,10 @@
  */
 
 / {
+       aliases {
+               ethernet = &gmac;
+       };
+
        axs10x_mb {
                compatible = "simple-bus";
                #address-cells = <1>;
@@ -68,7 +72,7 @@
                        };
                };
 
-               ethernet@0x18000 {
+               gmac: ethernet@0x18000 {
                        #interrupt-cells = <1>;
                        compatible = "snps,dwmac";
                        reg = < 0x18000 0x2000 >;
@@ -81,6 +85,7 @@
                        max-speed = <100>;
                        resets = <&creg_rst 5>;
                        reset-names = "stmmaceth";
+                       mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
                };
 
                ehci@0x40000 {
index 006aa3de5348f31c7462f52f173ad2e74434d062..ef149f59929ae394a30695fa0940060acef15817 100644 (file)
                bootargs = "earlycon=uart8250,mmio32,0xf0005000,115200n8 console=ttyS0,115200n8 debug print-fatal-signals=1";
        };
 
+       aliases {
+               ethernet = &gmac;
+       };
+
        cpus {
                #address-cells = <1>;
                #size-cells = <0>;
                        #clock-cells = <0>;
                };
 
-               ethernet@8000 {
+               gmac: ethernet@8000 {
                        #interrupt-cells = <1>;
                        compatible = "snps,dwmac";
                        reg = <0x8000 0x2000>;
                        phy-handle = <&phy0>;
                        resets = <&cgu_rst HSDK_ETH_RESET>;
                        reset-names = "stmmaceth";
+                       mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
+                       dma-coherent;
 
                        mdio {
                                #address-cells = <1>;
                        compatible = "snps,hsdk-v1.0-ohci", "generic-ohci";
                        reg = <0x60000 0x100>;
                        interrupts = <15>;
+                       dma-coherent;
                };
 
                ehci@40000 {
                        compatible = "snps,hsdk-v1.0-ehci", "generic-ehci";
                        reg = <0x40000 0x100>;
                        interrupts = <15>;
+                       dma-coherent;
                };
 
                mmc@a000 {
                        clock-names = "biu", "ciu";
                        interrupts = <12>;
                        bus-width = <4>;
+                       dma-coherent;
                };
        };
 
index a635ea972304e3531b205c23a0e3ef814608e313..41bc08be6a3b4202bbe27f74fdc8e01a56e4c3cd 100644 (file)
@@ -1,5 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -63,7 +61,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 CONFIG_MOUSE_SERIAL=y
 CONFIG_MOUSE_SYNAPTICS_USB=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_DW=y
index aa507e423075b16be125d95fbb29b55b5b08683c..1e1c4a8011b523dc88b89fb39e90dfeab5a3154b 100644 (file)
@@ -1,5 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -64,7 +62,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 CONFIG_MOUSE_SERIAL=y
 CONFIG_MOUSE_SYNAPTICS_USB=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_DW=y
index eba07f4686545ed00383756ae53ba404b2b2b25e..6b0c0cfd5c304fd6ae58fc3fd92d9cb53e086d2d 100644 (file)
@@ -1,5 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -65,7 +63,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 CONFIG_MOUSE_SERIAL=y
 CONFIG_MOUSE_SYNAPTICS_USB=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_DW=y
index 098b19fbaa51f0116e7f0328eb3a17feb72f0123..240dd2cd514855ae96010f0e487c76b8bd37a6f9 100644 (file)
@@ -1,4 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -57,7 +56,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_SERIO_ARC_PS2=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=1
index 0104c404d8970ee44ecb0ced17fe137363e4cf5b..14ae7e5acc7c9cc381ebd9be04ad36a24b282a20 100644 (file)
@@ -1,4 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -60,7 +59,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_SERIO_ARC_PS2=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=1
index 6491be0ddbc9e9cfd457dccc452d5bebf28c1183..1dec2b4bc5e6ea70696249d6815dfe69e73eb21c 100644 (file)
@@ -1,4 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 CONFIG_SYSVIPC=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
 CONFIG_NO_HZ_IDLE=y
index 7c9c706ae7f66eb29d4cf48ca0b95d5dd44630f5..31ba224bbfb474985b49930dea193c6bbb1a5f37 100644 (file)
@@ -59,7 +59,6 @@ CONFIG_NETCONSOLE=y
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=1
index 99e05cf63fca2c6d953b952386b0cf1649ae7332..8e0b8b134cd9ed89652b88aea3bade03881e95c9 100644 (file)
@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -44,7 +43,6 @@ CONFIG_LXT_PHY=y
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_ARC=y
 CONFIG_SERIAL_ARC_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
index 0dc4f9b737e7a4f48b41ae7caaedaa2ce89c5b40..739b90e5e8931f1e5f1443aa19b3e0fac93a155b 100644 (file)
@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -45,7 +44,6 @@ CONFIG_DEVTMPFS=y
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_ARC=y
 CONFIG_SERIAL_ARC_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
index be3c30a15e54c09db51112ca88fd9b32d73a0d34..b5895bdf3a9393027d9fed716c4b83f86a9e4915 100644 (file)
@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 # CONFIG_CROSS_MEMORY_ATTACH is not set
 CONFIG_HIGH_RES_TIMERS=y
@@ -44,7 +43,6 @@ CONFIG_DEVTMPFS=y
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_ARC=y
 CONFIG_SERIAL_ARC_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
index 3a74b9b217723d2c2c75a91510ef1aadeab7b89a..f14eeff7d3084948c16d8905677ec25a629ccdcc 100644 (file)
@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -48,7 +47,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_SERIO_ARC_PS2=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=1
index ea2834b4dc1dad187193549b7b146da413726c37..025298a483056b1ca782e83056f8b0a44d193809 100644 (file)
@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -47,7 +46,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_SERIO_ARC_PS2=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=1
index 80a5a1b4924bcf086ed57c34d7778304288f35a2..df7b77b13b823dc0c8d41f543181b12a20212cbd 100644 (file)
@@ -1,4 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -58,7 +57,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_SERIO_ARC_PS2=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=1
index 2cc87f909747c1818385de9ba99c0bbeda6197b8..a7f65313f84a56a3ddc0307c669bbfbcf4c0386f 100644 (file)
@@ -57,7 +57,6 @@ CONFIG_STMMAC_ETH=y
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=1
index f629493929ea620a50630ba49842383ecc3662a4..db47c3541f15931b2927fd1bd27749f2568e9761 100644 (file)
@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_CROSS_MEMORY_ATTACH is not set
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IKCONFIG=y
@@ -53,7 +52,6 @@ CONFIG_NATIONAL_PHY=y
 CONFIG_MOUSE_PS2_TOUCHKIT=y
 CONFIG_SERIO_ARC_PS2=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_DW=y
index 21f0ca26a05d5cffeb58541aa127ed1154a3ec98..a8ac5e917d9a5895a4bc3ba30be01fd222ecec71 100644 (file)
@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_CROSS_MEMORY_ATTACH is not set
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IKCONFIG=y
index 4e0072730241220c84ddc5019bba91c856de6f34..158af079838d007480f66b7d7ffe08d72b16c2d7 100644 (file)
@@ -84,7 +84,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v)                       \
        "1:     llock   %[orig], [%[ctr]]               \n"             \
        "       " #asm_op " %[val], %[orig], %[i]       \n"             \
        "       scond   %[val], [%[ctr]]                \n"             \
-       "                                               \n"             \
+       "       bnz     1b                              \n"             \
        : [val] "=&r"   (val),                                          \
          [orig] "=&r" (orig)                                           \
        : [ctr] "r"     (&v->counter),                                  \
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
new file mode 100644 (file)
index 0000000..c946c0a
--- /dev/null
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier:  GPL-2.0
+// (C) 2018 Synopsys, Inc. (www.synopsys.com)
+
+#ifndef ASM_ARC_DMA_MAPPING_H
+#define ASM_ARC_DMA_MAPPING_H
+
+#include <asm-generic/dma-mapping.h>
+
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+                       const struct iommu_ops *iommu, bool coherent);
+#define arch_setup_dma_ops arch_setup_dma_ops
+
+#endif
index 783b20354f8bf7889075ec4219310eb9b6f4fbf7..e8d9fb4523462a9807358fea19c4e7668cc1126d 100644 (file)
@@ -83,9 +83,6 @@ done:
 static void show_faulting_vma(unsigned long address, char *buf)
 {
        struct vm_area_struct *vma;
-       struct inode *inode;
-       unsigned long ino = 0;
-       dev_t dev = 0;
        char *nm = buf;
        struct mm_struct *active_mm = current->active_mm;
 
@@ -99,12 +96,10 @@ static void show_faulting_vma(unsigned long address, char *buf)
         * if the container VMA is not found
         */
        if (vma && (vma->vm_start <= address)) {
-               struct file *file = vma->vm_file;
-               if (file) {
-                       nm = file_path(file, buf, PAGE_SIZE - 1);
-                       inode = file_inode(vma->vm_file);
-                       dev = inode->i_sb->s_dev;
-                       ino = inode->i_ino;
+               if (vma->vm_file) {
+                       nm = file_path(vma->vm_file, buf, PAGE_SIZE - 1);
+                       if (IS_ERR(nm))
+                               nm = "?";
                }
                pr_info("    @off 0x%lx in [%s]\n"
                        "    VMA: 0x%08lx to 0x%08lx\n",
index 25c631942500ffe2802654f6690d9a223e2fbfaf..f2701c13a66b209571ff89b71ac6c93cabb9835d 100644 (file)
@@ -65,7 +65,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
 
        n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
                       perip_base,
-                      IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency "));
+                      IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));
 
        return buf;
 }
@@ -896,15 +896,6 @@ static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
        slc_op(start, sz, OP_FLUSH);
 }
 
-/*
- * DMA ops for systems with IOC
- * IOC hardware snoops all DMA traffic keeping the caches consistent with
- * memory - eliding need for any explicit cache maintenance of DMA buffers
- */
-static void __dma_cache_wback_inv_ioc(phys_addr_t start, unsigned long sz) {}
-static void __dma_cache_inv_ioc(phys_addr_t start, unsigned long sz) {}
-static void __dma_cache_wback_ioc(phys_addr_t start, unsigned long sz) {}
-
 /*
  * Exported DMA API
  */
@@ -1153,6 +1144,19 @@ noinline void __init arc_ioc_setup(void)
 {
        unsigned int ioc_base, mem_sz;
 
+       /*
+        * As for today we don't support both IOC and ZONE_HIGHMEM enabled
+        * simultaneously. This happens because as of today IOC aperture covers
+        * only ZONE_NORMAL (low mem) and any dma transactions outside this
+        * region won't be HW coherent.
+        * If we want to use both IOC and ZONE_HIGHMEM we can use
+        * bounce_buffer to handle dma transactions to HIGHMEM.
+        * Also it is possible to modify dma_direct cache ops or increase IOC
+        * aperture size if we are planning to use HIGHMEM without PAE.
+        */
+       if (IS_ENABLED(CONFIG_HIGHMEM))
+               panic("IOC and HIGHMEM can't be used simultaneously");
+
        /* Flush + invalidate + disable L1 dcache */
        __dc_disable();
 
@@ -1264,11 +1268,7 @@ void __init arc_cache_init_master(void)
        if (is_isa_arcv2() && ioc_enable)
                arc_ioc_setup();
 
-       if (is_isa_arcv2() && ioc_enable) {
-               __dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
-               __dma_cache_inv = __dma_cache_inv_ioc;
-               __dma_cache_wback = __dma_cache_wback_ioc;
-       } else if (is_isa_arcv2() && l2_line_sz && slc_enable) {
+       if (is_isa_arcv2() && l2_line_sz && slc_enable) {
                __dma_cache_wback_inv = __dma_cache_wback_inv_slc;
                __dma_cache_inv = __dma_cache_inv_slc;
                __dma_cache_wback = __dma_cache_wback_slc;
@@ -1277,6 +1277,12 @@ void __init arc_cache_init_master(void)
                __dma_cache_inv = __dma_cache_inv_l1;
                __dma_cache_wback = __dma_cache_wback_l1;
        }
+       /*
+        * In case of IOC (say IOC+SLC case), pointers above could still be set
+        * but end up not being relevant as the first function in chain is not
+        * called at all for @dma_direct_ops
+        *     arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*()
+        */
 }
 
 void __ref arc_cache_init(void)
index ec47e6079f5d08371a65ea21277b2985bec989d5..c75d5c3470e3595ce7af09f00346d5f82fc92a4c 100644 (file)
@@ -6,20 +6,17 @@
  * published by the Free Software Foundation.
  */
 
-/*
- * DMA Coherent API Notes
- *
- * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
- * implemented by accessing it using a kernel virtual address, with
- * Cache bit off in the TLB entry.
- *
- * The default DMA address == Phy address which is 0x8000_0000 based.
- */
-
 #include <linux/dma-noncoherent.h>
 #include <asm/cache.h>
 #include <asm/cacheflush.h>
 
+/*
+ * ARCH specific callbacks for generic noncoherent DMA ops (dma/noncoherent.c)
+ *  - hardware IOC not available (or "dma-coherent" not set for device in DT)
+ *  - But still handle both coherent and non-coherent requests from caller
+ *
+ * For DMA coherent hardware (IOC) generic code suffices
+ */
 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs)
 {
@@ -27,42 +24,29 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
        struct page *page;
        phys_addr_t paddr;
        void *kvaddr;
-       int need_coh = 1, need_kvaddr = 0;
-
-       page = alloc_pages(gfp, order);
-       if (!page)
-               return NULL;
+       bool need_coh = !(attrs & DMA_ATTR_NON_CONSISTENT);
 
        /*
-        * IOC relies on all data (even coherent DMA data) being in cache
-        * Thus allocate normal cached memory
-        *
-        * The gains with IOC are two pronged:
-        *   -For streaming data, elides need for cache maintenance, saving
-        *    cycles in flush code, and bus bandwidth as all the lines of a
-        *    buffer need to be flushed out to memory
-        *   -For coherent data, Read/Write to buffers terminate early in cache
-        *   (vs. always going to memory - thus are faster)
+        * __GFP_HIGHMEM flag is cleared by upper layer functions
+        * (in include/linux/dma-mapping.h) so we should never get a
+        * __GFP_HIGHMEM here.
         */
-       if ((is_isa_arcv2() && ioc_enable) ||
-           (attrs & DMA_ATTR_NON_CONSISTENT))
-               need_coh = 0;
+       BUG_ON(gfp & __GFP_HIGHMEM);
 
-       /*
-        * - A coherent buffer needs MMU mapping to enforce non-cachability
-        * - A highmem page needs a virtual handle (hence MMU mapping)
-        *   independent of cachability
-        */
-       if (PageHighMem(page) || need_coh)
-               need_kvaddr = 1;
+       page = alloc_pages(gfp, order);
+       if (!page)
+               return NULL;
 
        /* This is linear addr (0x8000_0000 based) */
        paddr = page_to_phys(page);
 
        *dma_handle = paddr;
 
-       /* This is kernel Virtual address (0x7000_0000 based) */
-       if (need_kvaddr) {
+       /*
+        * A coherent buffer needs MMU mapping to enforce non-cachability.
+        * kvaddr is kernel Virtual address (0x7000_0000 based).
+        */
+       if (need_coh) {
                kvaddr = ioremap_nocache(paddr, size);
                if (kvaddr == NULL) {
                        __free_pages(page, order);
@@ -93,12 +77,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 {
        phys_addr_t paddr = dma_handle;
        struct page *page = virt_to_page(paddr);
-       int is_non_coh = 1;
-
-       is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
-                       (is_isa_arcv2() && ioc_enable);
 
-       if (PageHighMem(page) || !is_non_coh)
+       if (!(attrs & DMA_ATTR_NON_CONSISTENT))
                iounmap((void __force __iomem *)vaddr);
 
        __free_pages(page, get_order(size));
@@ -185,3 +165,23 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
                break;
        }
 }
+
+/*
+ * Plug in coherent or noncoherent dma ops
+ */
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+                       const struct iommu_ops *iommu, bool coherent)
+{
+       /*
+        * IOC hardware snoops all DMA traffic keeping the caches consistent
+        * with memory - eliding need for any explicit cache maintenance of
+        * DMA buffers - so we can use dma_direct cache ops.
+        */
+       if (is_isa_arcv2() && ioc_enable && coherent) {
+               set_dma_ops(dev, &dma_direct_ops);
+               dev_info(dev, "use dma_direct_ops cache ops\n");
+       } else {
+               set_dma_ops(dev, &dma_noncoherent_ops);
+               dev_info(dev, "use dma_noncoherent_ops cache ops\n");
+       }
+}
index ed94cf7e3157b0defd27b3f7e98625259a4b847b..d1516f85f25d3500c402e315b6f35c60bde2d253 100644 (file)
@@ -43,12 +43,12 @@ ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
 KBUILD_CPPFLAGS        += -mbig-endian
 CHECKFLAGS     += -D__ARMEB__
 AS             += -EB
-LDFLAGS                += -EB
+KBUILD_LDFLAGS += -EB
 else
 KBUILD_CPPFLAGS        += -mlittle-endian
 CHECKFLAGS     += -D__ARMEL__
 AS             += -EL
-LDFLAGS                += -EL
+KBUILD_LDFLAGS += -EL
 endif
 
 #
old mode 100755 (executable)
new mode 100644 (file)
index f0cbd86312dce5ddc1867d9eb2dc54059450a090..d4b7c59eec6853f2f836b5b3ffd686988ad0f6b1 100644 (file)
                        ti,hwmods = "rtc";
                        clocks = <&clk_32768_ck>;
                        clock-names = "int-clk";
+                       system-power-controller;
                        status = "disabled";
                };
 
index 9fb47724b9c1fa2dee8a6f1695fba9a68d30e80f..ad2ae25b7b4dbeb5fb714dee5af9182166d51457 100644 (file)
                reg = <0x40000000 0x08000000>;
        };
 
+       reg_vddio_sd0: regulator-vddio-sd0 {
+               compatible = "regulator-fixed";
+               regulator-name = "vddio-sd0";
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+               gpio = <&gpio1 29 0>;
+       };
+
+       reg_lcd_3v3: regulator-lcd-3v3 {
+               compatible = "regulator-fixed";
+               regulator-name = "lcd-3v3";
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+               gpio = <&gpio1 18 0>;
+               enable-active-high;
+       };
+
+       reg_lcd_5v: regulator-lcd-5v {
+               compatible = "regulator-fixed";
+               regulator-name = "lcd-5v";
+               regulator-min-microvolt = <5000000>;
+               regulator-max-microvolt = <5000000>;
+       };
+
+       panel {
+               compatible = "sii,43wvf1g";
+               backlight = <&backlight_display>;
+               dvdd-supply = <&reg_lcd_3v3>;
+               avdd-supply = <&reg_lcd_5v>;
+
+               port {
+                       panel_in: endpoint {
+                               remote-endpoint = <&display_out>;
+                       };
+               };
+       };
+
        apb@80000000 {
                apbh@80000000 {
                        gpmi-nand@8000c000 {
                        lcdif@80030000 {
                                pinctrl-names = "default";
                                pinctrl-0 = <&lcdif_24bit_pins_a>;
-                               lcd-supply = <&reg_lcd_3v3>;
-                               display = <&display0>;
                                status = "okay";
 
-                               display0: display0 {
-                                       bits-per-pixel = <32>;
-                                       bus-width = <24>;
-
-                                       display-timings {
-                                               native-mode = <&timing0>;
-                                               timing0: timing0 {
-                                                       clock-frequency = <9200000>;
-                                                       hactive = <480>;
-                                                       vactive = <272>;
-                                                       hback-porch = <15>;
-                                                       hfront-porch = <8>;
-                                                       vback-porch = <12>;
-                                                       vfront-porch = <4>;
-                                                       hsync-len = <1>;
-                                                       vsync-len = <1>;
-                                                       hsync-active = <0>;
-                                                       vsync-active = <0>;
-                                                       de-active = <1>;
-                                                       pixelclk-active = <0>;
-                                               };
+                               port {
+                                       display_out: endpoint {
+                                               remote-endpoint = <&panel_in>;
                                        };
                                };
                        };
                };
        };
 
-       regulators {
-               compatible = "simple-bus";
-               #address-cells = <1>;
-               #size-cells = <0>;
-
-               reg_vddio_sd0: regulator@0 {
-                       compatible = "regulator-fixed";
-                       reg = <0>;
-                       regulator-name = "vddio-sd0";
-                       regulator-min-microvolt = <3300000>;
-                       regulator-max-microvolt = <3300000>;
-                       gpio = <&gpio1 29 0>;
-               };
-
-               reg_lcd_3v3: regulator@1 {
-                       compatible = "regulator-fixed";
-                       reg = <1>;
-                       regulator-name = "lcd-3v3";
-                       regulator-min-microvolt = <3300000>;
-                       regulator-max-microvolt = <3300000>;
-                       gpio = <&gpio1 18 0>;
-                       enable-active-high;
-               };
-       };
-
-       backlight {
+       backlight_display: backlight {
                compatible = "pwm-backlight";
                pwms = <&pwm 2 5000000>;
                brightness-levels = <0 4 8 16 32 64 128 255>;
index 6b0ae667640f1c29c61390fccc9f3c3844e4d6e2..93ab5bdfe068a11a75276ebeaa4d31a99e473e67 100644 (file)
                reg = <0x40000000 0x08000000>;
        };
 
+
+       reg_3p3v: regulator-3p3v {
+               compatible = "regulator-fixed";
+               regulator-name = "3P3V";
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+               regulator-always-on;
+       };
+
+       reg_vddio_sd0: regulator-vddio-sd0 {
+               compatible = "regulator-fixed";
+               regulator-name = "vddio-sd0";
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+               gpio = <&gpio3 28 0>;
+       };
+
+       reg_fec_3v3: regulator-fec-3v3 {
+               compatible = "regulator-fixed";
+               regulator-name = "fec-3v3";
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+               gpio = <&gpio2 15 0>;
+       };
+
+       reg_usb0_vbus: regulator-usb0-vbus {
+               compatible = "regulator-fixed";
+               regulator-name = "usb0_vbus";
+               regulator-min-microvolt = <5000000>;
+               regulator-max-microvolt = <5000000>;
+               gpio = <&gpio3 9 0>;
+               enable-active-high;
+       };
+
+       reg_usb1_vbus: regulator-usb1-vbus {
+               compatible = "regulator-fixed";
+               regulator-name = "usb1_vbus";
+               regulator-min-microvolt = <5000000>;
+               regulator-max-microvolt = <5000000>;
+               gpio = <&gpio3 8 0>;
+               enable-active-high;
+       };
+
+       reg_lcd_3v3: regulator-lcd-3v3 {
+               compatible = "regulator-fixed";
+               regulator-name = "lcd-3v3";
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+               gpio = <&gpio3 30 0>;
+               enable-active-high;
+       };
+
+       reg_can_3v3: regulator-can-3v3 {
+               compatible = "regulator-fixed";
+               regulator-name = "can-3v3";
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+               gpio = <&gpio2 13 0>;
+               enable-active-high;
+       };
+
+       reg_lcd_5v: regulator-lcd-5v {
+               compatible = "regulator-fixed";
+               regulator-name = "lcd-5v";
+               regulator-min-microvolt = <5000000>;
+               regulator-max-microvolt = <5000000>;
+       };
+
+       panel {
+               compatible = "sii,43wvf1g";
+               backlight = <&backlight_display>;
+               dvdd-supply = <&reg_lcd_3v3>;
+               avdd-supply = <&reg_lcd_5v>;
+
+               port {
+                       panel_in: endpoint {
+                               remote-endpoint = <&display_out>;
+                       };
+               };
+       };
+
        apb@80000000 {
                apbh@80000000 {
                        gpmi-nand@8000c000 {
                                pinctrl-names = "default";
                                pinctrl-0 = <&lcdif_24bit_pins_a
                                             &lcdif_pins_evk>;
-                               lcd-supply = <&reg_lcd_3v3>;
-                               display = <&display0>;
                                status = "okay";
 
-                               display0: display0 {
-                                       bits-per-pixel = <32>;
-                                       bus-width = <24>;
-
-                                       display-timings {
-                                               native-mode = <&timing0>;
-                                               timing0: timing0 {
-                                                       clock-frequency = <33500000>;
-                                                       hactive = <800>;
-                                                       vactive = <480>;
-                                                       hback-porch = <89>;
-                                                       hfront-porch = <164>;
-                                                       vback-porch = <23>;
-                                                       vfront-porch = <10>;
-                                                       hsync-len = <10>;
-                                                       vsync-len = <10>;
-                                                       hsync-active = <0>;
-                                                       vsync-active = <0>;
-                                                       de-active = <1>;
-                                                       pixelclk-active = <0>;
-                                               };
+                               port {
+                                       display_out: endpoint {
+                                               remote-endpoint = <&panel_in>;
                                        };
                                };
                        };
                };
        };
 
-       regulators {
-               compatible = "simple-bus";
-               #address-cells = <1>;
-               #size-cells = <0>;
-
-               reg_3p3v: regulator@0 {
-                       compatible = "regulator-fixed";
-                       reg = <0>;
-                       regulator-name = "3P3V";
-                       regulator-min-microvolt = <3300000>;
-                       regulator-max-microvolt = <3300000>;
-                       regulator-always-on;
-               };
-
-               reg_vddio_sd0: regulator@1 {
-                       compatible = "regulator-fixed";
-                       reg = <1>;
-                       regulator-name = "vddio-sd0";
-                       regulator-min-microvolt = <3300000>;
-                       regulator-max-microvolt = <3300000>;
-                       gpio = <&gpio3 28 0>;
-               };
-
-               reg_fec_3v3: regulator@2 {
-                       compatible = "regulator-fixed";
-                       reg = <2>;
-                       regulator-name = "fec-3v3";
-                       regulator-min-microvolt = <3300000>;
-                       regulator-max-microvolt = <3300000>;
-                       gpio = <&gpio2 15 0>;
-               };
-
-               reg_usb0_vbus: regulator@3 {
-                       compatible = "regulator-fixed";
-                       reg = <3>;
-                       regulator-name = "usb0_vbus";
-                       regulator-min-microvolt = <5000000>;
-                       regulator-max-microvolt = <5000000>;
-                       gpio = <&gpio3 9 0>;
-                       enable-active-high;
-               };
-
-               reg_usb1_vbus: regulator@4 {
-                       compatible = "regulator-fixed";
-                       reg = <4>;
-                       regulator-name = "usb1_vbus";
-                       regulator-min-microvolt = <5000000>;
-                       regulator-max-microvolt = <5000000>;
-                       gpio = <&gpio3 8 0>;
-                       enable-active-high;
-               };
-
-               reg_lcd_3v3: regulator@5 {
-                       compatible = "regulator-fixed";
-                       reg = <5>;
-                       regulator-name = "lcd-3v3";
-                       regulator-min-microvolt = <3300000>;
-                       regulator-max-microvolt = <3300000>;
-                       gpio = <&gpio3 30 0>;
-                       enable-active-high;
-               };
-
-               reg_can_3v3: regulator@6 {
-                       compatible = "regulator-fixed";
-                       reg = <6>;
-                       regulator-name = "can-3v3";
-                       regulator-min-microvolt = <3300000>;
-                       regulator-max-microvolt = <3300000>;
-                       gpio = <&gpio2 13 0>;
-                       enable-active-high;
-               };
-
-       };
-
        sound {
                compatible = "fsl,imx28-evk-sgtl5000",
                             "fsl,mxs-audio-sgtl5000";
                };
        };
 
-       backlight {
+       backlight_display: backlight {
                compatible = "pwm-backlight";
                pwms = <&pwm 2 5000000>;
                brightness-levels = <0 4 8 16 32 64 128 255>;
index 7cbc2ffa4b3a80affe055aecf6f744f4a61a9d00..7234e8330a576d8a53e7658d394e50211bbe2450 100644 (file)
                interrupt-names = "msi";
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0x7>;
-               interrupt-map = <0 0 0 1 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 0 2 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 0 3 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 0 4 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
+               /*
+                * Reference manual lists pci irqs incorrectly
+                * Real hardware ordering is same as imx6: D+MSI, C, B, A
+                */
+               interrupt-map = <0 0 0 1 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 0 2 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 0 3 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 0 4 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>,
                         <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>,
                         <&clks IMX7D_PCIE_PHY_ROOT_CLK>;
index 12d6822f00576f720c9c2086e061de4cb09fc299..04758a2a87f031eb42df555cc85a85bd0bf63816 100644 (file)
 &mmc2 {
        vmmc-supply = <&vsdio>;
        bus-width = <8>;
-       non-removable;
+       ti,non-removable;
 };
 
 &mmc3 {
                OMAP4_IOPAD(0x10c, PIN_INPUT | MUX_MODE1)       /* abe_mcbsp3_fsx */
                >;
        };
-};
-
-&omap4_pmx_wkup {
-       usb_gpio_mux_sel2: pinmux_usb_gpio_mux_sel2_pins {
-               /* gpio_wk0 */
-               pinctrl-single,pins = <
-               OMAP4_IOPAD(0x040, PIN_OUTPUT_PULLDOWN | MUX_MODE3)
-               >;
-       };
 
        vibrator_direction_pin: pinmux_vibrator_direction_pin {
                pinctrl-single,pins = <
        };
 };
 
+&omap4_pmx_wkup {
+       usb_gpio_mux_sel2: pinmux_usb_gpio_mux_sel2_pins {
+               /* gpio_wk0 */
+               pinctrl-single,pins = <
+               OMAP4_IOPAD(0x040, PIN_OUTPUT_PULLDOWN | MUX_MODE3)
+               >;
+       };
+};
+
 /*
  * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for
  * uart1 wakeirq.
index 7cb235ef0fb6dba30e77d6c91a0b9eafda6359a3..6e9e1c2f9def9fa6c675db355a1095276e73c1fe 100644 (file)
@@ -41,7 +41,7 @@
                        };
 
                        macb1: ethernet@f802c000 {
-                               compatible = "cdns,at91sam9260-macb", "cdns,macb";
+                               compatible = "atmel,sama5d3-macb", "cdns,at91sam9260-macb", "cdns,macb";
                                reg = <0xf802c000 0x100>;
                                interrupts = <35 IRQ_TYPE_LEVEL_HIGH 3>;
                                pinctrl-names = "default";
index e2c127608bcc0942555579346f7108548b47a318..7eca43ff69bbed1f1d5d431a968826612812ed18 100644 (file)
@@ -257,6 +257,7 @@ CONFIG_IMX_IPUV3_CORE=y
 CONFIG_DRM=y
 CONFIG_DRM_PANEL_LVDS=y
 CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_DRM_PANEL_SEIKO_43WVF1G=y
 CONFIG_DRM_DW_HDMI_AHB_AUDIO=m
 CONFIG_DRM_DW_HDMI_CEC=y
 CONFIG_DRM_IMX=y
index 148226e36152c02625e34f978c3673370aecf039..7b82128575351c9285ca5137e79ed73b9741462c 100644 (file)
@@ -95,6 +95,7 @@ CONFIG_MFD_MXS_LRADC=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_DRM=y
+CONFIG_DRM_PANEL_SEIKO_43WVF1G=y
 CONFIG_DRM_MXSFB=y
 CONFIG_FB_MODE_HELPERS=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
index df68dc4056e5770253b2995cc54263d20ef8e62f..5282324c7cefba5d9fc11c8c79b6a723053bc1b3 100644 (file)
@@ -5,19 +5,19 @@ CONFIG_HIGH_RES_TIMERS=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_PARTITION_ADVANCED=y
 # CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_VERSATILE=y
 CONFIG_AEABI=y
 CONFIG_OABI_COMPAT=y
-CONFIG_CMA=y
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_CMDLINE="root=1f03 mem=32M"
 CONFIG_FPE_NWFPE=y
 CONFIG_VFP=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_CMA=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -59,6 +59,7 @@ CONFIG_GPIO_PL061=y
 CONFIG_DRM=y
 CONFIG_DRM_PANEL_ARM_VERSATILE=y
 CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_DRM_DUMB_VGA_DAC=y
 CONFIG_DRM_PL111=y
 CONFIG_FB_MODE_HELPERS=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
@@ -89,9 +90,10 @@ CONFIG_NFSD=y
 CONFIG_NFSD_V3=y
 CONFIG_NLS_CODEPAGE_850=m
 CONFIG_NLS_ISO8859_1=m
+CONFIG_FONTS=y
+CONFIG_FONT_ACORN_8x8=y
+CONFIG_DEBUG_FS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_USER=y
 CONFIG_DEBUG_LL=y
-CONFIG_FONTS=y
-CONFIG_FONT_ACORN_8x8=y
index 79906cecb091e4fbf8dafb850d9d4b7ea19c3c41..3ad482d2f1eb91c8bfe6b597788e1e70b9521234 100644 (file)
@@ -223,7 +223,6 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
                              struct kvm_vcpu_events *events);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end);
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
index 2ceffd85dd3d3fbcc4f80ae831713b37d23a83f3..cd65ea4e9c54e633bd66a0178ca3f06ad16e8db9 100644 (file)
@@ -2160,6 +2160,37 @@ static int of_dev_hwmod_lookup(struct device_node *np,
        return -ENODEV;
 }
 
+/**
+ * omap_hwmod_fix_mpu_rt_idx - fix up mpu_rt_idx register offsets
+ *
+ * @oh: struct omap_hwmod *
+ * @np: struct device_node *
+ *
+ * Fix up module register offsets for modules with mpu_rt_idx.
+ * Only needed for cpsw with interconnect target module defined
+ * in device tree while still using legacy hwmod platform data
+ * for rev, sysc and syss registers.
+ *
+ * Can be removed when all cpsw hwmod platform data has been
+ * dropped.
+ */
+static void omap_hwmod_fix_mpu_rt_idx(struct omap_hwmod *oh,
+                                     struct device_node *np,
+                                     struct resource *res)
+{
+       struct device_node *child = NULL;
+       int error;
+
+       child = of_get_next_child(np, child);
+       if (!child)
+               return;
+
+       error = of_address_to_resource(child, oh->mpu_rt_idx, res);
+       if (error)
+               pr_err("%s: error mapping mpu_rt_idx: %i\n",
+                      __func__, error);
+}
+
 /**
  * omap_hwmod_parse_module_range - map module IO range from device tree
  * @oh: struct omap_hwmod *
@@ -2220,7 +2251,13 @@ int omap_hwmod_parse_module_range(struct omap_hwmod *oh,
        size = be32_to_cpup(ranges);
 
        pr_debug("omap_hwmod: %s %s at 0x%llx size 0x%llx\n",
-                oh->name, np->name, base, size);
+                oh ? oh->name : "", np->name, base, size);
+
+       if (oh && oh->mpu_rt_idx) {
+               omap_hwmod_fix_mpu_rt_idx(oh, np, res);
+
+               return 0;
+       }
 
        res->start = base;
        res->end = base + size - 1;
index fafd3d7f9f8c4b6ff1576c756e65b06666c51c2c..8ca9265220269e4d66517a028485be7d947ebee9 100644 (file)
@@ -17,6 +17,7 @@ config ARCH_ROCKCHIP
        select ARM_GLOBAL_TIMER
        select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
        select ZONE_DMA if ARM_LPAE
+       select PM
        help
          Support for Rockchip's Cortex-A9 Single-to-Quad-Core-SoCs
          containing the RK2928, RK30xx and RK31xx series.
index 29e75b47becd5a94c9e4cf7944e77a67cdca05d7..1b1a0e95c7511b9256f1953c00d0ca32994b2160 100644 (file)
@@ -763,7 +763,6 @@ config NEED_PER_CPU_EMBED_FIRST_CHUNK
 
 config HOLES_IN_ZONE
        def_bool y
-       depends on NUMA
 
 source kernel/Kconfig.hz
 
index 35f2e6e1be23f29dbbe8b511ce224be565bbb672..393d2b524284e79ab4a344779089432eb4ef31bd 100644 (file)
@@ -158,6 +158,7 @@ config ARCH_ROCKCHIP
        select GPIOLIB
        select PINCTRL
        select PINCTRL_ROCKCHIP
+       select PM
        select ROCKCHIP_TIMER
        help
          This enables support for the ARMv8 based Rockchip chipsets,
index efe61a2e4b5e0a0c5dbcb18c39c8de100941cd04..106039d25e2f7a708b81db27d7c95ae82aaa54dc 100644 (file)
@@ -62,14 +62,14 @@ CHECKFLAGS  += -D__AARCH64EB__
 AS             += -EB
 # Prefer the baremetal ELF build target, but not all toolchains include
 # it so fall back to the standard linux version if needed.
-LDFLAGS                += -EB $(call ld-option, -maarch64elfb, -maarch64linuxb)
+KBUILD_LDFLAGS += -EB $(call ld-option, -maarch64elfb, -maarch64linuxb)
 UTS_MACHINE    := aarch64_be
 else
 KBUILD_CPPFLAGS        += -mlittle-endian
 CHECKFLAGS     += -D__AARCH64EL__
 AS             += -EL
 # Same as above, prefer ELF but fall back to linux target if needed.
-LDFLAGS                += -EL $(call ld-option, -maarch64elf, -maarch64linux)
+KBUILD_LDFLAGS += -EL $(call ld-option, -maarch64elf, -maarch64linux)
 UTS_MACHINE    := aarch64
 endif
 
index ceffc40810eec42e3655f80fcd39d66965617c73..48daec7f78ba7cf4e674687fca73ed976c3b5b4b 100644 (file)
@@ -46,6 +46,7 @@
        pinctrl-0 = <&mmc0_pins>;
        vmmc-supply = <&reg_cldo1>;
        cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>;
+       bus-width = <4>;
        status = "okay";
 };
 
@@ -56,6 +57,7 @@
        vqmmc-supply = <&reg_bldo2>;
        non-removable;
        cap-mmc-hw-reset;
+       bus-width = <8>;
        status = "okay";
 };
 
index 2c5db03f226c60b595dad62e136e32ca4d769702..b2b7ced633cf6cc6ed5b52332ed8ef31e2eae0be 100644 (file)
@@ -63,7 +63,7 @@
                };
 
                idle-states {
-                       entry-method = "arm,psci";
+                       entry-method = "psci";
 
                        CPU_SLEEP_0: cpu-sleep-0 {
                                compatible = "arm,idle-state";
index c51950f4a1b66b4088331a8741edf38bfd6eec22..ab77adb4f3c20d46902bda9d96d2c77651a02b3a 100644 (file)
@@ -63,7 +63,7 @@
                };
 
                idle-states {
-                       entry-method = "arm,psci";
+                       entry-method = "psci";
 
                        CPU_SLEEP_0: cpu-sleep-0 {
                                compatible = "arm,idle-state";
index 2b2bf39c30ef68c1347f01457bb0818516fe2d43..1fb5c5a0f32e38876e25561d39548dfe700dde34 100644 (file)
@@ -62,7 +62,7 @@
                };
 
                idle-states {
-                       entry-method = "arm,psci";
+                       entry-method = "psci";
 
                        CPU_SLEEP_0: cpu-sleep-0 {
                                compatible = "arm,idle-state";
index 4c558a2133e252fd325df23725cc9a83a267f32c..68ac78c4564dc74cdfd0f40697c1b599bee72add 100644 (file)
@@ -43,7 +43,7 @@
                 * PSCI node is not added default, U-boot will add missing
                 * parts if it determines to use PSCI.
                 */
-               entry-method = "arm,psci";
+               entry-method = "psci";
 
                CPU_PH20: cpu-ph20 {
                        compatible = "arm,idle-state";
index b9f5d2ff4ff2934c0f7dcd6037312b6fe2dda4b6..7881e3d81a9aba6134fc66a7d35e6d02daa4009a 100644 (file)
@@ -87,7 +87,7 @@
                 * PSCI node is not added default, U-boot will add missing
                 * parts if it determines to use PSCI.
                 */
-               entry-method = "arm,psci";
+               entry-method = "psci";
 
                CPU_PH20: cpu-ph20 {
                        compatible = "arm,idle-state";
index 65ce1c3cb5684f22698f153df5e212b652ba1eb8..ef83786b8b905d57852ad6637bd2d4dfb5683c43 100644 (file)
@@ -83,7 +83,7 @@
                 * PSCI node is not added default, U-boot will add missing
                 * parts if it determines to use PSCI.
                 */
-               entry-method = "arm,psci";
+               entry-method = "psci";
 
                CPU_PH20: cpu-ph20 {
                        compatible = "arm,idle-state";
index 6d8532af834688d96e1bdb7394c3d2821da393e4..75cc0f7cc088869f219d66da2fad2592acd5153e 100644 (file)
                };
 
                idle-states {
-                       entry-method = "arm,psci";
+                       entry-method = "psci";
 
                        CPU_SLEEP_0: cpu-sleep-0 {
                                compatible = "arm,idle-state";
index 3f5160d2f1307b0db5076d3dc2844fffc544489f..48f5928ed45c5d68cb179a89e01f6e793116ad76 100644 (file)
        };
 
        idle-states{
-               entry-method = "arm,psci";
+               entry-method = "psci";
 
                CORE_PD: core_pd {
                        compatible = "arm,idle-state";
index a091e6f030145fc09622270c00bd6a43391656ab..29ce23422acf221e0725e93949b99c79f975ccf7 100644 (file)
@@ -58,7 +58,7 @@
                };
 
                idle-states {
-                       entry-method = "arm,psci";
+                       entry-method = "psci";
 
                        CPU_SLEEP_0: cpu-sleep-0 {
                                compatible = "arm,idle-state";
index f67e8d5e93ad96df94aba1400b40fc24d3a52781..db8d364f84768b669333dbe21334fcbb3d3e81c3 100644 (file)
@@ -38,6 +38,7 @@ CONFIG_ARCH_BCM_IPROC=y
 CONFIG_ARCH_BERLIN=y
 CONFIG_ARCH_BRCMSTB=y
 CONFIG_ARCH_EXYNOS=y
+CONFIG_ARCH_K3=y
 CONFIG_ARCH_LAYERSCAPE=y
 CONFIG_ARCH_LG1K=y
 CONFIG_ARCH_HISI=y
@@ -605,6 +606,8 @@ CONFIG_ARCH_TEGRA_132_SOC=y
 CONFIG_ARCH_TEGRA_210_SOC=y
 CONFIG_ARCH_TEGRA_186_SOC=y
 CONFIG_ARCH_TEGRA_194_SOC=y
+CONFIG_ARCH_K3_AM6_SOC=y
+CONFIG_SOC_TI=y
 CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_EXTCON_USBC_CROS_EC=y
index 6e9f33d14930eb045d293c4df7952afa555b56fa..067d8937d5af1e74a69ae7b14b1b0306a53fe087 100644 (file)
@@ -417,7 +417,7 @@ static int gcm_encrypt(struct aead_request *req)
                __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
                put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
-               while (walk.nbytes >= AES_BLOCK_SIZE) {
+               while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
                        int blocks = walk.nbytes / AES_BLOCK_SIZE;
                        u8 *dst = walk.dst.virt.addr;
                        u8 *src = walk.src.virt.addr;
@@ -437,11 +437,18 @@ static int gcm_encrypt(struct aead_request *req)
                                        NULL);
 
                        err = skcipher_walk_done(&walk,
-                                                walk.nbytes % AES_BLOCK_SIZE);
+                                                walk.nbytes % (2 * AES_BLOCK_SIZE));
                }
-               if (walk.nbytes)
+               if (walk.nbytes) {
                        __aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv,
                                            nrounds);
+                       if (walk.nbytes > AES_BLOCK_SIZE) {
+                               crypto_inc(iv, AES_BLOCK_SIZE);
+                               __aes_arm64_encrypt(ctx->aes_key.key_enc,
+                                                   ks + AES_BLOCK_SIZE, iv,
+                                                   nrounds);
+                       }
+               }
        }
 
        /* handle the tail */
@@ -545,7 +552,7 @@ static int gcm_decrypt(struct aead_request *req)
                __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
                put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
-               while (walk.nbytes >= AES_BLOCK_SIZE) {
+               while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
                        int blocks = walk.nbytes / AES_BLOCK_SIZE;
                        u8 *dst = walk.dst.virt.addr;
                        u8 *src = walk.src.virt.addr;
@@ -564,11 +571,21 @@ static int gcm_decrypt(struct aead_request *req)
                        } while (--blocks > 0);
 
                        err = skcipher_walk_done(&walk,
-                                                walk.nbytes % AES_BLOCK_SIZE);
+                                                walk.nbytes % (2 * AES_BLOCK_SIZE));
                }
-               if (walk.nbytes)
+               if (walk.nbytes) {
+                       if (walk.nbytes > AES_BLOCK_SIZE) {
+                               u8 *iv2 = iv + AES_BLOCK_SIZE;
+
+                               memcpy(iv2, iv, AES_BLOCK_SIZE);
+                               crypto_inc(iv2, AES_BLOCK_SIZE);
+
+                               __aes_arm64_encrypt(ctx->aes_key.key_enc, iv2,
+                                                   iv2, nrounds);
+                       }
                        __aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv,
                                            nrounds);
+               }
        }
 
        /* handle the tail */
index b7fb5274b250134253173384ff643578f5c06a29..0c4fc223f22575fbf76ff66d3e077249d86ad8c1 100644 (file)
@@ -69,5 +69,5 @@ static void __exit sm4_ce_mod_fini(void)
        crypto_unregister_alg(&sm4_ce_alg);
 }
 
-module_cpu_feature_match(SM3, sm4_ce_mod_init);
+module_cpu_feature_match(SM4, sm4_ce_mod_init);
 module_exit(sm4_ce_mod_fini);
index 1b5e0e843c3af8e8035c8f48ef518a3505adb881..7e2b3e360086311427a0bc77ec609b77b43b740d 100644 (file)
@@ -28,7 +28,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-       asm goto("1: nop\n\t"
+       asm_volatile_goto("1: nop\n\t"
                 ".pushsection __jump_table,  \"aw\"\n\t"
                 ".align 3\n\t"
                 ".quad 1b, %l[l_yes], %c0\n\t"
@@ -42,7 +42,7 @@ l_yes:
 
 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
-       asm goto("1: b %l[l_yes]\n\t"
+       asm_volatile_goto("1: b %l[l_yes]\n\t"
                 ".pushsection __jump_table,  \"aw\"\n\t"
                 ".align 3\n\t"
                 ".quad 1b, %l[l_yes], %c0\n\t"
index f26055f2306e1f9a479417507c2edf428c9f99de..3d6d7336f871221fd29bcc3bc4faa2cee0a7765f 100644 (file)
@@ -61,8 +61,7 @@ struct kvm_arch {
        u64    vmid_gen;
        u32    vmid;
 
-       /* 1-level 2nd stage table and lock */
-       spinlock_t pgd_lock;
+       /* 1-level 2nd stage table, protected by kvm->mmu_lock */
        pgd_t *pgd;
 
        /* VTTBR value associated with above pgd and vmid */
@@ -357,7 +356,6 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
                              struct kvm_vcpu_events *events);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end);
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
index 95ac7374d723e7fa48338caba5a32d6e9818377a..4c8b13bede80f98195ba1090fea11601a6d3df22 100644 (file)
@@ -54,6 +54,7 @@ arm64-obj-$(CONFIG_KEXEC)             += machine_kexec.o relocate_kernel.o    \
 arm64-obj-$(CONFIG_ARM64_RELOC_TEST)   += arm64-reloc-test.o
 arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
 arm64-obj-$(CONFIG_CRASH_DUMP)         += crash_dump.o
+arm64-obj-$(CONFIG_CRASH_CORE)         += crash_core.o
 arm64-obj-$(CONFIG_ARM_SDE_INTERFACE)  += sdei.o
 arm64-obj-$(CONFIG_ARM64_SSBD)         += ssbd.o
 
diff --git a/arch/arm64/kernel/crash_core.c b/arch/arm64/kernel/crash_core.c
new file mode 100644 (file)
index 0000000..ca4c3e1
--- /dev/null
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Linaro.
+ * Copyright (C) Huawei Futurewei Technologies.
+ */
+
+#include <linux/crash_core.h>
+#include <asm/memory.h>
+
+void arch_crash_save_vmcoreinfo(void)
+{
+       VMCOREINFO_NUMBER(VA_BITS);
+       /* Please note VMCOREINFO_NUMBER() uses "%d", not "%x" */
+       vmcoreinfo_append_str("NUMBER(kimage_voffset)=0x%llx\n",
+                                               kimage_voffset);
+       vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n",
+                                               PHYS_OFFSET);
+       vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
+}
index f6a5c6bc14348592cefc9967685af7de9afbe4d7..922add8adb7498ff4609725782d09a56b46781d6 100644 (file)
@@ -358,14 +358,3 @@ void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
        }
 }
 #endif /* CONFIG_HIBERNATION */
-
-void arch_crash_save_vmcoreinfo(void)
-{
-       VMCOREINFO_NUMBER(VA_BITS);
-       /* Please note VMCOREINFO_NUMBER() uses "%d", not "%x" */
-       vmcoreinfo_append_str("NUMBER(kimage_voffset)=0x%llx\n",
-                                               kimage_voffset);
-       vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n",
-                                               PHYS_OFFSET);
-       vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
-}
index d496ef579859627edd1ba98c1233d9584cd407e3..ca46153d79154bae1b0833231245129752484362 100644 (file)
@@ -98,8 +98,10 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu)
        val = read_sysreg(cpacr_el1);
        val |= CPACR_EL1_TTA;
        val &= ~CPACR_EL1_ZEN;
-       if (!update_fp_enabled(vcpu))
+       if (!update_fp_enabled(vcpu)) {
                val &= ~CPACR_EL1_FPEN;
+               __activate_traps_fpsimd32(vcpu);
+       }
 
        write_sysreg(val, cpacr_el1);
 
@@ -114,8 +116,10 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
 
        val = CPTR_EL2_DEFAULT;
        val |= CPTR_EL2_TTA | CPTR_EL2_TZ;
-       if (!update_fp_enabled(vcpu))
+       if (!update_fp_enabled(vcpu)) {
                val |= CPTR_EL2_TFP;
+               __activate_traps_fpsimd32(vcpu);
+       }
 
        write_sysreg(val, cptr_el2);
 }
@@ -129,7 +133,6 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
        if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
                write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
 
-       __activate_traps_fpsimd32(vcpu);
        if (has_vhe())
                activate_traps_vhe(vcpu);
        else
index 65f86271f02bc1bf71a44b07fba51aefee49d381..8080c9f489c3e43af385066514f3f60cca629141 100644 (file)
@@ -985,8 +985,9 @@ int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
 
        pmd = READ_ONCE(*pmdp);
 
-       /* No-op for empty entry and WARN_ON for valid entry */
-       if (!pmd_present(pmd) || !pmd_table(pmd)) {
+       if (!pmd_present(pmd))
+               return 1;
+       if (!pmd_table(pmd)) {
                VM_WARN_ON(!pmd_table(pmd));
                return 1;
        }
@@ -1007,8 +1008,9 @@ int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
 
        pud = READ_ONCE(*pudp);
 
-       /* No-op for empty entry and WARN_ON for valid entry */
-       if (!pud_present(pud) || !pud_table(pud)) {
+       if (!pud_present(pud))
+               return 1;
+       if (!pud_table(pud)) {
                VM_WARN_ON(!pud_table(pud));
                return 1;
        }
index 6ab942e6c534a01be558b1a1e85511b4f6dacbe9..3fe8a948e94cf93e554e7b6b2c18e9175cd4b87f 100644 (file)
@@ -23,8 +23,7 @@ ifdef CONFIG_CPU_BIG_ENDIAN
 KBUILD_CFLAGS   += -mbig-endian
 KBUILD_AFLAGS   += -mbig-endian
 LINKFLAGS       += -mbig-endian
-KBUILD_LDFLAGS  += -mbig-endian
-LDFLAGS += -EB
+KBUILD_LDFLAGS  += -mbig-endian -EB
 CHECKFLAGS     += -D_BIG_ENDIAN
 endif
 
index cc12b162c22286097918d120ac1dd5cafbe834cf..58634e6bae92893aedbf4172940f9e56ee97ec27 100644 (file)
@@ -22,7 +22,7 @@ KBUILD_CFLAGS += -mint32 -fno-builtin
 KBUILD_CFLAGS += -D__linux__
 KBUILD_CFLAGS += -DUTS_SYSNAME=\"uClinux\"
 KBUILD_AFLAGS += $(aflags-y)
-LDFLAGS += $(ldflags-y)
+KBUILD_LDFLAGS += $(ldflags-y)
 
 CHECKFLAGS += -msize-long
 
index 2efaa18e995ad8d6694b6532bea265aeb3460658..4c5858b80f0eebe108d6d75bfbcfa55d8a65a381 100644 (file)
@@ -22,9 +22,7 @@ ldflags-y += $(call cc-option,-mv${CONFIG_HEXAGON_ARCH_VERSION})
 
 KBUILD_CFLAGS += $(cflags-y)
 KBUILD_AFLAGS += $(aflags-y)
-
-#  no KBUILD_LDFLAGS?
-LDFLAGS += $(ldflags-y)
+KBUILD_LDFLAGS += $(ldflags-y)
 
 # Thread-info register will be r19.  This value is not configureable;
 # it is hard-coded in several files.
index 5e4a59b3ec1bb042f25de860a8489340a7add291..2691a1857d203db2522ae178fe744225c71d2da9 100644 (file)
@@ -211,7 +211,7 @@ static inline long ffz(int x)
  * This is defined the same way as ffs.
  * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
  */
-static inline long fls(int x)
+static inline int fls(int x)
 {
        int r;
 
@@ -232,7 +232,7 @@ static inline long fls(int x)
  * the libc and compiler builtin ffs routines, therefore
  * differs in spirit from the above ffz (man ffs).
  */
-static inline long ffs(int x)
+static inline int ffs(int x)
 {
        int r;
 
index 77459df34e2e7e18b29eb62e097641d7f3c1a6d9..7ebe7ad19d155803dfa50087471e3bcd000f22c6 100644 (file)
@@ -60,7 +60,7 @@ static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
                        panic("Can't create %s() memory pool!", __func__);
                else
                        gen_pool_add(coherent_pool,
-                               pfn_to_virt(max_low_pfn),
+                               (unsigned long)pfn_to_virt(max_low_pfn),
                                hexagon_coherent_pool_size, -1);
        }
 
index f0dd9fc84002a3415ed5ee5680f06fa8a02f5069..997c9f20ea0f937667031760193efe02fec00faf 100644 (file)
@@ -69,7 +69,7 @@ KBUILD_CFLAGS += -D__uClinux__
 KBUILD_AFLAGS += -D__uClinux__
 endif
 
-LDFLAGS := -m m68kelf
+KBUILD_LDFLAGS := -m m68kelf
 KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/m68k/kernel/module.lds
 
 ifdef CONFIG_SUN3
index 3534aa6a4dc2bc7e833001739582af3660a579d1..1b083c500b9a170beb6c030d882f9d9f675369c5 100644 (file)
@@ -98,11 +98,10 @@ static time64_t pmu_read_time(void)
 
        if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
                return 0;
-       while (!req.complete)
-               pmu_poll();
+       pmu_wait_complete(&req);
 
-       time = (u32)((req.reply[1] << 24) | (req.reply[2] << 16) |
-                    (req.reply[3] << 8) | req.reply[4]);
+       time = (u32)((req.reply[0] << 24) | (req.reply[1] << 16) |
+                    (req.reply[2] << 8) | req.reply[3]);
 
        return time - RTC_OFFSET;
 }
@@ -116,8 +115,7 @@ static void pmu_write_time(time64_t time)
                        (data >> 24) & 0xFF, (data >> 16) & 0xFF,
                        (data >> 8) & 0xFF, data & 0xFF) < 0)
                return;
-       while (!req.complete)
-               pmu_poll();
+       pmu_wait_complete(&req);
 }
 
 static __u8 pmu_read_pram(int offset)
index 70dde040779b56feaeaa650a7c342a23750a1fce..f5453d944ff5e19d341925590cca753ffb4fcd7e 100644 (file)
@@ -172,7 +172,7 @@ void __init cf_bootmem_alloc(void)
        high_memory = (void *)_ramend;
 
        /* Reserve kernel text/data/bss */
-       memblock_reserve(memstart, memstart - _rambase);
+       memblock_reserve(_rambase, memstart - _rambase);
 
        m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
        module_fixup(NULL, __start_fixup, __stop_fixup);
index 73330360a8e6d78632b265f4da06b2c208e25e83..4f3ab5707265204f309b31695cb3a60a4f7a8d1c 100644 (file)
@@ -40,11 +40,11 @@ CPUFLAGS-$(CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR) += -mxl-pattern-compare
 ifdef CONFIG_CPU_BIG_ENDIAN
 KBUILD_CFLAGS += -mbig-endian
 KBUILD_AFLAGS += -mbig-endian
-LDFLAGS += -EB
+KBUILD_LDFLAGS += -EB
 else
 KBUILD_CFLAGS += -mlittle-endian
 KBUILD_AFLAGS += -mlittle-endian
-LDFLAGS += -EL
+KBUILD_LDFLAGS += -EL
 endif
 
 CPUFLAGS-1 += $(call cc-option,-mcpu=v$(CPU_VER))
index 5425df002a6b11965b9c22273a1b20ba128afbd6..d74b3742fa5d8d38d0b3995d988119b8c8b15cf5 100644 (file)
@@ -309,7 +309,7 @@ endif
 # instead of .eh_frame so we don't discard them.
 KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 
-LDFLAGS                        += -m $(ld-emul)
+KBUILD_LDFLAGS         += -m $(ld-emul)
 
 ifdef CONFIG_MIPS
 CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
index abe77add8789042441d400a3d01434d41dcfc8de..3c453a1f1ff10c218e809ec1f5efe2bb70b1ef7d 100644 (file)
@@ -92,7 +92,7 @@ UIMAGE_LOADADDR = $(VMLINUZ_LOAD_ADDRESS)
 vmlinuzobjs-y += $(obj)/piggy.o
 
 quiet_cmd_zld = LD      $@
-      cmd_zld = $(LD) $(LDFLAGS) -Ttext $(VMLINUZ_LOAD_ADDRESS) -T $< $(vmlinuzobjs-y) -o $@
+      cmd_zld = $(LD) $(KBUILD_LDFLAGS) -Ttext $(VMLINUZ_LOAD_ADDRESS) -T $< $(vmlinuzobjs-y) -o $@
 quiet_cmd_strip = STRIP          $@
       cmd_strip = $(STRIP) -s $@
 vmlinuz: $(src)/ld.script $(vmlinuzobjs-y) $(obj)/calc_vmlinuz_load_addr
index a9af1d2dcd699114d00a55689c29137cef384841..2c1c53d12179302140d3576dddd11a732a5b13d9 100644 (file)
@@ -931,7 +931,6 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
                                                   bool write);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end);
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
index 4901833498f7e3878b458d23afb876e7812a95f1..8441b2698e6472e9454aec3bb730e94b029fdd21 100644 (file)
@@ -40,6 +40,7 @@ struct ltq_dma_channel {
        int desc;                       /* the current descriptor */
        struct ltq_dma_desc *desc_base; /* the descriptor base */
        int phys;                       /* physical addr */
+       struct device *dev;
 };
 
 enum {
index 019035d7225c4fd942c96c6628b6605f8d2af1b4..8f845f6e5f4266568288969b9b19b7357b86598b 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/ioport.h>
+#include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
@@ -20,6 +21,7 @@
 
 #include <asm/abi.h>
 #include <asm/mips-cps.h>
+#include <asm/page.h>
 #include <asm/vdso.h>
 
 /* Kernel-provided data used by the VDSO. */
@@ -128,12 +130,30 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
        vvar_size = gic_size + PAGE_SIZE;
        size = vvar_size + image->size;
 
+       /*
+        * Find a region that's large enough for us to perform the
+        * colour-matching alignment below.
+        */
+       if (cpu_has_dc_aliases)
+               size += shm_align_mask + 1;
+
        base = get_unmapped_area(NULL, 0, size, 0, 0);
        if (IS_ERR_VALUE(base)) {
                ret = base;
                goto out;
        }
 
+       /*
+        * If we suffer from dcache aliasing, ensure that the VDSO data page
+        * mapping is coloured the same as the kernel's mapping of that memory.
+        * This ensures that when the kernel updates the VDSO data userland
+        * will observe it without requiring cache invalidations.
+        */
+       if (cpu_has_dc_aliases) {
+               base = __ALIGN_MASK(base, shm_align_mask);
+               base += ((unsigned long)&vdso_data - gic_size) & shm_align_mask;
+       }
+
        data_addr = base + gic_size;
        vdso_addr = data_addr + PAGE_SIZE;
 
index ee64db03279336db79ac5c98e7634074d47608ac..d8dcdb350405900928b83e7afa2112ecf3122518 100644 (file)
@@ -512,16 +512,6 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
        return 1;
 }
 
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
-       unsigned long end = hva + PAGE_SIZE;
-
-       handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
-
-       kvm_mips_callbacks->flush_shadow_all(kvm);
-       return 0;
-}
-
 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
 {
        handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
index 4b9fbb6744adecfe17fdba69649e8f95f875dfcb..664f2f7f55c1c06f34e58a551fa917a27f1f9dff 100644 (file)
@@ -130,7 +130,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch)
        unsigned long flags;
 
        ch->desc = 0;
-       ch->desc_base = dma_zalloc_coherent(NULL,
+       ch->desc_base = dma_zalloc_coherent(ch->dev,
                                LTQ_DESC_NUM * LTQ_DESC_SIZE,
                                &ch->phys, GFP_ATOMIC);
 
@@ -182,7 +182,7 @@ ltq_dma_free(struct ltq_dma_channel *ch)
        if (!ch->desc_base)
                return;
        ltq_dma_close(ch);
-       dma_free_coherent(NULL, LTQ_DESC_NUM * LTQ_DESC_SIZE,
+       dma_free_coherent(ch->dev, LTQ_DESC_NUM * LTQ_DESC_SIZE,
                ch->desc_base, ch->phys);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_free);
index 9ab1326f57c976f5643394dacc21075c534eb4d9..78ce4cff101271c2c1bd9582e0b278102fb9448e 100644 (file)
@@ -38,7 +38,7 @@ $(obj)/rom.bin: $(obj)/rom
 
 # Rule to make the bootloader
 $(obj)/rom: $(addprefix $(obj)/,$(OBJECTS))
-       $(LD) $(LDFLAGS) $(LDSCRIPT) -o $@ $^
+       $(LD) $(KBUILD_LDFLAGS) $(LDSCRIPT) -o $@ $^
 
 $(obj)/%.o: $(obj)/%.gz
        $(LD) -r -o $@ -b binary $<
index 1d4248fa55e995fdc7cba95f9209372ed6533b52..7068f341133d7eb038bb94a9953a3b9946d9bf51 100644 (file)
@@ -40,6 +40,10 @@ config NDS32
        select NO_IOPORT_MAP
        select RTC_LIB
        select THREAD_INFO_IN_TASK
+       select HAVE_FUNCTION_TRACER
+       select HAVE_FUNCTION_GRAPH_TRACER
+       select HAVE_FTRACE_MCOUNT_RECORD
+       select HAVE_DYNAMIC_FTRACE
        help
          Andes(nds32) Linux support.
 
index 031c676821ff8797a879c6af56aeffd9ea4c4ed3..3509fac104919ff8d9727d4cdfe2185d10473746 100644 (file)
@@ -5,6 +5,10 @@ KBUILD_DEFCONFIG := defconfig
 
 comma = ,
 
+ifdef CONFIG_FUNCTION_TRACER
+arch-y += -malways-save-lp -mno-relax
+endif
+
 KBUILD_CFLAGS  += $(call cc-option, -mno-sched-prolog-epilog)
 KBUILD_CFLAGS  += -mcmodel=large
 
@@ -33,12 +37,12 @@ endif
 ifdef CONFIG_CPU_LITTLE_ENDIAN
 KBUILD_CFLAGS   += $(call cc-option, -EL)
 KBUILD_AFLAGS   += $(call cc-option, -EL)
-LDFLAGS         += $(call cc-option, -EL)
+KBUILD_LDFLAGS  += $(call cc-option, -EL)
 CHECKFLAGS      += -D__NDS32_EL__
 else
 KBUILD_CFLAGS   += $(call cc-option, -EB)
 KBUILD_AFLAGS   += $(call cc-option, -EB)
-LDFLAGS         += $(call cc-option, -EB)
+KBUILD_LDFLAGS  += $(call cc-option, -EB)
 CHECKFLAGS      += -D__NDS32_EB__
 endif
 
index 56c47905880256455a17943427651b0bb72e03ed..f5f9cf7e054401431f89b72af891c5a44b20baba 100644 (file)
@@ -121,9 +121,9 @@ struct elf32_hdr;
  */
 #define ELF_CLASS      ELFCLASS32
 #ifdef __NDS32_EB__
-#define ELF_DATA       ELFDATA2MSB;
+#define ELF_DATA       ELFDATA2MSB
 #else
-#define ELF_DATA       ELFDATA2LSB;
+#define ELF_DATA       ELFDATA2LSB
 #endif
 #define ELF_ARCH       EM_NDS32
 #define USE_ELF_CORE_DUMP
diff --git a/arch/nds32/include/asm/ftrace.h b/arch/nds32/include/asm/ftrace.h
new file mode 100644 (file)
index 0000000..2f96cc9
--- /dev/null
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_NDS32_FTRACE_H
+#define __ASM_NDS32_FTRACE_H
+
+#ifdef CONFIG_FUNCTION_TRACER
+
+#define HAVE_FUNCTION_GRAPH_FP_TEST
+
+#define MCOUNT_ADDR ((unsigned long)(_mcount))
+/* mcount call is composed of three instructions:
+ * sethi + ori + jral
+ */
+#define MCOUNT_INSN_SIZE 12
+
+extern void _mcount(unsigned long parent_ip);
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+#define FTRACE_ADDR ((unsigned long)_ftrace_caller)
+
+#ifdef __NDS32_EL__
+#define INSN_NOP               0x09000040
+#define INSN_SIZE(insn)                (((insn & 0x00000080) == 0) ? 4 : 2)
+#define IS_SETHI(insn)         ((insn & 0x000000fe) == 0x00000046)
+#define ENDIAN_CONVERT(insn)   be32_to_cpu(insn)
+#else /* __NDS32_EB__ */
+#define INSN_NOP               0x40000009
+#define INSN_SIZE(insn)                (((insn & 0x80000000) == 0) ? 4 : 2)
+#define IS_SETHI(insn)         ((insn & 0xfe000000) == 0x46000000)
+#define ENDIAN_CONVERT(insn)   (insn)
+#endif
+
+extern void _ftrace_caller(unsigned long parent_ip);
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+       return addr;
+}
+struct dyn_arch_ftrace {
+};
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#endif /* __ASM_NDS32_FTRACE_H */
index 19b19394a936cfdecaa51dba9159ee03473b76e7..68c38151c3e41c24c24081f92449a097bf077f02 100644 (file)
@@ -17,6 +17,7 @@
 #else
 #define FP_OFFSET (-2)
 #endif
+#define LP_OFFSET (-1)
 
 extern void __init early_trap_init(void);
 static inline void GIE_ENABLE(void)
index 18a009f3804d5e94ac23bd67b6eb4144ffa2c614..362a32d9bd16871e1db6c45d544eb2c4450bfeb5 100644 (file)
@@ -38,7 +38,7 @@ struct exception_table_entry {
 extern int fixup_exception(struct pt_regs *regs);
 
 #define KERNEL_DS      ((mm_segment_t) { ~0UL })
-#define USER_DS        ((mm_segment_t) {TASK_SIZE - 1})
+#define USER_DS                ((mm_segment_t) {TASK_SIZE - 1})
 
 #define get_ds()       (KERNEL_DS)
 #define get_fs()       (current_thread_info()->addr_limit)
@@ -49,11 +49,11 @@ static inline void set_fs(mm_segment_t fs)
        current_thread_info()->addr_limit = fs;
 }
 
-#define segment_eq(a, b)    ((a) == (b))
+#define segment_eq(a, b)       ((a) == (b))
 
 #define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs() -size))
 
-#define access_ok(type, addr, size)                 \
+#define access_ok(type, addr, size)    \
        __range_ok((unsigned long)addr, (unsigned long)size)
 /*
  * Single-value transfer routines.  They automatically use the right
@@ -75,70 +75,73 @@ static inline void set_fs(mm_segment_t fs)
  * versions are void (ie, don't return a value as such).
  */
 
-#define get_user(x,p)                                                  \
-({                                                                     \
-       long __e = -EFAULT;                                             \
-       if(likely(access_ok(VERIFY_READ,  p, sizeof(*p)))) {            \
-               __e = __get_user(x,p);                                  \
-       } else                                                          \
-               x = 0;                                                  \
-       __e;                                                            \
-})
-#define __get_user(x,ptr)                                              \
+#define get_user       __get_user                                      \
+
+#define __get_user(x, ptr)                                             \
 ({                                                                     \
        long __gu_err = 0;                                              \
-       __get_user_err((x),(ptr),__gu_err);                             \
+       __get_user_check((x), (ptr), __gu_err);                         \
        __gu_err;                                                       \
 })
 
-#define __get_user_error(x,ptr,err)                                    \
+#define __get_user_error(x, ptr, err)                                  \
 ({                                                                     \
-       __get_user_err((x),(ptr),err);                                  \
-       (void) 0;                                                       \
+       __get_user_check((x), (ptr), (err));                            \
+       (void)0;                                                        \
 })
 
-#define __get_user_err(x,ptr,err)                                      \
+#define __get_user_check(x, ptr, err)                                  \
+({                                                                     \
+       const __typeof__(*(ptr)) __user *__p = (ptr);                   \
+       might_fault();                                                  \
+       if (access_ok(VERIFY_READ, __p, sizeof(*__p))) {                \
+               __get_user_err((x), __p, (err));                        \
+       } else {                                                        \
+               (x) = 0; (err) = -EFAULT;                               \
+       }                                                               \
+})
+
+#define __get_user_err(x, ptr, err)                                    \
 do {                                                                   \
-       unsigned long __gu_addr = (unsigned long)(ptr);                 \
        unsigned long __gu_val;                                         \
        __chk_user_ptr(ptr);                                            \
        switch (sizeof(*(ptr))) {                                       \
        case 1:                                                         \
-               __get_user_asm("lbi",__gu_val,__gu_addr,err);           \
+               __get_user_asm("lbi", __gu_val, (ptr), (err));          \
                break;                                                  \
        case 2:                                                         \
-               __get_user_asm("lhi",__gu_val,__gu_addr,err);           \
+               __get_user_asm("lhi", __gu_val, (ptr), (err));          \
                break;                                                  \
        case 4:                                                         \
-               __get_user_asm("lwi",__gu_val,__gu_addr,err);           \
+               __get_user_asm("lwi", __gu_val, (ptr), (err));          \
                break;                                                  \
        case 8:                                                         \
-               __get_user_asm_dword(__gu_val,__gu_addr,err);           \
+               __get_user_asm_dword(__gu_val, (ptr), (err));           \
                break;                                                  \
        default:                                                        \
                BUILD_BUG();                                            \
                break;                                                  \
        }                                                               \
-       (x) = (__typeof__(*(ptr)))__gu_val;                             \
+       (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
 } while (0)
 
-#define __get_user_asm(inst,x,addr,err)                                        \
-       asm volatile(                                                   \
-       "1:     "inst"  %1,[%2]\n"                                      \
-       "2:\n"                                                          \
-       "       .section .fixup,\"ax\"\n"                               \
-       "       .align  2\n"                                            \
-       "3:     move %0, %3\n"                                          \
-       "       move %1, #0\n"                                          \
-       "       b       2b\n"                                           \
-       "       .previous\n"                                            \
-       "       .section __ex_table,\"a\"\n"                            \
-       "       .align  3\n"                                            \
-       "       .long   1b, 3b\n"                                       \
-       "       .previous"                                              \
-       : "+r" (err), "=&r" (x)                                         \
-       : "r" (addr), "i" (-EFAULT)                                     \
-       : "cc")
+#define __get_user_asm(inst, x, addr, err)                             \
+       __asm__ __volatile__ (                                          \
+               "1:     "inst"  %1,[%2]\n"                              \
+               "2:\n"                                                  \
+               "       .section .fixup,\"ax\"\n"                       \
+               "       .align  2\n"                                    \
+               "3:     move %0, %3\n"                                  \
+               "       move %1, #0\n"                                  \
+               "       b       2b\n"                                   \
+               "       .previous\n"                                    \
+               "       .section __ex_table,\"a\"\n"                    \
+               "       .align  3\n"                                    \
+               "       .long   1b, 3b\n"                               \
+               "       .previous"                                      \
+               : "+r" (err), "=&r" (x)                                 \
+               : "r" (addr), "i" (-EFAULT)                             \
+               : "cc")
 
 #ifdef __NDS32_EB__
 #define __gu_reg_oper0 "%H1"
@@ -149,61 +152,66 @@ do {                                                                      \
 #endif
 
 #define __get_user_asm_dword(x, addr, err)                             \
-       asm volatile(                                                   \
-       "\n1:\tlwi " __gu_reg_oper0 ",[%2]\n"                           \
-       "\n2:\tlwi " __gu_reg_oper1 ",[%2+4]\n"                         \
-       "3:\n"                                                          \
-       "       .section .fixup,\"ax\"\n"                               \
-       "       .align  2\n"                                            \
-       "4:     move    %0, %3\n"                                       \
-       "       b       3b\n"                                           \
-       "       .previous\n"                                            \
-       "       .section __ex_table,\"a\"\n"                            \
-       "       .align  3\n"                                            \
-       "       .long   1b, 4b\n"                                       \
-       "       .long   2b, 4b\n"                                       \
-       "       .previous"                                              \
-       : "+r"(err), "=&r"(x)                                           \
-       : "r"(addr), "i"(-EFAULT)                                       \
-       : "cc")
-#define put_user(x,p)                                                  \
-({                                                                     \
-       long __e = -EFAULT;                                             \
-       if(likely(access_ok(VERIFY_WRITE,  p, sizeof(*p)))) {           \
-               __e = __put_user(x,p);                                  \
-       }                                                               \
-       __e;                                                            \
-})
-#define __put_user(x,ptr)                                              \
+       __asm__ __volatile__ (                                          \
+               "\n1:\tlwi " __gu_reg_oper0 ",[%2]\n"                   \
+               "\n2:\tlwi " __gu_reg_oper1 ",[%2+4]\n"                 \
+               "3:\n"                                                  \
+               "       .section .fixup,\"ax\"\n"                       \
+               "       .align  2\n"                                    \
+               "4:     move    %0, %3\n"                               \
+               "       b       3b\n"                                   \
+               "       .previous\n"                                    \
+               "       .section __ex_table,\"a\"\n"                    \
+               "       .align  3\n"                                    \
+               "       .long   1b, 4b\n"                               \
+               "       .long   2b, 4b\n"                               \
+               "       .previous"                                      \
+               : "+r"(err), "=&r"(x)                                   \
+               : "r"(addr), "i"(-EFAULT)                               \
+               : "cc")
+
+#define put_user       __put_user                                      \
+
+#define __put_user(x, ptr)                                             \
 ({                                                                     \
        long __pu_err = 0;                                              \
-       __put_user_err((x),(ptr),__pu_err);                             \
+       __put_user_err((x), (ptr), __pu_err);                           \
        __pu_err;                                                       \
 })
 
-#define __put_user_error(x,ptr,err)                                    \
+#define __put_user_error(x, ptr, err)                                  \
+({                                                                     \
+       __put_user_err((x), (ptr), (err));                              \
+       (void)0;                                                        \
+})
+
+#define __put_user_check(x, ptr, err)                                  \
 ({                                                                     \
-       __put_user_err((x),(ptr),err);                                  \
-       (void) 0;                                                       \
+       __typeof__(*(ptr)) __user *__p = (ptr);                         \
+       might_fault();                                                  \
+       if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) {               \
+               __put_user_err((x), __p, (err));                        \
+       } else  {                                                       \
+               (err) = -EFAULT;                                        \
+       }                                                               \
 })
 
-#define __put_user_err(x,ptr,err)                                      \
+#define __put_user_err(x, ptr, err)                                    \
 do {                                                                   \
-       unsigned long __pu_addr = (unsigned long)(ptr);                 \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        __chk_user_ptr(ptr);                                            \
        switch (sizeof(*(ptr))) {                                       \
        case 1:                                                         \
-               __put_user_asm("sbi",__pu_val,__pu_addr,err);           \
+               __put_user_asm("sbi", __pu_val, (ptr), (err));          \
                break;                                                  \
        case 2:                                                         \
-               __put_user_asm("shi",__pu_val,__pu_addr,err);           \
+               __put_user_asm("shi", __pu_val, (ptr), (err));          \
                break;                                                  \
        case 4:                                                         \
-               __put_user_asm("swi",__pu_val,__pu_addr,err);           \
+               __put_user_asm("swi", __pu_val, (ptr), (err));          \
                break;                                                  \
        case 8:                                                         \
-               __put_user_asm_dword(__pu_val,__pu_addr,err);           \
+               __put_user_asm_dword(__pu_val, (ptr), (err));           \
                break;                                                  \
        default:                                                        \
                BUILD_BUG();                                            \
@@ -211,22 +219,22 @@ do {                                                                      \
        }                                                               \
 } while (0)
 
-#define __put_user_asm(inst,x,addr,err)                                        \
-       asm volatile(                                                   \
-       "1:     "inst"  %1,[%2]\n"                                      \
-       "2:\n"                                                          \
-       "       .section .fixup,\"ax\"\n"                               \
-       "       .align  2\n"                                            \
-       "3:     move    %0, %3\n"                                       \
-       "       b       2b\n"                                           \
-       "       .previous\n"                                            \
-       "       .section __ex_table,\"a\"\n"                            \
-       "       .align  3\n"                                            \
-       "       .long   1b, 3b\n"                                       \
-       "       .previous"                                              \
-       : "+r" (err)                                                    \
-       : "r" (x), "r" (addr), "i" (-EFAULT)                            \
-       : "cc")
+#define __put_user_asm(inst, x, addr, err)                             \
+       __asm__ __volatile__ (                                          \
+               "1:     "inst"  %1,[%2]\n"                              \
+               "2:\n"                                                  \
+               "       .section .fixup,\"ax\"\n"                       \
+               "       .align  2\n"                                    \
+               "3:     move    %0, %3\n"                               \
+               "       b       2b\n"                                   \
+               "       .previous\n"                                    \
+               "       .section __ex_table,\"a\"\n"                    \
+               "       .align  3\n"                                    \
+               "       .long   1b, 3b\n"                               \
+               "       .previous"                                      \
+               : "+r" (err)                                            \
+               : "r" (x), "r" (addr), "i" (-EFAULT)                    \
+               : "cc")
 
 #ifdef __NDS32_EB__
 #define __pu_reg_oper0 "%H2"
@@ -237,23 +245,24 @@ do {                                                                      \
 #endif
 
 #define __put_user_asm_dword(x, addr, err)                             \
-       asm volatile(                                                   \
-       "\n1:\tswi " __pu_reg_oper0 ",[%1]\n"                           \
-       "\n2:\tswi " __pu_reg_oper1 ",[%1+4]\n"                         \
-       "3:\n"                                                          \
-       "       .section .fixup,\"ax\"\n"                               \
-       "       .align  2\n"                                            \
-       "4:     move    %0, %3\n"                                       \
-       "       b       3b\n"                                           \
-       "       .previous\n"                                            \
-       "       .section __ex_table,\"a\"\n"                            \
-       "       .align  3\n"                                            \
-       "       .long   1b, 4b\n"                                       \
-       "       .long   2b, 4b\n"                                       \
-       "       .previous"                                              \
-       : "+r"(err)                                                     \
-       : "r"(addr), "r"(x), "i"(-EFAULT)                               \
-       : "cc")
+       __asm__ __volatile__ (                                          \
+               "\n1:\tswi " __pu_reg_oper0 ",[%1]\n"                   \
+               "\n2:\tswi " __pu_reg_oper1 ",[%1+4]\n"                 \
+               "3:\n"                                                  \
+               "       .section .fixup,\"ax\"\n"                       \
+               "       .align  2\n"                                    \
+               "4:     move    %0, %3\n"                               \
+               "       b       3b\n"                                   \
+               "       .previous\n"                                    \
+               "       .section __ex_table,\"a\"\n"                    \
+               "       .align  3\n"                                    \
+               "       .long   1b, 4b\n"                               \
+               "       .long   2b, 4b\n"                               \
+               "       .previous"                                      \
+               : "+r"(err)                                             \
+               : "r"(addr), "r"(x), "i"(-EFAULT)                       \
+               : "cc")
+
 extern unsigned long __arch_clear_user(void __user * addr, unsigned long n);
 extern long strncpy_from_user(char *dest, const char __user * src, long count);
 extern __must_check long strlen_user(const char __user * str);
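
For reference, the reworked __put_user_check() above follows the usual kernel pattern: validate the user pointer with access_ok() first, then perform the store that is allowed to fault and be fixed up via the exception table. A minimal sketch of that same pattern from a caller's point of view, using the 4.19-era access_ok() signature; the function name and values are made up for illustration and are not part of this patch:

	#include <linux/types.h>
	#include <linux/uaccess.h>

	/* Illustrative only: hand one u32 back to user space. */
	static long example_copy_out(u32 val, u32 __user *uptr)
	{
		/* Reject obviously bad pointers before touching them. */
		if (!access_ok(VERIFY_WRITE, uptr, sizeof(*uptr)))
			return -EFAULT;

		/* The store itself may still fault; __put_user() reports that. */
		return __put_user(val, uptr) ? -EFAULT : 0;
	}
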
index 42792743e8b953290b7cec0b4722684c72744df3..27cded39fa66266171a427afc988ad9db35fe75e 100644 (file)
@@ -21,3 +21,9 @@ extra-y := head.o vmlinux.lds
 
 
 obj-y                          += vdso/
+
+obj-$(CONFIG_FUNCTION_TRACER)   += ftrace.o
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+endif
index 0c6d031a1c4a9df00dc991c0f52dbdd7bddab51c..0c5386e72098e45f48b925efee3431bb275a1398 100644 (file)
@@ -9,7 +9,8 @@
 
 void __iomem *atl2c_base;
 static const struct of_device_id atl2c_ids[] __initconst = {
-       {.compatible = "andestech,atl2c",}
+       {.compatible = "andestech,atl2c",},
+       {}
 };
 
 static int __init atl2c_of_init(void)
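
The one-line fix above adds the empty sentinel entry that the of_device_id matching loop relies on to know where the table ends; without it the walk can run past the last real entry. A short sketch of a correctly terminated table (the compatible string here is invented for illustration):

	#include <linux/of.h>

	static const struct of_device_id example_ids[] __initconst = {
		{ .compatible = "vendor,example-l2c", },
		{ /* sentinel: empty entry terminates the table walk */ }
	};
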
index b8ae4e9a6b93db793d0b6b7d434adcdb47827743..21a144071566989af1daa55400a0c9fab98b34ee 100644 (file)
@@ -118,7 +118,7 @@ common_exception_handler:
        /* interrupt */
 2:
 #ifdef CONFIG_TRACE_IRQFLAGS
-       jal     trace_hardirqs_off
+       jal     __trace_hardirqs_off
 #endif
        move    $r0, $sp
        sethi   $lp, hi20(ret_from_intr)
index 03e4f7788a1882a15cf89e68aa325978cbc3b0ce..f00af92f7e22fde904fc6e54999a9ebcdde7e950 100644 (file)
@@ -138,8 +138,8 @@ no_work_pending:
 #ifdef CONFIG_TRACE_IRQFLAGS
        lwi     $p0, [$sp+(#IPSW_OFFSET)]
        andi    $p0, $p0, #0x1
-       la      $r10, trace_hardirqs_off
-       la      $r9, trace_hardirqs_on
+       la      $r10, __trace_hardirqs_off
+       la      $r9, __trace_hardirqs_on
        cmovz   $r9, $p0, $r10
        jral    $r9
 #endif
diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c
new file mode 100644 (file)
index 0000000..a0a9679
--- /dev/null
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/ftrace.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+extern void (*ftrace_trace_function)(unsigned long, unsigned long,
+                                    struct ftrace_ops*, struct pt_regs*);
+extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
+extern void ftrace_graph_caller(void);
+
+noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
+                                 struct ftrace_ops *op, struct pt_regs *regs)
+{
+       __asm__ ("");  /* keep the compiler from optimizing this away as a pure function */
+}
+
+noinline void _mcount(unsigned long parent_ip)
+{
+       /* all needed state is saved by the compiler prologue */
+
+       unsigned long ip = (unsigned long)__builtin_return_address(0);
+
+       if (ftrace_trace_function != ftrace_stub)
+               ftrace_trace_function(ip - MCOUNT_INSN_SIZE, parent_ip,
+                                     NULL, NULL);
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       if (ftrace_graph_return != (trace_func_graph_ret_t)ftrace_stub
+           || ftrace_graph_entry != ftrace_graph_entry_stub)
+               ftrace_graph_caller();
+#endif
+
+       /* all state is restored by the compiler epilogue */
+}
+EXPORT_SYMBOL(_mcount);
+
+#else /* CONFIG_DYNAMIC_FTRACE */
+
+noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
+                                 struct ftrace_ops *op, struct pt_regs *regs)
+{
+       __asm__ ("");  /* keep the compiler from optimizing this away as a pure function */
+}
+
+noinline void __naked _mcount(unsigned long parent_ip)
+{
+       __asm__ ("");  /* keep the compiler from optimizing this away as a pure function */
+}
+EXPORT_SYMBOL(_mcount);
+
+#define XSTR(s) STR(s)
+#define STR(s) #s
+void _ftrace_caller(unsigned long parent_ip)
+{
+       /* save all state needed by the compiler prologue */
+
+       /*
+        * prepare arguments for real tracing function
+        * first  arg : __builtin_return_address(0) - MCOUNT_INSN_SIZE
+        * second arg : parent_ip
+        */
+       __asm__ __volatile__ (
+               "move $r1, %0                              \n\t"
+               "addi $r0, %1, #-" XSTR(MCOUNT_INSN_SIZE) "\n\t"
+               :
+               : "r" (parent_ip), "r" (__builtin_return_address(0)));
+
+       /* a placeholder for the call to a real tracing function */
+       __asm__ __volatile__ (
+               "ftrace_call:           \n\t"
+               "nop                    \n\t"
+               "nop                    \n\t"
+               "nop                    \n\t");
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       /* a placeholder for the call to ftrace_graph_caller */
+       __asm__ __volatile__ (
+               "ftrace_graph_call:     \n\t"
+               "nop                    \n\t"
+               "nop                    \n\t"
+               "nop                    \n\t");
+#endif
+       /* restore all state needed by the compiler epilogue */
+}
+
+int __init ftrace_dyn_arch_init(void)
+{
+       return 0;
+}
+
+int ftrace_arch_code_modify_prepare(void)
+{
+       set_all_modules_text_rw();
+       return 0;
+}
+
+int ftrace_arch_code_modify_post_process(void)
+{
+       set_all_modules_text_ro();
+       return 0;
+}
+
+static unsigned long gen_sethi_insn(unsigned long addr)
+{
+       unsigned long opcode = 0x46000000;
+       unsigned long imm = addr >> 12;
+       unsigned long rt_num = 0xf << 20;
+
+       return ENDIAN_CONVERT(opcode | rt_num | imm);
+}
+
+static unsigned long gen_ori_insn(unsigned long addr)
+{
+       unsigned long opcode = 0x58000000;
+       unsigned long imm = addr & 0x0000fff;
+       unsigned long rt_num = 0xf << 20;
+       unsigned long ra_num = 0xf << 15;
+
+       return ENDIAN_CONVERT(opcode | rt_num | ra_num | imm);
+}
+
+static unsigned long gen_jral_insn(unsigned long addr)
+{
+       unsigned long opcode = 0x4a000001;
+       unsigned long rt_num = 0x1e << 20;
+       unsigned long rb_num = 0xf << 10;
+
+       return ENDIAN_CONVERT(opcode | rt_num | rb_num);
+}
+
+static void ftrace_gen_call_insn(unsigned long *call_insns,
+                                unsigned long addr)
+{
+       call_insns[0] = gen_sethi_insn(addr); /* sethi $r15, imm20u       */
+       call_insns[1] = gen_ori_insn(addr);   /* ori   $r15, $r15, imm15u */
+       call_insns[2] = gen_jral_insn(addr);  /* jral  $lp,  $r15         */
+}
+
+static int __ftrace_modify_code(unsigned long pc, unsigned long *old_insn,
+                               unsigned long *new_insn, bool validate)
+{
+       unsigned long orig_insn[3];
+
+       if (validate) {
+               if (probe_kernel_read(orig_insn, (void *)pc, MCOUNT_INSN_SIZE))
+                       return -EFAULT;
+               if (memcmp(orig_insn, old_insn, MCOUNT_INSN_SIZE))
+                       return -EINVAL;
+       }
+
+       if (probe_kernel_write((void *)pc, new_insn, MCOUNT_INSN_SIZE))
+               return -EPERM;
+
+       return 0;
+}
+
+static int ftrace_modify_code(unsigned long pc, unsigned long *old_insn,
+                             unsigned long *new_insn, bool validate)
+{
+       int ret;
+
+       ret = __ftrace_modify_code(pc, old_insn, new_insn, validate);
+       if (ret)
+               return ret;
+
+       flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
+
+       return ret;
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+       unsigned long pc = (unsigned long)&ftrace_call;
+       unsigned long old_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+       unsigned long new_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+
+       if (func != ftrace_stub)
+               ftrace_gen_call_insn(new_insn, (unsigned long)func);
+
+       return ftrace_modify_code(pc, old_insn, new_insn, false);
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+       unsigned long pc = rec->ip;
+       unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+       unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+
+       ftrace_gen_call_insn(call_insn, addr);
+
+       return ftrace_modify_code(pc, nop_insn, call_insn, true);
+}
+
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+                   unsigned long addr)
+{
+       unsigned long pc = rec->ip;
+       unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+       unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+
+       ftrace_gen_call_insn(call_insn, addr);
+
+       return ftrace_modify_code(pc, call_insn, nop_insn, true);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+                          unsigned long frame_pointer)
+{
+       unsigned long return_hooker = (unsigned long)&return_to_handler;
+       struct ftrace_graph_ent trace;
+       unsigned long old;
+       int err;
+
+       if (unlikely(atomic_read(&current->tracing_graph_pause)))
+               return;
+
+       old = *parent;
+
+       trace.func = self_addr;
+       trace.depth = current->curr_ret_stack + 1;
+
+       /* Only trace if the calling function expects to */
+       if (!ftrace_graph_entry(&trace))
+               return;
+
+       err = ftrace_push_return_trace(old, self_addr, &trace.depth,
+                                      frame_pointer, NULL);
+
+       if (err == -EBUSY)
+               return;
+
+       *parent = return_hooker;
+}
+
+noinline void ftrace_graph_caller(void)
+{
+       unsigned long *parent_ip =
+               (unsigned long *)(__builtin_frame_address(2) - 4);
+
+       unsigned long selfpc =
+               (unsigned long)(__builtin_return_address(1) - MCOUNT_INSN_SIZE);
+
+       unsigned long frame_pointer =
+               (unsigned long)__builtin_frame_address(3);
+
+       prepare_ftrace_return(parent_ip, selfpc, frame_pointer);
+}
+
+extern unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
+void __naked return_to_handler(void)
+{
+       __asm__ __volatile__ (
+               /* save state needed by the ABI     */
+               "smw.adm $r0,[$sp],$r1,#0x0  \n\t"
+
+               /* get original return address      */
+               "move $r0, $fp               \n\t"
+               "bal ftrace_return_to_handler\n\t"
+               "move $lp, $r0               \n\t"
+
+               /* restore state needed by the ABI  */
+               "lmw.bim $r0,[$sp],$r1,#0x0  \n\t");
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern unsigned long ftrace_graph_call;
+
+static int ftrace_modify_graph_caller(bool enable)
+{
+       unsigned long pc = (unsigned long)&ftrace_graph_call;
+       unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+       unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+
+       ftrace_gen_call_insn(call_insn, (unsigned long)ftrace_graph_caller);
+
+       if (enable)
+               return ftrace_modify_code(pc, nop_insn, call_insn, true);
+       else
+               return ftrace_modify_code(pc, call_insn, nop_insn, true);
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+       return ftrace_modify_graph_caller(true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+       return ftrace_modify_graph_caller(false);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+noinline void __trace_hardirqs_off(void)
+{
+       trace_hardirqs_off();
+}
+noinline void __trace_hardirqs_on(void)
+{
+       trace_hardirqs_on();
+}
+#endif /* CONFIG_TRACE_IRQFLAGS */
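
The dynamic ftrace support added above patches each call site with a three-instruction sequence (sethi/ori to build the 32-bit target address in $r15, then jral through it); gen_sethi_insn() carries the upper 20 bits of the target and gen_ori_insn() the lower 12, and ftrace_modify_code() verifies the old bytes before writing and flushes the icache afterwards. A small stand-alone check of that address split, useful to convince yourself the two immediates reassemble to the original target (plain user-space C, not kernel code; the example address is arbitrary):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t target = 0x80123abcU;          /* arbitrary example address  */
		uint32_t hi20 = target >> 12;           /* immediate carried by sethi */
		uint32_t lo12 = target & 0xfffU;        /* immediate carried by ori   */
		uint32_t rebuilt = (hi20 << 12) | lo12; /* what ends up in $r15       */

		assert(rebuilt == target);
		printf("hi20=%#x lo12=%#x rebuilt=%#x\n", hi20, lo12, rebuilt);
		return 0;
	}
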
index 4167283d8293f16c65e5f1b100a40201265a1c26..1e31829cbc2a71ec4dd3c6be62874245cda010c2 100644 (file)
@@ -40,7 +40,7 @@ void do_reloc16(unsigned int val, unsigned int *loc, unsigned int val_mask,
 
        tmp2 = tmp & loc_mask;
        if (partial_in_place) {
-               tmp &= (!loc_mask);
+               tmp &= (~loc_mask);
                tmp =
                    tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask);
        } else {
@@ -70,7 +70,7 @@ void do_reloc32(unsigned int val, unsigned int *loc, unsigned int val_mask,
 
        tmp2 = tmp & loc_mask;
        if (partial_in_place) {
-               tmp &= (!loc_mask);
+               tmp &= (~loc_mask);
                tmp =
                    tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask);
        } else {
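
The relocation fix above swaps logical NOT for bitwise NOT: with a non-zero loc_mask, `!loc_mask` evaluates to 0, so `tmp &= (!loc_mask)` wiped tmp entirely, whereas `~loc_mask` keeps exactly the bits outside the mask as intended. A tiny stand-alone demonstration of the difference (values chosen only for illustration):

	#include <stdio.h>

	int main(void)
	{
		unsigned int tmp = 0xdeadbeefU;
		unsigned int loc_mask = 0x0000ffffU;

		/* Logical NOT: any non-zero mask becomes 0, clearing tmp. */
		printf("tmp & !mask = %#x\n", tmp & !loc_mask);   /* 0          */

		/* Bitwise NOT: keep only the bits outside the mask. */
		printf("tmp & ~mask = %#x\n", tmp & ~loc_mask);   /* 0xdead0000 */
		return 0;
	}
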
index 8b231e910ea68980dbd517be895200ad19e49f55..d974c0c1c65f34123af8c7d2a63e2fb3e390e9f9 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/sched/debug.h>
 #include <linux/sched/task_stack.h>
 #include <linux/stacktrace.h>
+#include <linux/ftrace.h>
 
 void save_stack_trace(struct stack_trace *trace)
 {
@@ -16,6 +17,7 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
        unsigned long *fpn;
        int skip = trace->skip;
        int savesched;
+       int graph_idx = 0;
 
        if (tsk == current) {
                __asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(fpn));
@@ -29,10 +31,12 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
               && (fpn >= (unsigned long *)TASK_SIZE)) {
                unsigned long lpp, fpp;
 
-               lpp = fpn[-1];
+               lpp = fpn[LP_OFFSET];
                fpp = fpn[FP_OFFSET];
                if (!__kernel_text_address(lpp))
                        break;
+               else
+                       lpp = ftrace_graph_ret_addr(tsk, &graph_idx, lpp, NULL);
 
                if (savesched || !in_sched_functions(lpp)) {
                        if (skip) {
index a6205fd4db521eaf83c606d399975a4fe275c250..1496aab48998817c00cb175dbd8b09b3453d73dd 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/kdebug.h>
 #include <linux/sched/task_stack.h>
 #include <linux/uaccess.h>
+#include <linux/ftrace.h>
 
 #include <asm/proc-fns.h>
 #include <asm/unistd.h>
@@ -94,28 +95,6 @@ static void dump_instr(struct pt_regs *regs)
        set_fs(fs);
 }
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-#include <linux/ftrace.h>
-static void
-get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph)
-{
-       if (*addr == (unsigned long)return_to_handler) {
-               int index = tsk->curr_ret_stack;
-
-               if (tsk->ret_stack && index >= *graph) {
-                       index -= *graph;
-                       *addr = tsk->ret_stack[index].ret;
-                       (*graph)++;
-               }
-       }
-}
-#else
-static inline void
-get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph)
-{
-}
-#endif
-
 #define LOOP_TIMES (100)
 static void __dump(struct task_struct *tsk, unsigned long *base_reg)
 {
@@ -126,7 +105,8 @@ static void __dump(struct task_struct *tsk, unsigned long *base_reg)
                while (!kstack_end(base_reg)) {
                        ret_addr = *base_reg++;
                        if (__kernel_text_address(ret_addr)) {
-                               get_real_ret_addr(&ret_addr, tsk, &graph);
+                               ret_addr = ftrace_graph_ret_addr(
+                                               tsk, &graph, ret_addr, NULL);
                                print_ip_sym(ret_addr);
                        }
                        if (--cnt < 0)
@@ -137,15 +117,12 @@ static void __dump(struct task_struct *tsk, unsigned long *base_reg)
                       !((unsigned long)base_reg & 0x3) &&
                       ((unsigned long)base_reg >= TASK_SIZE)) {
                        unsigned long next_fp;
-#if !defined(NDS32_ABI_2)
-                       ret_addr = base_reg[0];
-                       next_fp = base_reg[1];
-#else
-                       ret_addr = base_reg[-1];
+                       ret_addr = base_reg[LP_OFFSET];
                        next_fp = base_reg[FP_OFFSET];
-#endif
                        if (__kernel_text_address(ret_addr)) {
-                               get_real_ret_addr(&ret_addr, tsk, &graph);
+
+                               ret_addr = ftrace_graph_ret_addr(
+                                               tsk, &graph, ret_addr, NULL);
                                print_ip_sym(ret_addr);
                        }
                        if (--cnt < 0)
@@ -196,11 +173,10 @@ void die(const char *str, struct pt_regs *regs, int err)
        pr_emerg("CPU: %i\n", smp_processor_id());
        show_regs(regs);
        pr_emerg("Process %s (pid: %d, stack limit = 0x%p)\n",
-                tsk->comm, tsk->pid, task_thread_info(tsk) + 1);
+                tsk->comm, tsk->pid, end_of_stack(tsk));
 
        if (!user_mode(regs) || in_interrupt()) {
-               dump_mem("Stack: ", regs->sp,
-                        THREAD_SIZE + (unsigned long)task_thread_info(tsk));
+               dump_mem("Stack: ", regs->sp, (regs->sp + PAGE_SIZE) & PAGE_MASK);
                dump_instr(regs);
                dump_stack();
        }
index 288313b886efa269fb0e13eb9231d50879e72947..9e90f30a181d7d9c9b06dc04d230c02bfde67b78 100644 (file)
@@ -13,14 +13,26 @@ OUTPUT_ARCH(nds32)
 ENTRY(_stext_lma)
 jiffies = jiffies_64;
 
+#if defined(CONFIG_GCOV_KERNEL)
+#define NDS32_EXIT_KEEP(x)     x
+#else
+#define NDS32_EXIT_KEEP(x)
+#endif
+
 SECTIONS
 {
        _stext_lma = TEXTADDR - LOAD_OFFSET;
        . = TEXTADDR;
        __init_begin = .;
        HEAD_TEXT_SECTION
+       .exit.text : {
+               NDS32_EXIT_KEEP(EXIT_TEXT)
+       }
        INIT_TEXT_SECTION(PAGE_SIZE)
        INIT_DATA_SECTION(16)
+       .exit.data : {
+               NDS32_EXIT_KEEP(EXIT_DATA)
+       }
        PERCPU_SECTION(L1_CACHE_BYTES)
        __init_end = .;
 
index 7a49f0d28d14c106ea9300c8e54ad18aa6471e0b..f1da8a7b17ff49b4965f4f5d9e647b9e1d1e258d 100644 (file)
@@ -3,15 +3,6 @@
 config TRACE_IRQFLAGS_SUPPORT
        def_bool y
 
-config DEBUG_STACK_USAGE
-       bool "Enable stack utilization instrumentation"
-       depends on DEBUG_KERNEL
-       help
-         Enables the display of the minimum amount of free stack which each
-         task has ever had available in the sysrq-T and sysrq-P debug output.
-
-         This option will slow down process creation somewhat.
-
 config EARLY_PRINTK
        bool "Activate early kernel debugging"
        default y
index db0b6eebbfa5b55a6f7c4f55b1d489bcf158d974..a80669209155383343ba8adbb90b3e8427e2afdb 100644 (file)
@@ -177,7 +177,6 @@ config PPC
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_MMAP_RND_BITS
        select HAVE_ARCH_MMAP_RND_COMPAT_BITS   if COMPAT
-       select HAVE_ARCH_PREL32_RELOCATIONS
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
        select HAVE_CBPF_JIT                    if !PPC64
index 8397c7bd5880805a457f5db43b35425f6787310e..11a1acba164a1629f10a3080946c80f64feb0eca 100644 (file)
@@ -76,14 +76,14 @@ endif
 
 ifdef CONFIG_CPU_LITTLE_ENDIAN
 KBUILD_CFLAGS  += -mlittle-endian
-LDFLAGS                += -EL
+KBUILD_LDFLAGS += -EL
 LDEMULATION    := lppc
 GNUTARGET      := powerpcle
 MULTIPLEWORD   := -mno-multiple
 KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-save-toc-indirect)
 else
 KBUILD_CFLAGS += $(call cc-option,-mbig-endian)
-LDFLAGS                += -EB
+KBUILD_LDFLAGS += -EB
 LDEMULATION    := ppc
 GNUTARGET      := powerpc
 MULTIPLEWORD   := -mmultiple
@@ -108,7 +108,7 @@ aflags-$(CONFIG_CPU_LITTLE_ENDIAN)  += -mlittle-endian
 ifeq ($(HAS_BIARCH),y)
 KBUILD_CFLAGS  += -m$(BITS)
 KBUILD_AFLAGS  += -m$(BITS) -Wl,-a$(BITS)
-LDFLAGS                += -m elf$(BITS)$(LDEMULATION)
+KBUILD_LDFLAGS += -m elf$(BITS)$(LDEMULATION)
 KBUILD_ARFLAGS += --target=elf$(BITS)-$(GNUTARGET)
 endif
 
index 3c0e8fb2b773ebaf3bdfe404b2909d2d5ed48b4a..68e14afecac85b1d1fd0eff661f6a3aabe0217a6 100644 (file)
@@ -358,7 +358,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
        unsigned long pp, key;
        unsigned long v, orig_v, gr;
        __be64 *hptep;
-       int index;
+       long int index;
        int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
 
        if (kvm_is_radix(vcpu->kvm))
index 0af1c0aea1fe659fca4723e17a02cc17eb8fa08f..fd6e8c13685f4c0223749647ad04c34a71ad4589 100644 (file)
@@ -725,10 +725,10 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                                              gpa, shift);
                kvmppc_radix_tlbie_page(kvm, gpa, shift);
                if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) {
-                       unsigned long npages = 1;
+                       unsigned long psize = PAGE_SIZE;
                        if (shift)
-                               npages = 1ul << (shift - PAGE_SHIFT);
-                       kvmppc_update_dirty_map(memslot, gfn, npages);
+                               psize = 1ul << shift;
+                       kvmppc_update_dirty_map(memslot, gfn, psize);
                }
        }
        return 0;                               
index 4a892d894a0f1b2d96ed319ab0f4a622104c7526..dbd8f762140b6942b32f41a8466cb3bfc7aee840 100644 (file)
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
 
-static DEFINE_SPINLOCK(mmu_context_lock);
 static DEFINE_IDA(mmu_context_ida);
 
 static int alloc_context_id(int min_id, int max_id)
 {
-       int index, err;
-
-again:
-       if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
-               return -ENOMEM;
-
-       spin_lock(&mmu_context_lock);
-       err = ida_get_new_above(&mmu_context_ida, min_id, &index);
-       spin_unlock(&mmu_context_lock);
-
-       if (err == -EAGAIN)
-               goto again;
-       else if (err)
-               return err;
-
-       if (index > max_id) {
-               spin_lock(&mmu_context_lock);
-               ida_remove(&mmu_context_ida, index);
-               spin_unlock(&mmu_context_lock);
-               return -ENOMEM;
-       }
-
-       return index;
+       return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL);
 }
 
 void hash__reserve_context_id(int id)
 {
-       int rc, result = 0;
-
-       do {
-               if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
-                       break;
-
-               spin_lock(&mmu_context_lock);
-               rc = ida_get_new_above(&mmu_context_ida, id, &result);
-               spin_unlock(&mmu_context_lock);
-       } while (rc == -EAGAIN);
+       int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL);
 
        WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
 }
@@ -172,9 +140,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 
 void __destroy_context(int context_id)
 {
-       spin_lock(&mmu_context_lock);
-       ida_remove(&mmu_context_ida, context_id);
-       spin_unlock(&mmu_context_lock);
+       ida_free(&mmu_context_ida, context_id);
 }
 EXPORT_SYMBOL_GPL(__destroy_context);
 
@@ -182,13 +148,11 @@ static void destroy_contexts(mm_context_t *ctx)
 {
        int index, context_id;
 
-       spin_lock(&mmu_context_lock);
        for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
                context_id = ctx->extended_id[index];
                if (context_id)
-                       ida_remove(&mmu_context_ida, context_id);
+                       ida_free(&mmu_context_ida, context_id);
        }
-       spin_unlock(&mmu_context_lock);
 }
 
 static void pte_frag_destroy(void *pte_frag)
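
The conversion above drops the open-coded ida_pre_get()/ida_get_new_above() retry loop together with its spinlock: ida_alloc_range() and ida_free() do their own internal locking and return either a valid ID or a negative errno. A rough sketch of the same pattern in isolation (the ida name and range are illustrative only):

	#include <linux/idr.h>

	static DEFINE_IDA(example_ida);

	static int example_alloc_id(void)
	{
		/* Returns an ID in [1, 127], or -ENOMEM/-ENOSPC on failure. */
		return ida_alloc_range(&example_ida, 1, 127, GFP_KERNEL);
	}

	static void example_free_id(int id)
	{
		ida_free(&example_ida, id);
	}
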
index ff9f488123310b0503dcb1b139af6d3ee4afe567..e59e0e60e5b5755609f15fa8f2a87d0e497f2a04 100644 (file)
@@ -515,35 +515,17 @@ int init_winctx_regs(struct vas_window *window, struct vas_winctx *winctx)
        return 0;
 }
 
-static DEFINE_SPINLOCK(vas_ida_lock);
-
 static void vas_release_window_id(struct ida *ida, int winid)
 {
-       spin_lock(&vas_ida_lock);
-       ida_remove(ida, winid);
-       spin_unlock(&vas_ida_lock);
+       ida_free(ida, winid);
 }
 
 static int vas_assign_window_id(struct ida *ida)
 {
-       int rc, winid;
-
-       do {
-               rc = ida_pre_get(ida, GFP_KERNEL);
-               if (!rc)
-                       return -EAGAIN;
-
-               spin_lock(&vas_ida_lock);
-               rc = ida_get_new(ida, &winid);
-               spin_unlock(&vas_ida_lock);
-       } while (rc == -EAGAIN);
-
-       if (rc)
-               return rc;
+       int winid = ida_alloc_max(ida, VAS_WINDOWS_PER_CHIP - 1, GFP_KERNEL);
 
-       if (winid > VAS_WINDOWS_PER_CHIP) {
-               pr_err("Too many (%d) open windows\n", winid);
-               vas_release_window_id(ida, winid);
+       if (winid == -ENOSPC) {
+               pr_err("Too many (%d) open windows\n", VAS_WINDOWS_PER_CHIP);
                return -EAGAIN;
        }
 
index 9ddd88bb30b724c4683cd52a4195be604b1a57a4..61ec42405ec9630082beb941cb58a963127c13ad 100644 (file)
@@ -29,7 +29,7 @@ ifeq ($(CONFIG_ARCH_RV64I),y)
        KBUILD_CFLAGS   += $(call cc-ifversion, -ge, 0500, -DCONFIG_ARCH_SUPPORTS_INT128)
 
        KBUILD_MARCH = rv64im
-       LDFLAGS += -melf64lriscv
+       KBUILD_LDFLAGS += -melf64lriscv
 else
        BITS := 32
        UTS_MACHINE := riscv32
@@ -37,7 +37,7 @@ else
        KBUILD_CFLAGS += -mabi=ilp32
        KBUILD_AFLAGS += -mabi=ilp32
        KBUILD_MARCH = rv32im
-       LDFLAGS += -melf32lriscv
+       KBUILD_LDFLAGS += -melf32lriscv
 endif
 
 KBUILD_CFLAGS += -Wall
index c229509288ea2d77c9efe72cfb1ccb0105371296..439dc7072e05bf37a722bb983b69dabed39651fa 100644 (file)
 #ifndef _ASM_RISCV_TLB_H
 #define _ASM_RISCV_TLB_H
 
+struct mmu_gather;
+
+static void tlb_flush(struct mmu_gather *tlb);
+
 #include <asm-generic/tlb.h>
 
 static inline void tlb_flush(struct mmu_gather *tlb)
index db20dc630e7efbadeb8ebeb497d5685a3cbad682..aee6031230306a934747c64edb4b61f6928e9e8b 100644 (file)
@@ -85,15 +85,8 @@ atomic_t hart_lottery;
 #ifdef CONFIG_BLK_DEV_INITRD
 static void __init setup_initrd(void)
 {
-       extern char __initramfs_start[];
-       extern unsigned long __initramfs_size;
        unsigned long size;
 
-       if (__initramfs_size > 0) {
-               initrd_start = (unsigned long)(&__initramfs_start);
-               initrd_end = initrd_start + __initramfs_size;
-       }
-
        if (initrd_start >= initrd_end) {
                printk(KERN_INFO "initrd not found or empty");
                goto disable;
index 568026ccf6e8712fa59dc3d1e216924f6d1c5c4d..fb03a4482ad60ac7ca66f86429897275020c5ffd 100644 (file)
@@ -65,24 +65,11 @@ SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
 SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end,
        uintptr_t, flags)
 {
-#ifdef CONFIG_SMP
-       struct mm_struct *mm = current->mm;
-       bool local = (flags & SYS_RISCV_FLUSH_ICACHE_LOCAL) != 0;
-#endif
-
        /* Check the reserved flags. */
        if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL))
                return -EINVAL;
 
-       /*
-        * Without CONFIG_SMP flush_icache_mm is a just a flush_icache_all(),
-        * which generates unused variable warnings all over this function.
-        */
-#ifdef CONFIG_SMP
-       flush_icache_mm(mm, local);
-#else
-       flush_icache_all();
-#endif
+       flush_icache_mm(current->mm, flags & SYS_RISCV_FLUSH_ICACHE_LOCAL);
 
        return 0;
 }
index ba6d122526fb408f858214e4e52da10e50b49490..ee65185bbc807284b8729b5c9d52c129fafba30e 100644 (file)
@@ -11,7 +11,7 @@
 #
 
 LD_BFD         := elf64-s390
-LDFLAGS                := -m elf64_s390
+KBUILD_LDFLAGS := -m elf64_s390
 KBUILD_AFLAGS_MODULE += -fPIC
 KBUILD_CFLAGS_MODULE += -fPIC
 KBUILD_AFLAGS  += -m64
index 80b27294c1de0844f07d01c5aecbb5410a38602e..ab9a0ebecc199b52507246b47db7b79dd0420058 100644 (file)
@@ -208,7 +208,7 @@ static int cbc_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
                              walk->dst.virt.addr, walk->src.virt.addr, n);
                if (k)
                        ret = blkcipher_walk_done(desc, walk, nbytes - k);
-               if (n < k) {
+               if (k < n) {
                        if (__cbc_paes_set_key(ctx) != 0)
                                return blkcipher_walk_done(desc, walk, -EIO);
                        memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
index f31a15044c24a56875661aa6d3e195d75bd9ef9c..a8418e1379eb7ee08c92acd034eae000cb19c695 100644 (file)
@@ -16,7 +16,13 @@ typedef struct {
        unsigned long asce;
        unsigned long asce_limit;
        unsigned long vdso_base;
-       /* The mmu context allocates 4K page tables. */
+       /*
+        * The following bitfields need a down_write on the mm
+        * semaphore when they are written to. As they are only
+        * written once, they can be read without a lock.
+        *
+        * The mmu context allocates 4K page tables.
+        */
        unsigned int alloc_pgste:1;
        /* The mmu context uses extended page tables. */
        unsigned int has_pgste:1;
index 91ad4a9425c0b74c024f07fe8347651c91f4bfb7..f69333fd2fa3818c5eeb8bff9240f67e29f37ef0 100644 (file)
@@ -695,7 +695,9 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
                        r = -EINVAL;
                else {
                        r = 0;
+                       down_write(&kvm->mm->mmap_sem);
                        kvm->mm->context.allow_gmap_hpage_1m = 1;
+                       up_write(&kvm->mm->mmap_sem);
                        /*
                         * We might have to create fake 4k page
                         * tables. To avoid that the hardware works on
index d68f10441a164f2c22236adaa143edf6b1d0f7d0..8679bd74d337a583a3dde940d0cef1f427373a4b 100644 (file)
@@ -280,9 +280,11 @@ retry:
                        goto retry;
                }
        }
-       if (rc)
-               return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        up_read(&current->mm->mmap_sem);
+       if (rc == -EFAULT)
+               return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+       if (rc < 0)
+               return rc;
        vcpu->run->s.regs.gprs[reg1] &= ~0xff;
        vcpu->run->s.regs.gprs[reg1] |= key;
        return 0;
@@ -324,9 +326,11 @@ retry:
                        goto retry;
                }
        }
-       if (rc < 0)
-               return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        up_read(&current->mm->mmap_sem);
+       if (rc == -EFAULT)
+               return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+       if (rc < 0)
+               return rc;
        kvm_s390_set_psw_cc(vcpu, rc);
        return 0;
 }
@@ -390,12 +394,12 @@ static int handle_sske(struct kvm_vcpu *vcpu)
                                              FAULT_FLAG_WRITE, &unlocked);
                        rc = !rc ? -EAGAIN : rc;
                }
+               up_read(&current->mm->mmap_sem);
                if (rc == -EFAULT)
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-
-               up_read(&current->mm->mmap_sem);
-               if (rc >= 0)
-                       start += PAGE_SIZE;
+               if (rc < 0)
+                       return rc;
+               start += PAGE_SIZE;
        }
 
        if (m3 & (SSKE_MC | SSKE_MR)) {
@@ -1002,13 +1006,15 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
                                                      FAULT_FLAG_WRITE, &unlocked);
                                rc = !rc ? -EAGAIN : rc;
                        }
+                       up_read(&current->mm->mmap_sem);
                        if (rc == -EFAULT)
                                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-
-                       up_read(&current->mm->mmap_sem);
-                       if (rc >= 0)
-                               start += PAGE_SIZE;
+                       if (rc == -EAGAIN)
+                               continue;
+                       if (rc < 0)
+                               return rc;
                }
+               start += PAGE_SIZE;
        }
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
                if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
index 63844b95c22c9902df769928313b988ee7d7667b..a2b28cd1e3fedb2bdcc6dbb1cff530ba0e3371a7 100644 (file)
@@ -173,7 +173,8 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
                return set_validity_icpt(scb_s, 0x0039U);
 
        /* copy only the wrapping keys */
-       if (read_guest_real(vcpu, crycb_addr + 72, &vsie_page->crycb, 56))
+       if (read_guest_real(vcpu, crycb_addr + 72,
+                           vsie_page->crycb.dea_wrapping_key_mask, 56))
                return set_validity_icpt(scb_s, 0x0035U);
 
        scb_s->ecb3 |= ecb3_flags;
index 65300193b99f316c6d3f39c7db60f545272789c3..c521ade2557c5157f675edaa92c40f23585273fd 100644 (file)
@@ -122,11 +122,11 @@ endif
 ifdef CONFIG_CPU_LITTLE_ENDIAN
 ld-bfd                 := elf32-$(UTS_MACHINE)-linux
 LDFLAGS_vmlinux                += --defsym jiffies=jiffies_64 --oformat $(ld-bfd)
-LDFLAGS                        += -EL
+KBUILD_LDFLAGS         += -EL
 else
 ld-bfd                 := elf32-$(UTS_MACHINE)big-linux
 LDFLAGS_vmlinux                += --defsym jiffies=jiffies_64+4 --oformat $(ld-bfd)
-LDFLAGS                        += -EB
+KBUILD_LDFLAGS         += -EB
 endif
 
 export ld-bfd BITS
index e32ef20de567702368bcfbfd7de371150aeedd5a..048a033d6102739c2a2d16099aac733dbe864f3f 100644 (file)
@@ -21,7 +21,7 @@ ifeq ($(CONFIG_SPARC32),y)
 #
 
 CHECKFLAGS     += -D__sparc__
-LDFLAGS        := -m elf32_sparc
+KBUILD_LDFLAGS := -m elf32_sparc
 export BITS    := 32
 UTS_MACHINE    := sparc
 
@@ -40,7 +40,7 @@ else
 #
 
 CHECKFLAGS    += -D__sparc__ -D__sparc_v9__ -D__arch64__
-LDFLAGS       := -m elf64_sparc
+KBUILD_LDFLAGS := -m elf64_sparc
 export BITS   := 64
 UTS_MACHINE   := sparc64
 
index 3641a294ed5478ef3065d0cca08b6c54618fc0e2..e4abe9b8f97a60ba0304479e975b16442bfa9af7 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/irq.h>
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
+#include <linux/dma-mapping.h>
 #include <asm/leon.h>
 #include <asm/leon_amba.h>
 
@@ -381,6 +382,9 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
        else
                dev_set_name(&op->dev, "%08x", dp->phandle);
 
+       op->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       op->dev.dma_mask = &op->dev.coherent_dma_mask;
+
        if (of_device_register(op)) {
                printk("%s: Could not register of device.\n",
                       dp->full_name);
index 44e4d4435bed78032b1b7a0d9935e577ec196538..6df6086968c6a6064b2fe35f3c5ff03e7083fbc2 100644 (file)
@@ -2,6 +2,7 @@
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/of.h>
+#include <linux/dma-mapping.h>
 #include <linux/init.h>
 #include <linux/export.h>
 #include <linux/mod_devicetable.h>
@@ -675,6 +676,8 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
                dev_set_name(&op->dev, "root");
        else
                dev_set_name(&op->dev, "%08x", dp->phandle);
+       op->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       op->dev.dma_mask = &op->dev.coherent_dma_mask;
 
        if (of_device_register(op)) {
                printk("%s: Could not register of device.\n",
index 44ddc3e8fa662087bde49aea114c5e4a2f382e6b..ab1066c38944ee40bae5d76ca5d0da2d966bccea 100644 (file)
@@ -133,7 +133,7 @@ export LDS_ELF_FORMAT := $(ELF_FORMAT)
 # The wrappers will select whether using "malloc" or the kernel allocator.
 LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc
 
-LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt))
+LD_FLAGS_CMDLINE = $(foreach opt,$(KBUILD_LDFLAGS),-Wl,$(opt))
 
 # Used by link-vmlinux.sh which has special support for um link
 export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)
index c5ff296bc5d1252f6eaaf847755a3c88fed52c89..1a0be022f91d8d6d89bc154642e3bd29619e483c 100644 (file)
@@ -2843,7 +2843,7 @@ config X86_SYSFB
          This option, if enabled, marks VGA/VBE/EFI framebuffers as generic
          framebuffers so the new generic system-framebuffer drivers can be
          used on x86. If the framebuffer is not compatible with the generic
-         modes, it is adverticed as fallback platform framebuffer so legacy
+         modes, it is advertised as fallback platform framebuffer so legacy
          drivers like efifb, vesafb and uvesafb can pick it up.
          If this option is not selected, all system framebuffers are always
          marked as fallback platform framebuffers as usual.
index 7e3c07d6ad424b228b0c50f2370fdb195794b61c..8f6e7eb8ae9fc2342b79cb0fc65de922dee11f2e 100644 (file)
@@ -175,22 +175,6 @@ ifdef CONFIG_FUNCTION_GRAPH_TRACER
   endif
 endif
 
-ifndef CC_HAVE_ASM_GOTO
-  $(error Compiler lacks asm-goto support.)
-endif
-
-#
-# Jump labels need '-maccumulate-outgoing-args' for gcc < 4.5.2 to prevent a
-# GCC bug (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=46226).  There's no way
-# to test for this bug at compile-time because the test case needs to execute,
-# which is a no-go for cross compilers.  So check the GCC version instead.
-#
-ifdef CONFIG_JUMP_LABEL
-  ifneq ($(ACCUMULATE_OUTGOING_ARGS), 1)
-       ACCUMULATE_OUTGOING_ARGS = $(call cc-if-fullversion, -lt, 040502, 1)
-  endif
-endif
-
 ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
        # This compiler flag is not supported by Clang:
        KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,)
@@ -219,7 +203,7 @@ sha256_ni_instr :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA2
 KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr)
 KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr)
 
-LDFLAGS := -m elf_$(UTS_MACHINE)
+KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE)
 
 #
 # The 64-bit kernel must be aligned to 2MB.  Pass -z max-page-size=0x200000 to
@@ -227,7 +211,7 @@ LDFLAGS := -m elf_$(UTS_MACHINE)
 # by the linker.
 #
 ifdef CONFIG_X86_64
-LDFLAGS += $(call ld-option, -z max-page-size=0x200000)
+KBUILD_LDFLAGS += $(call ld-option, -z max-page-size=0x200000)
 endif
 
 # Speed up the build
@@ -312,6 +296,13 @@ PHONY += vdso_install
 vdso_install:
        $(Q)$(MAKE) $(build)=arch/x86/entry/vdso $@
 
+archprepare: checkbin
+checkbin:
+ifndef CC_HAVE_ASM_GOTO
+       @echo Compiler lacks asm-goto support.
+       @exit 1
+endif
+
 archclean:
        $(Q)rm -rf $(objtree)/arch/i386
        $(Q)rm -rf $(objtree)/arch/x86_64
index 5296f8c9e7f0dc13f46f5844ebfa8c37aab3b455..91085a08de6cbb1e42bc6b106346256586b6740e 100644 (file)
@@ -4,7 +4,7 @@ core-y += arch/x86/crypto/
 ifeq ($(CONFIG_X86_32),y)
 START := 0x8048000
 
-LDFLAGS                        += -m elf_i386
+KBUILD_LDFLAGS         += -m elf_i386
 ELF_ARCH               := i386
 ELF_FORMAT             := elf32-i386
 CHECKFLAGS     += -D__i386__
@@ -43,7 +43,7 @@ KBUILD_CFLAGS += -fno-builtin -m64
 
 CHECKFLAGS  += -m64 -D__x86_64__
 KBUILD_AFLAGS += -m64
-LDFLAGS += -m elf_x86_64
+KBUILD_LDFLAGS += -m elf_x86_64
 KBUILD_CPPFLAGS += -m64
 
 ELF_ARCH := i386:x86-64
index 169c2feda14a055472a5e81481c2a4d2bd5717b8..28764dacf0182f5ca5ae26e6a2ff6caf700655b5 100644 (file)
@@ -42,16 +42,16 @@ KBUILD_AFLAGS  := $(KBUILD_CFLAGS) -D__ASSEMBLY__
 GCOV_PROFILE := n
 UBSAN_SANITIZE :=n
 
-LDFLAGS := -m elf_$(UTS_MACHINE)
+KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE)
 # Compressed kernel should be built as PIE since it may be loaded at any
 # address by the bootloader.
 ifeq ($(CONFIG_X86_32),y)
-LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker)
+KBUILD_LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker)
 else
 # To build 64-bit compressed kernel as PIE, we disable relocation
 # overflow check to avoid relocation overflow error with a new linker
 # command-line option, -z noreloc-overflow.
-LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \
+KBUILD_LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \
        && echo "-z noreloc-overflow -pie --no-dynamic-linker")
 endif
 LDFLAGS_vmlinux := -T
index acd11b3bf639e0a50013014e5eaeca6b11083c28..2a356b948720e10f93d49765a071c3c6dd6edb36 100644 (file)
@@ -379,7 +379,6 @@ static int __init crypto_aegis128_aesni_module_init(void)
 {
        if (!boot_cpu_has(X86_FEATURE_XMM2) ||
            !boot_cpu_has(X86_FEATURE_AES) ||
-           !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
            !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
                return -ENODEV;
 
index 2071c3d1ae07575143cc4d6262e92eaeef9ba560..dbe8bb980da15c46cb3c815d0ab529cd2522a346 100644 (file)
@@ -379,7 +379,6 @@ static int __init crypto_aegis128l_aesni_module_init(void)
 {
        if (!boot_cpu_has(X86_FEATURE_XMM2) ||
            !boot_cpu_has(X86_FEATURE_AES) ||
-           !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
            !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
                return -ENODEV;
 
index b5f2a8fd5a713ca986e2d3ef24aa1b69d421ced2..8bebda2de92fe3f4453cb453596faeb4dfbfd6bb 100644 (file)
@@ -379,7 +379,6 @@ static int __init crypto_aegis256_aesni_module_init(void)
 {
        if (!boot_cpu_has(X86_FEATURE_XMM2) ||
            !boot_cpu_has(X86_FEATURE_AES) ||
-           !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
            !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
                return -ENODEV;
 
index 9bd139569b410d9e41cff15fb36aeb366930be95..cb2deb61c5d96dc53aabdeb7bd880ed436f75db7 100644 (file)
@@ -223,34 +223,34 @@ ALL_F:      .octa 0xffffffffffffffffffffffffffffffff
        pcmpeqd TWOONE(%rip), \TMP2
        pand    POLY(%rip), \TMP2
        pxor    \TMP2, \TMP3
-       movdqa  \TMP3, HashKey(%arg2)
+       movdqu  \TMP3, HashKey(%arg2)
 
        movdqa     \TMP3, \TMP5
        pshufd     $78, \TMP3, \TMP1
        pxor       \TMP3, \TMP1
-       movdqa     \TMP1, HashKey_k(%arg2)
+       movdqu     \TMP1, HashKey_k(%arg2)
 
        GHASH_MUL  \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
 # TMP5 = HashKey^2<<1 (mod poly)
-       movdqa     \TMP5, HashKey_2(%arg2)
+       movdqu     \TMP5, HashKey_2(%arg2)
 # HashKey_2 = HashKey^2<<1 (mod poly)
        pshufd     $78, \TMP5, \TMP1
        pxor       \TMP5, \TMP1
-       movdqa     \TMP1, HashKey_2_k(%arg2)
+       movdqu     \TMP1, HashKey_2_k(%arg2)
 
        GHASH_MUL  \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
 # TMP5 = HashKey^3<<1 (mod poly)
-       movdqa     \TMP5, HashKey_3(%arg2)
+       movdqu     \TMP5, HashKey_3(%arg2)
        pshufd     $78, \TMP5, \TMP1
        pxor       \TMP5, \TMP1
-       movdqa     \TMP1, HashKey_3_k(%arg2)
+       movdqu     \TMP1, HashKey_3_k(%arg2)
 
        GHASH_MUL  \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
 # TMP5 = HashKey^3<<1 (mod poly)
-       movdqa     \TMP5, HashKey_4(%arg2)
+       movdqu     \TMP5, HashKey_4(%arg2)
        pshufd     $78, \TMP5, \TMP1
        pxor       \TMP5, \TMP1
-       movdqa     \TMP1, HashKey_4_k(%arg2)
+       movdqu     \TMP1, HashKey_4_k(%arg2)
 .endm
 
 # GCM_INIT initializes a gcm_context struct to prepare for encoding/decoding.
@@ -271,7 +271,7 @@ ALL_F:      .octa 0xffffffffffffffffffffffffffffffff
        movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv
 
        PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
-       movdqa HashKey(%arg2), %xmm13
+       movdqu HashKey(%arg2), %xmm13
 
        CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \
        %xmm4, %xmm5, %xmm6
@@ -997,7 +997,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        pshufd    $78, \XMM5, \TMP6
        pxor      \XMM5, \TMP6
        paddd     ONE(%rip), \XMM0              # INCR CNT
-       movdqa    HashKey_4(%arg2), \TMP5
+       movdqu    HashKey_4(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP4           # TMP4 = a1*b1
        movdqa    \XMM0, \XMM1
        paddd     ONE(%rip), \XMM0              # INCR CNT
@@ -1016,7 +1016,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        pxor      (%arg1), \XMM2
        pxor      (%arg1), \XMM3
        pxor      (%arg1), \XMM4
-       movdqa    HashKey_4_k(%arg2), \TMP5
+       movdqu    HashKey_4_k(%arg2), \TMP5
        PCLMULQDQ 0x00, \TMP5, \TMP6           # TMP6 = (a1+a0)*(b1+b0)
        movaps 0x10(%arg1), \TMP1
        AESENC    \TMP1, \XMM1              # Round 1
@@ -1031,7 +1031,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        movdqa    \XMM6, \TMP1
        pshufd    $78, \XMM6, \TMP2
        pxor      \XMM6, \TMP2
-       movdqa    HashKey_3(%arg2), \TMP5
+       movdqu    HashKey_3(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP1           # TMP1 = a1 * b1
        movaps 0x30(%arg1), \TMP3
        AESENC    \TMP3, \XMM1              # Round 3
@@ -1044,7 +1044,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        AESENC    \TMP3, \XMM2
        AESENC    \TMP3, \XMM3
        AESENC    \TMP3, \XMM4
-       movdqa    HashKey_3_k(%arg2), \TMP5
+       movdqu    HashKey_3_k(%arg2), \TMP5
        PCLMULQDQ 0x00, \TMP5, \TMP2           # TMP2 = (a1+a0)*(b1+b0)
        movaps 0x50(%arg1), \TMP3
        AESENC    \TMP3, \XMM1              # Round 5
@@ -1058,7 +1058,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        movdqa    \XMM7, \TMP1
        pshufd    $78, \XMM7, \TMP2
        pxor      \XMM7, \TMP2
-       movdqa    HashKey_2(%arg2), \TMP5
+       movdqu    HashKey_2(%arg2), \TMP5
 
         # Multiply TMP5 * HashKey using karatsuba
 
@@ -1074,7 +1074,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        AESENC    \TMP3, \XMM2
        AESENC    \TMP3, \XMM3
        AESENC    \TMP3, \XMM4
-       movdqa    HashKey_2_k(%arg2), \TMP5
+       movdqu    HashKey_2_k(%arg2), \TMP5
        PCLMULQDQ 0x00, \TMP5, \TMP2           # TMP2 = (a1+a0)*(b1+b0)
        movaps 0x80(%arg1), \TMP3
        AESENC    \TMP3, \XMM1             # Round 8
@@ -1092,7 +1092,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        movdqa    \XMM8, \TMP1
        pshufd    $78, \XMM8, \TMP2
        pxor      \XMM8, \TMP2
-       movdqa    HashKey(%arg2), \TMP5
+       movdqu    HashKey(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP1          # TMP1 = a1*b1
        movaps 0x90(%arg1), \TMP3
        AESENC    \TMP3, \XMM1            # Round 9
@@ -1121,7 +1121,7 @@ aes_loop_par_enc_done\@:
        AESENCLAST \TMP3, \XMM2
        AESENCLAST \TMP3, \XMM3
        AESENCLAST \TMP3, \XMM4
-       movdqa    HashKey_k(%arg2), \TMP5
+       movdqu    HashKey_k(%arg2), \TMP5
        PCLMULQDQ 0x00, \TMP5, \TMP2          # TMP2 = (a1+a0)*(b1+b0)
        movdqu    (%arg4,%r11,1), \TMP3
        pxor      \TMP3, \XMM1                 # Ciphertext/Plaintext XOR EK
@@ -1205,7 +1205,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        pshufd    $78, \XMM5, \TMP6
        pxor      \XMM5, \TMP6
        paddd     ONE(%rip), \XMM0              # INCR CNT
-       movdqa    HashKey_4(%arg2), \TMP5
+       movdqu    HashKey_4(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP4           # TMP4 = a1*b1
        movdqa    \XMM0, \XMM1
        paddd     ONE(%rip), \XMM0              # INCR CNT
@@ -1224,7 +1224,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        pxor      (%arg1), \XMM2
        pxor      (%arg1), \XMM3
        pxor      (%arg1), \XMM4
-       movdqa    HashKey_4_k(%arg2), \TMP5
+       movdqu    HashKey_4_k(%arg2), \TMP5
        PCLMULQDQ 0x00, \TMP5, \TMP6           # TMP6 = (a1+a0)*(b1+b0)
        movaps 0x10(%arg1), \TMP1
        AESENC    \TMP1, \XMM1              # Round 1
@@ -1239,7 +1239,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        movdqa    \XMM6, \TMP1
        pshufd    $78, \XMM6, \TMP2
        pxor      \XMM6, \TMP2
-       movdqa    HashKey_3(%arg2), \TMP5
+       movdqu    HashKey_3(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP1           # TMP1 = a1 * b1
        movaps 0x30(%arg1), \TMP3
        AESENC    \TMP3, \XMM1              # Round 3
@@ -1252,7 +1252,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        AESENC    \TMP3, \XMM2
        AESENC    \TMP3, \XMM3
        AESENC    \TMP3, \XMM4
-       movdqa    HashKey_3_k(%arg2), \TMP5
+       movdqu    HashKey_3_k(%arg2), \TMP5
        PCLMULQDQ 0x00, \TMP5, \TMP2           # TMP2 = (a1+a0)*(b1+b0)
        movaps 0x50(%arg1), \TMP3
        AESENC    \TMP3, \XMM1              # Round 5
@@ -1266,7 +1266,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        movdqa    \XMM7, \TMP1
        pshufd    $78, \XMM7, \TMP2
        pxor      \XMM7, \TMP2
-       movdqa    HashKey_2(%arg2), \TMP5
+       movdqu    HashKey_2(%arg2), \TMP5
 
         # Multiply TMP5 * HashKey using karatsuba
 
@@ -1282,7 +1282,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        AESENC    \TMP3, \XMM2
        AESENC    \TMP3, \XMM3
        AESENC    \TMP3, \XMM4
-       movdqa    HashKey_2_k(%arg2), \TMP5
+       movdqu    HashKey_2_k(%arg2), \TMP5
        PCLMULQDQ 0x00, \TMP5, \TMP2           # TMP2 = (a1+a0)*(b1+b0)
        movaps 0x80(%arg1), \TMP3
        AESENC    \TMP3, \XMM1             # Round 8
@@ -1300,7 +1300,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        movdqa    \XMM8, \TMP1
        pshufd    $78, \XMM8, \TMP2
        pxor      \XMM8, \TMP2
-       movdqa    HashKey(%arg2), \TMP5
+       movdqu    HashKey(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP1          # TMP1 = a1*b1
        movaps 0x90(%arg1), \TMP3
        AESENC    \TMP3, \XMM1            # Round 9
@@ -1329,7 +1329,7 @@ aes_loop_par_dec_done\@:
        AESENCLAST \TMP3, \XMM2
        AESENCLAST \TMP3, \XMM3
        AESENCLAST \TMP3, \XMM4
-       movdqa    HashKey_k(%arg2), \TMP5
+       movdqu    HashKey_k(%arg2), \TMP5
        PCLMULQDQ 0x00, \TMP5, \TMP2          # TMP2 = (a1+a0)*(b1+b0)
        movdqu    (%arg4,%r11,1), \TMP3
        pxor      \TMP3, \XMM1                 # Ciphertext/Plaintext XOR EK
@@ -1405,10 +1405,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
        movdqa    \XMM1, \TMP6
        pshufd    $78, \XMM1, \TMP2
        pxor      \XMM1, \TMP2
-       movdqa    HashKey_4(%arg2), \TMP5
+       movdqu    HashKey_4(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP6       # TMP6 = a1*b1
        PCLMULQDQ 0x00, \TMP5, \XMM1       # XMM1 = a0*b0
-       movdqa    HashKey_4_k(%arg2), \TMP4
+       movdqu    HashKey_4_k(%arg2), \TMP4
        PCLMULQDQ 0x00, \TMP4, \TMP2       # TMP2 = (a1+a0)*(b1+b0)
        movdqa    \XMM1, \XMMDst
        movdqa    \TMP2, \XMM1              # result in TMP6, XMMDst, XMM1
@@ -1418,10 +1418,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
        movdqa    \XMM2, \TMP1
        pshufd    $78, \XMM2, \TMP2
        pxor      \XMM2, \TMP2
-       movdqa    HashKey_3(%arg2), \TMP5
+       movdqu    HashKey_3(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP1       # TMP1 = a1*b1
        PCLMULQDQ 0x00, \TMP5, \XMM2       # XMM2 = a0*b0
-       movdqa    HashKey_3_k(%arg2), \TMP4
+       movdqu    HashKey_3_k(%arg2), \TMP4
        PCLMULQDQ 0x00, \TMP4, \TMP2       # TMP2 = (a1+a0)*(b1+b0)
        pxor      \TMP1, \TMP6
        pxor      \XMM2, \XMMDst
@@ -1433,10 +1433,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
        movdqa    \XMM3, \TMP1
        pshufd    $78, \XMM3, \TMP2
        pxor      \XMM3, \TMP2
-       movdqa    HashKey_2(%arg2), \TMP5
+       movdqu    HashKey_2(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP1       # TMP1 = a1*b1
        PCLMULQDQ 0x00, \TMP5, \XMM3       # XMM3 = a0*b0
-       movdqa    HashKey_2_k(%arg2), \TMP4
+       movdqu    HashKey_2_k(%arg2), \TMP4
        PCLMULQDQ 0x00, \TMP4, \TMP2       # TMP2 = (a1+a0)*(b1+b0)
        pxor      \TMP1, \TMP6
        pxor      \XMM3, \XMMDst
@@ -1446,10 +1446,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
        movdqa    \XMM4, \TMP1
        pshufd    $78, \XMM4, \TMP2
        pxor      \XMM4, \TMP2
-       movdqa    HashKey(%arg2), \TMP5
+       movdqu    HashKey(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP1        # TMP1 = a1*b1
        PCLMULQDQ 0x00, \TMP5, \XMM4       # XMM4 = a0*b0
-       movdqa    HashKey_k(%arg2), \TMP4
+       movdqu    HashKey_k(%arg2), \TMP4
        PCLMULQDQ 0x00, \TMP4, \TMP2       # TMP2 = (a1+a0)*(b1+b0)
        pxor      \TMP1, \TMP6
        pxor      \XMM4, \XMMDst
index 95cf857d2cbb1943ba8ce356c48416837f1655d9..f40244eaf14d23211c122e309247865ebcbe9965 100644 (file)
@@ -40,7 +40,6 @@ MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350);
 static int __init crypto_morus1280_sse2_module_init(void)
 {
        if (!boot_cpu_has(X86_FEATURE_XMM2) ||
-           !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
            !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
                return -ENODEV;
 
index 615fb7bc9a323d949d038a8496125eab0c4bc4ba..9afaf8f8565a6eb9dc7b29a99c68ca1b153fad48 100644 (file)
@@ -40,7 +40,6 @@ MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400);
 static int __init crypto_morus640_sse2_module_init(void)
 {
        if (!boot_cpu_has(X86_FEATURE_XMM2) ||
-           !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
            !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
                return -ENODEV;
 
index 9f695f517747e409868226b0c7aea4afb29215ec..fa3f439f0a9200321a96efd2ba874dbda4824cff 100644 (file)
@@ -68,9 +68,9 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
 CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
        $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
        -fno-omit-frame-pointer -foptimize-sibling-calls \
-       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
+       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(RETPOLINE_VDSO_CFLAGS)
 
-$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
+$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
 
 #
 # vDSO code runs in userspace and -pg doesn't help with profiling anyway.
@@ -132,11 +132,13 @@ KBUILD_CFLAGS_32 := $(filter-out -mcmodel=kernel,$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out -mfentry,$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 := $(filter-out $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic
 KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
 KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
 KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
 KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
+KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
 $(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
 
 $(obj)/vdso32.so.dbg: FORCE \
index 5f4829f10129c5bfd59b803d0ddd4a21be0b3107..dfb2f7c0d0192bcd16569d03badd498f355accf7 100644 (file)
@@ -2465,7 +2465,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
 
        perf_callchain_store(entry, regs->ip);
 
-       if (!current->mm)
+       if (!nmi_uaccess_okay())
                return;
 
        if (perf_callchain_user32(regs, entry))
index f3e006bed9a75f94aa8bb9aa388d3037a2d71cbe..c88ed39582a10095b41b364bf6532b0f5298c6e2 100644 (file)
@@ -1272,4 +1272,8 @@ void intel_pmu_lbr_init_knl(void)
 
        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
        x86_pmu.lbr_sel_map  = snb_lbr_sel_map;
+
+       /* Knights Landing does have MISPREDICT bit */
+       if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP)
+               x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
 }
index b143717b92b3447c480495e23be68470bc7bf463..ce84388e540c918a01e31599bc78881a5b9d2954 100644 (file)
@@ -80,11 +80,11 @@ static __always_inline void arch_atomic_sub(int i, atomic_t *v)
  * true if the result is zero, or false for all
  * other cases.
  */
-#define arch_atomic_sub_and_test arch_atomic_sub_and_test
 static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
 }
+#define arch_atomic_sub_and_test arch_atomic_sub_and_test
 
 /**
  * arch_atomic_inc - increment atomic variable
@@ -92,12 +92,12 @@ static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
  *
  * Atomically increments @v by 1.
  */
-#define arch_atomic_inc arch_atomic_inc
 static __always_inline void arch_atomic_inc(atomic_t *v)
 {
        asm volatile(LOCK_PREFIX "incl %0"
                     : "+m" (v->counter));
 }
+#define arch_atomic_inc arch_atomic_inc
 
 /**
  * arch_atomic_dec - decrement atomic variable
@@ -105,12 +105,12 @@ static __always_inline void arch_atomic_inc(atomic_t *v)
  *
  * Atomically decrements @v by 1.
  */
-#define arch_atomic_dec arch_atomic_dec
 static __always_inline void arch_atomic_dec(atomic_t *v)
 {
        asm volatile(LOCK_PREFIX "decl %0"
                     : "+m" (v->counter));
 }
+#define arch_atomic_dec arch_atomic_dec
 
 /**
  * arch_atomic_dec_and_test - decrement and test
@@ -120,11 +120,11 @@ static __always_inline void arch_atomic_dec(atomic_t *v)
  * returns true if the result is 0, or false for all other
  * cases.
  */
-#define arch_atomic_dec_and_test arch_atomic_dec_and_test
 static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
 {
        GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
 }
+#define arch_atomic_dec_and_test arch_atomic_dec_and_test
 
 /**
  * arch_atomic_inc_and_test - increment and test
@@ -134,11 +134,11 @@ static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-#define arch_atomic_inc_and_test arch_atomic_inc_and_test
 static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
 {
        GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
 }
+#define arch_atomic_inc_and_test arch_atomic_inc_and_test
 
 /**
  * arch_atomic_add_negative - add and test if negative
@@ -149,11 +149,11 @@ static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-#define arch_atomic_add_negative arch_atomic_add_negative
 static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
 }
+#define arch_atomic_add_negative arch_atomic_add_negative
 
 /**
  * arch_atomic_add_return - add integer and return
index ef959f02d0702b8d16979f1dd3ad6c1f405363e9..6a5b0ec460da8c5c6f4fe83074c6b2ec916bae80 100644 (file)
@@ -205,12 +205,12 @@ static inline long long arch_atomic64_sub(long long i, atomic64_t *v)
  *
  * Atomically increments @v by 1.
  */
-#define arch_atomic64_inc arch_atomic64_inc
 static inline void arch_atomic64_inc(atomic64_t *v)
 {
        __alternative_atomic64(inc, inc_return, /* no output */,
                               "S" (v) : "memory", "eax", "ecx", "edx");
 }
+#define arch_atomic64_inc arch_atomic64_inc
 
 /**
  * arch_atomic64_dec - decrement atomic64 variable
@@ -218,12 +218,12 @@ static inline void arch_atomic64_inc(atomic64_t *v)
  *
  * Atomically decrements @v by 1.
  */
-#define arch_atomic64_dec arch_atomic64_dec
 static inline void arch_atomic64_dec(atomic64_t *v)
 {
        __alternative_atomic64(dec, dec_return, /* no output */,
                               "S" (v) : "memory", "eax", "ecx", "edx");
 }
+#define arch_atomic64_dec arch_atomic64_dec
 
 /**
  * arch_atomic64_add_unless - add unless the number is a given value
@@ -245,7 +245,6 @@ static inline int arch_atomic64_add_unless(atomic64_t *v, long long a,
        return (int)a;
 }
 
-#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
 static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
 {
        int r;
@@ -253,8 +252,8 @@ static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
                             "S" (v) : "ecx", "edx", "memory");
        return r;
 }
+#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
 
-#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
 static inline long long arch_atomic64_dec_if_positive(atomic64_t *v)
 {
        long long r;
@@ -262,6 +261,7 @@ static inline long long arch_atomic64_dec_if_positive(atomic64_t *v)
                             "S" (v) : "ecx", "memory");
        return r;
 }
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
 
 #undef alternative_atomic64
 #undef __alternative_atomic64
index 4343d9b4f30e32056d5f6d5d1d6a273b556a3bba..5f851d92eecd9ee8eaad86c6b002937633e9144f 100644 (file)
@@ -71,11 +71,11 @@ static inline void arch_atomic64_sub(long i, atomic64_t *v)
  * true if the result is zero, or false for all
  * other cases.
  */
-#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
 static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
 }
+#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
 
 /**
  * arch_atomic64_inc - increment atomic64 variable
@@ -83,13 +83,13 @@ static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
  *
  * Atomically increments @v by 1.
  */
-#define arch_atomic64_inc arch_atomic64_inc
 static __always_inline void arch_atomic64_inc(atomic64_t *v)
 {
        asm volatile(LOCK_PREFIX "incq %0"
                     : "=m" (v->counter)
                     : "m" (v->counter));
 }
+#define arch_atomic64_inc arch_atomic64_inc
 
 /**
  * arch_atomic64_dec - decrement atomic64 variable
@@ -97,13 +97,13 @@ static __always_inline void arch_atomic64_inc(atomic64_t *v)
  *
  * Atomically decrements @v by 1.
  */
-#define arch_atomic64_dec arch_atomic64_dec
 static __always_inline void arch_atomic64_dec(atomic64_t *v)
 {
        asm volatile(LOCK_PREFIX "decq %0"
                     : "=m" (v->counter)
                     : "m" (v->counter));
 }
+#define arch_atomic64_dec arch_atomic64_dec
 
 /**
  * arch_atomic64_dec_and_test - decrement and test
@@ -113,11 +113,11 @@ static __always_inline void arch_atomic64_dec(atomic64_t *v)
  * returns true if the result is 0, or false for all other
  * cases.
  */
-#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
 static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
 {
        GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
 }
+#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
 
 /**
  * arch_atomic64_inc_and_test - increment and test
@@ -127,11 +127,11 @@ static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
 static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
 {
        GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
 }
+#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
 
 /**
  * arch_atomic64_add_negative - add and test if negative
@@ -142,11 +142,11 @@ static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-#define arch_atomic64_add_negative arch_atomic64_add_negative
 static inline bool arch_atomic64_add_negative(long i, atomic64_t *v)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
 }
+#define arch_atomic64_add_negative arch_atomic64_add_negative
 
 /**
  * arch_atomic64_add_return - add and return
index c14f2a74b2be7495f1ee00c92322a58cc43d10a6..15450a675031d3562b4a9da3d3b15816c11003fb 100644 (file)
@@ -33,7 +33,8 @@ extern inline unsigned long native_save_fl(void)
        return flags;
 }
 
-static inline void native_restore_fl(unsigned long flags)
+extern inline void native_restore_fl(unsigned long flags);
+extern inline void native_restore_fl(unsigned long flags)
 {
        asm volatile("push %0 ; popf"
                     : /* no output */
index 395c9631e000a3a17aa574c1b25fcc2cafd5b5fb..75f1e35e7c1537d8323cf7ce17d05c95759c350b 100644 (file)
@@ -22,10 +22,20 @@ enum die_val {
        DIE_NMIUNKNOWN,
 };
 
+enum show_regs_mode {
+       SHOW_REGS_SHORT,
+       /*
+        * For when userspace crashed, but we don't think it's our fault, and
+        * therefore don't print kernel registers.
+        */
+       SHOW_REGS_USER,
+       SHOW_REGS_ALL
+};
+
 extern void die(const char *, struct pt_regs *,long);
 extern int __must_check __die(const char *, struct pt_regs *, long);
 extern void show_stack_regs(struct pt_regs *regs);
-extern void __show_regs(struct pt_regs *regs, int all);
+extern void __show_regs(struct pt_regs *regs, enum show_regs_mode);
 extern void show_iret_regs(struct pt_regs *regs);
 extern unsigned long oops_begin(void);
 extern void oops_end(unsigned long, struct pt_regs *, int signr);
index 00ddb0c9e612a6e084298ef0f7372b924b76ba12..8e90488c3d56895f62080666e2141df1731092f3 100644 (file)
@@ -1237,19 +1237,12 @@ enum emulation_result {
 #define EMULTYPE_NO_DECODE         (1 << 0)
 #define EMULTYPE_TRAP_UD           (1 << 1)
 #define EMULTYPE_SKIP              (1 << 2)
-#define EMULTYPE_RETRY             (1 << 3)
-#define EMULTYPE_NO_REEXECUTE      (1 << 4)
-#define EMULTYPE_NO_UD_ON_FAIL     (1 << 5)
-#define EMULTYPE_VMWARE                    (1 << 6)
-int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
-                           int emulation_type, void *insn, int insn_len);
-
-static inline int emulate_instruction(struct kvm_vcpu *vcpu,
-                       int emulation_type)
-{
-       return x86_emulate_instruction(vcpu, 0,
-                       emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0);
-}
+#define EMULTYPE_ALLOW_RETRY       (1 << 3)
+#define EMULTYPE_NO_UD_ON_FAIL     (1 << 4)
+#define EMULTYPE_VMWARE                    (1 << 5)
+int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
+int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
+                                       void *insn, int insn_len);
 
 void kvm_enable_efer_bits(u64);
 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
@@ -1450,7 +1443,6 @@ asmlinkage void kvm_spurious_fault(void);
        ____kvm_handle_fault_on_reboot(insn, "")
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
@@ -1463,7 +1455,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
 
 int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
-                   unsigned long ipi_bitmap_high, int min,
+                   unsigned long ipi_bitmap_high, u32 min,
                    unsigned long icr, int op_64_bit);
 
 u64 kvm_get_arch_capabilities(void);
index 8c7b3e5a2d019262eafbb9ccd0514f3278c9255e..3a17107594c88a83f013ecc1123c10d76c6d30c2 100644 (file)
@@ -148,6 +148,7 @@ enum mce_notifier_prios {
        MCE_PRIO_LOWEST         = 0,
 };
 
+struct notifier_block;
 extern void mce_register_decode_chain(struct notifier_block *nb);
 extern void mce_unregister_decode_chain(struct notifier_block *nb);
 
index 24c6cf5f16b72eb536a9ee279b15e54f0d22af80..60d0f90153178b3fb104360536854d676f7429ee 100644 (file)
@@ -19,9 +19,6 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
 
 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
-       pmd.pud.p4d.pgd = pti_set_user_pgtbl(&pmdp->pud.p4d.pgd, pmd.pud.p4d.pgd);
-#endif
        *pmdp = pmd;
 }
 
@@ -61,9 +58,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
 #ifdef CONFIG_SMP
 static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
 {
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
-       pti_set_user_pgtbl(&xp->pud.p4d.pgd, __pgd(0));
-#endif
        return __pmd(xchg((pmdval_t *)xp, 0));
 }
 #else
@@ -73,9 +67,6 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
 #ifdef CONFIG_SMP
 static inline pud_t native_pudp_get_and_clear(pud_t *xp)
 {
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
-       pti_set_user_pgtbl(&xp->p4d.pgd, __pgd(0));
-#endif
        return __pud(xchg((pudval_t *)xp, 0));
 }
 #else
index a564084c6141d42f603b257160a34685e1bffdbe..f8b1ad2c38280c823c103b5d3f6326a340c1060f 100644 (file)
@@ -2,6 +2,8 @@
 #ifndef _ASM_X86_PGTABLE_3LEVEL_H
 #define _ASM_X86_PGTABLE_3LEVEL_H
 
+#include <asm/atomic64_32.h>
+
 /*
  * Intel Physical Address Extension (PAE) Mode - three-level page
  * tables on PPro+ CPUs.
@@ -150,10 +152,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
 {
        pte_t res;
 
-       /* xchg acts as a barrier before the setting of the high bits */
-       res.pte_low = xchg(&ptep->pte_low, 0);
-       res.pte_high = ptep->pte_high;
-       ptep->pte_high = 0;
+       res.pte = (pteval_t)arch_atomic64_xchg((atomic64_t *)ptep, 0);
 
        return res;
 }
index e4ffa565a69f0633cab94325bd894eac4bbe6a19..690c0307afed0932974e5965ccf7bed98daa10f9 100644 (file)
@@ -1195,7 +1195,7 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
                return xchg(pmdp, pmd);
        } else {
                pmd_t old = *pmdp;
-               *pmdp = pmd;
+               WRITE_ONCE(*pmdp, pmd);
                return old;
        }
 }
index f773d5e6c8cc465da7069987f9f24d6507c259af..ce2b59047cb839053043508d75ea47f8a726c8b9 100644 (file)
@@ -55,15 +55,15 @@ struct mm_struct;
 void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
 void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
 
-static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
-                                   pte_t *ptep)
+static inline void native_set_pte(pte_t *ptep, pte_t pte)
 {
-       *ptep = native_make_pte(0);
+       WRITE_ONCE(*ptep, pte);
 }
 
-static inline void native_set_pte(pte_t *ptep, pte_t pte)
+static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
+                                   pte_t *ptep)
 {
-       *ptep = pte;
+       native_set_pte(ptep, native_make_pte(0));
 }
 
 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
@@ -73,7 +73,7 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 
 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
-       *pmdp = pmd;
+       WRITE_ONCE(*pmdp, pmd);
 }
 
 static inline void native_pmd_clear(pmd_t *pmd)
@@ -109,7 +109,7 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
 
 static inline void native_set_pud(pud_t *pudp, pud_t pud)
 {
-       *pudp = pud;
+       WRITE_ONCE(*pudp, pud);
 }
 
 static inline void native_pud_clear(pud_t *pud)
@@ -137,13 +137,13 @@ static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
        pgd_t pgd;
 
        if (pgtable_l5_enabled() || !IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) {
-               *p4dp = p4d;
+               WRITE_ONCE(*p4dp, p4d);
                return;
        }
 
        pgd = native_make_pgd(native_p4d_val(p4d));
        pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd);
-       *p4dp = native_make_p4d(native_pgd_val(pgd));
+       WRITE_ONCE(*p4dp, native_make_p4d(native_pgd_val(pgd)));
 }
 
 static inline void native_p4d_clear(p4d_t *p4d)
@@ -153,7 +153,7 @@ static inline void native_p4d_clear(p4d_t *p4d)
 
 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
-       *pgdp = pti_set_user_pgtbl(pgdp, pgd);
+       WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd));
 }
 
 static inline void native_pgd_clear(pgd_t *pgd)
index 682286aca88141c5ec5315597020f6e7e0c9b0fc..d53c54b842daca1c847d8076aadd89564b23cf00 100644 (file)
@@ -132,6 +132,8 @@ struct cpuinfo_x86 {
        /* Index into per_cpu list: */
        u16                     cpu_index;
        u32                     microcode;
+       /* Address space bits used by the cache internally */
+       u8                      x86_cache_bits;
        unsigned                initialized : 1;
 } __randomize_layout;
 
@@ -181,9 +183,9 @@ extern const struct seq_operations cpuinfo_op;
 
 extern void cpu_detect(struct cpuinfo_x86 *c);
 
-static inline unsigned long l1tf_pfn_limit(void)
+static inline unsigned long long l1tf_pfn_limit(void)
 {
-       return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1;
+       return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
 }
 
 extern void early_cpu_init(void);
index 34cffcef7375dfa15cb30832972aa3d71e86d678..07a25753e85c5cd53b2613a71db91862fa31684f 100644 (file)
@@ -89,4 +89,46 @@ extern int kernel_set_to_readonly;
 void set_kernel_text_rw(void);
 void set_kernel_text_ro(void);
 
+#ifdef CONFIG_X86_64
+static inline int set_mce_nospec(unsigned long pfn)
+{
+       unsigned long decoy_addr;
+       int rc;
+
+       /*
+        * Mark the linear address as UC to make sure we don't log more
+        * errors because of speculative access to the page.
+        * We would like to just call:
+        *      set_memory_uc((unsigned long)pfn_to_kaddr(pfn), 1);
+        * but doing that would radically increase the odds of a
+        * speculative access to the poison page because we'd have
+        * the virtual address of the kernel 1:1 mapping sitting
+        * around in registers.
+        * Instead we get tricky.  We create a non-canonical address
+        * that looks just like the one we want, but has bit 63 flipped.
+        * This relies on set_memory_uc() properly sanitizing any __pa()
+        * results with __PHYSICAL_MASK or PTE_PFN_MASK.
+        */
+       decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));
+
+       rc = set_memory_uc(decoy_addr, 1);
+       if (rc)
+               pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
+       return rc;
+}
+#define set_mce_nospec set_mce_nospec
+
+/* Restore full speculative operation to the pfn. */
+static inline int clear_mce_nospec(unsigned long pfn)
+{
+       return set_memory_wb((unsigned long) pfn_to_kaddr(pfn), 1);
+}
+#define clear_mce_nospec clear_mce_nospec
+#else
+/*
+ * Few people would run a 32-bit kernel on a machine that supports
+ * recoverable errors because they have too much memory to boot 32-bit.
+ */
+#endif
+
 #endif /* _ASM_X86_SET_MEMORY_H */
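
The decoy-address trick in set_mce_nospec() above amounts to flipping bit 63 of the 1:1-mapping virtual address, so the canonical direct-map address never has to sit in a register. A standalone sketch of the arithmetic, assuming 4K pages and a direct-map base of 0xffff888000000000 (the real PAGE_OFFSET depends on the kernel configuration):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PAGE_OFFSET 0xffff888000000000ULL   /* assumed direct-map base */
    #define BIT63       (1ULL << 63)

    int main(void)
    {
            uint64_t pfn   = 0x12345;                            /* example poisoned pfn */
            uint64_t kaddr = PAGE_OFFSET + (pfn << PAGE_SHIFT);  /* pfn_to_kaddr(pfn) */
            uint64_t decoy = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT63);

            /* Same low bits, bit 63 flipped: the decoy is non-canonical and
             * relies on set_memory_uc() masking __pa() results, as the
             * comment above notes. */
            printf("1:1 map address: 0x%016llx\n", (unsigned long long)kaddr);
            printf("decoy address:   0x%016llx\n", (unsigned long long)decoy);
            printf("differ only in bit 63: %s\n",
                   ((kaddr ^ decoy) == BIT63) ? "yes" : "no");
            return 0;
    }
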
index 5f9012ff52ed3c879887df556613979fb0e7d184..33d3c88a7225ff938d81c8912d5ec05457b07e60 100644 (file)
@@ -39,6 +39,7 @@ extern void do_signal(struct pt_regs *regs);
 
 #define __ARCH_HAS_SA_RESTORER
 
+#include <asm/asm.h>
 #include <uapi/asm/sigcontext.h>
 
 #ifdef __i386__
@@ -86,9 +87,9 @@ static inline int __const_sigismember(sigset_t *set, int _sig)
 
 static inline int __gen_sigismember(sigset_t *set, int _sig)
 {
-       unsigned char ret;
-       asm("btl %2,%1\n\tsetc %0"
-           : "=qm"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
+       bool ret;
+       asm("btl %2,%1" CC_SET(c)
+           : CC_OUT(c) (ret) : "m"(*set), "Ir"(_sig-1));
        return ret;
 }
 
index b6dc698f992a54646266ba7a4dc2901e7bdfa109..f335aad404a479e98e4a5d38dc41ee5e5aa419ec 100644 (file)
@@ -111,6 +111,6 @@ static inline unsigned long caller_frame_pointer(void)
        return (unsigned long)frame;
 }
 
-void show_opcodes(u8 *rip, const char *loglvl);
+void show_opcodes(struct pt_regs *regs, const char *loglvl);
 void show_ip(struct pt_regs *regs, const char *loglvl);
 #endif /* _ASM_X86_STACKTRACE_H */
index 29c9da6c62fc16b8b28bec203eb8982b2f570db5..58ce5288878e85db5c475d8891c0fff2817a20e4 100644 (file)
@@ -175,8 +175,16 @@ struct tlb_state {
         * are on.  This means that it may not match current->active_mm,
         * which will contain the previous user mm when we're in lazy TLB
         * mode even if we've already switched back to swapper_pg_dir.
+        *
+        * During switch_mm_irqs_off(), loaded_mm will be set to
+        * LOADED_MM_SWITCHING during the brief interrupts-off window
+        * when CR3 and loaded_mm would otherwise be inconsistent.  This
+        * is for nmi_uaccess_okay()'s benefit.
         */
        struct mm_struct *loaded_mm;
+
+#define LOADED_MM_SWITCHING ((struct mm_struct *)1)
+
        u16 loaded_mm_asid;
        u16 next_asid;
        /* last user mm's ctx id */
@@ -246,6 +254,38 @@ struct tlb_state {
 };
 DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 
+/*
+ * Blindly accessing user memory from NMI context can be dangerous
+ * if we're in the middle of switching the current user task or
+ * switching the loaded mm.  It can also be dangerous if we
+ * interrupted some kernel code that was temporarily using a
+ * different mm.
+ */
+static inline bool nmi_uaccess_okay(void)
+{
+       struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
+       struct mm_struct *current_mm = current->mm;
+
+       VM_WARN_ON_ONCE(!loaded_mm);
+
+       /*
+        * The condition we want to check is
+        * current_mm->pgd == __va(read_cr3_pa()).  This may be slow, though,
+        * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
+        * is supposed to be reasonably fast.
+        *
+        * Instead, we check the almost equivalent but somewhat conservative
+        * condition below, and we rely on the fact that switch_mm_irqs_off()
+        * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
+        */
+       if (loaded_mm != current_mm)
+               return false;
+
+       VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
+
+       return true;
+}
+
 /* Initialize cr4 shadow for this CPU. */
 static inline void cr4_init_shadow(void)
 {
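
nmi_uaccess_okay() above is what the perf_callchain_user() hunk earlier in this series now consults before walking a user stack from NMI context. A minimal sketch of that calling pattern; read_user_word_from_nmi() is an illustrative name, not an existing kernel helper:

    #include <linux/errno.h>
    #include <linux/uaccess.h>
    #include <asm/tlbflush.h>

    /* Refuse to touch user memory while loaded_mm is LOADED_MM_SWITCHING
     * or belongs to another task; only then attempt the atomic copy. */
    static int read_user_word_from_nmi(const unsigned long __user *uaddr,
                                       unsigned long *val)
    {
            unsigned long left;

            if (!nmi_uaccess_okay())
                    return -EFAULT;        /* CR3 and loaded_mm may disagree */

            pagefault_disable();
            left = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
            pagefault_enable();

            return left ? -EFAULT : 0;
    }
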
index fb856c9f04494b6633d9ea8fd3ebb9204cc2c620..53748541c487ab00d8bd1ee9b4d05305aa33171d 100644 (file)
@@ -93,7 +93,7 @@ static inline unsigned int __getcpu(void)
         *
         * If RDPID is available, use it.
         */
-       alternative_io ("lsl %[p],%[seg]",
+       alternative_io ("lsl %[seg],%[p]",
                        ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
                        X86_FEATURE_RDPID,
                        [p] "=a" (p), [seg] "r" (__PER_CPU_SEG));
index 014f214da5815d62f8f094f5e69f670c440ef69b..b9d5e7c9ef43e66c0b8d19fc0346be1779fa78d4 100644 (file)
@@ -684,8 +684,6 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
  * It means the size must be writable atomically and the address must be aligned
  * in a way that permits an atomic write. It also makes sure we fit on a single
  * page.
- *
- * Note: Must be called under text_mutex.
  */
 void *text_poke(void *addr, const void *opcode, size_t len)
 {
@@ -700,6 +698,8 @@ void *text_poke(void *addr, const void *opcode, size_t len)
         */
        BUG_ON(!after_bootmem);
 
+       lockdep_assert_held(&text_mutex);
+
        if (!core_kernel_text((unsigned long)addr)) {
                pages[0] = vmalloc_to_page(addr);
                pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
@@ -782,8 +782,6 @@ int poke_int3_handler(struct pt_regs *regs)
  *     - replace the first byte (int3) by the first byte of
  *       replacing opcode
  *     - sync cores
- *
- * Note: must be called under text_mutex.
  */
 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
 {
@@ -792,6 +790,9 @@ void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
        bp_int3_handler = handler;
        bp_int3_addr = (u8 *)addr + sizeof(int3);
        bp_patching_in_progress = true;
+
+       lockdep_assert_held(&text_mutex);
+
        /*
         * Corresponding read barrier in int3 notifier for making sure the
         * in_progress and handler are correctly ordered wrt. patching.
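
With the lockdep assertions added above, text_poke() and text_poke_bp() now verify at runtime what the deleted comments merely stated: callers must hold text_mutex. A rough sketch of the expected calling convention; patch_one_byte() is an illustrative name, not an existing helper:

    #include <linux/memory.h>           /* text_mutex */
    #include <linux/mutex.h>
    #include <linux/types.h>
    #include <asm/text-patching.h>

    static void patch_one_byte(void *addr, u8 byte)
    {
            mutex_lock(&text_mutex);
            text_poke(addr, &byte, 1);  /* lockdep_assert_held() is satisfied */
            mutex_unlock(&text_mutex);
    }
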
index 9f148e3d45b4fe2ec57eecf5bd13ba528f97fa37..7654febd510277bedf28f8cef6a5d599bcfe1f5c 100644 (file)
@@ -413,7 +413,7 @@ static int activate_managed(struct irq_data *irqd)
        if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
                /* Something in the core code broke! Survive gracefully */
                pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
-               return EINVAL;
+               return -EINVAL;
        }
 
        ret = assign_managed_vector(irqd, vector_searchmask);
index ec00d1ff5098b3701b1f1af324be2576676b7786..f7151cd03cb08145a5c596d28c676abaf8e1f1bf 100644 (file)
@@ -1640,6 +1640,7 @@ static int do_open(struct inode *inode, struct file *filp)
        return 0;
 }
 
+#ifdef CONFIG_PROC_FS
 static int proc_apm_show(struct seq_file *m, void *v)
 {
        unsigned short  bx;
@@ -1719,6 +1720,7 @@ static int proc_apm_show(struct seq_file *m, void *v)
                   units);
        return 0;
 }
+#endif
 
 static int apm(void *unused)
 {
index cb4a16292aa7cb84c21d175844f3122db016a0a0..40bdaea97fe7cac25f2b04bc239fdc520d68f09e 100644 (file)
@@ -668,6 +668,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation);
 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
 
+/*
+ * These CPUs all support 44 bits of physical address space internally in
+ * the cache, but CPUID can report a smaller number of physical address bits.
+ *
+ * The L1TF mitigation uses the topmost address bit for the inversion of
+ * non-present PTEs. When the installed memory reaches into that topmost
+ * address bit due to memory holes, which has been observed on machines
+ * that report 36 physical address bits and have 32G RAM installed, the
+ * mitigation range check in l1tf_select_mitigation() triggers. This is a
+ * false positive because the mitigation is still possible: the cache uses
+ * 44 bits internally. Use the cache bits instead of the reported physical
+ * bits, and raise them to 44 on the affected machines if the reported
+ * bits are less than 44.
+ */
+static void override_cache_bits(struct cpuinfo_x86 *c)
+{
+       if (c->x86 != 6)
+               return;
+
+       switch (c->x86_model) {
+       case INTEL_FAM6_NEHALEM:
+       case INTEL_FAM6_WESTMERE:
+       case INTEL_FAM6_SANDYBRIDGE:
+       case INTEL_FAM6_IVYBRIDGE:
+       case INTEL_FAM6_HASWELL_CORE:
+       case INTEL_FAM6_HASWELL_ULT:
+       case INTEL_FAM6_HASWELL_GT3E:
+       case INTEL_FAM6_BROADWELL_CORE:
+       case INTEL_FAM6_BROADWELL_GT3E:
+       case INTEL_FAM6_SKYLAKE_MOBILE:
+       case INTEL_FAM6_SKYLAKE_DESKTOP:
+       case INTEL_FAM6_KABYLAKE_MOBILE:
+       case INTEL_FAM6_KABYLAKE_DESKTOP:
+               if (c->x86_cache_bits < 44)
+                       c->x86_cache_bits = 44;
+               break;
+       }
+}
+
 static void __init l1tf_select_mitigation(void)
 {
        u64 half_pa;
@@ -675,6 +714,8 @@ static void __init l1tf_select_mitigation(void)
        if (!boot_cpu_has_bug(X86_BUG_L1TF))
                return;
 
+       override_cache_bits(&boot_cpu_data);
+
        switch (l1tf_mitigation) {
        case L1TF_MITIGATION_OFF:
        case L1TF_MITIGATION_FLUSH_NOWARN:
@@ -694,14 +735,13 @@ static void __init l1tf_select_mitigation(void)
        return;
 #endif
 
-       /*
-        * This is extremely unlikely to happen because almost all
-        * systems have far more MAX_PA/2 than RAM can be fit into
-        * DIMM slots.
-        */
        half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
        if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
                pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
+               pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
+                               half_pa);
+               pr_info("However, doing so will make a part of your RAM unusable.\n");
+               pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
                return;
        }
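
A worked example of what the x86_cache_bits override buys on the machines named in the comment above: with 36 reported physical address bits the old l1tf_pfn_limit() put half_pa just below 32G, so a 32G box with memory holes tripped this warning; with the cache bits forced to 44 the limit moves out to 8T. A userspace sketch of the arithmetic, assuming a 4K PAGE_SHIFT:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            /* Old formula, driven by x86_phys_bits = 36 */
            uint64_t old_limit   = (1ULL << (36 - 1 - PAGE_SHIFT)) - 1;
            /* New formula, driven by x86_cache_bits raised to 44 */
            uint64_t new_limit   = 1ULL << (44 - 1 - PAGE_SHIFT);
            uint64_t old_half_pa = old_limit << PAGE_SHIFT;  /* just under 32 GB */
            uint64_t new_half_pa = new_limit << PAGE_SHIFT;  /* 8 TB */

            printf("old half_pa: %llu bytes\n", (unsigned long long)old_half_pa);
            printf("new half_pa: %llu bytes\n", (unsigned long long)new_half_pa);
            return 0;
    }
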
 
index 84dee5ab745a2657499334c831ea2467408e81d4..44c4ef3d989b59b7bd98ebf617f8e5ee1d2a9548 100644 (file)
@@ -919,6 +919,7 @@ void get_cpu_address_sizes(struct cpuinfo_x86 *c)
        else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
                c->x86_phys_bits = 36;
 #endif
+       c->x86_cache_bits = c->x86_phys_bits;
 }
 
 static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
index 401e8c1331089ebcd6b752fddc71d3214f6d331a..fc3c07fe7df58a22c01c8c1180d0b394bde8b59a 100644 (file)
@@ -150,6 +150,9 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
        if (cpu_has(c, X86_FEATURE_HYPERVISOR))
                return false;
 
+       if (c->x86 != 6)
+               return false;
+
        for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
                if (c->x86_model == spectre_bad_microcodes[i].model &&
                    c->x86_stepping == spectre_bad_microcodes[i].stepping)
index 374d1aa66952df3ec3af6a712077e16430e3f87a..ceb67cd5918ff4b0b5dbce93fa1f3a36670084bc 100644 (file)
@@ -113,21 +113,6 @@ static inline void mce_register_injector_chain(struct notifier_block *nb)  { }
 static inline void mce_unregister_injector_chain(struct notifier_block *nb)    { }
 #endif
 
-#ifndef CONFIG_X86_64
-/*
- * On 32-bit systems it would be difficult to safely unmap a poison page
- * from the kernel 1:1 map because there are no non-canonical addresses that
- * we can use to refer to the address without risking a speculative access.
- * However, this isn't much of an issue because:
- * 1) Few unmappable pages are in the 1:1 map. Most are in HIGHMEM which
- *    are only mapped into the kernel as needed
- * 2) Few people would run a 32-bit kernel on a machine that supports
- *    recoverable errors because they have too much memory to boot 32-bit.
- */
-static inline void mce_unmap_kpfn(unsigned long pfn) {}
-#define mce_unmap_kpfn mce_unmap_kpfn
-#endif
-
 struct mca_config {
        bool dont_log_ce;
        bool cmci_disabled;
index 4b767284b7f5e59e529c5c7e1ae90174d7d23654..953b3ce92dccf0f684ce90e3a27015c99e692470 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/irq_work.h>
 #include <linux/export.h>
 #include <linux/jump_label.h>
+#include <linux/set_memory.h>
 
 #include <asm/intel-family.h>
 #include <asm/processor.h>
@@ -50,7 +51,6 @@
 #include <asm/mce.h>
 #include <asm/msr.h>
 #include <asm/reboot.h>
-#include <asm/set_memory.h>
 
 #include "mce-internal.h"
 
@@ -108,10 +108,6 @@ static struct irq_work mce_irq_work;
 
 static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
 
-#ifndef mce_unmap_kpfn
-static void mce_unmap_kpfn(unsigned long pfn);
-#endif
-
 /*
  * CPU/chipset specific EDAC code can register a notifier call here to print
  * MCE errors in a human-readable form.
@@ -602,7 +598,7 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
        if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
                pfn = mce->addr >> PAGE_SHIFT;
                if (!memory_failure(pfn, 0))
-                       mce_unmap_kpfn(pfn);
+                       set_mce_nospec(pfn);
        }
 
        return NOTIFY_OK;
@@ -1072,38 +1068,10 @@ static int do_memory_failure(struct mce *m)
        if (ret)
                pr_err("Memory error not recovered");
        else
-               mce_unmap_kpfn(m->addr >> PAGE_SHIFT);
+               set_mce_nospec(m->addr >> PAGE_SHIFT);
        return ret;
 }
 
-#ifndef mce_unmap_kpfn
-static void mce_unmap_kpfn(unsigned long pfn)
-{
-       unsigned long decoy_addr;
-
-       /*
-        * Unmap this page from the kernel 1:1 mappings to make sure
-        * we don't log more errors because of speculative access to
-        * the page.
-        * We would like to just call:
-        *      set_memory_np((unsigned long)pfn_to_kaddr(pfn), 1);
-        * but doing that would radically increase the odds of a
-        * speculative access to the poison page because we'd have
-        * the virtual address of the kernel 1:1 mapping sitting
-        * around in registers.
-        * Instead we get tricky.  We create a non-canonical address
-        * that looks just like the one we want, but has bit 63 flipped.
-        * This relies on set_memory_np() not checking whether we passed
-        * a legal address.
-        */
-
-       decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));
-
-       if (set_memory_np(decoy_addr, 1))
-               pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
-}
-#endif
-
 
 /*
  * Cases where we avoid rendezvous handler timeout:
index 0624957aa0681fe7a260389fe8b64740383ff7c9..07b5fc00b188047cdd2830f164626e19de9da8f9 100644 (file)
@@ -504,6 +504,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
        struct microcode_amd *mc_amd;
        struct ucode_cpu_info *uci;
        struct ucode_patch *p;
+       enum ucode_state ret;
        u32 rev, dummy;
 
        BUG_ON(raw_smp_processor_id() != cpu);
@@ -521,9 +522,8 @@ static enum ucode_state apply_microcode_amd(int cpu)
 
        /* need to apply patch? */
        if (rev >= mc_amd->hdr.patch_id) {
-               c->microcode = rev;
-               uci->cpu_sig.rev = rev;
-               return UCODE_OK;
+               ret = UCODE_OK;
+               goto out;
        }
 
        if (__apply_microcode_amd(mc_amd)) {
@@ -531,13 +531,21 @@ static enum ucode_state apply_microcode_amd(int cpu)
                        cpu, mc_amd->hdr.patch_id);
                return UCODE_ERROR;
        }
-       pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
-               mc_amd->hdr.patch_id);
 
-       uci->cpu_sig.rev = mc_amd->hdr.patch_id;
-       c->microcode = mc_amd->hdr.patch_id;
+       rev = mc_amd->hdr.patch_id;
+       ret = UCODE_UPDATED;
+
+       pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);
 
-       return UCODE_UPDATED;
+out:
+       uci->cpu_sig.rev = rev;
+       c->microcode     = rev;
+
+       /* Update boot_cpu_data's revision too, if we're on the BSP: */
+       if (c->cpu_index == boot_cpu_data.cpu_index)
+               boot_cpu_data.microcode = rev;
+
+       return ret;
 }
 
 static int install_equiv_cpu_table(const u8 *buf)
index 97ccf4c3b45bec517605813b1f24518b10466002..16936a24795c8457b789f7b428216cfc5bd1fb4e 100644 (file)
@@ -795,6 +795,7 @@ static enum ucode_state apply_microcode_intel(int cpu)
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        struct microcode_intel *mc;
+       enum ucode_state ret;
        static int prev_rev;
        u32 rev;
 
@@ -817,9 +818,8 @@ static enum ucode_state apply_microcode_intel(int cpu)
         */
        rev = intel_get_microcode_revision();
        if (rev >= mc->hdr.rev) {
-               uci->cpu_sig.rev = rev;
-               c->microcode = rev;
-               return UCODE_OK;
+               ret = UCODE_OK;
+               goto out;
        }
 
        /*
@@ -848,10 +848,17 @@ static enum ucode_state apply_microcode_intel(int cpu)
                prev_rev = rev;
        }
 
+       ret = UCODE_UPDATED;
+
+out:
        uci->cpu_sig.rev = rev;
-       c->microcode = rev;
+       c->microcode     = rev;
+
+       /* Update boot_cpu_data's revision too, if we're on the BSP: */
+       if (c->cpu_index == boot_cpu_data.cpu_index)
+               boot_cpu_data.microcode = rev;
 
-       return UCODE_UPDATED;
+       return ret;
 }
 
 static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
index 9c8652974f8ed1f6a8ed20f97ac843ec332706f2..2b5886401e5f4eb2c221f813675927a53cc97d61 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/bug.h>
 #include <linux/nmi.h>
 #include <linux/sysfs.h>
+#include <linux/kasan.h>
 
 #include <asm/cpu_entry_area.h>
 #include <asm/stacktrace.h>
@@ -89,14 +90,24 @@ static void printk_stack_address(unsigned long address, int reliable,
  * Thus, the 2/3rds prologue and 64 byte OPCODE_BUFSIZE is just a random
  * guesstimate in attempt to achieve all of the above.
  */
-void show_opcodes(u8 *rip, const char *loglvl)
+void show_opcodes(struct pt_regs *regs, const char *loglvl)
 {
 #define PROLOGUE_SIZE 42
 #define EPILOGUE_SIZE 21
 #define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE)
        u8 opcodes[OPCODE_BUFSIZE];
+       unsigned long prologue = regs->ip - PROLOGUE_SIZE;
+       bool bad_ip;
 
-       if (probe_kernel_read(opcodes, rip - PROLOGUE_SIZE, OPCODE_BUFSIZE)) {
+       /*
+        * Make sure userspace isn't trying to trick us into dumping kernel
+        * memory by pointing the userspace instruction pointer at it.
+        */
+       bad_ip = user_mode(regs) &&
+               __chk_range_not_ok(prologue, OPCODE_BUFSIZE, TASK_SIZE_MAX);
+
+       if (bad_ip || probe_kernel_read(opcodes, (u8 *)prologue,
+                                       OPCODE_BUFSIZE)) {
                printk("%sCode: Bad RIP value.\n", loglvl);
        } else {
                printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
@@ -112,7 +123,7 @@ void show_ip(struct pt_regs *regs, const char *loglvl)
 #else
        printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip);
 #endif
-       show_opcodes((u8 *)regs->ip, loglvl);
+       show_opcodes(regs, loglvl);
 }
 
 void show_iret_regs(struct pt_regs *regs)
@@ -135,7 +146,7 @@ static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
         * they can be printed in the right context.
         */
        if (!partial && on_stack(info, regs, sizeof(*regs))) {
-               __show_regs(regs, 0);
+               __show_regs(regs, SHOW_REGS_SHORT);
 
        } else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
                                       IRET_FRAME_SIZE)) {
@@ -333,7 +344,7 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
        oops_exit();
 
        /* Executive summary in case the oops scrolled away */
-       __show_regs(&exec_summary_regs, true);
+       __show_regs(&exec_summary_regs, SHOW_REGS_ALL);
 
        if (!signr)
                return;
@@ -346,7 +357,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
         * We're not going to return, but we might be on an IST stack or
         * have very little stack space left.  Rewind the stack and kill
         * the task.
+        * Before we rewind the stack, we have to tell KASAN that we're going to
+        * reuse the task stack and that existing poisons are invalid.
         */
+       kasan_unpoison_task_stack(current);
        rewind_stack_do_exit(signr);
 }
 NOKPROBE_SYMBOL(oops_end);
@@ -393,14 +407,9 @@ void die(const char *str, struct pt_regs *regs, long err)
 
 void show_regs(struct pt_regs *regs)
 {
-       bool all = true;
-
        show_regs_print_info(KERN_DEFAULT);
 
-       if (IS_ENABLED(CONFIG_X86_32))
-               all = !user_mode(regs);
-
-       __show_regs(regs, all);
+       __show_regs(regs, user_mode(regs) ? SHOW_REGS_USER : SHOW_REGS_ALL);
 
        /*
         * When in-kernel, we also print out the stack at the time of the fault..
index f260e452e4f8726237618efe4b9526938d31d81d..e8c8c5d78dbdd38b1089f5dc87cf3450644fe487 100644 (file)
@@ -7,11 +7,17 @@
 #include <linux/eisa.h>
 #include <linux/io.h>
 
+#include <xen/xen.h>
+
 static __init int eisa_bus_probe(void)
 {
-       void __iomem *p = ioremap(0x0FFFD9, 4);
+       void __iomem *p;
+
+       if (xen_pv_domain() && !xen_initial_domain())
+               return 0;
 
-       if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
+       p = ioremap(0x0FFFD9, 4);
+       if (p && readl(p) == 'E' + ('I' << 8) + ('S' << 16) + ('A' << 24))
                EISA_bus = 1;
        iounmap(p);
        return 0;
index 2924fd447e617dd6b4a2aaf1741fefe31c94b676..5046a3c9dec2feaa6761e38c9947e90ad4030efb 100644 (file)
@@ -59,7 +59,7 @@
 #include <asm/intel_rdt_sched.h>
 #include <asm/proto.h>
 
-void __show_regs(struct pt_regs *regs, int all)
+void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
 {
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
        unsigned long d0, d1, d2, d3, d6, d7;
@@ -85,7 +85,7 @@ void __show_regs(struct pt_regs *regs, int all)
        printk(KERN_DEFAULT "DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n",
               (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss, regs->flags);
 
-       if (!all)
+       if (mode != SHOW_REGS_ALL)
                return;
 
        cr0 = read_cr0();
index 476e3ddf88906d2df48bff128383306271bf14a6..ea5ea850348da94cafb85010c4ba123d83fbba57 100644 (file)
@@ -62,7 +62,7 @@
 __visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
 
 /* Prints also some state that isn't saved in the pt_regs */
-void __show_regs(struct pt_regs *regs, int all)
+void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
 {
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
        unsigned long d0, d1, d2, d3, d6, d7;
@@ -87,9 +87,17 @@ void __show_regs(struct pt_regs *regs, int all)
        printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
               regs->r13, regs->r14, regs->r15);
 
-       if (!all)
+       if (mode == SHOW_REGS_SHORT)
                return;
 
+       if (mode == SHOW_REGS_USER) {
+               rdmsrl(MSR_FS_BASE, fs);
+               rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
+               printk(KERN_DEFAULT "FS:  %016lx GS:  %016lx\n",
+                      fs, shadowgs);
+               return;
+       }
+
        asm("movl %%ds,%0" : "=r" (ds));
        asm("movl %%cs,%0" : "=r" (cs));
        asm("movl %%es,%0" : "=r" (es));
@@ -384,6 +392,7 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
        start_thread_common(regs, new_ip, new_sp,
                            __USER_CS, __USER_DS, 0);
 }
+EXPORT_SYMBOL_GPL(start_thread);
 
 #ifdef CONFIG_COMPAT
 void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
index 12cbe2b88c0f3cb14754e6bce2d058b5d0bb63a7..738bf42b0218f54bf08e46eb1eb5159fd8686e44 100644 (file)
@@ -111,8 +111,10 @@ int arch_register_cpu(int num)
        /*
         * Currently CPU0 is only hotpluggable on Intel platforms. Other
         * vendors can add hotplug support later.
+        * Xen PV guests don't support CPU0 hotplug at all.
         */
-       if (c->x86_vendor != X86_VENDOR_INTEL)
+       if (c->x86_vendor != X86_VENDOR_INTEL ||
+           boot_cpu_has(X86_FEATURE_XENPV))
                cpu0_hotpluggable = 0;
 
        /*
index 1463468ba9a0a5dc3914c92f88fffb491dc36d4f..6490f618e09696a7a407859037a0b1635cbb6f9f 100644 (file)
@@ -1415,7 +1415,7 @@ static bool __init determine_cpu_tsc_frequencies(bool early)
 
 static unsigned long __init get_loops_per_jiffy(void)
 {
-       unsigned long lpj = tsc_khz * KHZ;
+       u64 lpj = (u64)tsc_khz * KHZ;
 
        do_div(lpj, HZ);
        return lpj;
index 0cefba28c864a3a0925378ed9b438a05b554cff2..17c0472c5b344faaaac3153ff53c4600a3fcd81e 100644 (file)
@@ -548,7 +548,7 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
 }
 
 int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
-                   unsigned long ipi_bitmap_high, int min,
+                   unsigned long ipi_bitmap_high, u32 min,
                    unsigned long icr, int op_64_bit)
 {
        int i;
@@ -571,18 +571,31 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);
 
+       if (min > map->max_apic_id)
+               goto out;
        /* Bits above cluster_size are masked in the caller.  */
-       for_each_set_bit(i, &ipi_bitmap_low, BITS_PER_LONG) {
-               vcpu = map->phys_map[min + i]->vcpu;
-               count += kvm_apic_set_irq(vcpu, &irq, NULL);
+       for_each_set_bit(i, &ipi_bitmap_low,
+               min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
+               if (map->phys_map[min + i]) {
+                       vcpu = map->phys_map[min + i]->vcpu;
+                       count += kvm_apic_set_irq(vcpu, &irq, NULL);
+               }
        }
 
        min += cluster_size;
-       for_each_set_bit(i, &ipi_bitmap_high, BITS_PER_LONG) {
-               vcpu = map->phys_map[min + i]->vcpu;
-               count += kvm_apic_set_irq(vcpu, &irq, NULL);
+
+       if (min > map->max_apic_id)
+               goto out;
+
+       for_each_set_bit(i, &ipi_bitmap_high,
+               min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
+               if (map->phys_map[min + i]) {
+                       vcpu = map->phys_map[min + i]->vcpu;
+                       count += kvm_apic_set_irq(vcpu, &irq, NULL);
+               }
        }
 
+out:
        rcu_read_unlock();
        return count;
 }
index a282321329b51a10e7563b0d422bd9e38d57b530..e24ea7067373af69d258c46995007b0446a69fdc 100644 (file)
@@ -1853,11 +1853,6 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
        return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
 }
 
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
-       return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
-}
-
 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
 {
        return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
@@ -5217,7 +5212,7 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
                       void *insn, int insn_len)
 {
-       int r, emulation_type = EMULTYPE_RETRY;
+       int r, emulation_type = 0;
        enum emulation_result er;
        bool direct = vcpu->arch.mmu.direct_map;
 
@@ -5230,10 +5225,8 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
        r = RET_PF_INVALID;
        if (unlikely(error_code & PFERR_RSVD_MASK)) {
                r = handle_mmio_page_fault(vcpu, cr2, direct);
-               if (r == RET_PF_EMULATE) {
-                       emulation_type = 0;
+               if (r == RET_PF_EMULATE)
                        goto emulate;
-               }
        }
 
        if (r == RET_PF_INVALID) {
@@ -5260,8 +5253,19 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
                return 1;
        }
 
-       if (mmio_info_in_cache(vcpu, cr2, direct))
-               emulation_type = 0;
+       /*
+        * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
+        * optimistically try to just unprotect the page and let the processor
+        * re-execute the instruction that caused the page fault.  Do not allow
+        * retrying MMIO emulation, as it's not only pointless but could also
+        * cause us to enter an infinite loop because the processor will keep
+        * faulting on the non-existent MMIO address.  Retrying an instruction
+        * from a nested guest is also pointless and dangerous as we are only
+        * explicitly shadowing L1's page tables, i.e. unprotecting something
+        * for L1 isn't going to magically fix whatever issue caused L2 to fail.
+        */
+       if (!mmio_info_in_cache(vcpu, cr2, direct) && !is_guest_mode(vcpu))
+               emulation_type = EMULTYPE_ALLOW_RETRY;
 emulate:
        /*
         * On AMD platforms, under certain conditions insn_len may be zero on #NPF.
index 6276140044d0848b58e6778c31cf4d247520ac44..89c4c5aa15f16c71af5404f302a627313a61fd96 100644 (file)
@@ -776,7 +776,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
        }
 
        if (!svm->next_rip) {
-               if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
+               if (kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) !=
                                EMULATE_DONE)
                        printk(KERN_DEBUG "%s: NOP\n", __func__);
                return;
@@ -2715,7 +2715,7 @@ static int gp_interception(struct vcpu_svm *svm)
 
        WARN_ON_ONCE(!enable_vmware_backdoor);
 
-       er = emulate_instruction(vcpu,
+       er = kvm_emulate_instruction(vcpu,
                EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL);
        if (er == EMULATE_USER_EXIT)
                return 0;
@@ -2819,7 +2819,7 @@ static int io_interception(struct vcpu_svm *svm)
        string = (io_info & SVM_IOIO_STR_MASK) != 0;
        in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
        if (string)
-               return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+               return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
 
        port = io_info >> 16;
        size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
@@ -3861,7 +3861,7 @@ static int iret_interception(struct vcpu_svm *svm)
 static int invlpg_interception(struct vcpu_svm *svm)
 {
        if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
-               return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
+               return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
 
        kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
        return kvm_skip_emulated_instruction(&svm->vcpu);
@@ -3869,13 +3869,13 @@ static int invlpg_interception(struct vcpu_svm *svm)
 
 static int emulate_on_interception(struct vcpu_svm *svm)
 {
-       return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
+       return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
 }
 
 static int rsm_interception(struct vcpu_svm *svm)
 {
-       return x86_emulate_instruction(&svm->vcpu, 0, 0,
-                                      rsm_ins_bytes, 2) == EMULATE_DONE;
+       return kvm_emulate_instruction_from_buffer(&svm->vcpu,
+                                       rsm_ins_bytes, 2) == EMULATE_DONE;
 }
 
 static int rdpmc_interception(struct vcpu_svm *svm)
@@ -4700,7 +4700,7 @@ static int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
                ret = avic_unaccel_trap_write(svm);
        } else {
                /* Handling Fault */
-               ret = (emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE);
+               ret = (kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE);
        }
 
        return ret;
@@ -6747,7 +6747,7 @@ e_free:
 static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
 {
        unsigned long vaddr, vaddr_end, next_vaddr;
-       unsigned long dst_vaddr, dst_vaddr_end;
+       unsigned long dst_vaddr;
        struct page **src_p, **dst_p;
        struct kvm_sev_dbg debug;
        unsigned long n;
@@ -6763,7 +6763,6 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
        size = debug.len;
        vaddr_end = vaddr + size;
        dst_vaddr = debug.dst_uaddr;
-       dst_vaddr_end = dst_vaddr + size;
 
        for (; vaddr < vaddr_end; vaddr = next_vaddr) {
                int len, s_off, d_off;
index 8dae47e7267af8c77b6d14766e7b771166ca85d8..533a327372c876df0b1c2b99ea3558e2aaa92df4 100644 (file)
@@ -6983,7 +6983,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
         * Cause the #SS fault with 0 error code in VM86 mode.
         */
        if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
-               if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
+               if (kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE) {
                        if (vcpu->arch.halt_request) {
                                vcpu->arch.halt_request = 0;
                                return kvm_vcpu_halt(vcpu);
@@ -7054,7 +7054,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
 
        if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) {
                WARN_ON_ONCE(!enable_vmware_backdoor);
-               er = emulate_instruction(vcpu,
+               er = kvm_emulate_instruction(vcpu,
                        EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL);
                if (er == EMULATE_USER_EXIT)
                        return 0;
@@ -7157,7 +7157,7 @@ static int handle_io(struct kvm_vcpu *vcpu)
        ++vcpu->stat.io_exits;
 
        if (string)
-               return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+               return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
 
        port = exit_qualification >> 16;
        size = (exit_qualification & 7) + 1;
@@ -7231,7 +7231,7 @@ static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
 static int handle_desc(struct kvm_vcpu *vcpu)
 {
        WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP));
-       return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+       return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
 }
 
 static int handle_cr(struct kvm_vcpu *vcpu)
@@ -7480,7 +7480,7 @@ static int handle_vmcall(struct kvm_vcpu *vcpu)
 
 static int handle_invd(struct kvm_vcpu *vcpu)
 {
-       return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+       return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
 }
 
 static int handle_invlpg(struct kvm_vcpu *vcpu)
@@ -7547,7 +7547,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu)
                        return kvm_skip_emulated_instruction(vcpu);
                }
        }
-       return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+       return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
 }
 
 static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
@@ -7704,8 +7704,8 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
                if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
                        return kvm_skip_emulated_instruction(vcpu);
                else
-                       return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP,
-                                                      NULL, 0) == EMULATE_DONE;
+                       return kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) ==
+                                                               EMULATE_DONE;
        }
 
        return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
@@ -7748,7 +7748,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
                if (kvm_test_request(KVM_REQ_EVENT, vcpu))
                        return 1;
 
-               err = emulate_instruction(vcpu, 0);
+               err = kvm_emulate_instruction(vcpu, 0);
 
                if (err == EMULATE_USER_EXIT) {
                        ++vcpu->stat.mmio_exits;
@@ -10131,9 +10131,6 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
  * information but as all relevant affected CPUs have 32KiB L1D cache size
  * there is no point in doing so.
  */
-#define L1D_CACHE_ORDER 4
-static void *vmx_l1d_flush_pages;
-
 static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
 {
        int size = PAGE_SIZE << L1D_CACHE_ORDER;
@@ -12540,8 +12537,11 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
        bool from_vmentry = !!exit_qual;
        u32 dummy_exit_qual;
+       u32 vmcs01_cpu_exec_ctrl;
        int r = 0;
 
+       vmcs01_cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+
        enter_guest_mode(vcpu);
 
        if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
@@ -12577,6 +12577,25 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
                kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
        }
 
+       /*
+        * If L1 had a pending IRQ/NMI at the time it executed
+        * VMLAUNCH/VMRESUME which wasn't delivered because it was
+        * disallowed (e.g. interrupts disabled), L0 needs to
+        * evaluate whether this pending event should cause an exit
+        * from L2 to L1 or be delivered directly to L2 (e.g. in case
+        * L1 doesn't intercept EXTERNAL_INTERRUPT).
+        *
+        * Usually this would be handled by L0 requesting an
+        * IRQ/NMI window by setting the VMCS accordingly. However,
+        * this setting was done on VMCS01 and now VMCS02 is active
+        * instead. Thus, we force L0 to perform pending event
+        * evaluation by requesting a KVM_REQ_EVENT.
+        */
+       if (vmcs01_cpu_exec_ctrl &
+               (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING)) {
+               kvm_make_request(KVM_REQ_EVENT, vcpu);
+       }
+
        /*
         * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
         * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
@@ -13991,9 +14010,6 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
            check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
                return -EINVAL;
 
-       if (kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING)
-               vmx->nested.nested_run_pending = 1;
-
        vmx->nested.dirty_vmcs12 = true;
        ret = enter_vmx_non_root_mode(vcpu, NULL);
        if (ret)
index 506bd2b4b8bb76e21a959310d33f6de6180719f8..542f6315444d75aa365ca04dba4bddd7bfb369d3 100644 (file)
@@ -4987,7 +4987,7 @@ int handle_ud(struct kvm_vcpu *vcpu)
                emul_type = 0;
        }
 
-       er = emulate_instruction(vcpu, emul_type);
+       er = kvm_emulate_instruction(vcpu, emul_type);
        if (er == EMULATE_USER_EXIT)
                return 0;
        if (er != EMULATE_DONE)
@@ -5870,7 +5870,10 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
        gpa_t gpa = cr2;
        kvm_pfn_t pfn;
 
-       if (emulation_type & EMULTYPE_NO_REEXECUTE)
+       if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
+               return false;
+
+       if (WARN_ON_ONCE(is_guest_mode(vcpu)))
                return false;
 
        if (!vcpu->arch.mmu.direct_map) {
@@ -5958,7 +5961,10 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
         */
        vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
 
-       if (!(emulation_type & EMULTYPE_RETRY))
+       if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
+               return false;
+
+       if (WARN_ON_ONCE(is_guest_mode(vcpu)))
                return false;
 
        if (x86_page_table_writing_insn(ctxt))
@@ -6276,7 +6282,19 @@ restart:
 
        return r;
 }
-EXPORT_SYMBOL_GPL(x86_emulate_instruction);
+
+int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type)
+{
+       return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_instruction);
+
+int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
+                                       void *insn, int insn_len)
+{
+       return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len);
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
 
 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
                            unsigned short port)
@@ -7734,7 +7752,7 @@ static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
 {
        int r;
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-       r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
+       r = kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        if (r != EMULATE_DONE)
                return 0;
index 257f27620bc272e3312295714a120de07963441f..67b9568613f34abdcacc208b4f23ef1414a512eb 100644 (file)
@@ -274,6 +274,8 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
                                          int page_num);
 bool kvm_vector_hashing_enabled(void);
+int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
+                           int emulation_type, void *insn, int insn_len);
 
 #define KVM_SUPPORTED_XCR0     (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
                                | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
index c8c6ad0d58b89c3621d0fcf11f45e00442ebf2a2..3f435d7fca5e62bc999e48c6a4f3e1c0fc86def9 100644 (file)
@@ -7,6 +7,8 @@
 #include <linux/uaccess.h>
 #include <linux/export.h>
 
+#include <asm/tlbflush.h>
+
 /*
  * We rely on the nested NMI work to allow atomic faults from the NMI path; the
  * nested NMI paths are careful to preserve CR2.
@@ -19,6 +21,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
        if (__range_not_ok(from, n, TASK_SIZE))
                return n;
 
+       if (!nmi_uaccess_okay())
+               return n;
+
        /*
         * Even though this function is typically called from NMI/IRQ context
         * disable pagefaults so that its behaviour is consistent even when
index b45f5aaefd7496e1af36037908efa8af3fccede5..076ebdce9bd4307945bf9d1ecfc54476686663fc 100644 (file)
@@ -2,6 +2,8 @@
 
 #include <linux/spinlock.h>
 #include <linux/percpu.h>
+#include <linux/kallsyms.h>
+#include <linux/kcore.h>
 
 #include <asm/cpu_entry_area.h>
 #include <asm/pgtable.h>
@@ -13,6 +15,7 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage)
 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
        [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
+static DEFINE_PER_CPU(struct kcore_list, kcore_entry_trampoline);
 #endif
 
 struct cpu_entry_area *get_cpu_entry_area(int cpu)
@@ -146,10 +149,40 @@ static void __init setup_cpu_entry_area(int cpu)
 
        cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline,
                     __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
+       /*
+        * The cpu_entry_area alias addresses are not in the kernel binary
+        * so they do not show up in /proc/kcore normally.  This adds entries
+        * for them manually.
+        */
+       kclist_add_remap(&per_cpu(kcore_entry_trampoline, cpu),
+                        _entry_trampoline,
+                        &get_cpu_entry_area(cpu)->entry_trampoline, PAGE_SIZE);
 #endif
        percpu_setup_debug_store(cpu);
 }
 
+#ifdef CONFIG_X86_64
+int arch_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+                    char *name)
+{
+       unsigned int cpu, ncpu = 0;
+
+       if (symnum >= num_possible_cpus())
+               return -EINVAL;
+
+       for_each_possible_cpu(cpu) {
+               if (ncpu++ >= symnum)
+                       break;
+       }
+
+       *value = (unsigned long)&get_cpu_entry_area(cpu)->entry_trampoline;
+       *type = 't';
+       strlcpy(name, "__entry_SYSCALL_64_trampoline", KSYM_NAME_LEN);
+
+       return 0;
+}
+#endif
+
 static __init void setup_cpu_entry_area_ptes(void)
 {
 #ifdef CONFIG_X86_32
index b9123c497e0a7b20e25aa6608622118e6a00e3ea..47bebfe6efa70a316424934683f4302de33876a2 100644 (file)
@@ -837,7 +837,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
 
        printk(KERN_CONT "\n");
 
-       show_opcodes((u8 *)regs->ip, loglvl);
+       show_opcodes(regs, loglvl);
 }
 
 static void
index 5c32a766549215c3032dfceade9bbde7b8b56a89..7a8fc26c11155edac84372e2af7133e0bea07c36 100644 (file)
@@ -930,7 +930,7 @@ unsigned long max_swapfile_size(void)
 
        if (boot_cpu_has_bug(X86_BUG_L1TF)) {
                /* Limit the swap file size to MAX_PA/2 for L1TF workaround */
-               unsigned long l1tf_limit = l1tf_pfn_limit() + 1;
+               unsigned long long l1tf_limit = l1tf_pfn_limit();
                /*
                 * We encode swap offsets also with 3 bits below those for pfn
                 * which makes the usable limit higher.
@@ -938,7 +938,7 @@ unsigned long max_swapfile_size(void)
 #if CONFIG_PGTABLE_LEVELS > 2
                l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
 #endif
-               pages = min_t(unsigned long, l1tf_limit, pages);
+               pages = min_t(unsigned long long, l1tf_limit, pages);
        }
        return pages;
 }
index f40ab8185d9459d1712c702b10db48257a4866fe..1e95d57760cf79becf81c012df564e42ae7122e2 100644 (file)
@@ -257,7 +257,7 @@ bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
        /* If it's real memory always allow */
        if (pfn_valid(pfn))
                return true;
-       if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
+       if (pfn >= l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
                return false;
        return true;
 }
index 8d6c34fe49be9567157b65fa69d8d1059f7779ee..51a5a69ecac9f24ab794ea6e86ea6d7fb39367a8 100644 (file)
@@ -1420,6 +1420,29 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
        return 0;
 }
 
+/*
+ * Machine check recovery code needs to change cache mode of poisoned
+ * pages to UC to avoid speculative access logging another error. But
+ * passing the address of the 1:1 mapping to set_memory_uc() is a fine
+ * way to encourage a speculative access. So we cheat and flip the top
+ * bit of the address. This works fine for the code that updates the
+ * page tables. But at the end of the process we need to flush the cache
+ * and the non-canonical address causes a #GP fault when used by the
+ * CLFLUSH instruction.
+ *
+ * But in the common case we already have a canonical address. This code
+ * will fix the top bit if needed and is a no-op otherwise.
+ */
+static inline unsigned long make_addr_canonical_again(unsigned long addr)
+{
+#ifdef CONFIG_X86_64
+       return (long)(addr << 1) >> 1;
+#else
+       return addr;
+#endif
+}
+
+
 static int change_page_attr_set_clr(unsigned long *addr, int numpages,
                                    pgprot_t mask_set, pgprot_t mask_clr,
                                    int force_split, int in_flag,
@@ -1465,7 +1488,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
                 * Save address for cache flush. *addr is modified in the call
                 * to __change_page_attr_set_clr() below.
                 */
-               baddr = *addr;
+               baddr = make_addr_canonical_again(*addr);
        }
 
        /* Must avoid aliasing mappings in the highmem code */
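
The make_addr_canonical_again() helper added above depends on an arithmetic right shift sign-extending bit 62 back into bit 63. A minimal userspace sketch of that expression (hypothetical values, not real kernel addresses; like the kernel code, it relies on the usual arithmetic-shift behaviour for signed types):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the (long)(addr << 1) >> 1 trick used above. */
    static uint64_t make_canonical(uint64_t addr)
    {
            /* The cast makes the right shift copy bit 62 back into bit 63. */
            return (uint64_t)((int64_t)(addr << 1) >> 1);
    }

    int main(void)
    {
            /* Hypothetical decoy: a kernel-style address with bit 63 cleared. */
            uint64_t decoy = 0x7fffc90000001000ULL;

            printf("decoy     %#llx\n", (unsigned long long)decoy);
            printf("canonical %#llx\n", (unsigned long long)make_canonical(decoy));
            return 0;
    }

An already-canonical address passes through unchanged, which is why the helper can be applied unconditionally before the cache flush.
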
index 1555bd7d34493f972464e2df7e11a29f445ac600..3d0c83ef6aab98cde354b86f999eecfca3a38263 100644 (file)
@@ -512,6 +512,17 @@ static int free_ram_pages_type(u64 start, u64 end)
        return 0;
 }
 
+static u64 sanitize_phys(u64 address)
+{
+       /*
+        * When changing the memtype for pages containing poison allow
+        * for a "decoy" virtual address (bit 63 clear) passed to
+        * set_memory_X(). __pa() on a "decoy" address results in a
+        * physical address with bit 63 set.
+        */
+       return address & __PHYSICAL_MASK;
+}
+
 /*
  * req_type typically has one of the:
  * - _PAGE_CACHE_MODE_WB
@@ -533,6 +544,8 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
        int is_range_ram;
        int err = 0;
 
+       start = sanitize_phys(start);
+       end = sanitize_phys(end);
        BUG_ON(start >= end); /* end is exclusive */
 
        if (!pat_enabled()) {
@@ -609,6 +622,9 @@ int free_memtype(u64 start, u64 end)
        if (!pat_enabled())
                return 0;
 
+       start = sanitize_phys(start);
+       end = sanitize_phys(end);
+
        /* Low ISA region is always mapped WB. No need to track */
        if (x86_platform.is_untracked_pat_range(start, end))
                return 0;
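
The decoy addresses that sanitize_phys() defends against show up on the physical side with bit 63 set, so PAT simply masks it off before tracking the range. A small sketch assuming a 46-bit physical mask (the kernel uses __PHYSICAL_MASK, which varies by configuration):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the kernel's __PHYSICAL_MASK. */
    #define PHYS_MASK ((1ULL << 46) - 1)

    static uint64_t sanitize_phys(uint64_t address)
    {
            /* __pa() on a "decoy" virtual address yields bit 63 set;
             * masking recovers the physical address PAT should track. */
            return address & PHYS_MASK;
    }

    int main(void)
    {
            uint64_t decoy_pa = (1ULL << 63) | 0x123456000ULL;  /* hypothetical */

            printf("%#llx -> %#llx\n",
                   (unsigned long long)decoy_pa,
                   (unsigned long long)sanitize_phys(decoy_pa));
            return 0;
    }
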
index e848a48117856c8e58dcf313339853563f5c6f7f..ae394552fb945def068d2c877a153e9d7f040228 100644 (file)
@@ -269,7 +269,7 @@ static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
        if (pgd_val(pgd) != 0) {
                pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
 
-               *pgdp = native_make_pgd(0);
+               pgd_clear(pgdp);
 
                paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
                pmd_free(mm, pmd);
@@ -494,7 +494,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
        int changed = !pte_same(*ptep, entry);
 
        if (changed && dirty)
-               *ptep = entry;
+               set_pte(ptep, entry);
 
        return changed;
 }
@@ -509,7 +509,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma,
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
        if (changed && dirty) {
-               *pmdp = entry;
+               set_pmd(pmdp, entry);
                /*
                 * We had a write-protection fault here and changed the pmd
                 * to be more permissive. No need to flush the TLB for that,
@@ -529,7 +529,7 @@ int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
        VM_BUG_ON(address & ~HPAGE_PUD_MASK);
 
        if (changed && dirty) {
-               *pudp = entry;
+               set_pud(pudp, entry);
                /*
                 * We had a write-protection fault here and changed the pud
                 * to be more permissive. No need to flush the TLB for that,
index 31341ae7309f6b957e8fa8e7207bcd2c8433e6bb..c1fc1ae6b42947a9f6dece93efce6e4f843b7def 100644 (file)
@@ -248,7 +248,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
  *
  * Returns a pointer to a PTE on success, or NULL on failure.
  */
-static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
+static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
 {
        gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
        pmd_t *pmd;
index 9517d1b2a2810817907640c6c2dde698c62b3e79..e96b99eb800ccd5f170f7539efaaf99720d192b4 100644 (file)
@@ -305,6 +305,10 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 
                choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
 
+               /* Let nmi_uaccess_okay() know that we're changing CR3. */
+               this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
+               barrier();
+
                if (need_flush) {
                        this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
                        this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
@@ -335,6 +339,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                if (next != &init_mm)
                        this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
 
+               /* Make sure we write CR3 before loaded_mm. */
+               barrier();
+
                this_cpu_write(cpu_tlbstate.loaded_mm, next);
                this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
        }
index 324b93328b3746f4bb8010780ee85915707b8095..9959657127f476ae3018894ffc31909ee2c6d79e 100644 (file)
@@ -85,12 +85,7 @@ pgd_t * __init efi_call_phys_prolog(void)
 
 void __init efi_call_phys_epilog(pgd_t *save_pgd)
 {
-       struct desc_ptr gdt_descr;
-
-       gdt_descr.address = (unsigned long)get_cpu_gdt_rw(0);
-       gdt_descr.size = GDT_SIZE - 1;
-       load_gdt(&gdt_descr);
-
+       load_fixmap_gdt(0);
        load_cr3(save_pgd);
        __flush_tlb_all();
 }
index 45b700ac5fe7e0685578597ee4189b8124b403c1..2fe5c9b1816bbd5e6f0adcc9385bcdf09b173154 100644 (file)
@@ -435,14 +435,13 @@ static void xen_set_pud(pud_t *ptr, pud_t val)
 static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
        trace_xen_mmu_set_pte_atomic(ptep, pte);
-       set_64bit((u64 *)ptep, native_pte_val(pte));
+       __xen_set_pte(ptep, pte);
 }
 
 static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
        trace_xen_mmu_pte_clear(mm, addr, ptep);
-       if (!xen_batched_set_pte(ptep, native_make_pte(0)))
-               native_pte_clear(mm, addr, ptep);
+       __xen_set_pte(ptep, native_make_pte(0));
 }
 
 static void xen_pmd_clear(pmd_t *pmdp)
@@ -1570,7 +1569,7 @@ static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
                pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
                               pte_val_ma(pte));
 #endif
-       native_set_pte(ptep, pte);
+       __xen_set_pte(ptep, pte);
 }
 
 /* Early in boot, while setting up the initial pagetable, assume
@@ -2061,7 +2060,6 @@ void __init xen_relocate_p2m(void)
        pud_t *pud;
        pgd_t *pgd;
        unsigned long *new_p2m;
-       int save_pud;
 
        size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
        n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
@@ -2091,7 +2089,6 @@ void __init xen_relocate_p2m(void)
 
        pgd = __va(read_cr3_pa());
        new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
-       save_pud = n_pud;
        for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
                pud = early_memremap(pud_phys, PAGE_SIZE);
                clear_page(pud);
index 04d038f3b6fab86fed6e458b136f8fc46c05feeb..b9ad83a0ee5dbf1604acc3c4edc90698e7eea1a6 100644 (file)
@@ -4,6 +4,7 @@ config ZONE_DMA
 
 config XTENSA
        def_bool y
+       select ARCH_HAS_SG_CHAIN
        select ARCH_HAS_SYNC_DMA_FOR_CPU
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
        select ARCH_NO_COHERENT_DMA_MMAP if !MMU
index 3a934b72a2728948a0009ca3a92faf63c0b4a1ed..d67e30faff9c83b519702e15e1ea4ad70eed52d8 100644 (file)
@@ -49,7 +49,7 @@ KBUILD_CFLAGS += $(call cc-option,-mno-serialize-volatile,)
 KBUILD_AFLAGS += -mlongcalls -mtext-section-literals
 
 ifneq ($(CONFIG_LD_NO_RELAX),)
-LDFLAGS := --no-relax
+KBUILD_LDFLAGS := --no-relax
 endif
 
 ifeq ($(shell echo __XTENSA_EB__ | $(CC) -E - | grep -v "\#"),1)
@@ -64,11 +64,7 @@ endif
 vardirs := $(patsubst %,arch/xtensa/variants/%/,$(variant-y))
 plfdirs := $(patsubst %,arch/xtensa/platforms/%/,$(platform-y))
 
-ifeq ($(KBUILD_SRC),)
-KBUILD_CPPFLAGS += $(patsubst %,-I%include,$(vardirs) $(plfdirs))
-else
 KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(vardirs) $(plfdirs))
-endif
 
 KBUILD_DEFCONFIG := iss_defconfig
 
index 52147198135635b7c9d6e98d01dda95e7f59e755..12ae1e91cb75fbcb729491fef73ec0a4736a7fee 100644 (file)
@@ -25,7 +25,7 @@ $(obj)/Image.o: vmlinux.bin $(OBJS)
                $(OBJS) $@
 
 $(obj)/../Image.elf: $(obj)/Image.o $(obj)/boot.lds
-       $(Q)$(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) \
+       $(Q)$(LD) $(KBUILD_LDFLAGS) $(LDFLAGS_vmlinux) \
                -T $(obj)/boot.lds \
                --build-id=none \
                -o $@ $(obj)/Image.o
index f4bbb28026f8be3006890fe236989db791c79e1d..58709e89a8ed1f41ed7acaad204d4d91e973386a 100644 (file)
@@ -78,23 +78,28 @@ static struct notifier_block iss_panic_block = {
 
 void __init platform_setup(char **p_cmdline)
 {
+       static void *argv[COMMAND_LINE_SIZE / sizeof(void *)] __initdata;
+       static char cmdline[COMMAND_LINE_SIZE] __initdata;
        int argc = simc_argc();
        int argv_size = simc_argv_size();
 
        if (argc > 1) {
-               void **argv = alloc_bootmem(argv_size);
-               char *cmdline = alloc_bootmem(argv_size);
-               int i;
+               if (argv_size > sizeof(argv)) {
+                       pr_err("%s: command line too long: argv_size = %d\n",
+                              __func__, argv_size);
+               } else {
+                       int i;
 
-               cmdline[0] = 0;
-               simc_argv((void *)argv);
+                       cmdline[0] = 0;
+                       simc_argv((void *)argv);
 
-               for (i = 1; i < argc; ++i) {
-                       if (i > 1)
-                               strcat(cmdline, " ");
-                       strcat(cmdline, argv[i]);
+                       for (i = 1; i < argc; ++i) {
+                               if (i > 1)
+                                       strcat(cmdline, " ");
+                               strcat(cmdline, argv[i]);
+                       }
+                       *p_cmdline = cmdline;
                }
-               *p_cmdline = cmdline;
        }
 
        atomic_notifier_chain_register(&panic_notifier_list, &iss_panic_block);
index 3918ff7235ed8bfb1425269a867cd3133ec58730..8f29058adf93c9f30b53035e6784cd337a2c0a12 100644 (file)
@@ -5,8 +5,8 @@
        __INITRODATA
 
        .align 8
-       .globl VMLINUX_SYMBOL(system_certificate_list)
-VMLINUX_SYMBOL(system_certificate_list):
+       .globl system_certificate_list
+system_certificate_list:
 __cert_list_start:
 #ifdef CONFIG_MODULE_SIG
        .incbin "certs/signing_key.x509"
@@ -15,21 +15,21 @@ __cert_list_start:
 __cert_list_end:
 
 #ifdef CONFIG_SYSTEM_EXTRA_CERTIFICATE
-       .globl VMLINUX_SYMBOL(system_extra_cert)
+       .globl system_extra_cert
        .size system_extra_cert, CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE
-VMLINUX_SYMBOL(system_extra_cert):
+system_extra_cert:
        .fill CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE, 1, 0
 
        .align 4
-       .globl VMLINUX_SYMBOL(system_extra_cert_used)
-VMLINUX_SYMBOL(system_extra_cert_used):
+       .globl system_extra_cert_used
+system_extra_cert_used:
        .int 0
 
 #endif /* CONFIG_SYSTEM_EXTRA_CERTIFICATE */
 
        .align 8
-       .globl VMLINUX_SYMBOL(system_certificate_list_size)
-VMLINUX_SYMBOL(system_certificate_list_size):
+       .globl system_certificate_list_size
+system_certificate_list_size:
 #ifdef CONFIG_64BIT
        .quad __cert_list_end - __cert_list_start
 #else
index 9706613eecf9e2320209225b41d4856fecbf3ae1..bf64cfa30febf173640729db80f9a567ce50b4b0 100644 (file)
@@ -879,7 +879,7 @@ static void acpi_lpss_dismiss(struct device *dev)
 #define LPSS_GPIODEF0_DMA_LLP          BIT(13)
 
 static DEFINE_MUTEX(lpss_iosf_mutex);
-static bool lpss_iosf_d3_entered;
+static bool lpss_iosf_d3_entered = true;
 
 static void lpss_iosf_enter_d3_state(void)
 {
index 292088fcc6245ba2a683f02899750a1a70249701..d2e29a19890d14db1a9e6fd85f207d558c6a3c6e 100644 (file)
 #include <linux/delay.h>
 #ifdef CONFIG_X86
 #include <asm/mpspec.h>
+#include <linux/dmi.h>
 #endif
 #include <linux/acpi_iort.h>
 #include <linux/pci.h>
 #include <acpi/apei.h>
-#include <linux/dmi.h>
 #include <linux/suspend.h>
 
 #include "internal.h"
@@ -82,10 +82,6 @@ static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
        },
        {}
 };
-#else
-static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
-       {}
-};
 #endif
 
 /* --------------------------------------------------------------------------
@@ -1033,11 +1029,16 @@ void __init acpi_early_init(void)
 
        acpi_permanent_mmap = true;
 
+#ifdef CONFIG_X86
        /*
         * If the machine falls into the DMI check table,
-        * DSDT will be copied to memory
+        * DSDT will be copied to memory.
+        * Note that calling dmi_check_system() here on other architectures
+        * would not be OK because only x86 initializes dmi early enough.
+        * Thankfully only x86 systems need such quirks for now.
         */
        dmi_check_system(dsdt_dmi_table);
+#endif
 
        status = acpi_reallocate_root_table();
        if (ACPI_FAILURE(status)) {
index 7c479002e798bf92f3dc58263c3c2064182922bb..b072cfc5f20ee0ec47ba83532a58eafdfcbd2534 100644 (file)
@@ -1699,7 +1699,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
 {
        struct acpi_device *adev, *adev_dimm;
        struct device *dev = acpi_desc->dev;
-       unsigned long dsm_mask;
+       unsigned long dsm_mask, label_mask;
        const guid_t *guid;
        int i;
        int family = -1;
@@ -1771,6 +1771,16 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
                                        1ULL << i))
                        set_bit(i, &nfit_mem->dsm_mask);
 
+       /*
+        * Prefer the NVDIMM_FAMILY_INTEL label read commands if present
+        * due to their better semantics for handling locked capacity.
+        */
+       label_mask = 1 << ND_CMD_GET_CONFIG_SIZE | 1 << ND_CMD_GET_CONFIG_DATA
+               | 1 << ND_CMD_SET_CONFIG_DATA;
+       if (family == NVDIMM_FAMILY_INTEL
+                       && (dsm_mask & label_mask) == label_mask)
+               return 0;
+
        if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
                        && acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
                dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
@@ -2559,7 +2569,12 @@ static void ars_complete(struct acpi_nfit_desc *acpi_desc,
                        test_bit(ARS_SHORT, &nfit_spa->ars_state)
                        ? "short" : "long");
        clear_bit(ARS_SHORT, &nfit_spa->ars_state);
-       set_bit(ARS_DONE, &nfit_spa->ars_state);
+       if (test_and_clear_bit(ARS_REQ_REDO, &nfit_spa->ars_state)) {
+               set_bit(ARS_SHORT, &nfit_spa->ars_state);
+               set_bit(ARS_REQ, &nfit_spa->ars_state);
+               dev_dbg(dev, "ARS: processing scrub request received while in progress\n");
+       } else
+               set_bit(ARS_DONE, &nfit_spa->ars_state);
 }
 
 static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
@@ -3256,9 +3271,10 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags)
                if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
                        continue;
 
-               if (test_and_set_bit(ARS_REQ, &nfit_spa->ars_state))
+               if (test_and_set_bit(ARS_REQ, &nfit_spa->ars_state)) {
                        busy++;
-               else {
+                       set_bit(ARS_REQ_REDO, &nfit_spa->ars_state);
+               } else {
                        if (test_bit(ARS_SHORT, &flags))
                                set_bit(ARS_SHORT, &nfit_spa->ars_state);
                        scheduled++;
index a97ff42fe311bfa5041f54d67124aed4b85deb4d..d1274ea2d251636115a645ff4ba4ed8eddb12798 100644 (file)
@@ -119,6 +119,7 @@ enum nfit_dimm_notifiers {
 
 enum nfit_ars_state {
        ARS_REQ,
+       ARS_REQ_REDO,
        ARS_DONE,
        ARS_SHORT,
        ARS_FAILED,
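
The new ARS_REQ_REDO flag turns the scrub request path above into a small coalescing state machine: a rescan that arrives while a scrub is already in flight is remembered rather than dropped, and completion re-arms the scrub instead of marking it done. A rough userspace sketch of the same idea using C11 atomics (names and output are illustrative, not the driver's):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool req;     /* a scrub is requested or in flight */
    static atomic_bool redo;    /* another request arrived meanwhile */

    static void request_scrub(void)
    {
            /* Like test_and_set_bit(ARS_REQ): if busy, just remember a redo. */
            if (atomic_exchange(&req, true))
                    atomic_store(&redo, true);
            else
                    printf("scrub scheduled\n");
    }

    static void scrub_complete(void)
    {
            /* Like test_and_clear_bit(ARS_REQ_REDO): re-arm instead of done. */
            if (atomic_exchange(&redo, false)) {
                    printf("re-arming for request received while in progress\n");
                    return;         /* req stays set; the scrub runs again */
            }
            atomic_store(&req, false);
            printf("scrub done\n");
    }

    int main(void)
    {
            request_scrub();        /* scheduled */
            request_scrub();        /* busy -> redo noted */
            scrub_complete();       /* re-armed */
            scrub_complete();       /* done */
            return 0;
    }
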
index 3f3b7b253445a1fb87dfefdfaae9ad744b82d50d..64fd96eada31f42e5677a72de837fafa2987165b 100644 (file)
@@ -332,6 +332,35 @@ err_no_vma:
        return vma ? -ENOMEM : -ESRCH;
 }
 
+
+static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
+               struct vm_area_struct *vma)
+{
+       if (vma)
+               alloc->vma_vm_mm = vma->vm_mm;
+       /*
+        * If we see alloc->vma is not NULL, the buffer data structures are
+        * set up completely. See the paired smp_rmb() in binder_alloc_get_vma().
+        * We also want to guarantee that the new alloc->vma_vm_mm is always
+        * visible if alloc->vma is set.
+        */
+       smp_wmb();
+       alloc->vma = vma;
+}
+
+static inline struct vm_area_struct *binder_alloc_get_vma(
+               struct binder_alloc *alloc)
+{
+       struct vm_area_struct *vma = NULL;
+
+       if (alloc->vma) {
+               /* See the description in binder_alloc_set_vma() */
+               smp_rmb();
+               vma = alloc->vma;
+       }
+       return vma;
+}
+
 static struct binder_buffer *binder_alloc_new_buf_locked(
                                struct binder_alloc *alloc,
                                size_t data_size,
@@ -348,7 +377,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
        size_t size, data_offsets_size;
        int ret;
 
-       if (alloc->vma == NULL) {
+       if (!binder_alloc_get_vma(alloc)) {
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "%d: binder_alloc_buf, no vma\n",
                                   alloc->pid);
@@ -723,9 +752,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
        buffer->free = 1;
        binder_insert_free_buffer(alloc, buffer);
        alloc->free_async_space = alloc->buffer_size / 2;
-       barrier();
-       alloc->vma = vma;
-       alloc->vma_vm_mm = vma->vm_mm;
+       binder_alloc_set_vma(alloc, vma);
        mmgrab(alloc->vma_vm_mm);
 
        return 0;
@@ -754,10 +781,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
        int buffers, page_count;
        struct binder_buffer *buffer;
 
-       BUG_ON(alloc->vma);
-
        buffers = 0;
        mutex_lock(&alloc->mutex);
+       BUG_ON(alloc->vma);
+
        while ((n = rb_first(&alloc->allocated_buffers))) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
 
@@ -900,7 +927,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
  */
 void binder_alloc_vma_close(struct binder_alloc *alloc)
 {
-       WRITE_ONCE(alloc->vma, NULL);
+       binder_alloc_set_vma(alloc, NULL);
 }
 
 /**
@@ -935,7 +962,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
        index = page - alloc->pages;
        page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
-       vma = alloc->vma;
+       vma = binder_alloc_get_vma(alloc);
        if (vma) {
                if (!mmget_not_zero(alloc->vma_vm_mm))
                        goto err_mmget;
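
The smp_wmb()/smp_rmb() pairing described in binder_alloc_set_vma()/binder_alloc_get_vma() above is the classic publish pattern: finish all the setup stores, then make the pointer visible; a reader that observes the pointer is then guaranteed to see the setup. A minimal userspace analogue with C11 release/acquire atomics (illustrative only, not the binder code; a real demonstration of the ordering needs two threads):

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    struct ctx {
            int data;                       /* stands in for alloc->vma_vm_mm */
            _Atomic(int *) published;       /* stands in for alloc->vma */
    };

    static void publish(struct ctx *c, int value)
    {
            c->data = value;                /* set everything up first */
            /* The release store plays the role of smp_wmb(): the data store
             * above is ordered before the pointer becomes visible. */
            atomic_store_explicit(&c->published, &c->data, memory_order_release);
    }

    static int *lookup(struct ctx *c)
    {
            /* The acquire load plays the role of smp_rmb() on the reader side. */
            int *p = atomic_load_explicit(&c->published, memory_order_acquire);
            return p;                       /* non-NULL implies data is valid */
    }

    int main(void)
    {
            struct ctx c = { .data = 0, .published = NULL };

            publish(&c, 42);
            if (lookup(&c))
                    printf("data = %d\n", c.data);
            return 0;
    }
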
index 3893f9bde1e6bc0db2f081943b74233989366540..a9dd4ea7467df60912baca1fd7ebfeb91c9d2988 100644 (file)
@@ -7404,4 +7404,4 @@ EXPORT_SYMBOL_GPL(ata_cable_unknown);
 EXPORT_SYMBOL_GPL(ata_cable_ignore);
 EXPORT_SYMBOL_GPL(ata_cable_sata);
 EXPORT_SYMBOL_GPL(ata_host_get);
-EXPORT_SYMBOL_GPL(ata_host_put);
\ No newline at end of file
+EXPORT_SYMBOL_GPL(ata_host_put);
index 0943e7065e0eadbc1348b3bfdcf7aed15b495c62..b3c0498ee4331f4c2a47a72fb74bce9328b4486e 100644 (file)
@@ -209,21 +209,24 @@ static struct fw_priv *__lookup_fw_priv(const char *fw_name)
 static int alloc_lookup_fw_priv(const char *fw_name,
                                struct firmware_cache *fwc,
                                struct fw_priv **fw_priv, void *dbuf,
-                               size_t size)
+                               size_t size, enum fw_opt opt_flags)
 {
        struct fw_priv *tmp;
 
        spin_lock(&fwc->lock);
-       tmp = __lookup_fw_priv(fw_name);
-       if (tmp) {
-               kref_get(&tmp->ref);
-               spin_unlock(&fwc->lock);
-               *fw_priv = tmp;
-               pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n");
-               return 1;
+       if (!(opt_flags & FW_OPT_NOCACHE)) {
+               tmp = __lookup_fw_priv(fw_name);
+               if (tmp) {
+                       kref_get(&tmp->ref);
+                       spin_unlock(&fwc->lock);
+                       *fw_priv = tmp;
+                       pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n");
+                       return 1;
+               }
        }
+
        tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size);
-       if (tmp)
+       if (tmp && !(opt_flags & FW_OPT_NOCACHE))
                list_add(&tmp->list, &fwc->head);
        spin_unlock(&fwc->lock);
 
@@ -493,7 +496,8 @@ int assign_fw(struct firmware *fw, struct device *device,
  */
 static int
 _request_firmware_prepare(struct firmware **firmware_p, const char *name,
-                         struct device *device, void *dbuf, size_t size)
+                         struct device *device, void *dbuf, size_t size,
+                         enum fw_opt opt_flags)
 {
        struct firmware *firmware;
        struct fw_priv *fw_priv;
@@ -511,7 +515,8 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
                return 0; /* assigned */
        }
 
-       ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size);
+       ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size,
+                                 opt_flags);
 
        /*
         * bind with 'priv' now to avoid warning in failure path
@@ -571,7 +576,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
                goto out;
        }
 
-       ret = _request_firmware_prepare(&fw, name, device, buf, size);
+       ret = _request_firmware_prepare(&fw, name, device, buf, size,
+                                       opt_flags);
        if (ret <= 0) /* error or already assigned */
                goto out;
 
index c8a1cb0b61361f63088ecb940c0ca8dc7dee67cf..817320c7c4c1b72cf248b73a184bc8dee6ef28ac 100644 (file)
@@ -416,26 +416,24 @@ static ssize_t show_valid_zones(struct device *dev,
        struct zone *default_zone;
        int nid;
 
-       /*
-        * The block contains more than one zone can not be offlined.
-        * This can happen e.g. for ZONE_DMA and ZONE_DMA32
-        */
-       if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages, &valid_start_pfn, &valid_end_pfn))
-               return sprintf(buf, "none\n");
-
-       start_pfn = valid_start_pfn;
-       nr_pages = valid_end_pfn - start_pfn;
-
        /*
         * Check the existing zone. Make sure that we do that only on the
         * online nodes otherwise the page_zone is not reliable
         */
        if (mem->state == MEM_ONLINE) {
+               /*
+                * A block that contains more than one zone cannot be offlined.
+                * This can happen e.g. for ZONE_DMA and ZONE_DMA32.
+                */
+               if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages,
+                                         &valid_start_pfn, &valid_end_pfn))
+                       return sprintf(buf, "none\n");
+               start_pfn = valid_start_pfn;
                strcat(buf, page_zone(pfn_to_page(start_pfn))->name);
                goto out;
        }
 
-       nid = pfn_to_nid(start_pfn);
+       nid = mem->nid;
        default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages);
        strcat(buf, default_zone->name);
 
index 8e2e4757adcb02c9cd07a0b868a45e0bb3baa3eb..5a42ae4078c27febf8194901d8fa0e30b43bd1aa 100644 (file)
@@ -185,7 +185,7 @@ EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
 int of_pm_clk_add_clks(struct device *dev)
 {
        struct clk **clks;
-       unsigned int i, count;
+       int i, count;
        int ret;
 
        if (!dev || !dev->of_node)
index db253cd5b32af2d8920934aaaa49bf806c922a8f..d0666f5ce0036aea97404cdfd76a515b5bd49da2 100644 (file)
@@ -118,7 +118,6 @@ static struct dentry *dfs_device_status;
 
 static u32 cpu_use[NR_CPUS];
 
-static DEFINE_SPINLOCK(rssd_index_lock);
 static DEFINE_IDA(rssd_index_ida);
 
 static int mtip_block_initialize(struct driver_data *dd);
@@ -3767,20 +3766,10 @@ static int mtip_block_initialize(struct driver_data *dd)
                goto alloc_disk_error;
        }
 
-       /* Generate the disk name, implemented same as in sd.c */
-       do {
-               if (!ida_pre_get(&rssd_index_ida, GFP_KERNEL)) {
-                       rv = -ENOMEM;
-                       goto ida_get_error;
-               }
-
-               spin_lock(&rssd_index_lock);
-               rv = ida_get_new(&rssd_index_ida, &index);
-               spin_unlock(&rssd_index_lock);
-       } while (rv == -EAGAIN);
-
-       if (rv)
+       rv = ida_alloc(&rssd_index_ida, GFP_KERNEL);
+       if (rv < 0)
                goto ida_get_error;
+       index = rv;
 
        rv = rssd_disk_name_format("rssd",
                                index,
@@ -3922,9 +3911,7 @@ block_queue_alloc_init_error:
 block_queue_alloc_tag_error:
        mtip_hw_debugfs_exit(dd);
 disk_index_error:
-       spin_lock(&rssd_index_lock);
-       ida_remove(&rssd_index_ida, index);
-       spin_unlock(&rssd_index_lock);
+       ida_free(&rssd_index_ida, index);
 
 ida_get_error:
        put_disk(dd->disk);
@@ -4012,9 +3999,7 @@ static int mtip_block_remove(struct driver_data *dd)
        }
        dd->disk  = NULL;
 
-       spin_lock(&rssd_index_lock);
-       ida_remove(&rssd_index_ida, dd->index);
-       spin_unlock(&rssd_index_lock);
+       ida_free(&rssd_index_ida, dd->index);
 
        /* De-initialize the protocol layer. */
        mtip_hw_exit(dd);
@@ -4054,9 +4039,7 @@ static int mtip_block_shutdown(struct driver_data *dd)
                dd->queue = NULL;
        }
 
-       spin_lock(&rssd_index_lock);
-       ida_remove(&rssd_index_ida, dd->index);
-       spin_unlock(&rssd_index_lock);
+       ida_free(&rssd_index_ida, dd->index);
        return 0;
 }
 
index 7915f3b03736eadebf623801802bdb1d7bd42f75..73ed5f3a862dfcde598227671d55c2491d33408b 100644 (file)
@@ -4207,11 +4207,13 @@ static ssize_t rbd_parent_show(struct device *dev,
 
                count += sprintf(&buf[count], "%s"
                            "pool_id %llu\npool_name %s\n"
+                           "pool_ns %s\n"
                            "image_id %s\nimage_name %s\n"
                            "snap_id %llu\nsnap_name %s\n"
                            "overlap %llu\n",
                            !count ? "" : "\n", /* first? */
                            spec->pool_id, spec->pool_name,
+                           spec->pool_ns ?: "",
                            spec->image_id, spec->image_name ?: "(unknown)",
                            spec->snap_id, spec->snap_name,
                            rbd_dev->parent_overlap);
@@ -4584,47 +4586,177 @@ static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
                                                &rbd_dev->header.features);
 }
 
+struct parent_image_info {
+       u64             pool_id;
+       const char      *pool_ns;
+       const char      *image_id;
+       u64             snap_id;
+
+       bool            has_overlap;
+       u64             overlap;
+};
+
+/*
+ * The caller is responsible for @pii.
+ */
+static int decode_parent_image_spec(void **p, void *end,
+                                   struct parent_image_info *pii)
+{
+       u8 struct_v;
+       u32 struct_len;
+       int ret;
+
+       ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
+                                 &struct_v, &struct_len);
+       if (ret)
+               return ret;
+
+       ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
+       pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
+       if (IS_ERR(pii->pool_ns)) {
+               ret = PTR_ERR(pii->pool_ns);
+               pii->pool_ns = NULL;
+               return ret;
+       }
+       pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
+       if (IS_ERR(pii->image_id)) {
+               ret = PTR_ERR(pii->image_id);
+               pii->image_id = NULL;
+               return ret;
+       }
+       ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
+       return 0;
+
+e_inval:
+       return -EINVAL;
+}
+
+static int __get_parent_info(struct rbd_device *rbd_dev,
+                            struct page *req_page,
+                            struct page *reply_page,
+                            struct parent_image_info *pii)
+{
+       struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+       size_t reply_len = PAGE_SIZE;
+       void *p, *end;
+       int ret;
+
+       ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
+                            "rbd", "parent_get", CEPH_OSD_FLAG_READ,
+                            req_page, sizeof(u64), reply_page, &reply_len);
+       if (ret)
+               return ret == -EOPNOTSUPP ? 1 : ret;
+
+       p = page_address(reply_page);
+       end = p + reply_len;
+       ret = decode_parent_image_spec(&p, end, pii);
+       if (ret)
+               return ret;
+
+       ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
+                            "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
+                            req_page, sizeof(u64), reply_page, &reply_len);
+       if (ret)
+               return ret;
+
+       p = page_address(reply_page);
+       end = p + reply_len;
+       ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
+       if (pii->has_overlap)
+               ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
+
+       return 0;
+
+e_inval:
+       return -EINVAL;
+}
+
+/*
+ * The caller is responsible for @pii.
+ */
+static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
+                                   struct page *req_page,
+                                   struct page *reply_page,
+                                   struct parent_image_info *pii)
+{
+       struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+       size_t reply_len = PAGE_SIZE;
+       void *p, *end;
+       int ret;
+
+       ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
+                            "rbd", "get_parent", CEPH_OSD_FLAG_READ,
+                            req_page, sizeof(u64), reply_page, &reply_len);
+       if (ret)
+               return ret;
+
+       p = page_address(reply_page);
+       end = p + reply_len;
+       ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
+       pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
+       if (IS_ERR(pii->image_id)) {
+               ret = PTR_ERR(pii->image_id);
+               pii->image_id = NULL;
+               return ret;
+       }
+       ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
+       pii->has_overlap = true;
+       ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
+
+       return 0;
+
+e_inval:
+       return -EINVAL;
+}
+
+static int get_parent_info(struct rbd_device *rbd_dev,
+                          struct parent_image_info *pii)
+{
+       struct page *req_page, *reply_page;
+       void *p;
+       int ret;
+
+       req_page = alloc_page(GFP_KERNEL);
+       if (!req_page)
+               return -ENOMEM;
+
+       reply_page = alloc_page(GFP_KERNEL);
+       if (!reply_page) {
+               __free_page(req_page);
+               return -ENOMEM;
+       }
+
+       p = page_address(req_page);
+       ceph_encode_64(&p, rbd_dev->spec->snap_id);
+       ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
+       if (ret > 0)
+               ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
+                                              pii);
+
+       __free_page(req_page);
+       __free_page(reply_page);
+       return ret;
+}
+
 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
 {
        struct rbd_spec *parent_spec;
-       size_t size;
-       void *reply_buf = NULL;
-       __le64 snapid;
-       void *p;
-       void *end;
-       u64 pool_id;
-       char *image_id;
-       u64 snap_id;
-       u64 overlap;
+       struct parent_image_info pii = { 0 };
        int ret;
 
        parent_spec = rbd_spec_alloc();
        if (!parent_spec)
                return -ENOMEM;
 
-       size = sizeof (__le64) +                                /* pool_id */
-               sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +        /* image_id */
-               sizeof (__le64) +                               /* snap_id */
-               sizeof (__le64);                                /* overlap */
-       reply_buf = kmalloc(size, GFP_KERNEL);
-       if (!reply_buf) {
-               ret = -ENOMEM;
+       ret = get_parent_info(rbd_dev, &pii);
+       if (ret)
                goto out_err;
-       }
 
-       snapid = cpu_to_le64(rbd_dev->spec->snap_id);
-       ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
-                                 &rbd_dev->header_oloc, "get_parent",
-                                 &snapid, sizeof(snapid), reply_buf, size);
-       dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
-       if (ret < 0)
-               goto out_err;
+       dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
+            __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
+            pii.has_overlap, pii.overlap);
 
-       p = reply_buf;
-       end = reply_buf + ret;
-       ret = -ERANGE;
-       ceph_decode_64_safe(&p, end, pool_id, out_err);
-       if (pool_id == CEPH_NOPOOL) {
+       if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
                /*
                 * Either the parent never existed, or we have
                 * record of it but the image got flattened so it no
@@ -4633,6 +4765,10 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
                 * overlap to 0.  The effect of this is that all new
                 * requests will be treated as if the image had no
                 * parent.
+                *
+                * If !pii.has_overlap, the parent image spec is not
+                * applicable.  It's there to avoid duplication in each
+                * snapshot record.
                 */
                if (rbd_dev->parent_overlap) {
                        rbd_dev->parent_overlap = 0;
@@ -4647,51 +4783,36 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
        /* The ceph file layout needs to fit pool id in 32 bits */
 
        ret = -EIO;
-       if (pool_id > (u64)U32_MAX) {
+       if (pii.pool_id > (u64)U32_MAX) {
                rbd_warn(NULL, "parent pool id too large (%llu > %u)",
-                       (unsigned long long)pool_id, U32_MAX);
+                       (unsigned long long)pii.pool_id, U32_MAX);
                goto out_err;
        }
 
-       image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
-       if (IS_ERR(image_id)) {
-               ret = PTR_ERR(image_id);
-               goto out_err;
-       }
-       ceph_decode_64_safe(&p, end, snap_id, out_err);
-       ceph_decode_64_safe(&p, end, overlap, out_err);
-
        /*
         * The parent won't change (except when the clone is
         * flattened, already handled that).  So we only need to
         * record the parent spec we have not already done so.
         */
        if (!rbd_dev->parent_spec) {
-               parent_spec->pool_id = pool_id;
-               parent_spec->image_id = image_id;
-               parent_spec->snap_id = snap_id;
-
-               /* TODO: support cloning across namespaces */
-               if (rbd_dev->spec->pool_ns) {
-                       parent_spec->pool_ns = kstrdup(rbd_dev->spec->pool_ns,
-                                                      GFP_KERNEL);
-                       if (!parent_spec->pool_ns) {
-                               ret = -ENOMEM;
-                               goto out_err;
-                       }
+               parent_spec->pool_id = pii.pool_id;
+               if (pii.pool_ns && *pii.pool_ns) {
+                       parent_spec->pool_ns = pii.pool_ns;
+                       pii.pool_ns = NULL;
                }
+               parent_spec->image_id = pii.image_id;
+               pii.image_id = NULL;
+               parent_spec->snap_id = pii.snap_id;
 
                rbd_dev->parent_spec = parent_spec;
                parent_spec = NULL;     /* rbd_dev now owns this */
-       } else {
-               kfree(image_id);
        }
 
        /*
         * We always update the parent overlap.  If it's zero we issue
         * a warning, as we will proceed as if there was no parent.
         */
-       if (!overlap) {
+       if (!pii.overlap) {
                if (parent_spec) {
                        /* refresh, careful to warn just once */
                        if (rbd_dev->parent_overlap)
@@ -4702,14 +4823,14 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
                        rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
                }
        }
-       rbd_dev->parent_overlap = overlap;
+       rbd_dev->parent_overlap = pii.overlap;
 
 out:
        ret = 0;
 out_err:
-       kfree(reply_buf);
+       kfree(pii.pool_ns);
+       kfree(pii.image_id);
        rbd_spec_put(parent_spec);
-
        return ret;
 }
 
index b7d71914a32a8b66541fb308a9cd4aa95056fdd2..f2c631ce793cc8a342b44381592824bf902282f7 100644 (file)
@@ -58,7 +58,6 @@ MODULE_PARM_DESC(sync_start, "On by Default: Driver load will not complete "
                             "until the card startup has completed.");
 
 static DEFINE_IDA(rsxx_disk_ida);
-static DEFINE_SPINLOCK(rsxx_ida_lock);
 
 /* --------------------Debugfs Setup ------------------- */
 
@@ -771,19 +770,10 @@ static int rsxx_pci_probe(struct pci_dev *dev,
        card->dev = dev;
        pci_set_drvdata(dev, card);
 
-       do {
-               if (!ida_pre_get(&rsxx_disk_ida, GFP_KERNEL)) {
-                       st = -ENOMEM;
-                       goto failed_ida_get;
-               }
-
-               spin_lock(&rsxx_ida_lock);
-               st = ida_get_new(&rsxx_disk_ida, &card->disk_id);
-               spin_unlock(&rsxx_ida_lock);
-       } while (st == -EAGAIN);
-
-       if (st)
+       st = ida_alloc(&rsxx_disk_ida, GFP_KERNEL);
+       if (st < 0)
                goto failed_ida_get;
+       card->disk_id = st;
 
        st = pci_enable_device(dev);
        if (st)
@@ -985,9 +975,7 @@ failed_request_regions:
 failed_dma_mask:
        pci_disable_device(dev);
 failed_enable:
-       spin_lock(&rsxx_ida_lock);
-       ida_remove(&rsxx_disk_ida, card->disk_id);
-       spin_unlock(&rsxx_ida_lock);
+       ida_free(&rsxx_disk_ida, card->disk_id);
 failed_ida_get:
        kfree(card);
 
@@ -1050,6 +1038,7 @@ static void rsxx_pci_remove(struct pci_dev *dev)
        pci_disable_device(dev);
        pci_release_regions(dev);
 
+       ida_free(&rsxx_disk_ida, card->disk_id);
        kfree(card);
 }
 
index 2df11cc08a460a893e2ebcb7af510f70bd6e792a..845b0314ce3a7f599360d6c7e3d73c14a5fc2bbb 100644 (file)
@@ -200,6 +200,7 @@ config BT_HCIUART_RTL
        depends on BT_HCIUART
        depends on BT_HCIUART_SERDEV
        depends on GPIOLIB
+       depends on ACPI
        select BT_HCIUART_3WIRE
        select BT_RTL
        help
index ed2a5c7cb77fa0304ed7f3639631c3b9c8eb597e..4593baff2bc944f5a003bb666520062f768f52e5 100644 (file)
@@ -144,8 +144,10 @@ static int mtk_setup_fw(struct hci_dev *hdev)
        fw_size = fw->size;
 
        /* The patch header is 30 bytes and should be skipped */
-       if (fw_size < 30)
-               return -EINVAL;
+       if (fw_size < 30) {
+               err = -EINVAL;
+               goto free_fw;
+       }
 
        fw_size -= 30;
        fw_ptr += 30;
@@ -172,8 +174,8 @@ static int mtk_setup_fw(struct hci_dev *hdev)
                fw_ptr += dlen;
        }
 
+free_fw:
        release_firmware(fw);
-
        return err;
 }
 
index 963bb0309e25d34a44382bc3a8e9e0556cca72d1..ea6238ed5c0eaa095ada2c2b0f88aea27707dcbf 100644 (file)
@@ -543,6 +543,8 @@ static void hci_uart_tty_close(struct tty_struct *tty)
        }
        clear_bit(HCI_UART_PROTO_SET, &hu->flags);
 
+       percpu_free_rwsem(&hu->proto_lock);
+
        kfree(hu);
 }
 
index c9bac9dc4637e7b8ce9ad3eb15e484added77f79..e4fe954e63a9be53b74397c825bf4f57b06dcca5 100644 (file)
@@ -498,32 +498,29 @@ static int sysc_check_registers(struct sysc *ddata)
 
 /**
  * syc_ioremap - ioremap register space for the interconnect target module
- * @ddata: deviec driver data
+ * @ddata: device driver data
  *
  * Note that the interconnect target module registers can be anywhere
- * within the first child device address space. For example, SGX has
- * them at offset 0x1fc00 in the 32MB module address space. We just
- * what we need around the interconnect target module registers.
+ * within the interconnect target module range. For example, SGX has
+ * them at offset 0x1fc00 in the 32MB module address space. And cpsw
+ * has them at offset 0x1200 in the CPSW_WR child. Usually, though,
+ * the interconnect target module registers are at the beginning of
+ * the module range.
  */
 static int sysc_ioremap(struct sysc *ddata)
 {
-       u32 size = 0;
-
-       if (ddata->offsets[SYSC_SYSSTATUS] >= 0)
-               size = ddata->offsets[SYSC_SYSSTATUS];
-       else if (ddata->offsets[SYSC_SYSCONFIG] >= 0)
-               size = ddata->offsets[SYSC_SYSCONFIG];
-       else if (ddata->offsets[SYSC_REVISION] >= 0)
-               size = ddata->offsets[SYSC_REVISION];
-       else
-               return -EINVAL;
+       int size;
 
-       size &= 0xfff00;
-       size += SZ_256;
+       size = max3(ddata->offsets[SYSC_REVISION],
+                   ddata->offsets[SYSC_SYSCONFIG],
+                   ddata->offsets[SYSC_SYSSTATUS]);
+
+       if (size < 0 || (size + sizeof(u32)) > ddata->module_size)
+               return -EINVAL;
 
        ddata->module_va = devm_ioremap(ddata->dev,
                                        ddata->module_pa,
-                                       size);
+                                       size + sizeof(u32));
        if (!ddata->module_va)
                return -EIO;
 
@@ -1224,10 +1221,10 @@ static int sysc_child_suspend_noirq(struct device *dev)
        if (!pm_runtime_status_suspended(dev)) {
                error = pm_generic_runtime_suspend(dev);
                if (error) {
-                       dev_err(dev, "%s error at %i: %i\n",
-                               __func__, __LINE__, error);
+                       dev_warn(dev, "%s busy at %i: %i\n",
+                                __func__, __LINE__, error);
 
-                       return error;
+                       return 0;
                }
 
                error = sysc_runtime_suspend(ddata->dev);
index ce277ee0a28a21a328bee687d6872842a1cda5cc..40728491f37b6a46be7e59399c6530263d2d40c1 100644 (file)
@@ -566,5 +566,5 @@ config RANDOM_TRUST_CPU
        that CPU manufacturer (perhaps with the insistence or mandate
        of a Nation State's intelligence or law enforcement agencies)
        has not installed a hidden back door to compromise the CPU's
-       random number generation facilities.
-
+       random number generation facilities. This can also be configured
+       at boot with "random.trust_cpu=on/off".
index a3397664f80014b881387e1faa855a8102a0caa5..97d6856c9c0f98b3b7adf4ecd1d83805034c1e4b 100644 (file)
@@ -59,8 +59,6 @@ enum bt_states {
        BT_STATE_RESET3,
        BT_STATE_RESTART,
        BT_STATE_PRINTME,
-       BT_STATE_CAPABILITIES_BEGIN,
-       BT_STATE_CAPABILITIES_END,
        BT_STATE_LONG_BUSY      /* BT doesn't get hosed :-) */
 };
 
@@ -86,7 +84,6 @@ struct si_sm_data {
        int             error_retries;  /* end of "common" fields */
        int             nonzero_status; /* hung BMCs stay all 0 */
        enum bt_states  complete;       /* to divert the state machine */
-       int             BT_CAP_outreqs;
        long            BT_CAP_req2rsp;
        int             BT_CAP_retries; /* Recommended retries */
 };
@@ -137,8 +134,6 @@ static char *state2txt(unsigned char state)
        case BT_STATE_RESET3:           return("RESET3");
        case BT_STATE_RESTART:          return("RESTART");
        case BT_STATE_LONG_BUSY:        return("LONG_BUSY");
-       case BT_STATE_CAPABILITIES_BEGIN: return("CAP_BEGIN");
-       case BT_STATE_CAPABILITIES_END: return("CAP_END");
        }
        return("BAD STATE");
 }
@@ -185,7 +180,6 @@ static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
        bt->complete = BT_STATE_IDLE;   /* end here */
        bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * USEC_PER_SEC;
        bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT;
-       /* BT_CAP_outreqs == zero is a flag to read BT Capabilities */
        return 3; /* We claim 3 bytes of space; ought to check SPMI table */
 }
 
@@ -451,7 +445,7 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt,
 
 static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
 {
-       unsigned char status, BT_CAP[8];
+       unsigned char status;
        static enum bt_states last_printed = BT_STATE_PRINTME;
        int i;
 
@@ -504,12 +498,6 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
                if (status & BT_H_BUSY)         /* clear a leftover H_BUSY */
                        BT_CONTROL(BT_H_BUSY);
 
-               bt->timeout = bt->BT_CAP_req2rsp;
-
-               /* Read BT capabilities if it hasn't been done yet */
-               if (!bt->BT_CAP_outreqs)
-                       BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN,
-                                       SI_SM_CALL_WITHOUT_DELAY);
                BT_SI_SM_RETURN(SI_SM_IDLE);
 
        case BT_STATE_XACTION_START:
@@ -614,37 +602,6 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
                BT_STATE_CHANGE(BT_STATE_XACTION_START,
                                SI_SM_CALL_WITH_DELAY);
 
-       /*
-        * Get BT Capabilities, using timing of upper level state machine.
-        * Set outreqs to prevent infinite loop on timeout.
-        */
-       case BT_STATE_CAPABILITIES_BEGIN:
-               bt->BT_CAP_outreqs = 1;
-               {
-                       unsigned char GetBT_CAP[] = { 0x18, 0x36 };
-                       bt->state = BT_STATE_IDLE;
-                       bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
-               }
-               bt->complete = BT_STATE_CAPABILITIES_END;
-               BT_STATE_CHANGE(BT_STATE_XACTION_START,
-                               SI_SM_CALL_WITH_DELAY);
-
-       case BT_STATE_CAPABILITIES_END:
-               i = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
-               bt_init_data(bt, bt->io);
-               if ((i == 8) && !BT_CAP[2]) {
-                       bt->BT_CAP_outreqs = BT_CAP[3];
-                       bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC;
-                       bt->BT_CAP_retries = BT_CAP[7];
-               } else
-                       printk(KERN_WARNING "IPMI BT: using default values\n");
-               if (!bt->BT_CAP_outreqs)
-                       bt->BT_CAP_outreqs = 1;
-               printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n",
-                       bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries);
-               bt->timeout = bt->BT_CAP_req2rsp;
-               return SI_SM_CALL_WITHOUT_DELAY;
-
        default:        /* should never occur */
                return error_recovery(bt,
                                      status,
@@ -655,6 +612,11 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
 
 static int bt_detect(struct si_sm_data *bt)
 {
+       unsigned char GetBT_CAP[] = { 0x18, 0x36 };
+       unsigned char BT_CAP[8];
+       enum si_sm_result smi_result;
+       int rv;
+
        /*
         * It's impossible for the BT status and interrupt registers to be
         * all 1's, (assuming a properly functioning, self-initialized BMC)
@@ -665,6 +627,48 @@ static int bt_detect(struct si_sm_data *bt)
        if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF))
                return 1;
        reset_flags(bt);
+
+       /*
+        * Try getting the BT capabilities here.
+        */
+       rv = bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
+       if (rv) {
+               dev_warn(bt->io->dev,
+                        "Can't start capabilities transaction: %d\n", rv);
+               goto out_no_bt_cap;
+       }
+
+       smi_result = SI_SM_CALL_WITHOUT_DELAY;
+       for (;;) {
+               if (smi_result == SI_SM_CALL_WITH_DELAY ||
+                   smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
+                       schedule_timeout_uninterruptible(1);
+                       smi_result = bt_event(bt, jiffies_to_usecs(1));
+               } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
+                       smi_result = bt_event(bt, 0);
+               } else
+                       break;
+       }
+
+       rv = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
+       bt_init_data(bt, bt->io);
+       if (rv < 8) {
+               dev_warn(bt->io->dev, "bt cap response too short: %d\n", rv);
+               goto out_no_bt_cap;
+       }
+
+       if (BT_CAP[2]) {
+               dev_warn(bt->io->dev, "Error fetching bt cap: %x\n", BT_CAP[2]);
+out_no_bt_cap:
+               dev_warn(bt->io->dev, "using default values\n");
+       } else {
+               bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC;
+               bt->BT_CAP_retries = BT_CAP[7];
+       }
+
+       dev_info(bt->io->dev, "req2rsp=%ld secs retries=%d\n",
+                bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries);
+
        return 0;
 }
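
With the capabilities states removed from the run-time machine, bt_detect() now issues the Get BT Capabilities command itself and spins bt_event() until it stops asking to be called again. A standalone sketch of that drive loop against a toy step function (the result values and step logic here are invented for illustration):

#include <stdio.h>

enum step_result { CALL_AGAIN, CALL_WITH_DELAY, DONE };

/* Toy state machine: finishes after a fixed number of steps. */
static enum step_result step(int *state)
{
        if (++(*state) >= 4)
                return DONE;
        return (*state & 1) ? CALL_WITH_DELAY : CALL_AGAIN;
}

int main(void)
{
        int state = 0;
        enum step_result r = CALL_AGAIN;

        /* Keep stepping until the machine reports a terminal result;
         * a real driver would sleep briefly on CALL_WITH_DELAY.
         */
        while (r == CALL_AGAIN || r == CALL_WITH_DELAY)
                r = step(&state);

        printf("finished after %d steps\n", state);
        return 0;
}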
 
index 51832b8a2c6283f9b1ccfd07a60997fdade8d4cf..7fc9612070a1f1abe43b489f226c84a9c4c50632 100644 (file)
@@ -3381,39 +3381,45 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
 
        rv = handlers->start_processing(send_info, intf);
        if (rv)
-               goto out;
+               goto out_err;
 
        rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
        if (rv) {
                dev_err(si_dev, "Unable to get the device id: %d\n", rv);
-               goto out;
+               goto out_err_started;
        }
 
        mutex_lock(&intf->bmc_reg_mutex);
        rv = __scan_channels(intf, &id);
        mutex_unlock(&intf->bmc_reg_mutex);
+       if (rv)
+               goto out_err_bmc_reg;
 
- out:
-       if (rv) {
-               ipmi_bmc_unregister(intf);
-               list_del_rcu(&intf->link);
-               mutex_unlock(&ipmi_interfaces_mutex);
-               synchronize_srcu(&ipmi_interfaces_srcu);
-               cleanup_srcu_struct(&intf->users_srcu);
-               kref_put(&intf->refcount, intf_free);
-       } else {
-               /*
-                * Keep memory order straight for RCU readers.  Make
-                * sure everything else is committed to memory before
-                * setting intf_num to mark the interface valid.
-                */
-               smp_wmb();
-               intf->intf_num = i;
-               mutex_unlock(&ipmi_interfaces_mutex);
+       /*
+        * Keep memory order straight for RCU readers.  Make
+        * sure everything else is committed to memory before
+        * setting intf_num to mark the interface valid.
+        */
+       smp_wmb();
+       intf->intf_num = i;
+       mutex_unlock(&ipmi_interfaces_mutex);
 
-               /* After this point the interface is legal to use. */
-               call_smi_watchers(i, intf->si_dev);
-       }
+       /* After this point the interface is legal to use. */
+       call_smi_watchers(i, intf->si_dev);
+
+       return 0;
+
+ out_err_bmc_reg:
+       ipmi_bmc_unregister(intf);
+ out_err_started:
+       if (intf->handlers->shutdown)
+               intf->handlers->shutdown(intf->send_info);
+ out_err:
+       list_del_rcu(&intf->link);
+       mutex_unlock(&ipmi_interfaces_mutex);
+       synchronize_srcu(&ipmi_interfaces_srcu);
+       cleanup_srcu_struct(&intf->users_srcu);
+       kref_put(&intf->refcount, intf_free);
 
        return rv;
 }
@@ -3504,7 +3510,8 @@ void ipmi_unregister_smi(struct ipmi_smi *intf)
        }
        srcu_read_unlock(&intf->users_srcu, index);
 
-       intf->handlers->shutdown(intf->send_info);
+       if (intf->handlers->shutdown)
+               intf->handlers->shutdown(intf->send_info);
 
        cleanup_smi_msgs(intf);
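
The ipmi_register_smi() hunk above restructures its error handling into staged labels (out_err_bmc_reg, out_err_started, out_err), so each failure point only unwinds what was actually brought up, and the shutdown handler is invoked only when start_processing() succeeded. A standalone sketch of the staged-label idiom with hypothetical stages:

#include <stdio.h>

static int setup_a(void) { return 0; }
static int setup_b(void) { return 0; }
static int setup_c(void) { return -1; }   /* pretend the last stage fails */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int bring_up(void)
{
        int rv;

        rv = setup_a();
        if (rv)
                goto out_err;          /* nothing to unwind yet */

        rv = setup_b();
        if (rv)
                goto out_err_a;        /* only stage a is up */

        rv = setup_c();
        if (rv)
                goto out_err_b;        /* stages a and b are up */

        return 0;

out_err_b:
        undo_b();
out_err_a:
        undo_a();
out_err:
        return rv;
}

int main(void)
{
        printf("bring_up: %d\n", bring_up());
        return 0;
}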
 
index 90ec010bffbd9776c012586b4e01b24cdd0bd2d6..5faa917df1b629647cb7fecd291a1d1b8d80eae9 100644 (file)
@@ -2083,18 +2083,9 @@ static int try_smi_init(struct smi_info *new_smi)
                 si_to_str[new_smi->io.si_type]);
 
        WARN_ON(new_smi->io.dev->init_name != NULL);
-       kfree(init_name);
-
-       return 0;
-
-out_err:
-       if (new_smi->intf) {
-               ipmi_unregister_smi(new_smi->intf);
-               new_smi->intf = NULL;
-       }
 
+ out_err:
        kfree(init_name);
-
        return rv;
 }
 
@@ -2227,6 +2218,8 @@ static void shutdown_smi(void *send_info)
 
        kfree(smi_info->si_sm);
        smi_info->si_sm = NULL;
+
+       smi_info->intf = NULL;
 }
 
 /*
@@ -2240,10 +2233,8 @@ static void cleanup_one_si(struct smi_info *smi_info)
 
        list_del(&smi_info->link);
 
-       if (smi_info->intf) {
+       if (smi_info->intf)
                ipmi_unregister_smi(smi_info->intf);
-               smi_info->intf = NULL;
-       }
 
        if (smi_info->pdev) {
                if (smi_info->pdev_registered)
index 18e4650c233b1de514ee836dfc28021f35953a5b..29e67a80fb208f804e4ed1fb0a560142002c7ffc 100644 (file)
@@ -181,6 +181,8 @@ struct ssif_addr_info {
        struct device *dev;
        struct i2c_client *client;
 
+       struct i2c_client *added_client;
+
        struct mutex clients_mutex;
        struct list_head clients;
 
@@ -1214,18 +1216,11 @@ static void shutdown_ssif(void *send_info)
                complete(&ssif_info->wake_thread);
                kthread_stop(ssif_info->thread);
        }
-
-       /*
-        * No message can be outstanding now, we have removed the
-        * upper layer and it permitted us to do so.
-        */
-       kfree(ssif_info);
 }
 
 static int ssif_remove(struct i2c_client *client)
 {
        struct ssif_info *ssif_info = i2c_get_clientdata(client);
-       struct ipmi_smi *intf;
        struct ssif_addr_info *addr_info;
 
        if (!ssif_info)
@@ -1235,9 +1230,7 @@ static int ssif_remove(struct i2c_client *client)
         * After this point, we won't deliver anything asynchronously
         * to the message handler.  We can unregister ourself.
         */
-       intf = ssif_info->intf;
-       ssif_info->intf = NULL;
-       ipmi_unregister_smi(intf);
+       ipmi_unregister_smi(ssif_info->intf);
 
        list_for_each_entry(addr_info, &ssif_infos, link) {
                if (addr_info->client == client) {
@@ -1246,6 +1239,8 @@ static int ssif_remove(struct i2c_client *client)
                }
        }
 
+       kfree(ssif_info);
+
        return 0;
 }
 
@@ -1648,15 +1643,9 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
  out:
        if (rv) {
-               /*
-                * Note that if addr_info->client is assigned, we
-                * leave it.  The i2c client hangs around even if we
-                * return a failure here, and the failure here is not
-                * propagated back to the i2c code.  This seems to be
-                * design intent, strange as it may be.  But if we
-                * don't leave it, ssif_platform_remove will not remove
-                * the client like it should.
-                */
+               if (addr_info)
+                       addr_info->client = NULL;
+
                dev_err(&client->dev, "Unable to start IPMI SSIF: %d\n", rv);
                kfree(ssif_info);
        }
@@ -1676,7 +1665,8 @@ static int ssif_adapter_handler(struct device *adev, void *opaque)
        if (adev->type != &i2c_adapter_type)
                return 0;
 
-       i2c_new_device(to_i2c_adapter(adev), &addr_info->binfo);
+       addr_info->added_client = i2c_new_device(to_i2c_adapter(adev),
+                                                &addr_info->binfo);
 
        if (!addr_info->adapter_name)
                return 1; /* Only try the first I2C adapter by default. */
@@ -1849,7 +1839,7 @@ static int ssif_platform_remove(struct platform_device *dev)
                return 0;
 
        mutex_lock(&ssif_infos_mutex);
-       i2c_unregister_device(addr_info->client);
+       i2c_unregister_device(addr_info->added_client);
 
        list_del(&addr_info->link);
        kfree(addr_info);
index bb882ab161fe1bbb4b678cc9bf105b77273296e3..e6124bd548df211317e7a69fa42da99849382e69 100644 (file)
@@ -16,6 +16,8 @@
 
 #include "kcs_bmc.h"
 
+#define DEVICE_NAME "ipmi-kcs"
+
 #define KCS_MSG_BUFSIZ    1000
 
 #define KCS_ZERO_DATA     0
@@ -429,8 +431,6 @@ struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel)
        if (!kcs_bmc)
                return NULL;
 
-       dev_set_name(dev, "ipmi-kcs%u", channel);
-
        spin_lock_init(&kcs_bmc->lock);
        kcs_bmc->channel = channel;
 
@@ -444,7 +444,8 @@ struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel)
                return NULL;
 
        kcs_bmc->miscdev.minor = MISC_DYNAMIC_MINOR;
-       kcs_bmc->miscdev.name = dev_name(dev);
+       kcs_bmc->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s%u",
+                                              DEVICE_NAME, channel);
        kcs_bmc->miscdev.fops = &kcs_bmc_fops;
 
        return kcs_bmc;
index bf5f99fc36f1b654aad2f4cfe196c9b66814be82..c75b6cdf00533ac82ec6fb957bc1669f1f44a34d 100644 (file)
@@ -779,6 +779,13 @@ static struct crng_state **crng_node_pool __read_mostly;
 
 static void invalidate_batched_entropy(void);
 
+static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
+static int __init parse_trust_cpu(char *arg)
+{
+       return kstrtobool(arg, &trust_cpu);
+}
+early_param("random.trust_cpu", parse_trust_cpu);
+
 static void crng_initialize(struct crng_state *crng)
 {
        int             i;
@@ -799,12 +806,10 @@ static void crng_initialize(struct crng_state *crng)
                }
                crng->state[i] ^= rv;
        }
-#ifdef CONFIG_RANDOM_TRUST_CPU
-       if (arch_init) {
+       if (trust_cpu && arch_init) {
                crng_init = 2;
                pr_notice("random: crng done (trusting CPU's manufacturer)\n");
        }
-#endif
        crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
 }
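
Whether the CPU's RNG output is trusted is now a runtime decision: the default comes from CONFIG_RANDOM_TRUST_CPU and can be overridden with random.trust_cpu=on/off, parsed by kstrtobool() in an early_param handler. A standalone sketch of accepting the usual boolean spellings (a simplified stand-in, not the kernel's kstrtobool):

#include <stdbool.h>
#include <stdio.h>

/* Accepts "1"/"0", "y"/"n", "on"/"off", roughly mirroring what a boolean
 * boot parameter would allow.
 */
static int parse_bool(const char *s, bool *res)
{
        if (!s || !s[0])
                return -1;
        switch (s[0]) {
        case '1': case 'y': case 'Y': *res = true;  return 0;
        case '0': case 'n': case 'N': *res = false; return 0;
        case 'o': case 'O':
                if (s[1] == 'n' || s[1] == 'N') { *res = true;  return 0; }
                if (s[1] == 'f' || s[1] == 'F') { *res = false; return 0; }
                return -1;
        default:
                return -1;
        }
}

int main(void)
{
        bool trust_cpu = true;          /* compile-time default */

        if (parse_bool("off", &trust_cpu) == 0)
                printf("trust_cpu=%d\n", trust_cpu);
        return 0;
}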
 
index 740af90a950820055d9934f66e080c338f0cf143..c5edf8f2fd1969337b7803ef96884ee9164495fb 100644 (file)
@@ -558,8 +558,8 @@ static void __init npcm7xx_clk_init(struct device_node *clk_np)
        if (!clk_base)
                goto npcm7xx_init_error;
 
-       npcm7xx_clk_data = kzalloc(sizeof(*npcm7xx_clk_data->hws) *
-               NPCM7XX_NUM_CLOCKS + sizeof(npcm7xx_clk_data), GFP_KERNEL);
+       npcm7xx_clk_data = kzalloc(struct_size(npcm7xx_clk_data, hws,
+                                  NPCM7XX_NUM_CLOCKS), GFP_KERNEL);
        if (!npcm7xx_clk_data)
                goto npcm7xx_init_np_err;
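
The allocation now uses struct_size(), which computes the size of the struct plus n elements of the flexible hws[] array with overflow checking; the old expression multiplied sizeof(*hws) outside the header and added sizeof of the pointer rather than of the struct. A standalone sketch of the size arithmetic (plain C, without the kernel's overflow-checked helper):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct clk_data {
        unsigned int num;
        void *hws[];            /* flexible array member */
};

/* Equivalent of struct_size(d, hws, n) without overflow checking:
 * the header up to hws[] plus n array elements.
 */
static size_t clk_data_size(size_t n)
{
        return offsetof(struct clk_data, hws) + n * sizeof(void *);
}

int main(void)
{
        size_t n = 8;
        struct clk_data *d = calloc(1, clk_data_size(n));

        if (!d)
                return 1;
        d->num = (unsigned int)n;
        printf("allocated %zu bytes for %u clocks\n", clk_data_size(n), d->num);
        free(d);
        return 0;
}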
 
index 08ef69945ffbf425efa001d48c6c9b5b0f4333eb..d977193842dfed1fead553dd2240013c5a0d380a 100644 (file)
@@ -55,6 +55,7 @@ struct clk_plt_data {
        u8 nparents;
        struct clk_plt *clks[PMC_CLK_NUM];
        struct clk_lookup *mclk_lookup;
+       struct clk_lookup *ether_clk_lookup;
 };
 
 /* Return an index in parent table */
@@ -186,13 +187,6 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
        pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
        spin_lock_init(&pclk->lock);
 
-       /*
-        * If the clock was already enabled by the firmware mark it as critical
-        * to avoid it being gated by the clock framework if no driver owns it.
-        */
-       if (plt_clk_is_enabled(&pclk->hw))
-               init.flags |= CLK_IS_CRITICAL;
-
        ret = devm_clk_hw_register(&pdev->dev, &pclk->hw);
        if (ret) {
                pclk = ERR_PTR(ret);
@@ -351,11 +345,20 @@ static int plt_clk_probe(struct platform_device *pdev)
                goto err_unreg_clk_plt;
        }
 
+       data->ether_clk_lookup = clkdev_hw_create(&data->clks[4]->hw,
+                                                 "ether_clk", NULL);
+       if (!data->ether_clk_lookup) {
+               err = -ENOMEM;
+               goto err_drop_mclk;
+       }
+
        plt_clk_free_parent_names_loop(parent_names, data->nparents);
 
        platform_set_drvdata(pdev, data);
        return 0;
 
+err_drop_mclk:
+       clkdev_drop(data->mclk_lookup);
 err_unreg_clk_plt:
        plt_clk_unregister_loop(data, i);
        plt_clk_unregister_parents(data);
@@ -369,6 +372,7 @@ static int plt_clk_remove(struct platform_device *pdev)
 
        data = platform_get_drvdata(pdev);
 
+       clkdev_drop(data->ether_clk_lookup);
        clkdev_drop(data->mclk_lookup);
        plt_clk_unregister_loop(data, PMC_CLK_NUM);
        plt_clk_unregister_parents(data);
index fb62f393800825f1fa4433ea14a7931dfb6409d6..3a0996f2d5564054d782edf83224f425e3799059 100644 (file)
@@ -46,7 +46,7 @@ static int st_clk_probe(struct platform_device *pdev)
                clk_oscout1_parents, ARRAY_SIZE(clk_oscout1_parents),
                0, st_data->base + CLKDRVSTR2, OSCOUT1CLK25MHZ, 3, 0, NULL);
 
-       clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_25M]->clk);
+       clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_48M]->clk);
 
        hws[ST_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1", "oscout1_mux",
                0, st_data->base + MISCCLKCNTL1, OSCCLKENB,
index 110483f0e3fbad97c1304b633b5d55c940e1fe70..e26a40971b263ed5f5cb113c938f4d9f84109862 100644 (file)
@@ -379,9 +379,20 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                if (idx == -1)
                        idx = i; /* first enabled state */
                if (s->target_residency > data->predicted_us) {
-                       if (!tick_nohz_tick_stopped())
+                       if (data->predicted_us < TICK_USEC)
                                break;
 
+                       if (!tick_nohz_tick_stopped()) {
+                               /*
+                                * If the state selected so far is shallow,
+                                * waking up early won't hurt, so retain the
+                                * tick in that case and let the governor run
+                                * again in the next iteration of the loop.
+                                */
+                               expected_interval = drv->states[idx].target_residency;
+                               break;
+                       }
+
                        /*
                         * If the state selected so far is shallow and this
                         * state's target residency matches the time till the
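
Per the diff's own comment, the menu governor now bails out early when the predicted idle period is below one tick, and when the tick has not yet been stopped it retains the tick and clamps the expected interval to the chosen state's target residency. A much-simplified standalone sketch of the underlying selection step, picking the deepest state whose target residency still fits the prediction (state table and numbers are invented):

#include <stdio.h>

struct state { const char *name; unsigned int target_residency_us; };

static const struct state states[] = {
        { "C1",   2 },
        { "C3",  50 },
        { "C6", 400 },
};

/* Deepest state whose target residency does not exceed the prediction. */
static int pick_state(unsigned int predicted_us)
{
        int i, idx = 0;

        for (i = 0; i < (int)(sizeof(states) / sizeof(states[0])); i++) {
                if (states[i].target_residency_us > predicted_us)
                        break;
                idx = i;
        }
        return idx;
}

int main(void)
{
        printf("predict 5us -> %s\n", states[pick_state(5)].name);
        printf("predict 1ms -> %s\n", states[pick_state(1000)].name);
        return 0;
}
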
index 6e61cc93c2b0da3be9d2f68bf8e063deaf122054..d7aa7d7ff102fab24aa86bd1d13190348973a1bf 100644 (file)
@@ -679,10 +679,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
        int ret = 0;
 
        if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
-               crypto_ablkcipher_set_flags(ablkcipher,
-                                           CRYPTO_TFM_RES_BAD_KEY_LEN);
                dev_err(jrdev, "key size mismatch\n");
-               return -EINVAL;
+               goto badkey;
        }
 
        ctx->cdata.keylen = keylen;
@@ -715,7 +713,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
        return ret;
 badkey:
        crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-       return 0;
+       return -EINVAL;
 }
 
 /*
index 578ea63a31098e46a7e32554f2c7acff5f8137cd..f26d62e5533a7a5a622d6e2f0661dcb3d0ebdc40 100644 (file)
@@ -71,8 +71,8 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
        dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
-       dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
-       dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
+       dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+       dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
 }
 
 static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
@@ -90,8 +90,8 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
        dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
-       dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
-       dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
+       dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+       dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
 }
 
 /* RSA Job Completion handler */
@@ -417,13 +417,13 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
                goto unmap_p;
        }
 
-       pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
+       pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, pdb->tmp1_dma)) {
                dev_err(dev, "Unable to map RSA tmp1 memory\n");
                goto unmap_q;
        }
 
-       pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
+       pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, pdb->tmp2_dma)) {
                dev_err(dev, "Unable to map RSA tmp2 memory\n");
                goto unmap_tmp1;
@@ -451,7 +451,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
        return 0;
 
 unmap_tmp1:
-       dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+       dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
 unmap_q:
        dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
 unmap_p:
@@ -504,13 +504,13 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
                goto unmap_dq;
        }
 
-       pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
+       pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, pdb->tmp1_dma)) {
                dev_err(dev, "Unable to map RSA tmp1 memory\n");
                goto unmap_qinv;
        }
 
-       pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
+       pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, pdb->tmp2_dma)) {
                dev_err(dev, "Unable to map RSA tmp2 memory\n");
                goto unmap_tmp1;
@@ -538,7 +538,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
        return 0;
 
 unmap_tmp1:
-       dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+       dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
 unmap_qinv:
        dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
 unmap_dq:
index f4f258075b895a8c55fbd836d35b1b6b399beed8..acdd72016ffe154fab70f8de0ff43f6251b50e28 100644 (file)
@@ -190,7 +190,8 @@ static void caam_jr_dequeue(unsigned long devarg)
                BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
 
                /* Unmap just-run descriptor so we can post-process */
-               dma_unmap_single(dev, jrp->outring[hw_idx].desc,
+               dma_unmap_single(dev,
+                                caam_dma_to_cpu(jrp->outring[hw_idx].desc),
                                 jrp->entinfo[sw_idx].desc_size,
                                 DMA_TO_DEVICE);
 
index 9a476bb6d4c7eace932edb09b040098eccee7aa0..af596455b420f60d68096b8f2d4e9c71745785fc 100644 (file)
@@ -35,6 +35,7 @@ struct nitrox_cmdq {
        /* requests in backlog queues */
        atomic_t backlog_count;
 
+       int write_idx;
        /* command size 32B/64B */
        u8 instr_size;
        u8 qno;
@@ -87,7 +88,7 @@ struct nitrox_bh {
        struct bh_data *slc;
 };
 
-/* NITROX-5 driver state */
+/* NITROX-V driver state */
 #define NITROX_UCODE_LOADED    0
 #define NITROX_READY           1
 
index ebe267379ac95ede190832e8a9c0a1df174e6866..4d31df07777f63129715175f7aa4bc84b50fe49b 100644 (file)
@@ -36,6 +36,7 @@ static int cmdq_common_init(struct nitrox_cmdq *cmdq)
        cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN);
        cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN);
        cmdq->qsize = (qsize + PKT_IN_ALIGN);
+       cmdq->write_idx = 0;
 
        spin_lock_init(&cmdq->response_lock);
        spin_lock_init(&cmdq->cmdq_lock);
index deaefd532aaa155755faba33bba5fbee7b11430f..4a362fc22f6287638f749dfeacde20bc62517896 100644 (file)
  *   Invalid flag options in AES-CCM IV.
  */
 
+static inline int incr_index(int index, int count, int max)
+{
+       if ((index + count) >= max)
+               index = index + count - max;
+       else
+               index += count;
+
+       return index;
+}
+
 /**
  * dma_free_sglist - unmap and free the sg lists.
  * @ndev: N5 device
@@ -426,30 +436,29 @@ static void post_se_instr(struct nitrox_softreq *sr,
                          struct nitrox_cmdq *cmdq)
 {
        struct nitrox_device *ndev = sr->ndev;
-       union nps_pkt_in_instr_baoff_dbell pkt_in_baoff_dbell;
-       u64 offset;
+       int idx;
        u8 *ent;
 
        spin_lock_bh(&cmdq->cmdq_lock);
 
-       /* get the next write offset */
-       offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(cmdq->qno);
-       pkt_in_baoff_dbell.value = nitrox_read_csr(ndev, offset);
+       idx = cmdq->write_idx;
        /* copy the instruction */
-       ent = cmdq->head + pkt_in_baoff_dbell.s.aoff;
+       ent = cmdq->head + (idx * cmdq->instr_size);
        memcpy(ent, &sr->instr, cmdq->instr_size);
-       /* flush the command queue updates */
-       dma_wmb();
 
-       sr->tstamp = jiffies;
        atomic_set(&sr->status, REQ_POSTED);
        response_list_add(sr, cmdq);
+       sr->tstamp = jiffies;
+       /* flush the command queue updates */
+       dma_wmb();
 
        /* Ring doorbell with count 1 */
        writeq(1, cmdq->dbell_csr_addr);
        /* orders the doorbell rings */
        mmiowb();
 
+       cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
+
        spin_unlock_bh(&cmdq->cmdq_lock);
 }
 
@@ -459,6 +468,9 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
        struct nitrox_softreq *sr, *tmp;
        int ret = 0;
 
+       if (!atomic_read(&cmdq->backlog_count))
+               return 0;
+
        spin_lock_bh(&cmdq->backlog_lock);
 
        list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
@@ -466,7 +478,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
 
                /* submit until space available */
                if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
-                       ret = -EBUSY;
+                       ret = -ENOSPC;
                        break;
                }
                /* delete from backlog list */
@@ -491,23 +503,20 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr)
 {
        struct nitrox_cmdq *cmdq = sr->cmdq;
        struct nitrox_device *ndev = sr->ndev;
-       int ret = -EBUSY;
+
+       /* try to post backlog requests */
+       post_backlog_cmds(cmdq);
 
        if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
                if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
-                       return -EAGAIN;
-
+                       return -ENOSPC;
+               /* add to backlog list */
                backlog_list_add(sr, cmdq);
-       } else {
-               ret = post_backlog_cmds(cmdq);
-               if (ret) {
-                       backlog_list_add(sr, cmdq);
-                       return ret;
-               }
-               post_se_instr(sr, cmdq);
-               ret = -EINPROGRESS;
+               return -EBUSY;
        }
-       return ret;
+       post_se_instr(sr, cmdq);
+
+       return -EINPROGRESS;
 }
 
 /**
@@ -624,11 +633,9 @@ int nitrox_process_se_request(struct nitrox_device *ndev,
         */
        sr->instr.fdata[0] = *((u64 *)&req->gph);
        sr->instr.fdata[1] = 0;
-       /* flush the soft_req changes before posting the cmd */
-       wmb();
 
        ret = nitrox_enqueue_request(sr);
-       if (ret == -EAGAIN)
+       if (ret == -ENOSPC)
                goto send_fail;
 
        return ret;
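
Submission now keeps a software write_idx per command queue and advances it with incr_index() instead of re-reading the base-address-offset/doorbell register for every command; full queues are reported as -ENOSPC (or -EBUSY once backlogged) rather than -EAGAIN. A standalone sketch of the wrap-around index and a bounded producer (queue depth and payload are made up):

#include <stdio.h>

#define QLEN 4

static int incr_index(int index, int count, int max)
{
        if (index + count >= max)
                return index + count - max;
        return index + count;
}

struct queue {
        int write_idx;
        int used;
        int slots[QLEN];
};

static int post(struct queue *q, int value)
{
        if (q->used == QLEN)
                return -1;              /* queue full: caller must back off */
        q->slots[q->write_idx] = value;
        q->write_idx = incr_index(q->write_idx, 1, QLEN);
        q->used++;
        return 0;
}

int main(void)
{
        struct queue q = { 0 };
        int i;

        for (i = 0; i < 6; i++)
                printf("post %d -> %d (write_idx=%d)\n", i, post(&q, i),
                       q.write_idx);
        return 0;
}
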
index 218739b961fe37b8c633443173d3026e19df0a69..72790d88236d4d73b56c5f1d26efcbfadc033c6b 100644 (file)
@@ -38,6 +38,17 @@ static DEFINE_MUTEX(sev_cmd_mutex);
 static struct sev_misc_dev *misc_dev;
 static struct psp_device *psp_master;
 
+static int psp_cmd_timeout = 100;
+module_param(psp_cmd_timeout, int, 0644);
+MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands");
+
+static int psp_probe_timeout = 5;
+module_param(psp_probe_timeout, int, 0644);
+MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
+
+static bool psp_dead;
+static int psp_timeout;
+
 static struct psp_device *psp_alloc_struct(struct sp_device *sp)
 {
        struct device *dev = sp->dev;
@@ -82,10 +93,19 @@ done:
        return IRQ_HANDLED;
 }
 
-static void sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg)
+static int sev_wait_cmd_ioc(struct psp_device *psp,
+                           unsigned int *reg, unsigned int timeout)
 {
-       wait_event(psp->sev_int_queue, psp->sev_int_rcvd);
+       int ret;
+
+       ret = wait_event_timeout(psp->sev_int_queue,
+                       psp->sev_int_rcvd, timeout * HZ);
+       if (!ret)
+               return -ETIMEDOUT;
+
        *reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
+
+       return 0;
 }
 
 static int sev_cmd_buffer_len(int cmd)
@@ -133,12 +153,15 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
        if (!psp)
                return -ENODEV;
 
+       if (psp_dead)
+               return -EBUSY;
+
        /* Get the physical address of the command buffer */
        phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
        phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
 
-       dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x\n",
-               cmd, phys_msb, phys_lsb);
+       dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
+               cmd, phys_msb, phys_lsb, psp_timeout);
 
        print_hex_dump_debug("(in):  ", DUMP_PREFIX_OFFSET, 16, 2, data,
                             sev_cmd_buffer_len(cmd), false);
@@ -154,7 +177,18 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
        iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg);
 
        /* wait for command completion */
-       sev_wait_cmd_ioc(psp, &reg);
+       ret = sev_wait_cmd_ioc(psp, &reg, psp_timeout);
+       if (ret) {
+               if (psp_ret)
+                       *psp_ret = 0;
+
+               dev_err(psp->dev, "sev command %#x timed out, disabling PSP\n", cmd);
+               psp_dead = true;
+
+               return ret;
+       }
+
+       psp_timeout = psp_cmd_timeout;
 
        if (psp_ret)
                *psp_ret = reg & PSP_CMDRESP_ERR_MASK;
@@ -888,6 +922,8 @@ void psp_pci_init(void)
 
        psp_master = sp->psp_data;
 
+       psp_timeout = psp_probe_timeout;
+
        if (sev_get_api_version())
                goto err;
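
Waiting for the PSP interrupt is now bounded: sev_wait_cmd_ioc() uses wait_event_timeout() with a module-tunable timeout (shorter during probe), and the first timeout marks the device dead so later commands fail immediately with -EBUSY instead of hanging. A standalone sketch of that fail-fast latch around a simulated wait (timings and names are illustrative only):

#include <stdbool.h>
#include <stdio.h>

static bool psp_dead;

/* Simulated bounded wait: "completes" only if the device answers in time. */
static int wait_with_timeout(bool device_answers)
{
        return device_answers ? 0 : -1;   /* -1 stands in for -ETIMEDOUT */
}

static int do_cmd(bool device_answers)
{
        int ret;

        if (psp_dead)
                return -2;                /* fail fast once a timeout occurred */

        ret = wait_with_timeout(device_answers);
        if (ret) {
                psp_dead = true;          /* latch: never trust this device again */
                return ret;
        }
        return 0;
}

int main(void)
{
        printf("cmd1: %d\n", do_cmd(true));   /* works */
        printf("cmd2: %d\n", do_cmd(false));  /* times out, marks dead */
        printf("cmd3: %d\n", do_cmd(true));   /* rejected immediately */
        return 0;
}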
 
index a53a0e6ba024e8175338859c634c81e94859cd13..7725b6ee14efb2ecc89d9c0822aa19903284d3f5 100644 (file)
@@ -96,6 +96,10 @@ enum csk_flags {
        CSK_CONN_INLINE,        /* Connection on HW */
 };
 
+enum chtls_cdev_state {
+       CHTLS_CDEV_STATE_UP = 1
+};
+
 struct listen_ctx {
        struct sock *lsk;
        struct chtls_dev *cdev;
@@ -146,6 +150,7 @@ struct chtls_dev {
        unsigned int send_page_order;
        int max_host_sndbuf;
        struct key_map kmap;
+       unsigned int cdev_state;
 };
 
 struct chtls_hws {
index 9b07f9165658beeb07a29edc4e00c3849176d2ec..f59b044ebd25528864d055c04b90f67b83248eac 100644 (file)
@@ -160,6 +160,7 @@ static void chtls_register_dev(struct chtls_dev *cdev)
        tlsdev->hash = chtls_create_hash;
        tlsdev->unhash = chtls_destroy_hash;
        tls_register_device(&cdev->tlsdev);
+       cdev->cdev_state = CHTLS_CDEV_STATE_UP;
 }
 
 static void chtls_unregister_dev(struct chtls_dev *cdev)
@@ -281,8 +282,10 @@ static void chtls_free_all_uld(void)
        struct chtls_dev *cdev, *tmp;
 
        mutex_lock(&cdev_mutex);
-       list_for_each_entry_safe(cdev, tmp, &cdev_list, list)
-               chtls_free_uld(cdev);
+       list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
+               if (cdev->cdev_state == CHTLS_CDEV_STATE_UP)
+                       chtls_free_uld(cdev);
+       }
        mutex_unlock(&cdev_mutex);
 }
 
index 5285ece4f33a36df39bfd18068dd14ce6a1213db..b71895871be3f1f2ec0b15d19d4087a16f40016f 100644 (file)
@@ -107,24 +107,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
                ret = crypto_skcipher_encrypt(req);
                skcipher_request_zero(req);
        } else {
-               preempt_disable();
-               pagefault_disable();
-               enable_kernel_vsx();
-
                blkcipher_walk_init(&walk, dst, src, nbytes);
                ret = blkcipher_walk_virt(desc, &walk);
                while ((nbytes = walk.nbytes)) {
+                       preempt_disable();
+                       pagefault_disable();
+                       enable_kernel_vsx();
                        aes_p8_cbc_encrypt(walk.src.virt.addr,
                                           walk.dst.virt.addr,
                                           nbytes & AES_BLOCK_MASK,
                                           &ctx->enc_key, walk.iv, 1);
+                       disable_kernel_vsx();
+                       pagefault_enable();
+                       preempt_enable();
+
                        nbytes &= AES_BLOCK_SIZE - 1;
                        ret = blkcipher_walk_done(desc, &walk, nbytes);
                }
-
-               disable_kernel_vsx();
-               pagefault_enable();
-               preempt_enable();
        }
 
        return ret;
@@ -147,24 +146,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
                ret = crypto_skcipher_decrypt(req);
                skcipher_request_zero(req);
        } else {
-               preempt_disable();
-               pagefault_disable();
-               enable_kernel_vsx();
-
                blkcipher_walk_init(&walk, dst, src, nbytes);
                ret = blkcipher_walk_virt(desc, &walk);
                while ((nbytes = walk.nbytes)) {
+                       preempt_disable();
+                       pagefault_disable();
+                       enable_kernel_vsx();
                        aes_p8_cbc_encrypt(walk.src.virt.addr,
                                           walk.dst.virt.addr,
                                           nbytes & AES_BLOCK_MASK,
                                           &ctx->dec_key, walk.iv, 0);
+                       disable_kernel_vsx();
+                       pagefault_enable();
+                       preempt_enable();
+
                        nbytes &= AES_BLOCK_SIZE - 1;
                        ret = blkcipher_walk_done(desc, &walk, nbytes);
                }
-
-               disable_kernel_vsx();
-               pagefault_enable();
-               preempt_enable();
        }
 
        return ret;
index 8bd9aff0f55fba6639b67147cf97c2fcfce12bfc..e9954a7d46944d36cd2aeffdfd8202b54c793d71 100644 (file)
@@ -116,32 +116,39 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
                ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
                skcipher_request_zero(req);
        } else {
+               blkcipher_walk_init(&walk, dst, src, nbytes);
+
+               ret = blkcipher_walk_virt(desc, &walk);
+
                preempt_disable();
                pagefault_disable();
                enable_kernel_vsx();
 
-               blkcipher_walk_init(&walk, dst, src, nbytes);
-
-               ret = blkcipher_walk_virt(desc, &walk);
                iv = walk.iv;
                memset(tweak, 0, AES_BLOCK_SIZE);
                aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
 
+               disable_kernel_vsx();
+               pagefault_enable();
+               preempt_enable();
+
                while ((nbytes = walk.nbytes)) {
+                       preempt_disable();
+                       pagefault_disable();
+                       enable_kernel_vsx();
                        if (enc)
                                aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
                                                nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
                        else
                                aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
                                                nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
+                       disable_kernel_vsx();
+                       pagefault_enable();
+                       preempt_enable();
 
                        nbytes &= AES_BLOCK_SIZE - 1;
                        ret = blkcipher_walk_done(desc, &walk, nbytes);
                }
-
-               disable_kernel_vsx();
-               pagefault_enable();
-               preempt_enable();
        }
        return ret;
 }
index 0a2acd7993f0b3a561cf91beb0b4200838f601f4..bbe4d72ca105b001e36b1d09d382ee9e3a89ee7c 100644 (file)
@@ -248,13 +248,12 @@ __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
        return -1;
 }
 
-static int __dev_dax_pte_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
+static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
+                               struct vm_fault *vmf, pfn_t *pfn)
 {
        struct device *dev = &dev_dax->dev;
        struct dax_region *dax_region;
-       int rc = VM_FAULT_SIGBUS;
        phys_addr_t phys;
-       pfn_t pfn;
        unsigned int fault_size = PAGE_SIZE;
 
        if (check_vma(dev_dax, vmf->vma, __func__))
@@ -276,26 +275,19 @@ static int __dev_dax_pte_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
                return VM_FAULT_SIGBUS;
        }
 
-       pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
-
-       rc = vm_insert_mixed(vmf->vma, vmf->address, pfn);
-
-       if (rc == -ENOMEM)
-               return VM_FAULT_OOM;
-       if (rc < 0 && rc != -EBUSY)
-               return VM_FAULT_SIGBUS;
+       *pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
 
-       return VM_FAULT_NOPAGE;
+       return vmf_insert_mixed(vmf->vma, vmf->address, *pfn);
 }
 
-static int __dev_dax_pmd_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
+static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
+                               struct vm_fault *vmf, pfn_t *pfn)
 {
        unsigned long pmd_addr = vmf->address & PMD_MASK;
        struct device *dev = &dev_dax->dev;
        struct dax_region *dax_region;
        phys_addr_t phys;
        pgoff_t pgoff;
-       pfn_t pfn;
        unsigned int fault_size = PMD_SIZE;
 
        if (check_vma(dev_dax, vmf->vma, __func__))
@@ -331,21 +323,21 @@ static int __dev_dax_pmd_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
                return VM_FAULT_SIGBUS;
        }
 
-       pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
+       *pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
 
-       return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, pfn,
+       return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, *pfn,
                        vmf->flags & FAULT_FLAG_WRITE);
 }
 
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
+static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
+                               struct vm_fault *vmf, pfn_t *pfn)
 {
        unsigned long pud_addr = vmf->address & PUD_MASK;
        struct device *dev = &dev_dax->dev;
        struct dax_region *dax_region;
        phys_addr_t phys;
        pgoff_t pgoff;
-       pfn_t pfn;
        unsigned int fault_size = PUD_SIZE;
 
 
@@ -382,23 +374,27 @@ static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
                return VM_FAULT_SIGBUS;
        }
 
-       pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
+       *pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
 
-       return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, pfn,
+       return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, *pfn,
                        vmf->flags & FAULT_FLAG_WRITE);
 }
 #else
-static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
+static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
+                               struct vm_fault *vmf, pfn_t *pfn)
 {
        return VM_FAULT_FALLBACK;
 }
 #endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 
-static int dev_dax_huge_fault(struct vm_fault *vmf,
+static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
                enum page_entry_size pe_size)
 {
-       int rc, id;
        struct file *filp = vmf->vma->vm_file;
+       unsigned long fault_size;
+       vm_fault_t rc = VM_FAULT_SIGBUS;
+       int id;
+       pfn_t pfn;
        struct dev_dax *dev_dax = filp->private_data;
 
        dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm,
@@ -408,23 +404,49 @@ static int dev_dax_huge_fault(struct vm_fault *vmf,
        id = dax_read_lock();
        switch (pe_size) {
        case PE_SIZE_PTE:
-               rc = __dev_dax_pte_fault(dev_dax, vmf);
+               fault_size = PAGE_SIZE;
+               rc = __dev_dax_pte_fault(dev_dax, vmf, &pfn);
                break;
        case PE_SIZE_PMD:
-               rc = __dev_dax_pmd_fault(dev_dax, vmf);
+               fault_size = PMD_SIZE;
+               rc = __dev_dax_pmd_fault(dev_dax, vmf, &pfn);
                break;
        case PE_SIZE_PUD:
-               rc = __dev_dax_pud_fault(dev_dax, vmf);
+               fault_size = PUD_SIZE;
+               rc = __dev_dax_pud_fault(dev_dax, vmf, &pfn);
                break;
        default:
                rc = VM_FAULT_SIGBUS;
        }
+
+       if (rc == VM_FAULT_NOPAGE) {
+               unsigned long i;
+               pgoff_t pgoff;
+
+               /*
+                * In the device-dax case the only possibility for a
+                * VM_FAULT_NOPAGE result is when device-dax capacity is
+                * mapped. No need to consider the zero page, or racing
+                * conflicting mappings.
+                */
+               pgoff = linear_page_index(vmf->vma, vmf->address
+                               & ~(fault_size - 1));
+               for (i = 0; i < fault_size / PAGE_SIZE; i++) {
+                       struct page *page;
+
+                       page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
+                       if (page->mapping)
+                               continue;
+                       page->mapping = filp->f_mapping;
+                       page->index = pgoff + i;
+               }
+       }
        dax_read_unlock(id);
 
        return rc;
 }
 
-static int dev_dax_fault(struct vm_fault *vmf)
+static vm_fault_t dev_dax_fault(struct vm_fault *vmf)
 {
        return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
 }
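
dev_dax_huge_fault() now receives the pfn back from the size-specific helpers and, on VM_FAULT_NOPAGE, walks every page backing the fault and points page->mapping/page->index at the file, using the fault address rounded down to the fault size. A standalone sketch of just that alignment and index arithmetic (page size and addresses invented; no mm structures involved):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* For a fault of fault_size bytes at addr, compute the index of the first
 * backing page and how many pages the fault covers.
 */
static void fault_pages(unsigned long addr, unsigned long fault_size,
                        unsigned long *first_index, unsigned long *npages)
{
        unsigned long base = addr & ~(fault_size - 1); /* align down */

        *first_index = base / PAGE_SIZE;
        *npages = fault_size / PAGE_SIZE;
}

int main(void)
{
        unsigned long first, n;

        fault_pages(0x201234UL, 2UL * 1024 * 1024, &first, &n); /* 2M fault */
        printf("first page index %lu, %lu pages\n", first, n);

        fault_pages(0x201234UL, PAGE_SIZE, &first, &n);         /* 4K fault */
        printf("first page index %lu, %lu pages\n", first, n);
        return 0;
}
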
index fd49b24fd6afd344335e38438fce9dd981d928c9..99e2aace8078c87fde81019ccd3380fcccc666ec 100644 (file)
@@ -105,15 +105,19 @@ static int dax_pmem_probe(struct device *dev)
        if (rc)
                return rc;
 
-       rc = devm_add_action_or_reset(dev, dax_pmem_percpu_exit,
-                                                       &dax_pmem->ref);
-       if (rc)
+       rc = devm_add_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref);
+       if (rc) {
+               percpu_ref_exit(&dax_pmem->ref);
                return rc;
+       }
 
        dax_pmem->pgmap.ref = &dax_pmem->ref;
        addr = devm_memremap_pages(dev, &dax_pmem->pgmap);
-       if (IS_ERR(addr))
+       if (IS_ERR(addr)) {
+               devm_remove_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref);
+               percpu_ref_exit(&dax_pmem->ref);
                return PTR_ERR(addr);
+       }
 
        rc = devm_add_action_or_reset(dev, dax_pmem_percpu_kill,
                                                        &dax_pmem->ref);
index 45276abf03aa2bd52aa9af56b8cbd45a4b1e5135..6e928f37d08429defdcecf71d1bf35c0ef5b613e 100644 (file)
@@ -89,7 +89,6 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
        struct request_queue *q;
        pgoff_t pgoff;
        int err, id;
-       void *kaddr;
        pfn_t pfn;
        long len;
        char buf[BDEVNAME_SIZE];
@@ -122,7 +121,7 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
        }
 
        id = dax_read_lock();
-       len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
+       len = dax_direct_access(dax_dev, pgoff, 1, NULL, &pfn);
        dax_read_unlock(id);
 
        put_dax(dax_dev);
index 272bed6c8ba79d17cff68cf54d9b0aae20a28546..f1a441ab395d7529ebbc4f8d59e891e20c61419d 100644 (file)
@@ -161,9 +161,7 @@ static void chan_dev_release(struct device *dev)
 
        chan_dev = container_of(dev, typeof(*chan_dev), device);
        if (atomic_dec_and_test(chan_dev->idr_ref)) {
-               mutex_lock(&dma_list_mutex);
-               ida_remove(&dma_ida, chan_dev->dev_id);
-               mutex_unlock(&dma_list_mutex);
+               ida_free(&dma_ida, chan_dev->dev_id);
                kfree(chan_dev->idr_ref);
        }
        kfree(chan_dev);
@@ -898,17 +896,12 @@ static bool device_has_all_tx_types(struct dma_device *device)
 
 static int get_dma_id(struct dma_device *device)
 {
-       int rc;
-
-       do {
-               if (!ida_pre_get(&dma_ida, GFP_KERNEL))
-                       return -ENOMEM;
-               mutex_lock(&dma_list_mutex);
-               rc = ida_get_new(&dma_ida, &device->dev_id);
-               mutex_unlock(&dma_list_mutex);
-       } while (rc == -EAGAIN);
+       int rc = ida_alloc(&dma_ida, GFP_KERNEL);
 
-       return rc;
+       if (rc < 0)
+               return rc;
+       device->dev_id = rc;
+       return 0;
 }
 
 /**
@@ -1092,9 +1085,7 @@ int dma_async_device_register(struct dma_device *device)
 err_out:
        /* if we never registered a channel just release the idr */
        if (atomic_read(idr_ref) == 0) {
-               mutex_lock(&dma_list_mutex);
-               ida_remove(&dma_ida, device->dev_id);
-               mutex_unlock(&dma_list_mutex);
+               ida_free(&dma_ida, device->dev_id);
                kfree(idr_ref);
                return rc;
        }
index b76cb17d879c635efbfec026da59a5c608ff0a6e..adfd316db1a892a5f0bbae831ae6569c8c2ba270 100644 (file)
@@ -639,7 +639,7 @@ static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev,
        int ret;
        struct device *dev = &mbdev->dev;
 
-       mic_dma_dev = kzalloc(sizeof(*mic_dma_dev), GFP_KERNEL);
+       mic_dma_dev = devm_kzalloc(dev, sizeof(*mic_dma_dev), GFP_KERNEL);
        if (!mic_dma_dev) {
                ret = -ENOMEM;
                goto alloc_error;
@@ -664,7 +664,6 @@ static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev,
 reg_error:
        mic_dma_uninit(mic_dma_dev);
 init_error:
-       kfree(mic_dma_dev);
        mic_dma_dev = NULL;
 alloc_error:
        dev_err(dev, "Error at %s %d ret=%d\n", __func__, __LINE__, ret);
@@ -674,7 +673,6 @@ alloc_error:
 static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev)
 {
        mic_dma_uninit(mic_dma_dev);
-       kfree(mic_dma_dev);
 }
 
 /* DEBUGFS CODE */
index 721e6c57beae5bfecb0f135c514fedd9ea2bdff0..64342944d9175c54918100c36f0b43a20e6acae3 100644 (file)
@@ -166,7 +166,13 @@ scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
                                        le32_to_cpu(attr->sustained_freq_khz);
                dom_info->sustained_perf_level =
                                        le32_to_cpu(attr->sustained_perf_level);
-               dom_info->mult_factor = (dom_info->sustained_freq_khz * 1000) /
+               if (!dom_info->sustained_freq_khz ||
+                   !dom_info->sustained_perf_level)
+                       /* CPUFreq converts to kHz, hence default 1000 */
+                       dom_info->mult_factor = 1000;
+               else
+                       dom_info->mult_factor =
+                                       (dom_info->sustained_freq_khz * 1000) /
                                        dom_info->sustained_perf_level;
                memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
        }
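
mult_factor converts a performance level into kHz for CPUFreq; if the firmware reports a zero sustained frequency or level, the driver now falls back to a factor of 1000 instead of dividing by zero. A standalone sketch of the guarded computation (field names shortened):

#include <stdio.h>

/* kHz-per-performance-level factor; 1000 is the fallback noted in the
 * patch when the firmware leaves either field zero.
 */
static unsigned int mult_factor(unsigned int sustained_freq_khz,
                                unsigned int sustained_perf_level)
{
        if (!sustained_freq_khz || !sustained_perf_level)
                return 1000;
        return (sustained_freq_khz * 1000) / sustained_perf_level;
}

int main(void)
{
        printf("normal:   %u\n", mult_factor(2000000, 2000));
        printf("fallback: %u\n", mult_factor(0, 0));
        return 0;
}
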
index fc9fd2d0482f4d8356f4ae8e1acfacdfdbfe3b65..0b840531ef33a2e1f387d15f550fb808df5ae8bf 100644 (file)
@@ -420,7 +420,7 @@ static int pr_mgmt_init(struct platform_device *pdev,
                /* Create region for each port */
                fme_region = dfl_fme_create_region(pdata, mgr,
                                                   fme_br->br, i);
-               if (!fme_region) {
+               if (IS_ERR(fme_region)) {
                        ret = PTR_ERR(fme_region);
                        goto destroy_region;
                }
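
dfl_fme_create_region() reports failure through an ERR_PTR-encoded pointer, so the caller must test with IS_ERR() rather than comparing against NULL; the old NULL check let an error pointer slip through. A standalone sketch of the pointer-encoded-errno convention (local re-implementation for illustration, not the kernel headers):

#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095UL

/* Minimal local equivalents of the kernel's ERR_PTR/IS_ERR/PTR_ERR. */
static void *err_ptr(long error) { return (void *)error; }
static int is_err(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}
static long ptr_err(const void *p) { return (long)p; }

static void *create_region(int fail)
{
        if (fail)
                return err_ptr(-12);    /* e.g. out of memory */
        return malloc(16);
}

int main(void)
{
        void *r = create_region(1);

        if (is_err(r))                  /* a NULL test would miss this */
                printf("create_region failed: %ld\n", ptr_err(r));
        else
                free(r);
        return 0;
}
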
index 3530ccd17e04487153cb5d1cff0b9796ca38673a..da9781a2ef4adf230fc6704945fc02ac2bca3823 100644 (file)
@@ -41,6 +41,8 @@ struct adp5588_gpio {
        uint8_t int_en[3];
        uint8_t irq_mask[3];
        uint8_t irq_stat[3];
+       uint8_t int_input_en[3];
+       uint8_t int_lvl_cached[3];
 };
 
 static int adp5588_gpio_read(struct i2c_client *client, u8 reg)
@@ -173,12 +175,28 @@ static void adp5588_irq_bus_sync_unlock(struct irq_data *d)
        struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
        int i;
 
-       for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++)
+       for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
+               if (dev->int_input_en[i]) {
+                       mutex_lock(&dev->lock);
+                       dev->dir[i] &= ~dev->int_input_en[i];
+                       dev->int_input_en[i] = 0;
+                       adp5588_gpio_write(dev->client, GPIO_DIR1 + i,
+                                          dev->dir[i]);
+                       mutex_unlock(&dev->lock);
+               }
+
+               if (dev->int_lvl_cached[i] != dev->int_lvl[i]) {
+                       dev->int_lvl_cached[i] = dev->int_lvl[i];
+                       adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + i,
+                                          dev->int_lvl[i]);
+               }
+
                if (dev->int_en[i] ^ dev->irq_mask[i]) {
                        dev->int_en[i] = dev->irq_mask[i];
                        adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i,
                                           dev->int_en[i]);
                }
+       }
 
        mutex_unlock(&dev->irq_lock);
 }
@@ -221,9 +239,7 @@ static int adp5588_irq_set_type(struct irq_data *d, unsigned int type)
        else
                return -EINVAL;
 
-       adp5588_gpio_direction_input(&dev->gpio_chip, gpio);
-       adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + bank,
-                          dev->int_lvl[bank]);
+       dev->int_input_en[bank] |= bit;
 
        return 0;
 }
index 28da700f5f5258f54117dee459904d2140d0d411..044888fd96a1f642617cba9a9dfc98655c5493aa 100644 (file)
@@ -728,6 +728,7 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
 out_unregister:
        dwapb_gpio_unregister(gpio);
        dwapb_irq_teardown(gpio);
+       clk_disable_unprepare(gpio->clk);
 
        return err;
 }
index c48ed9d89ff5f7b74eb03fb7ee079fa2b18e0dcf..8b9d7e42c600b60d26bad7f26c32ed938be6e7a2 100644 (file)
@@ -25,7 +25,6 @@
 
 struct acpi_gpio_event {
        struct list_head node;
-       struct list_head initial_sync_list;
        acpi_handle handle;
        unsigned int pin;
        unsigned int irq;
@@ -49,10 +48,19 @@ struct acpi_gpio_chip {
        struct mutex conn_lock;
        struct gpio_chip *chip;
        struct list_head events;
+       struct list_head deferred_req_irqs_list_entry;
 };
 
-static LIST_HEAD(acpi_gpio_initial_sync_list);
-static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock);
+/*
+ * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init
+ * (so builtin drivers) we register the ACPI GpioInt event handlers from a
+ * late_initcall_sync handler, so that other builtin drivers can register their
+ * OpRegions before the event handlers can run.  This list contains gpiochips
+ * for which the acpi_gpiochip_request_interrupts() has been deferred.
+ */
+static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
+static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
+static bool acpi_gpio_deferred_req_irqs_done;
 
 static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
 {
@@ -89,21 +97,6 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
        return gpiochip_get_desc(chip, pin);
 }
 
-static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event)
-{
-       mutex_lock(&acpi_gpio_initial_sync_list_lock);
-       list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list);
-       mutex_unlock(&acpi_gpio_initial_sync_list_lock);
-}
-
-static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event)
-{
-       mutex_lock(&acpi_gpio_initial_sync_list_lock);
-       if (!list_empty(&event->initial_sync_list))
-               list_del_init(&event->initial_sync_list);
-       mutex_unlock(&acpi_gpio_initial_sync_list_lock);
-}
-
 static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
 {
        struct acpi_gpio_event *event = data;
@@ -186,7 +179,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
 
        gpiod_direction_input(desc);
 
-       value = gpiod_get_value(desc);
+       value = gpiod_get_value_cansleep(desc);
 
        ret = gpiochip_lock_as_irq(chip, pin);
        if (ret) {
@@ -229,7 +222,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
        event->irq = irq;
        event->pin = pin;
        event->desc = desc;
-       INIT_LIST_HEAD(&event->initial_sync_list);
 
        ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
                                   "ACPI:Event", event);
@@ -251,10 +243,9 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
         * may refer to OperationRegions from other (builtin) drivers which
         * may be probed after us.
         */
-       if (handler == acpi_gpio_irq_handler &&
-           (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
-            ((irqflags & IRQF_TRIGGER_FALLING) && value == 0)))
-               acpi_gpio_add_to_initial_sync_list(event);
+       if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+           ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))
+               handler(event->irq, event);
 
        return AE_OK;
 
@@ -283,6 +274,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
        struct acpi_gpio_chip *acpi_gpio;
        acpi_handle handle;
        acpi_status status;
+       bool defer;
 
        if (!chip->parent || !chip->to_irq)
                return;
@@ -295,6 +287,16 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
        if (ACPI_FAILURE(status))
                return;
 
+       mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+       defer = !acpi_gpio_deferred_req_irqs_done;
+       if (defer)
+               list_add(&acpi_gpio->deferred_req_irqs_list_entry,
+                        &acpi_gpio_deferred_req_irqs_list);
+       mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+
+       if (defer)
+               return;
+
        acpi_walk_resources(handle, "_AEI",
                            acpi_gpiochip_request_interrupt, acpi_gpio);
 }
@@ -325,11 +327,14 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
        if (ACPI_FAILURE(status))
                return;
 
+       mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+       if (!list_empty(&acpi_gpio->deferred_req_irqs_list_entry))
+               list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
+       mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+
        list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
                struct gpio_desc *desc;
 
-               acpi_gpio_del_from_initial_sync_list(event);
-
                if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
                        disable_irq_wake(event->irq);
 
@@ -1052,6 +1057,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
 
        acpi_gpio->chip = chip;
        INIT_LIST_HEAD(&acpi_gpio->events);
+       INIT_LIST_HEAD(&acpi_gpio->deferred_req_irqs_list_entry);
 
        status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio);
        if (ACPI_FAILURE(status)) {
@@ -1198,20 +1204,28 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
        return con_id == NULL;
 }
 
-/* Sync the initial state of handlers after all builtin drivers have probed */
-static int acpi_gpio_initial_sync(void)
+/* Run deferred acpi_gpiochip_request_interrupts() */
+static int acpi_gpio_handle_deferred_request_interrupts(void)
 {
-       struct acpi_gpio_event *event, *ep;
+       struct acpi_gpio_chip *acpi_gpio, *tmp;
+
+       mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+       list_for_each_entry_safe(acpi_gpio, tmp,
+                                &acpi_gpio_deferred_req_irqs_list,
+                                deferred_req_irqs_list_entry) {
+               acpi_handle handle;
 
-       mutex_lock(&acpi_gpio_initial_sync_list_lock);
-       list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list,
-                                initial_sync_list) {
-               acpi_evaluate_object(event->handle, NULL, NULL, NULL);
-               list_del_init(&event->initial_sync_list);
+               handle = ACPI_HANDLE(acpi_gpio->chip->parent);
+               acpi_walk_resources(handle, "_AEI",
+                                   acpi_gpiochip_request_interrupt, acpi_gpio);
+
+               list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
        }
-       mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+
+       acpi_gpio_deferred_req_irqs_done = true;
+       mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
 
        return 0;
 }
 /* We must use _sync so that this runs after the first deferred_probe run */
-late_initcall_sync(acpi_gpio_initial_sync);
+late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts);
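
The gpiolib-acpi change above boils down to a small defer-until-late_initcall_sync pattern: early callers are queued on a list under a mutex, the initcall drains the list once, and a flag lets later callers proceed directly. The sketch below reduces it to that pattern alone; the type and function names are invented for illustration, and only the list/mutex/initcall idioms are real kernel APIs.

#include <linux/init.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct deferred_chip {                 /* stand-in for struct acpi_gpio_chip */
	struct list_head entry;
};

static DEFINE_MUTEX(defer_lock);
static LIST_HEAD(defer_list);
static bool defer_done;

static void request_interrupts(struct deferred_chip *chip)
{
	/* would walk the _AEI resources and request the IRQs here */
}

void maybe_request_interrupts(struct deferred_chip *chip)
{
	bool defer;

	mutex_lock(&defer_lock);
	defer = !defer_done;               /* still before late_initcall_sync? */
	if (defer)
		list_add(&chip->entry, &defer_list);
	mutex_unlock(&defer_lock);

	if (!defer)
		request_interrupts(chip);      /* late enough, handle it now */
}

static int drain_deferred_requests(void)
{
	struct deferred_chip *chip, *tmp;

	mutex_lock(&defer_lock);
	list_for_each_entry_safe(chip, tmp, &defer_list, entry) {
		request_interrupts(chip);
		list_del_init(&chip->entry);
	}
	defer_done = true;                 /* later callers skip the list */
	mutex_unlock(&defer_lock);

	return 0;
}
late_initcall_sync(drain_deferred_requests);
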
index a4f1157d6aa0893707d3880c9f0be4639c909b03..d4e7a09598faedbecb1ccdee24e6138b70e4e7d8 100644 (file)
@@ -31,6 +31,7 @@ static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data)
        struct of_phandle_args *gpiospec = data;
 
        return chip->gpiodev->dev.of_node == gpiospec->np &&
+                               chip->of_xlate &&
                                chip->of_xlate(chip, gpiospec, NULL) >= 0;
 }
 
index f8bbbb3a95043ebee9d5d02471b83e98fb923a0c..0c791e35acf02f3c71e17c77126ec1f9e97c6fe5 100644 (file)
@@ -272,7 +272,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
 
 int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
                        void **mem_obj, uint64_t *gpu_addr,
-                       void **cpu_ptr)
+                       void **cpu_ptr, bool mqd_gfx9)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
        struct amdgpu_bo *bo = NULL;
@@ -287,6 +287,10 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
        bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
        bp.type = ttm_bo_type_kernel;
        bp.resv = NULL;
+
+       if (mqd_gfx9)
+               bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9;
+
        r = amdgpu_bo_create(adev, &bp, &bo);
        if (r) {
                dev_err(adev->dev,
index 2f379c183ed20be8af61e839ebbd75b9890a8593..cc9aeab5468c8559b5b47fa56e452d6b2feb4d11 100644 (file)
@@ -136,7 +136,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
 /* Shared API */
 int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
                        void **mem_obj, uint64_t *gpu_addr,
-                       void **cpu_ptr);
+                       void **cpu_ptr, bool mqd_gfx9);
 void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
 void get_local_mem_info(struct kgd_dev *kgd,
                        struct kfd_local_mem_info *mem_info);
index ea3f698aef5eaec1353492b636c81eb2d2f84b78..9803b91f3e77d6bc6859f225eccbfc2b5d71598b 100644 (file)
@@ -685,7 +685,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
 
        while (true) {
                temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
-               if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
+               if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
                        break;
                if (time_after(jiffies, end_jiffies))
                        return -ETIME;
index 693ec5ea4950a8a76653df1085472078314def56..8816c697b2053c7c28f119f1362443d7b9ad6e98 100644 (file)
@@ -367,12 +367,14 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
                                break;
                        case CHIP_POLARIS10:
                                if (type == CGS_UCODE_ID_SMU) {
-                                       if ((adev->pdev->device == 0x67df) &&
-                                           ((adev->pdev->revision == 0xe0) ||
-                                            (adev->pdev->revision == 0xe3) ||
-                                            (adev->pdev->revision == 0xe4) ||
-                                            (adev->pdev->revision == 0xe5) ||
-                                            (adev->pdev->revision == 0xe7) ||
+                                       if (((adev->pdev->device == 0x67df) &&
+                                            ((adev->pdev->revision == 0xe0) ||
+                                             (adev->pdev->revision == 0xe3) ||
+                                             (adev->pdev->revision == 0xe4) ||
+                                             (adev->pdev->revision == 0xe5) ||
+                                             (adev->pdev->revision == 0xe7) ||
+                                             (adev->pdev->revision == 0xef))) ||
+                                           ((adev->pdev->device == 0x6fdf) &&
                                             (adev->pdev->revision == 0xef))) {
                                                info->is_kicker = true;
                                                strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
index 502b94fb116a7070af89ce51da182c3a936e48a0..b31d121a876bf32e64c51ad23ead9b08713f74bb 100644 (file)
@@ -39,6 +39,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
 {
        struct drm_gem_object *gobj;
        unsigned long size;
+       int r;
 
        gobj = drm_gem_object_lookup(p->filp, data->handle);
        if (gobj == NULL)
@@ -50,20 +51,26 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
        p->uf_entry.tv.shared = true;
        p->uf_entry.user_pages = NULL;
 
-       size = amdgpu_bo_size(p->uf_entry.robj);
-       if (size != PAGE_SIZE || (data->offset + 8) > size)
-               return -EINVAL;
-
-       *offset = data->offset;
-
        drm_gem_object_put_unlocked(gobj);
 
+       size = amdgpu_bo_size(p->uf_entry.robj);
+       if (size != PAGE_SIZE || (data->offset + 8) > size) {
+               r = -EINVAL;
+               goto error_unref;
+       }
+
        if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
-               amdgpu_bo_unref(&p->uf_entry.robj);
-               return -EINVAL;
+               r = -EINVAL;
+               goto error_unref;
        }
 
+       *offset = data->offset;
+
        return 0;
+
+error_unref:
+       amdgpu_bo_unref(&p->uf_entry.robj);
+       return r;
 }
 
 static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
@@ -1012,13 +1019,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
                if (r)
                        return r;
 
-               if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
-                       parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
-                       if (!parser->ctx->preamble_presented) {
-                               parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
-                               parser->ctx->preamble_presented = true;
-                       }
-               }
+               if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
+                       parser->job->preamble_status |=
+                               AMDGPU_PREAMBLE_IB_PRESENT;
 
                if (parser->ring && parser->ring != ring)
                        return -EINVAL;
@@ -1207,26 +1210,24 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
        int r;
 
+       job = p->job;
+       p->job = NULL;
+
+       r = drm_sched_job_init(&job->base, entity, p->filp);
+       if (r)
+               goto error_unlock;
+
+       /* No memory allocation is allowed while holding the mn lock */
        amdgpu_mn_lock(p->mn);
        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
                struct amdgpu_bo *bo = e->robj;
 
                if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
-                       amdgpu_mn_unlock(p->mn);
-                       return -ERESTARTSYS;
+                       r = -ERESTARTSYS;
+                       goto error_abort;
                }
        }
 
-       job = p->job;
-       p->job = NULL;
-
-       r = drm_sched_job_init(&job->base, entity, p->filp);
-       if (r) {
-               amdgpu_job_free(job);
-               amdgpu_mn_unlock(p->mn);
-               return r;
-       }
-
        job->owner = p->filp;
        p->fence = dma_fence_get(&job->base.s_fence->finished);
 
@@ -1241,6 +1242,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
        amdgpu_cs_post_dependencies(p);
 
+       if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
+           !p->ctx->preamble_presented) {
+               job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
+               p->ctx->preamble_presented = true;
+       }
+
        cs->out.handle = seq;
        job->uf_sequence = seq;
 
@@ -1258,6 +1265,15 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
        amdgpu_mn_unlock(p->mn);
 
        return 0;
+
+error_abort:
+       dma_fence_put(&job->base.s_fence->finished);
+       job->base.s_fence = NULL;
+       amdgpu_mn_unlock(p->mn);
+
+error_unlock:
+       amdgpu_job_free(job);
+       return r;
 }
 
 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
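
The reshuffling in amdgpu_cs_submit() follows from the comment that no memory allocation is allowed while the mn lock is held: the allocating step (drm_sched_job_init()) now runs before the lock, and each error label unwinds only what has already succeeded. A minimal user-space sketch of that shape, with pthread and malloc standing in for the real primitives:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t mn_lock = PTHREAD_MUTEX_INITIALIZER;

static int submit(int userptr_invalidated)
{
	int r;
	char *job = malloc(64);              /* allocating step, done before the lock */
	if (!job)
		return -1;

	pthread_mutex_lock(&mn_lock);        /* from here on: no allocations allowed */

	if (userptr_invalidated) {           /* stand-in for the userptr re-check */
		r = -1;
		goto error_abort;
	}

	/* ... publish the job to the scheduler ... */
	pthread_mutex_unlock(&mn_lock);
	return 0;

error_abort:
	pthread_mutex_unlock(&mn_lock);      /* undo the lock taken above */
	free(job);                           /* undo the allocation */
	return r;
}

int main(void)
{
	printf("ok=%d aborted=%d\n", submit(0), submit(1));
	return 0;
}
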
index 8ab5ccbc14ace34f6618452b01432bafd55cb5a3..39bf2ce548c61e2cabb2ad65aa9036d07f963093 100644 (file)
@@ -2063,6 +2063,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
        static enum amd_ip_block_type ip_order[] = {
                AMD_IP_BLOCK_TYPE_GMC,
                AMD_IP_BLOCK_TYPE_COMMON,
+               AMD_IP_BLOCK_TYPE_PSP,
                AMD_IP_BLOCK_TYPE_IH,
        };
 
@@ -2093,7 +2094,6 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
 
        static enum amd_ip_block_type ip_order[] = {
                AMD_IP_BLOCK_TYPE_SMC,
-               AMD_IP_BLOCK_TYPE_PSP,
                AMD_IP_BLOCK_TYPE_DCE,
                AMD_IP_BLOCK_TYPE_GFX,
                AMD_IP_BLOCK_TYPE_SDMA,
index 8843a06360fa7bd7e9620c76012b83b21e51f8c7..0f41d8647376a23234905751fdef534bfed8bd5a 100644 (file)
@@ -740,6 +740,7 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
        {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
        {0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+       {0x1002, 0x6FDF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
        /* Polaris12 */
        {0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
        {0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
index 5518e623fed21046791c42e026383526f8cf8adb..51b5e977ca885ef1f7d7df49698f3c6843bab437 100644 (file)
@@ -164,8 +164,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                return r;
        }
 
+       need_ctx_switch = ring->current_ctx != fence_ctx;
        if (ring->funcs->emit_pipeline_sync && job &&
            ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
+            (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
             amdgpu_vm_need_pipeline_sync(ring, job))) {
                need_pipe_sync = true;
                dma_fence_put(tmp);
@@ -196,7 +198,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        }
 
        skip_preamble = ring->current_ctx == fence_ctx;
-       need_ctx_switch = ring->current_ctx != fence_ctx;
        if (job && ring->funcs->emit_cntxcntl) {
                if (need_ctx_switch)
                        status |= AMDGPU_HAVE_CTX_SWITCH;
index 8f98629fbe5936858a3c77b3546fd106577f7254..7b4e657a95c700561298346654118a9915c8caec 100644 (file)
@@ -1932,14 +1932,6 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
                        amdgpu_fence_wait_empty(ring);
        }
 
-       mutex_lock(&adev->pm.mutex);
-       /* update battery/ac status */
-       if (power_supply_is_system_supplied() > 0)
-               adev->pm.ac_power = true;
-       else
-               adev->pm.ac_power = false;
-       mutex_unlock(&adev->pm.mutex);
-
        if (adev->powerplay.pp_funcs->dispatch_tasks) {
                if (!amdgpu_device_has_dc_support(adev)) {
                        mutex_lock(&adev->pm.mutex);
index ece0ac703e277282992422865f9945ce06ca5f0c..b17771dd5ce732620e8c058f788516b7ef9b6fee 100644 (file)
@@ -172,6 +172,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
         * is validated on next vm use to avoid fault.
         * */
        list_move_tail(&base->vm_status, &vm->evicted);
+       base->moved = true;
 }
 
 /**
@@ -369,7 +370,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
        uint64_t addr;
        int r;
 
-       addr = amdgpu_bo_gpu_offset(bo);
        entries = amdgpu_bo_size(bo) / 8;
 
        if (pte_support_ats) {
@@ -401,6 +401,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
        if (r)
                goto error;
 
+       addr = amdgpu_bo_gpu_offset(bo);
        if (ats_entries) {
                uint64_t ats_value;
 
@@ -2483,28 +2484,52 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
  *
  * @adev: amdgpu_device pointer
- * @vm_size: the default vm size if it's set auto
+ * @min_vm_size: the minimum vm size in GB if it's set auto
  * @fragment_size_default: Default PTE fragment size
  * @max_level: max VMPT level
  * @max_bits: max address space size in bits
  *
  */
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
                           uint32_t fragment_size_default, unsigned max_level,
                           unsigned max_bits)
 {
+       unsigned int max_size = 1 << (max_bits - 30);
+       unsigned int vm_size;
        uint64_t tmp;
 
        /* adjust vm size first */
        if (amdgpu_vm_size != -1) {
-               unsigned max_size = 1 << (max_bits - 30);
-
                vm_size = amdgpu_vm_size;
                if (vm_size > max_size) {
                        dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
                                 amdgpu_vm_size, max_size);
                        vm_size = max_size;
                }
+       } else {
+               struct sysinfo si;
+               unsigned int phys_ram_gb;
+
+               /* Optimal VM size depends on the amount of physical
+                * RAM available. Underlying requirements and
+                * assumptions:
+                *
+                *  - Need to map system memory and VRAM from all GPUs
+                *     - VRAM from other GPUs not known here
+                *     - Assume VRAM <= system memory
+                *  - On GFX8 and older, VM space can be segmented for
+                *    different MTYPEs
+                *  - Need to allow room for fragmentation, guard pages etc.
+                *
+                * This adds up to a rough guess of system memory x3.
+                * Round up to power of two to maximize the available
+                * VM size with the given page table size.
+                */
+               si_meminfo(&si);
+               phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
+                              (1 << 30) - 1) >> 30;
+               vm_size = roundup_pow_of_two(
+                       min(max(phys_ram_gb * 3, min_vm_size), max_size));
        }
 
        adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
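
The new auto-sizing comment can be checked with a quick calculation: take three times the physical RAM in GB, keep it at least at the driver minimum and at most at the address-space maximum, then round the result up to a power of two. A small user-space sketch, where the RAM size, minimum and address bits are example values only (12 GB of RAM gives 36 GB, rounded up to 64 GB):

#include <stdint.h>
#include <stdio.h>

static unsigned int roundup_pow_of_two_u32(unsigned int x)
{
	unsigned int r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int phys_ram_gb = 12;            /* example system RAM in GB */
	unsigned int min_vm_size = 32;            /* example driver minimum in GB */
	unsigned int max_bits    = 44;            /* example address-space bits */
	unsigned int max_size    = 1u << (max_bits - 30);

	unsigned int want = phys_ram_gb * 3;      /* rough guess: 3x system memory */
	if (want < min_vm_size)
		want = min_vm_size;
	if (want > max_size)
		want = max_size;

	printf("VM size: %u GB (max %u GB)\n",
	       roundup_pow_of_two_u32(want), max_size);
	return 0;
}
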
index 67a15d439ac006f97b48ffe7e74544d6688bf452..9fa9df0c5e7f9a19d1f9aed6e0430ea78aac8675 100644 (file)
@@ -321,7 +321,7 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
                      struct amdgpu_bo_va *bo_va);
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
                           uint32_t fragment_size_default, unsigned max_level,
                           unsigned max_bits);
 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
index 5cd45210113f645062750e4ef54ffd4e7dc14da7..5a9534a82d40911cebb02462ba0cc5a995a5bfca 100644 (file)
@@ -5664,6 +5664,11 @@ static int gfx_v8_0_set_powergating_state(void *handle,
        if (amdgpu_sriov_vf(adev))
                return 0;
 
+       if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
+                               AMD_PG_SUPPORT_RLC_SMU_HS |
+                               AMD_PG_SUPPORT_CP |
+                               AMD_PG_SUPPORT_GFX_DMG))
+               adev->gfx.rlc.funcs->enter_safe_mode(adev);
        switch (adev->asic_type) {
        case CHIP_CARRIZO:
        case CHIP_STONEY:
@@ -5713,7 +5718,11 @@ static int gfx_v8_0_set_powergating_state(void *handle,
        default:
                break;
        }
-
+       if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
+                               AMD_PG_SUPPORT_RLC_SMU_HS |
+                               AMD_PG_SUPPORT_CP |
+                               AMD_PG_SUPPORT_GFX_DMG))
+               adev->gfx.rlc.funcs->exit_safe_mode(adev);
        return 0;
 }
 
index 75317f283c6967d2de4daaaf5dca4cfdaf9922b7..ad151fefa41f1ed1d6f19ae1783b13b1b76b4f2e 100644 (file)
@@ -632,12 +632,6 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
        amdgpu_gart_table_vram_unpin(adev);
 }
 
-static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
-{
-       amdgpu_gart_table_vram_free(adev);
-       amdgpu_gart_fini(adev);
-}
-
 static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
                                     u32 status, u32 addr, u32 mc_client)
 {
@@ -935,8 +929,9 @@ static int gmc_v6_0_sw_fini(void *handle)
 
        amdgpu_gem_force_release(adev);
        amdgpu_vm_manager_fini(adev);
-       gmc_v6_0_gart_fini(adev);
+       amdgpu_gart_table_vram_free(adev);
        amdgpu_bo_fini(adev);
+       amdgpu_gart_fini(adev);
        release_firmware(adev->gmc.fw);
        adev->gmc.fw = NULL;
 
index 36dc367c4b45ea86a5a5b575ba357f9717ec92b0..f8d8a3a73e42b31397b97f743ab187da2732cd89 100644 (file)
@@ -746,19 +746,6 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
        amdgpu_gart_table_vram_unpin(adev);
 }
 
-/**
- * gmc_v7_0_gart_fini - vm fini callback
- *
- * @adev: amdgpu_device pointer
- *
- * Tears down the driver GART/VM setup (CIK).
- */
-static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
-{
-       amdgpu_gart_table_vram_free(adev);
-       amdgpu_gart_fini(adev);
-}
-
 /**
  * gmc_v7_0_vm_decode_fault - print human readable fault info
  *
@@ -1095,8 +1082,9 @@ static int gmc_v7_0_sw_fini(void *handle)
        amdgpu_gem_force_release(adev);
        amdgpu_vm_manager_fini(adev);
        kfree(adev->gmc.vm_fault_info);
-       gmc_v7_0_gart_fini(adev);
+       amdgpu_gart_table_vram_free(adev);
        amdgpu_bo_fini(adev);
+       amdgpu_gart_fini(adev);
        release_firmware(adev->gmc.fw);
        adev->gmc.fw = NULL;
 
index 70fc97b59b4f2dcf157b49885c8356fedfca05a3..9333109b210de810119f0d15d94ec5d125a84cf7 100644 (file)
@@ -968,19 +968,6 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
        amdgpu_gart_table_vram_unpin(adev);
 }
 
-/**
- * gmc_v8_0_gart_fini - vm fini callback
- *
- * @adev: amdgpu_device pointer
- *
- * Tears down the driver GART/VM setup (CIK).
- */
-static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
-{
-       amdgpu_gart_table_vram_free(adev);
-       amdgpu_gart_fini(adev);
-}
-
 /**
  * gmc_v8_0_vm_decode_fault - print human readable fault info
  *
@@ -1199,8 +1186,9 @@ static int gmc_v8_0_sw_fini(void *handle)
        amdgpu_gem_force_release(adev);
        amdgpu_vm_manager_fini(adev);
        kfree(adev->gmc.vm_fault_info);
-       gmc_v8_0_gart_fini(adev);
+       amdgpu_gart_table_vram_free(adev);
        amdgpu_bo_fini(adev);
+       amdgpu_gart_fini(adev);
        release_firmware(adev->gmc.fw);
        adev->gmc.fw = NULL;
 
index 399a5db27649728686868550502089391f3b0807..72f8018fa2a836572b9c898785bb99deecc1ca91 100644 (file)
@@ -942,26 +942,12 @@ static int gmc_v9_0_sw_init(void *handle)
        return 0;
 }
 
-/**
- * gmc_v9_0_gart_fini - vm fini callback
- *
- * @adev: amdgpu_device pointer
- *
- * Tears down the driver GART/VM setup (CIK).
- */
-static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
-{
-       amdgpu_gart_table_vram_free(adev);
-       amdgpu_gart_fini(adev);
-}
-
 static int gmc_v9_0_sw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        amdgpu_gem_force_release(adev);
        amdgpu_vm_manager_fini(adev);
-       gmc_v9_0_gart_fini(adev);
 
        /*
        * TODO:
@@ -974,7 +960,9 @@ static int gmc_v9_0_sw_fini(void *handle)
        */
        amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
 
+       amdgpu_gart_table_vram_free(adev);
        amdgpu_bo_fini(adev);
+       amdgpu_gart_fini(adev);
 
        return 0;
 }
index 3f57f6463dc880c797429d9f3080894b929e5eff..cb79a93c2eb73a5f23fb008cee50e80325ada627 100644 (file)
@@ -65,8 +65,6 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
                                            int min_temp, int max_temp);
 static int kv_init_fps_limits(struct amdgpu_device *adev);
 
-static void kv_dpm_powergate_uvd(void *handle, bool gate);
-static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
 static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
 static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);
 
@@ -1354,8 +1352,6 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
                return ret;
        }
 
-       kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
-
        if (adev->irq.installed &&
            amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
                ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
@@ -1374,6 +1370,8 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
 
 static void kv_dpm_disable(struct amdgpu_device *adev)
 {
+       struct kv_power_info *pi = kv_get_pi(adev);
+
        amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
                       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
        amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
@@ -1387,8 +1385,10 @@ static void kv_dpm_disable(struct amdgpu_device *adev)
        /* powerup blocks */
        kv_dpm_powergate_acp(adev, false);
        kv_dpm_powergate_samu(adev, false);
-       kv_dpm_powergate_vce(adev, false);
-       kv_dpm_powergate_uvd(adev, false);
+       if (pi->caps_vce_pg) /* power on the VCE block */
+               amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
+       if (pi->caps_uvd_pg) /* power on the UVD block */
+               amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
 
        kv_enable_smc_cac(adev, false);
        kv_enable_didt(adev, false);
@@ -1551,7 +1551,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
        int ret;
 
        if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
-               kv_dpm_powergate_vce(adev, false);
                if (pi->caps_stable_p_state)
                        pi->vce_boot_level = table->count - 1;
                else
@@ -1573,7 +1572,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
                kv_enable_vce_dpm(adev, true);
        } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
                kv_enable_vce_dpm(adev, false);
-               kv_dpm_powergate_vce(adev, true);
        }
 
        return 0;
@@ -1702,24 +1700,32 @@ static void kv_dpm_powergate_uvd(void *handle, bool gate)
        }
 }
 
-static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
+static void kv_dpm_powergate_vce(void *handle, bool gate)
 {
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct kv_power_info *pi = kv_get_pi(adev);
-
-       if (pi->vce_power_gated == gate)
-               return;
+       int ret;
 
        pi->vce_power_gated = gate;
 
-       if (!pi->caps_vce_pg)
-               return;
-
-       if (gate)
-               amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
-       else
-               amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
+       if (gate) {
+               /* stop the VCE block */
+               ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+                                                            AMD_PG_STATE_GATE);
+               kv_enable_vce_dpm(adev, false);
+               if (pi->caps_vce_pg) /* power off the VCE block */
+                       amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
+       } else {
+               if (pi->caps_vce_pg) /* power on the VCE block */
+                       amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
+               kv_enable_vce_dpm(adev, true);
+               /* re-init the VCE block */
+               ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+                                                            AMD_PG_STATE_UNGATE);
+       }
 }
 
+
 static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
 {
        struct kv_power_info *pi = kv_get_pi(adev);
@@ -3061,7 +3067,7 @@ static int kv_dpm_hw_init(void *handle)
        else
                adev->pm.dpm_enabled = true;
        mutex_unlock(&adev->pm.mutex);
-
+       amdgpu_pm_compute_clocks(adev);
        return ret;
 }
 
@@ -3313,6 +3319,9 @@ static int kv_set_powergating_by_smu(void *handle,
        case AMD_IP_BLOCK_TYPE_UVD:
                kv_dpm_powergate_uvd(handle, gate);
                break;
+       case AMD_IP_BLOCK_TYPE_VCE:
+               kv_dpm_powergate_vce(handle, gate);
+               break;
        default:
                break;
        }
index e7ca4623cfb946f41b0e3e1ffb107a3f295daedb..7c3b634d8d5f4117698ca46542eba887b85c4692 100644 (file)
@@ -70,6 +70,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
@@ -81,7 +82,8 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
-       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0)
+       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xfc000000, 0x00000000)
 };
 
 static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
@@ -109,7 +111,8 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] =
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
-       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0)
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
 };
 
 static const struct soc15_reg_golden golden_settings_sdma_4_2[] =
index db327b4125626d411e155de18bfeb28f4efb92b4..1de96995e6900c934c91cf610160768b6c08cd37 100644 (file)
@@ -6887,7 +6887,6 @@ static int si_dpm_enable(struct amdgpu_device *adev)
 
        si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
        si_thermal_start_thermal_controller(adev);
-       ni_update_current_ps(adev, boot_ps);
 
        return 0;
 }
@@ -7763,7 +7762,7 @@ static int si_dpm_hw_init(void *handle)
        else
                adev->pm.dpm_enabled = true;
        mutex_unlock(&adev->pm.mutex);
-
+       amdgpu_pm_compute_clocks(adev);
        return ret;
 }
 
index 1b048715ab8a1df42a391a527823b98212534be5..29ac74f40dceb729436907215427298ae9190909 100644 (file)
@@ -457,7 +457,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 
        if (kfd->kfd2kgd->init_gtt_mem_allocation(
                        kfd->kgd, size, &kfd->gtt_mem,
-                       &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)){
+                       &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
+                       false)) {
                dev_err(kfd_device, "Could not allocate %d bytes\n", size);
                goto out;
        }
index 7a61f38c09e65bfd9b33d668799efd9d79ab7aab..01494752c36a8c7ba08ce83569677f47e828723c 100644 (file)
@@ -62,9 +62,20 @@ int kfd_iommu_device_init(struct kfd_dev *kfd)
        struct amd_iommu_device_info iommu_info;
        unsigned int pasid_limit;
        int err;
+       struct kfd_topology_device *top_dev;
 
-       if (!kfd->device_info->needs_iommu_device)
+       top_dev = kfd_topology_device_by_id(kfd->id);
+
+       /*
+        * Overwrite the ATS capability according to needs_iommu_device to fix
+        * a potentially missing bit in the CRAT table provided by the BIOS.
+        */
+       if (!kfd->device_info->needs_iommu_device) {
+               top_dev->node_props.capability &= ~HSA_CAP_ATS_PRESENT;
                return 0;
+       }
+
+       top_dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
 
        iommu_info.flags = 0;
        err = amd_iommu_device_info(kfd->pdev, &iommu_info);
index f5fc3675f21eda3d7c769aed2130a9360382afad..0cedb37cf513563dc6fea50e6b40ef0889c3bb61 100644 (file)
@@ -88,7 +88,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
                                ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
                        &((*mqd_mem_obj)->gtt_mem),
                        &((*mqd_mem_obj)->gpu_addr),
-                       (void *)&((*mqd_mem_obj)->cpu_ptr));
+                       (void *)&((*mqd_mem_obj)->cpu_ptr), true);
        } else
                retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd),
                                mqd_mem_obj);
index f971710f1c91b2edec3670f0a36e837ef947190c..92b285ca73aabb1b225f37e17bd36a9f05e9641e 100644 (file)
@@ -806,6 +806,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu);
 int kfd_topology_remove_device(struct kfd_dev *gpu);
 struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
                                                uint32_t proximity_domain);
+struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
 struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
 struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
 int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
index bc95d4dfee2e930800426c85bd5c6831efc6294f..80f5db4ef75fd03ded8df14abce8348964386d69 100644 (file)
@@ -63,22 +63,33 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
        return device;
 }
 
-struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
+struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id)
 {
-       struct kfd_topology_device *top_dev;
-       struct kfd_dev *device = NULL;
+       struct kfd_topology_device *top_dev = NULL;
+       struct kfd_topology_device *ret = NULL;
 
        down_read(&topology_lock);
 
        list_for_each_entry(top_dev, &topology_device_list, list)
                if (top_dev->gpu_id == gpu_id) {
-                       device = top_dev->gpu;
+                       ret = top_dev;
                        break;
                }
 
        up_read(&topology_lock);
 
-       return device;
+       return ret;
+}
+
+struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
+{
+       struct kfd_topology_device *top_dev;
+
+       top_dev = kfd_topology_device_by_id(gpu_id);
+       if (!top_dev)
+               return NULL;
+
+       return top_dev->gpu;
 }
 
 struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
index fbe878ae1e8c579cc6e2571e30e95400ba477cad..4ba0003a9d329545a59e1b8050bcecf922a5e55d 100644 (file)
@@ -480,12 +480,20 @@ void pp_rv_set_display_requirement(struct pp_smu *pp,
 {
        struct dc_context *ctx = pp->ctx;
        struct amdgpu_device *adev = ctx->driver_context;
+       void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       struct pp_display_clock_request clock = {0};
 
-       if (!pp_funcs || !pp_funcs->display_configuration_changed)
+       if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
                return;
 
-       amdgpu_dpm_display_configuration_changed(adev);
+       clock.clock_type = amd_pp_dcf_clock;
+       clock.clock_freq_in_khz = req->hard_min_dcefclk_khz;
+       pp_funcs->display_clock_voltage_request(pp_handle, &clock);
+
+       clock.clock_type = amd_pp_f_clock;
+       clock.clock_freq_in_khz = req->hard_min_fclk_khz;
+       pp_funcs->display_clock_voltage_request(pp_handle, &clock);
 }
 
 void pp_rv_set_wm_ranges(struct pp_smu *pp,
index 567867915d32d84cb6bb68d3e4da33842de9b2e4..37eaf72ace549d6f132b9fc5933da434fc164396 100644 (file)
@@ -754,8 +754,12 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
                         * fail-safe mode
                         */
                        if (dc_is_hdmi_signal(link->connector_signal) ||
-                           dc_is_dvi_signal(link->connector_signal))
+                           dc_is_dvi_signal(link->connector_signal)) {
+                               if (prev_sink != NULL)
+                                       dc_sink_release(prev_sink);
+
                                return false;
+                       }
                default:
                        break;
                }
index 14391b06080ce4566ebbae1d8fd1a8313ac36bc8..43b82e14007ef0d7d1f9ac9bf8d8a121cb490c28 100644 (file)
@@ -292,7 +292,7 @@ struct tile_config {
 struct kfd2kgd_calls {
        int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size,
                                        void **mem_obj, uint64_t *gpu_addr,
-                                       void **cpu_ptr);
+                                       void **cpu_ptr, bool mqd_gfx9);
 
        void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj);
 
index 3eb061e11e2efb2caa36738b6425d2f789c19f6d..018fcdb353d254293456b613af5ed0e25ec07caa 100644 (file)
@@ -2067,7 +2067,7 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
 
-       if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
+       if (!drm_drv_uses_atomic_modeset(dev))
                return;
 
        list_for_each_entry(plane, &config->plane_list, head) {
index 6f28fe58f1696ca5010915149173e966df35ffc7..373bd4c2b698ba1ebcb3b4572cb6c84b3ce448fe 100644 (file)
@@ -151,7 +151,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
                return ret;
        }
 
-       if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
+       if (drm_drv_uses_atomic_modeset(dev)) {
                ret = drm_atomic_debugfs_init(minor);
                if (ret) {
                        DRM_ERROR("Failed to create atomic debugfs files\n");
index 4b0dd20bccb8f3cc2b7da96eb4c4203dabf2815f..16ec93b75dbfaf87fca050d84dfe8ee292226424 100644 (file)
@@ -2370,7 +2370,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
 {
        int c, o;
        struct drm_connector *connector;
-       const struct drm_connector_helper_funcs *connector_funcs;
        int my_score, best_score, score;
        struct drm_fb_helper_crtc **crtcs, *crtc;
        struct drm_fb_helper_connector *fb_helper_conn;
@@ -2399,8 +2398,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
        if (drm_has_preferred_mode(fb_helper_conn, width, height))
                my_score++;
 
-       connector_funcs = connector->helper_private;
-
        /*
         * select a crtc for this connector and then attempt to configure
         * remaining connectors
index 6e3f56684f4ec03e7285b9e3715688ce31de311d..51ed99a37803310e2d9b7e84882ae7df97e3b59c 100644 (file)
@@ -170,20 +170,22 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
                unsigned int tiling_mode = 0;
                unsigned int stride = 0;
 
-               switch (info->drm_format_mod << 10) {
-               case PLANE_CTL_TILED_LINEAR:
+               switch (info->drm_format_mod) {
+               case DRM_FORMAT_MOD_LINEAR:
                        tiling_mode = I915_TILING_NONE;
                        break;
-               case PLANE_CTL_TILED_X:
+               case I915_FORMAT_MOD_X_TILED:
                        tiling_mode = I915_TILING_X;
                        stride = info->stride;
                        break;
-               case PLANE_CTL_TILED_Y:
+               case I915_FORMAT_MOD_Y_TILED:
+               case I915_FORMAT_MOD_Yf_TILED:
                        tiling_mode = I915_TILING_Y;
                        stride = info->stride;
                        break;
                default:
-                       gvt_dbg_core("not supported tiling mode\n");
+                       gvt_dbg_core("invalid drm_format_mod %llx for tiling\n",
+                                    info->drm_format_mod);
                }
                obj->tiling_and_stride = tiling_mode | stride;
        } else {
@@ -222,9 +224,26 @@ static int vgpu_get_plane_info(struct drm_device *dev,
                info->height = p.height;
                info->stride = p.stride;
                info->drm_format = p.drm_format;
-               info->drm_format_mod = p.tiled;
+
+               switch (p.tiled) {
+               case PLANE_CTL_TILED_LINEAR:
+                       info->drm_format_mod = DRM_FORMAT_MOD_LINEAR;
+                       break;
+               case PLANE_CTL_TILED_X:
+                       info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
+                       break;
+               case PLANE_CTL_TILED_Y:
+                       info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
+                       break;
+               case PLANE_CTL_TILED_YF:
+                       info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
+                       break;
+               default:
+                       gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
+               }
+
                info->size = (((p.stride * p.height * p.bpp) / 8) +
-                               (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+                             (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        } else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
                ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
                if (ret)
index face664be3e8e8bf673e589c7ff9b176f8102a76..481896fb712abf4c178b28af2f224a369d0aefd9 100644 (file)
@@ -220,8 +220,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
        if (IS_SKYLAKE(dev_priv)
                || IS_KABYLAKE(dev_priv)
                || IS_BROXTON(dev_priv)) {
-               plane->tiled = (val & PLANE_CTL_TILED_MASK) >>
-               _PLANE_CTL_TILED_SHIFT;
+               plane->tiled = val & PLANE_CTL_TILED_MASK;
                fmt = skl_format_to_drm(
                        val & PLANE_CTL_FORMAT_MASK,
                        val & PLANE_CTL_ORDER_RGBX,
@@ -260,7 +259,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
                return  -EINVAL;
        }
 
-       plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10),
+       plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled,
                (IS_SKYLAKE(dev_priv)
                || IS_KABYLAKE(dev_priv)
                || IS_BROXTON(dev_priv)) ?
index cb055f3c81a29c629f2dcc327ce0da9a1b827ee3..60c155085029cbb4742341a94d4d24a56da3d0c5 100644 (file)
@@ -101,7 +101,7 @@ struct intel_gvt;
 /* color space conversion and gamma correction are not included */
 struct intel_vgpu_primary_plane_format {
        u8      enabled;        /* plane is enabled */
-       u8      tiled;          /* X-tiled */
+       u32     tiled;          /* tiling mode: linear, X-tiled, Y tiled, etc */
        u8      bpp;            /* bits per pixel */
        u32     hw_format;      /* format field in the PRI_CTL register */
        u32     drm_format;     /* format in DRM definition */
index 7a58ca5551977a086ce8b25dbe18aa26497f45bc..94c1089ecf59ee46178a6e73be6f73d6307936a0 100644 (file)
@@ -1296,6 +1296,19 @@ static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
        return 0;
 }
 
+static int gen9_dbuf_ctl_mmio_write(struct intel_vgpu *vgpu,
+               unsigned int offset, void *p_data, unsigned int bytes)
+{
+       write_vreg(vgpu, offset, p_data, bytes);
+
+       if (vgpu_vreg(vgpu, offset) & DBUF_POWER_REQUEST)
+               vgpu_vreg(vgpu, offset) |= DBUF_POWER_STATE;
+       else
+               vgpu_vreg(vgpu, offset) &= ~DBUF_POWER_STATE;
+
+       return 0;
+}
+
 static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
        unsigned int offset, void *p_data, unsigned int bytes)
 {
@@ -1525,9 +1538,15 @@ static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
        u32 v = *(u32 *)p_data;
        u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0;
 
-       vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
-       vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
-       vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
+       switch (offset) {
+       case _PHY_CTL_FAMILY_EDP:
+               vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
+               break;
+       case _PHY_CTL_FAMILY_DDI:
+               vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
+               vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
+               break;
+       }
 
        vgpu_vreg(vgpu, offset) = v;
 
@@ -2812,6 +2831,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL,
                skl_power_well_ctl_write);
 
+       MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
+
        MMIO_D(_MMIO(0xa210), D_SKL_PLUS);
        MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
        MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
@@ -2987,8 +3008,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
                NULL, gen9_trtte_write);
        MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
 
-       MMIO_D(_MMIO(0x45008), D_SKL_PLUS);
-
        MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
 
        MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
@@ -3025,7 +3044,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
        MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
-               NULL, NULL);
+                NULL, NULL);
+       MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+                NULL, NULL);
 
        MMIO_D(_MMIO(0x4ab8), D_KBL);
        MMIO_D(_MMIO(0x2248), D_KBL | D_SKL);
@@ -3189,6 +3210,7 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
        MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT);
 
        MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT);
+       MMIO_D(GEN9_CLKGATE_DIS_4, D_BXT);
 
        MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT);
        MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
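
The new gen9_dbuf_ctl_mmio_write() handler above is a small example of request/acknowledge register emulation: when the guest sets the power-request bit, the handler mirrors it into the power-state bit so the guest's readback poll completes. A stand-alone sketch of that behaviour, with made-up bit positions rather than the real DBUF_CTL layout:

#include <stdint.h>
#include <stdio.h>

#define POWER_REQUEST (1u << 31)               /* illustrative bit positions */
#define POWER_STATE   (1u << 30)

static uint32_t dbuf_ctl_write(uint32_t value)
{
	uint32_t reg = value;                  /* store the guest's write */

	if (reg & POWER_REQUEST)               /* reflect the request into the ack bit */
		reg |= POWER_STATE;
	else
		reg &= ~POWER_STATE;

	return reg;
}

int main(void)
{
	uint32_t reg = dbuf_ctl_write(POWER_REQUEST);

	printf("power state acked: %s\n", (reg & POWER_STATE) ? "yes" : "no");
	return 0;
}
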
index a45f46d8537f15bd187fd4195d7de2c6b6dffd2a..9ad89e38f6c07643f2176afade5c223388ada755 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/device.h>
 #include <linux/mm.h>
 #include <linux/mmu_context.h>
+#include <linux/sched/mm.h>
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/rbtree.h>
@@ -1792,16 +1793,21 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
        info = (struct kvmgt_guest_info *)handle;
        kvm = info->kvm;
 
-       if (kthread)
+       if (kthread) {
+               if (!mmget_not_zero(kvm->mm))
+                       return -EFAULT;
                use_mm(kvm->mm);
+       }
 
        idx = srcu_read_lock(&kvm->srcu);
        ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
                      kvm_read_guest(kvm, gpa, buf, len);
        srcu_read_unlock(&kvm->srcu, idx);
 
-       if (kthread)
+       if (kthread) {
                unuse_mm(kvm->mm);
+               mmput(kvm->mm);
+       }
 
        return ret;
 }
@@ -1827,6 +1833,8 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
 {
        struct kvmgt_guest_info *info;
        struct kvm *kvm;
+       int idx;
+       bool ret;
 
        if (!handle_valid(handle))
                return false;
@@ -1834,8 +1842,11 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
        info = (struct kvmgt_guest_info *)handle;
        kvm = info->kvm;
 
-       return kvm_is_visible_gfn(kvm, gfn);
+       idx = srcu_read_lock(&kvm->srcu);
+       ret = kvm_is_visible_gfn(kvm, gfn);
+       srcu_read_unlock(&kvm->srcu, idx);
 
+       return ret;
 }
 
 struct intel_gvt_mpt kvmgt_mpt = {
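
The kvmgt_rw_gpa() fix above is the standard recipe for a kernel thread that borrows another process's address space: pin the mm with mmget_not_zero() before use_mm(), and release it with mmput() after unuse_mm(), so the mm cannot disappear mid-access. A condensed kernel-context sketch, where the helper name and the elided guest read/write are illustrative:

#include <linux/errno.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>

static int copy_with_borrowed_mm(struct mm_struct *mm, bool kthread)
{
	int ret;

	if (kthread) {
		if (!mmget_not_zero(mm))       /* the mm may already be exiting */
			return -EFAULT;
		use_mm(mm);                    /* temporarily adopt the address space */
	}

	ret = 0;                               /* ... kvm_read_guest()/kvm_write_guest() ... */

	if (kthread) {
		unuse_mm(mm);
		mmput(mm);                     /* balance mmget_not_zero() */
	}

	return ret;
}
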
index 994366035364b7576db8ed2ec1036d417cc39d9c..9bb9a85c992ca16a13552af798d8e47e3061dd52 100644 (file)
@@ -244,6 +244,34 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
 
                /* set the bit 0:2(Core C-State ) to C0 */
                vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
+
+               if (IS_BROXTON(vgpu->gvt->dev_priv)) {
+                       vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &=
+                                   ~(BIT(0) | BIT(1));
+                       vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
+                                   ~PHY_POWER_GOOD;
+                       vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
+                                   ~PHY_POWER_GOOD;
+                       vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) &=
+                                   ~BIT(30);
+                       vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) &=
+                                   ~BIT(30);
+                       vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) &=
+                                   ~BXT_PHY_LANE_ENABLED;
+                       vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) |=
+                                   BXT_PHY_CMNLANE_POWERDOWN_ACK |
+                                   BXT_PHY_LANE_POWERDOWN_ACK;
+                       vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) &=
+                                   ~BXT_PHY_LANE_ENABLED;
+                       vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) |=
+                                   BXT_PHY_CMNLANE_POWERDOWN_ACK |
+                                   BXT_PHY_LANE_POWERDOWN_ACK;
+                       vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) &=
+                                   ~BXT_PHY_LANE_ENABLED;
+                       vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |=
+                                   BXT_PHY_CMNLANE_POWERDOWN_ACK |
+                                   BXT_PHY_LANE_POWERDOWN_ACK;
+               }
        } else {
 #define GVT_GEN8_MMIO_RESET_OFFSET             (0x44200)
                /* only reset the engine related, so starting with 0x44200
index 42e1e6bdcc2cfe64a3446eea8019b9a912141ba0..e872f4847fbe0ce9a1e646bbea84eb0b6b2e1f29 100644 (file)
@@ -562,11 +562,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
         * performance for batch mmio read/write, so we need to
         * handle forcewake manually.
         */
-       intel_runtime_pm_get(dev_priv);
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
        switch_mmio(pre, next, ring_id);
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-       intel_runtime_pm_put(dev_priv);
 }
 
 /**
index fa75a2eead9070fbfd1894b659b37f84eed9e11a..b0d3a43ccd033761dfefdf077e09526a98aff3c0 100644 (file)
@@ -42,8 +42,6 @@
 #define DEVICE_TYPE_EFP3   0x20
 #define DEVICE_TYPE_EFP4   0x10
 
-#define DEV_SIZE       38
-
 struct opregion_header {
        u8 signature[16];
        u32 size;
@@ -63,6 +61,10 @@ struct bdb_data_header {
        u16 size; /* data size */
 } __packed;
 
+/* To support Windows guests with an OpRegion, hardcode the emulated
+ * BDB header version as '186'; the corresponding child_device_config
+ * length is then '33' rather than '38'.
+ */
 struct efp_child_device_config {
        u16 handle;
        u16 device_type;
@@ -109,12 +111,6 @@ struct efp_child_device_config {
        u8 mipi_bridge_type; /* 171 */
        u16 device_class_ext;
        u8 dvo_function;
-       u8 dp_usb_type_c:1; /* 195 */
-       u8 skip6:7;
-       u8 dp_usb_type_c_2x_gpio_index; /* 195 */
-       u16 dp_usb_type_c_2x_gpio_pin; /* 195 */
-       u8 iboost_dp:4; /* 196 */
-       u8 iboost_hdmi:4; /* 196 */
 } __packed;
 
 struct vbt {
@@ -155,7 +151,7 @@ static void virt_vbt_generation(struct vbt *v)
        v->header.bdb_offset = offsetof(struct vbt, bdb_header);
 
        strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK");
-       v->bdb_header.version = 186; /* child_dev_size = 38 */
+       v->bdb_header.version = 186; /* child_dev_size = 33 */
        v->bdb_header.header_size = sizeof(v->bdb_header);
 
        v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header)
@@ -169,11 +165,13 @@ static void virt_vbt_generation(struct vbt *v)
 
        /* child device */
        num_child = 4; /* each port has one child */
+       v->general_definitions.child_dev_size =
+               sizeof(struct efp_child_device_config);
        v->general_definitions_header.id = BDB_GENERAL_DEFINITIONS;
        /* size will include child devices */
        v->general_definitions_header.size =
-               sizeof(struct bdb_general_definitions) + num_child * DEV_SIZE;
-       v->general_definitions.child_dev_size = DEV_SIZE;
+               sizeof(struct bdb_general_definitions) +
+                       num_child * v->general_definitions.child_dev_size;
 
        /* portA */
        v->child0.handle = DEVICE_TYPE_EFP1;
index 09d7bb72b4ff30e45688add463c1fdaaa9a71ad1..c32e7d5e862914b787d7cf5cbf706cb5c4a41748 100644 (file)
@@ -47,11 +47,15 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
        return false;
 }
 
+/* Give a newly started vGPU higher scheduling priority for 2 seconds */
+#define GVT_SCHED_VGPU_PRI_TIME  2
+
 struct vgpu_sched_data {
        struct list_head lru_list;
        struct intel_vgpu *vgpu;
        bool active;
-
+       bool pri_sched;
+       ktime_t pri_time;
        ktime_t sched_in_time;
        ktime_t sched_time;
        ktime_t left_ts;
@@ -183,6 +187,14 @@ static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
                if (!vgpu_has_pending_workload(vgpu_data->vgpu))
                        continue;
 
+               if (vgpu_data->pri_sched) {
+                       if (ktime_before(ktime_get(), vgpu_data->pri_time)) {
+                               vgpu = vgpu_data->vgpu;
+                               break;
+                       } else
+                               vgpu_data->pri_sched = false;
+               }
+
                /* Return the vGPU only if it has time slice left */
                if (vgpu_data->left_ts > 0) {
                        vgpu = vgpu_data->vgpu;
@@ -202,6 +214,7 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct vgpu_sched_data *vgpu_data;
        struct intel_vgpu *vgpu = NULL;
+
        /* no active vgpu or has already had a target */
        if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
                goto out;
@@ -209,12 +222,13 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
        vgpu = find_busy_vgpu(sched_data);
        if (vgpu) {
                scheduler->next_vgpu = vgpu;
-
-               /* Move the last used vGPU to the tail of lru_list */
                vgpu_data = vgpu->sched_data;
-               list_del_init(&vgpu_data->lru_list);
-               list_add_tail(&vgpu_data->lru_list,
-                               &sched_data->lru_runq_head);
+               if (!vgpu_data->pri_sched) {
+                       /* Move the last used vGPU to the tail of lru_list */
+                       list_del_init(&vgpu_data->lru_list);
+                       list_add_tail(&vgpu_data->lru_list,
+                                     &sched_data->lru_runq_head);
+               }
        } else {
                scheduler->next_vgpu = gvt->idle_vgpu;
        }
@@ -328,11 +342,17 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
 {
        struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
        struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+       ktime_t now;
 
        if (!list_empty(&vgpu_data->lru_list))
                return;
 
-       list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head);
+       now = ktime_get();
+       vgpu_data->pri_time = ktime_add(now,
+                                       ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0));
+       vgpu_data->pri_sched = true;
+
+       list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head);
 
        if (!hrtimer_active(&sched_data->timer))
                hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
@@ -426,6 +446,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
                &vgpu->gvt->scheduler;
        int ring_id;
        struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
        if (!vgpu_data->active)
                return;
@@ -444,6 +465,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
                scheduler->current_vgpu = NULL;
        }
 
+       intel_runtime_pm_get(dev_priv);
        spin_lock_bh(&scheduler->mmio_context_lock);
        for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
                if (scheduler->engine_owner[ring_id] == vgpu) {
@@ -452,5 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
                }
        }
        spin_unlock_bh(&scheduler->mmio_context_lock);
+       intel_runtime_pm_put(dev_priv);
        mutex_unlock(&vgpu->gvt->sched_lock);
 }
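
The sched_policy.c hunks above give a freshly started vGPU a short priority window (GVT_SCHED_VGPU_PRI_TIME seconds): find_busy_vgpu() keeps picking it until pri_time elapses, after which pri_sched is cleared and the normal LRU time-slice path takes over. Below is a minimal user-space sketch of that time-window check, assuming only POSIX clock_gettime(); everything outside the pri_sched/pri_time fields is hypothetical scaffolding, not the driver's code.

	#include <stdbool.h>
	#include <stdio.h>
	#include <time.h>

	#define VGPU_PRI_WINDOW_SEC 2		/* mirrors GVT_SCHED_VGPU_PRI_TIME */

	struct vgpu_stub {
		bool pri_sched;			/* inside the start-up priority window? */
		struct timespec pri_time;	/* absolute end of the window */
	};

	static struct timespec now(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return ts;
	}

	static bool before(struct timespec a, struct timespec b)
	{
		return a.tv_sec < b.tv_sec ||
		       (a.tv_sec == b.tv_sec && a.tv_nsec < b.tv_nsec);
	}

	/* Arm the priority window when the vGPU begins scheduling. */
	static void start_schedule(struct vgpu_stub *v)
	{
		v->pri_time = now();
		v->pri_time.tv_sec += VGPU_PRI_WINDOW_SEC;
		v->pri_sched = true;
	}

	/* Keep picking this vGPU while the window lasts, then fall back to LRU. */
	static bool has_priority(struct vgpu_stub *v)
	{
		if (v->pri_sched && before(now(), v->pri_time))
			return true;
		v->pri_sched = false;
		return false;
	}

	int main(void)
	{
		struct vgpu_stub v = { 0 };

		start_schedule(&v);
		printf("priority right after start: %d\n", has_priority(&v));
		return 0;
	}
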
index a4e8e3cf74fd58b400331bc608516e9414e458d3..c628be05fbfe907a1bce89fd4727df79906fe63f 100644 (file)
@@ -281,6 +281,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
        intel_vgpu_clean_submission(vgpu);
        intel_vgpu_clean_display(vgpu);
        intel_vgpu_clean_opregion(vgpu);
+       intel_vgpu_reset_ggtt(vgpu, true);
        intel_vgpu_clean_gtt(vgpu);
        intel_gvt_hypervisor_detach_vgpu(vgpu);
        intel_vgpu_free_resource(vgpu);
index 08ec7446282e7f981f74272e8c29c755abad9e21..9e63cd47b60f30a5b0f7682d2c3038dd81e2b3ce 100644 (file)
@@ -10422,7 +10422,7 @@ enum skl_power_gate {
                                                           _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
                                                           _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC)
 #define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
-                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC)
 #define  DSC_INITIAL_DEC_DELAY(dec_delay)       ((dec_delay) << 16)
 #define  DSC_INITIAL_XMIT_DELAY(xmit_delay)     ((xmit_delay) << 0)
@@ -10437,7 +10437,7 @@ enum skl_power_gate {
                                                           _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \
                                                           _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC)
 #define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
-                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC)
 #define  DSC_SCALE_DEC_INTINT(scale_dec)       ((scale_dec) << 16)
 #define  DSC_SCALE_INC_INT(scale_inc)          ((scale_inc) << 0)
index 11d834f942205f37c10c1a95a345b8908cb3411f..98358b4b36dea7e13177bdf38554ffaad4f994e9 100644 (file)
@@ -199,7 +199,6 @@ vma_create(struct drm_i915_gem_object *obj,
                vma->flags |= I915_VMA_GGTT;
                list_add(&vma->obj_link, &obj->vma_list);
        } else {
-               i915_ppgtt_get(i915_vm_to_ppgtt(vm));
                list_add_tail(&vma->obj_link, &obj->vma_list);
        }
 
@@ -807,9 +806,6 @@ static void __i915_vma_destroy(struct i915_vma *vma)
        if (vma->obj)
                rb_erase(&vma->obj_node, &vma->obj->vma_tree);
 
-       if (!i915_vma_is_ggtt(vma))
-               i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
-
        rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) {
                GEM_BUG_ON(i915_gem_active_isset(&iter->base));
                kfree(iter);
index b725835b47efc5116b53e3f27eeb6e0fcdbb65b4..769f3f5866611174cbabeca5e4d1fbb0711b9b86 100644 (file)
@@ -962,9 +962,6 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
 {
        int ret;
 
-       if (INTEL_INFO(dev_priv)->num_pipes == 0)
-               return;
-
        ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops);
        if (ret < 0) {
                DRM_ERROR("failed to add audio component (%d)\n", ret);
index 8761513f3532c5c4bfb56151833ab6217a5ab99f..c9af34861d9e3a5bf3c090f4d5cf7afafed83dfa 100644 (file)
@@ -2708,7 +2708,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
        if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
                intel_dp_stop_link_train(intel_dp);
 
-       intel_ddi_enable_pipe_clock(crtc_state);
+       if (!is_mst)
+               intel_ddi_enable_pipe_clock(crtc_state);
 }
 
 static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
@@ -2810,14 +2811,14 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
        bool is_mst = intel_crtc_has_type(old_crtc_state,
                                          INTEL_OUTPUT_DP_MST);
 
-       intel_ddi_disable_pipe_clock(old_crtc_state);
-
-       /*
-        * Power down sink before disabling the port, otherwise we end
-        * up getting interrupts from the sink on detecting link loss.
-        */
-       if (!is_mst)
+       if (!is_mst) {
+               intel_ddi_disable_pipe_clock(old_crtc_state);
+               /*
+                * Power down sink before disabling the port, otherwise we end
+                * up getting interrupts from the sink on detecting link loss.
+                */
                intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+       }
 
        intel_disable_ddi_buf(encoder);
 
index ed3fa1c8a98342d549ec8bf5b027b3b783affa08..d2951096bca0d48caaf10abf1827ef81152abdc8 100644 (file)
@@ -2988,6 +2988,7 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
        int w = drm_rect_width(&plane_state->base.src) >> 16;
        int h = drm_rect_height(&plane_state->base.src) >> 16;
        int dst_x = plane_state->base.dst.x1;
+       int dst_w = drm_rect_width(&plane_state->base.dst);
        int pipe_src_w = crtc_state->pipe_src_w;
        int max_width = skl_max_plane_width(fb, 0, rotation);
        int max_height = 4096;
@@ -3009,10 +3010,10 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
         * screen may cause FIFO underflow and display corruption.
         */
        if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
-           (dst_x + w < 4 || dst_x > pipe_src_w - 4)) {
+           (dst_x + dst_w < 4 || dst_x > pipe_src_w - 4)) {
                DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n",
-                             dst_x + w < 4 ? "end" : "start",
-                             dst_x + w < 4 ? dst_x + w : dst_x,
+                             dst_x + dst_w < 4 ? "end" : "start",
+                             dst_x + dst_w < 4 ? dst_x + dst_w : dst_x,
                              4, pipe_src_w - 4);
                return -ERANGE;
        }
@@ -5078,10 +5079,14 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
                mutex_lock(&dev_priv->pcu_lock);
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
                mutex_unlock(&dev_priv->pcu_lock);
-               /* wait for pcode to finish disabling IPS, which may take up to 42ms */
+               /*
+                * Wait for PCODE to finish disabling IPS. The BSpec-specified
+                * 42ms timeout value leads to occasional timeouts, so use 100ms
+                * instead.
+                */
                if (intel_wait_for_register(dev_priv,
                                            IPS_CTL, IPS_ENABLE, 0,
-                                           42))
+                                           100))
                        DRM_ERROR("Timed out waiting for IPS disable\n");
        } else {
                I915_WRITE(IPS_CTL, 0);
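
The skl_check_main_surface() hunk above switches the GLK/CNL edge check from the source width (w) to the destination rectangle width (dst_w), so a scaled plane is validated against what actually lands on the pipe. A small stand-alone sketch of that bounds check follows; only the condition mirrors the patch, and the numbers in main() are made up for illustration.

	#include <stdio.h>

	/*
	 * Reject plane placements whose visible edge would sit within 4 pixels
	 * of either side of the pipe, using the destination width as the hunk
	 * above does.
	 */
	static int check_plane_x(int dst_x, int dst_w, int pipe_src_w)
	{
		if (dst_x + dst_w < 4 || dst_x > pipe_src_w - 4) {
			fprintf(stderr,
				"requested plane X %s position %d invalid (valid range %d-%d)\n",
				dst_x + dst_w < 4 ? "end" : "start",
				dst_x + dst_w < 4 ? dst_x + dst_w : dst_x,
				4, pipe_src_w - 4);
			return -1;
		}
		return 0;
	}

	int main(void)
	{
		printf("%d\n", check_plane_x(0, 256, 1920));	/* accepted */
		printf("%d\n", check_plane_x(-300, 256, 1920));	/* end < 4, rejected */
		return 0;
	}
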
index cd0f649b57a5b75dff70265637a3a4b0ead4373b..1193202766a2cb86721cc8aa918c5f692c822c7e 100644 (file)
@@ -4160,18 +4160,6 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
        return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
 }
 
-/*
- * If display is now connected check links status,
- * there has been known issues of link loss triggering
- * long pulse.
- *
- * Some sinks (eg. ASUS PB287Q) seem to perform some
- * weird HPD ping pong during modesets. So we can apparently
- * end up with HPD going low during a modeset, and then
- * going back up soon after. And once that happens we must
- * retrain the link to get a picture. That's in case no
- * userspace component reacted to intermittent HPD dip.
- */
 int intel_dp_retrain_link(struct intel_encoder *encoder,
                          struct drm_modeset_acquire_ctx *ctx)
 {
@@ -4661,7 +4649,8 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
 }
 
 static int
-intel_dp_long_pulse(struct intel_connector *connector)
+intel_dp_long_pulse(struct intel_connector *connector,
+                   struct drm_modeset_acquire_ctx *ctx)
 {
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
@@ -4720,6 +4709,22 @@ intel_dp_long_pulse(struct intel_connector *connector)
                 */
                status = connector_status_disconnected;
                goto out;
+       } else {
+               /*
+                * If display is now connected check links status,
+                * there has been known issues of link loss triggering
+                * long pulse.
+                *
+                * Some sinks (e.g. ASUS PB287Q) seem to perform some
+                * weird HPD ping pong during modesets. So we can apparently
+                * end up with HPD going low during a modeset, and then
+                * going back up soon after. And once that happens we must
+                * retrain the link to get a picture. That's in case no
+                * userspace component reacted to intermittent HPD dip.
+                */
+               struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+               intel_dp_retrain_link(encoder, ctx);
        }
 
        /*
@@ -4781,7 +4786,7 @@ intel_dp_detect(struct drm_connector *connector,
                                return ret;
                }
 
-               status = intel_dp_long_pulse(intel_dp->attached_connector);
+               status = intel_dp_long_pulse(intel_dp->attached_connector, ctx);
        }
 
        intel_dp->detect_done = false;
index 7e3e01607643d3e6b4dfc6332284be001966e280..4ecd65375603391ee02f6a11831feecdae397b0b 100644 (file)
@@ -166,6 +166,8 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
        struct intel_connector *connector =
                to_intel_connector(old_conn_state->connector);
 
+       intel_ddi_disable_pipe_clock(old_crtc_state);
+
        /* this can fail */
        drm_dp_check_act_status(&intel_dp->mst_mgr);
        /* and this can also fail */
@@ -252,6 +254,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
        I915_WRITE(DP_TP_STATUS(port), temp);
 
        ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
+
+       intel_ddi_enable_pipe_clock(pipe_config);
 }
 
 static void intel_mst_enable_dp(struct intel_encoder *encoder,
index a9076402dcb0864ab6b7c6c4ea1d8346c95e949b..192972a7d287e9fd5ff9aeb599d6730007383500 100644 (file)
@@ -943,8 +943,12 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port,
 
        ret = i2c_transfer(adapter, &msg, 1);
        if (ret == 1)
-               return 0;
-       return ret >= 0 ? -EIO : ret;
+               ret = 0;
+       else if (ret >= 0)
+               ret = -EIO;
+
+       kfree(write_buf);
+       return ret;
 }
 
 static
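
The hdcp_write hunk above replaces two early returns with a single exit path so the write_buf allocation is always freed, closing a leak on both the success and error branches. Below is a minimal sketch of that pattern using only the C standard library; do_transfer() is a hypothetical stand-in for the real i2c_transfer() call and returns 1 for one successfully transferred message.

	#include <errno.h>
	#include <stdlib.h>
	#include <string.h>

	/* Hypothetical stand-in for the real transfer call. */
	static int do_transfer(const void *buf, size_t len)
	{
		(void)buf;
		return len ? 1 : -EIO;		/* "1 message transferred" on success */
	}

	static int write_with_header(const void *payload, size_t len)
	{
		size_t total = 1 + len;		/* 1-byte offset + payload */
		unsigned char *write_buf = malloc(total);
		int ret;

		if (!write_buf)
			return -ENOMEM;

		write_buf[0] = 0x00;		/* register offset */
		memcpy(write_buf + 1, payload, len);

		ret = do_transfer(write_buf, total);
		if (ret == 1)
			ret = 0;		/* success */
		else if (ret >= 0)
			ret = -EIO;		/* short transfer */

		free(write_buf);		/* freed on every path */
		return ret;
	}

	int main(void)
	{
		unsigned char data[4] = { 1, 2, 3, 4 };

		return write_with_header(data, sizeof(data));
	}
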
index 5dae16ccd9f1015fe80abc2c41d98937f3aaef89..3e085c5f2b81bfa87daf161bd96e3eb01da5d4ac 100644 (file)
@@ -74,7 +74,7 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
        DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n",
                      lspcon_mode_name(mode));
 
-       wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 100);
+       wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 400);
        if (current_mode != mode)
                DRM_ERROR("LSPCON mode hasn't settled\n");
 
index c2f10d8993296471833e4a5d8a0cd69463a992b4..443dfaefd7a6b413c4664b132fe5db59a9297f24 100644 (file)
@@ -181,8 +181,9 @@ struct intel_overlay {
        u32 brightness, contrast, saturation;
        u32 old_xscale, old_yscale;
        /* register access */
-       u32 flip_addr;
        struct drm_i915_gem_object *reg_bo;
+       struct overlay_registers __iomem *regs;
+       u32 flip_addr;
        /* flip handling */
        struct i915_gem_active last_flip;
 };
@@ -210,29 +211,6 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
                                  PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
 }
 
-static struct overlay_registers __iomem *
-intel_overlay_map_regs(struct intel_overlay *overlay)
-{
-       struct drm_i915_private *dev_priv = overlay->i915;
-       struct overlay_registers __iomem *regs;
-
-       if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
-               regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
-       else
-               regs = io_mapping_map_wc(&dev_priv->ggtt.iomap,
-                                        overlay->flip_addr,
-                                        PAGE_SIZE);
-
-       return regs;
-}
-
-static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
-                                    struct overlay_registers __iomem *regs)
-{
-       if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
-               io_mapping_unmap(regs);
-}
-
 static void intel_overlay_submit_request(struct intel_overlay *overlay,
                                         struct i915_request *rq,
                                         i915_gem_retire_fn retire)
@@ -784,13 +762,13 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
                                      struct drm_i915_gem_object *new_bo,
                                      struct put_image_params *params)
 {
-       int ret, tmp_width;
-       struct overlay_registers __iomem *regs;
-       bool scale_changed = false;
+       struct overlay_registers __iomem *regs = overlay->regs;
        struct drm_i915_private *dev_priv = overlay->i915;
        u32 swidth, swidthsw, sheight, ostride;
        enum pipe pipe = overlay->crtc->pipe;
+       bool scale_changed = false;
        struct i915_vma *vma;
+       int ret, tmp_width;
 
        lockdep_assert_held(&dev_priv->drm.struct_mutex);
        WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
@@ -815,30 +793,19 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 
        if (!overlay->active) {
                u32 oconfig;
-               regs = intel_overlay_map_regs(overlay);
-               if (!regs) {
-                       ret = -ENOMEM;
-                       goto out_unpin;
-               }
+
                oconfig = OCONF_CC_OUT_8BIT;
                if (IS_GEN4(dev_priv))
                        oconfig |= OCONF_CSC_MODE_BT709;
                oconfig |= pipe == 0 ?
                        OCONF_PIPE_A : OCONF_PIPE_B;
                iowrite32(oconfig, &regs->OCONFIG);
-               intel_overlay_unmap_regs(overlay, regs);
 
                ret = intel_overlay_on(overlay);
                if (ret != 0)
                        goto out_unpin;
        }
 
-       regs = intel_overlay_map_regs(overlay);
-       if (!regs) {
-               ret = -ENOMEM;
-               goto out_unpin;
-       }
-
        iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
        iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);
 
@@ -882,8 +849,6 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 
        iowrite32(overlay_cmd_reg(params), &regs->OCMD);
 
-       intel_overlay_unmap_regs(overlay, regs);
-
        ret = intel_overlay_continue(overlay, vma, scale_changed);
        if (ret)
                goto out_unpin;
@@ -901,7 +866,6 @@ out_pin_section:
 int intel_overlay_switch_off(struct intel_overlay *overlay)
 {
        struct drm_i915_private *dev_priv = overlay->i915;
-       struct overlay_registers __iomem *regs;
        int ret;
 
        lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -918,9 +882,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
        if (ret != 0)
                return ret;
 
-       regs = intel_overlay_map_regs(overlay);
-       iowrite32(0, &regs->OCMD);
-       intel_overlay_unmap_regs(overlay, regs);
+       iowrite32(0, &overlay->regs->OCMD);
 
        return intel_overlay_off(overlay);
 }
@@ -1305,7 +1267,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
        struct drm_intel_overlay_attrs *attrs = data;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_overlay *overlay;
-       struct overlay_registers __iomem *regs;
        int ret;
 
        overlay = dev_priv->overlay;
@@ -1345,15 +1306,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
                overlay->contrast   = attrs->contrast;
                overlay->saturation = attrs->saturation;
 
-               regs = intel_overlay_map_regs(overlay);
-               if (!regs) {
-                       ret = -ENOMEM;
-                       goto out_unlock;
-               }
-
-               update_reg_attrs(overlay, regs);
-
-               intel_overlay_unmap_regs(overlay, regs);
+               update_reg_attrs(overlay, overlay->regs);
 
                if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
                        if (IS_GEN2(dev_priv))
@@ -1386,12 +1339,47 @@ out_unlock:
        return ret;
 }
 
+static int get_registers(struct intel_overlay *overlay, bool use_phys)
+{
+       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
+       int err;
+
+       obj = i915_gem_object_create_stolen(overlay->i915, PAGE_SIZE);
+       if (obj == NULL)
+               obj = i915_gem_object_create_internal(overlay->i915, PAGE_SIZE);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
+
+       vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+       if (IS_ERR(vma)) {
+               err = PTR_ERR(vma);
+               goto err_put_bo;
+       }
+
+       if (use_phys)
+               overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl);
+       else
+               overlay->flip_addr = i915_ggtt_offset(vma);
+       overlay->regs = i915_vma_pin_iomap(vma);
+       i915_vma_unpin(vma);
+
+       if (IS_ERR(overlay->regs)) {
+               err = PTR_ERR(overlay->regs);
+               goto err_put_bo;
+       }
+
+       overlay->reg_bo = obj;
+       return 0;
+
+err_put_bo:
+       i915_gem_object_put(obj);
+       return err;
+}
+
 void intel_setup_overlay(struct drm_i915_private *dev_priv)
 {
        struct intel_overlay *overlay;
-       struct drm_i915_gem_object *reg_bo;
-       struct overlay_registers __iomem *regs;
-       struct i915_vma *vma = NULL;
        int ret;
 
        if (!HAS_OVERLAY(dev_priv))
@@ -1401,46 +1389,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
        if (!overlay)
                return;
 
-       mutex_lock(&dev_priv->drm.struct_mutex);
-       if (WARN_ON(dev_priv->overlay))
-               goto out_free;
-
        overlay->i915 = dev_priv;
 
-       reg_bo = NULL;
-       if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
-               reg_bo = i915_gem_object_create_stolen(dev_priv, PAGE_SIZE);
-       if (reg_bo == NULL)
-               reg_bo = i915_gem_object_create(dev_priv, PAGE_SIZE);
-       if (IS_ERR(reg_bo))
-               goto out_free;
-       overlay->reg_bo = reg_bo;
-
-       if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) {
-               ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
-               if (ret) {
-                       DRM_ERROR("failed to attach phys overlay regs\n");
-                       goto out_free_bo;
-               }
-               overlay->flip_addr = reg_bo->phys_handle->busaddr;
-       } else {
-               vma = i915_gem_object_ggtt_pin(reg_bo, NULL,
-                                              0, PAGE_SIZE, PIN_MAPPABLE);
-               if (IS_ERR(vma)) {
-                       DRM_ERROR("failed to pin overlay register bo\n");
-                       ret = PTR_ERR(vma);
-                       goto out_free_bo;
-               }
-               overlay->flip_addr = i915_ggtt_offset(vma);
-
-               ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
-               if (ret) {
-                       DRM_ERROR("failed to move overlay register bo into the GTT\n");
-                       goto out_unpin_bo;
-               }
-       }
-
-       /* init all values */
        overlay->color_key = 0x0101fe;
        overlay->color_key_enabled = true;
        overlay->brightness = -19;
@@ -1449,44 +1399,51 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
 
        init_request_active(&overlay->last_flip, NULL);
 
-       regs = intel_overlay_map_regs(overlay);
-       if (!regs)
-               goto out_unpin_bo;
+       mutex_lock(&dev_priv->drm.struct_mutex);
+
+       ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
+       if (ret)
+               goto out_free;
+
+       ret = i915_gem_object_set_to_gtt_domain(overlay->reg_bo, true);
+       if (ret)
+               goto out_reg_bo;
 
-       memset_io(regs, 0, sizeof(struct overlay_registers));
-       update_polyphase_filter(regs);
-       update_reg_attrs(overlay, regs);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
 
-       intel_overlay_unmap_regs(overlay, regs);
+       memset_io(overlay->regs, 0, sizeof(struct overlay_registers));
+       update_polyphase_filter(overlay->regs);
+       update_reg_attrs(overlay, overlay->regs);
 
        dev_priv->overlay = overlay;
-       mutex_unlock(&dev_priv->drm.struct_mutex);
-       DRM_INFO("initialized overlay support\n");
+       DRM_INFO("Initialized overlay support.\n");
        return;
 
-out_unpin_bo:
-       if (vma)
-               i915_vma_unpin(vma);
-out_free_bo:
-       i915_gem_object_put(reg_bo);
+out_reg_bo:
+       i915_gem_object_put(overlay->reg_bo);
 out_free:
        mutex_unlock(&dev_priv->drm.struct_mutex);
        kfree(overlay);
-       return;
 }
 
 void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
 {
-       if (!dev_priv->overlay)
+       struct intel_overlay *overlay;
+
+       overlay = fetch_and_zero(&dev_priv->overlay);
+       if (!overlay)
                return;
 
-       /* The bo's should be free'd by the generic code already.
+       /*
+        * The bo's should be free'd by the generic code already.
         * Furthermore modesetting teardown happens beforehand so the
-        * hardware should be off already */
-       WARN_ON(dev_priv->overlay->active);
+        * hardware should be off already.
+        */
+       WARN_ON(overlay->active);
+
+       i915_gem_object_put(overlay->reg_bo);
 
-       i915_gem_object_put(dev_priv->overlay->reg_bo);
-       kfree(dev_priv->overlay);
+       kfree(overlay);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
@@ -1498,37 +1455,11 @@ struct intel_overlay_error_state {
        u32 isr;
 };
 
-static struct overlay_registers __iomem *
-intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
-{
-       struct drm_i915_private *dev_priv = overlay->i915;
-       struct overlay_registers __iomem *regs;
-
-       if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
-               /* Cast to make sparse happy, but it's wc memory anyway, so
-                * equivalent to the wc io mapping on X86. */
-               regs = (struct overlay_registers __iomem *)
-                       overlay->reg_bo->phys_handle->vaddr;
-       else
-               regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.iomap,
-                                               overlay->flip_addr);
-
-       return regs;
-}
-
-static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
-                                       struct overlay_registers __iomem *regs)
-{
-       if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
-               io_mapping_unmap_atomic(regs);
-}
-
 struct intel_overlay_error_state *
 intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
 {
        struct intel_overlay *overlay = dev_priv->overlay;
        struct intel_overlay_error_state *error;
-       struct overlay_registers __iomem *regs;
 
        if (!overlay || !overlay->active)
                return NULL;
@@ -1541,18 +1472,9 @@ intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
        error->isr = I915_READ(ISR);
        error->base = overlay->flip_addr;
 
-       regs = intel_overlay_map_regs_atomic(overlay);
-       if (!regs)
-               goto err;
-
-       memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
-       intel_overlay_unmap_regs_atomic(overlay, regs);
+       memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs));
 
        return error;
-
-err:
-       kfree(error);
-       return NULL;
 }
 
 void
index 978782a776292e6d954755cc71786bd98b5d81ac..28d191192945b0690fd76eb9aa97a1bb3637215f 100644 (file)
@@ -132,6 +132,11 @@ static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w,
        writel(0x0, comp->regs + DISP_REG_OVL_RST);
 }
 
+static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp)
+{
+       return 4;
+}
+
 static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx)
 {
        unsigned int reg;
@@ -157,6 +162,11 @@ static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx)
 
 static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
 {
+       /* The MEM_MODE_INPUT_FORMAT_XXX value returned by this switch
+        * is defined in the MediaTek hardware data sheet.
+        * The letter order in XXX bears no relation to the data
+        * arrangement in memory.
+        */
        switch (fmt) {
        default:
        case DRM_FORMAT_RGB565:
@@ -221,6 +231,7 @@ static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = {
        .stop = mtk_ovl_stop,
        .enable_vblank = mtk_ovl_enable_vblank,
        .disable_vblank = mtk_ovl_disable_vblank,
+       .layer_nr = mtk_ovl_layer_nr,
        .layer_on = mtk_ovl_layer_on,
        .layer_off = mtk_ovl_layer_off,
        .layer_config = mtk_ovl_layer_config,
index 585943c81e1f8818de1a6bb481f6adb90c8ad308..b0a5cffe345ab77f90e443ca88bfac52d62804a4 100644 (file)
 #define RDMA_REG_UPDATE_INT                            BIT(0)
 #define DISP_REG_RDMA_GLOBAL_CON               0x0010
 #define RDMA_ENGINE_EN                                 BIT(0)
+#define RDMA_MODE_MEMORY                               BIT(1)
 #define DISP_REG_RDMA_SIZE_CON_0               0x0014
+#define RDMA_MATRIX_ENABLE                             BIT(17)
+#define RDMA_MATRIX_INT_MTX_SEL                                GENMASK(23, 20)
+#define RDMA_MATRIX_INT_MTX_BT601_to_RGB               (6 << 20)
 #define DISP_REG_RDMA_SIZE_CON_1               0x0018
 #define DISP_REG_RDMA_TARGET_LINE              0x001c
+#define DISP_RDMA_MEM_CON                      0x0024
+#define MEM_MODE_INPUT_FORMAT_RGB565                   (0x000 << 4)
+#define MEM_MODE_INPUT_FORMAT_RGB888                   (0x001 << 4)
+#define MEM_MODE_INPUT_FORMAT_RGBA8888                 (0x002 << 4)
+#define MEM_MODE_INPUT_FORMAT_ARGB8888                 (0x003 << 4)
+#define MEM_MODE_INPUT_FORMAT_UYVY                     (0x004 << 4)
+#define MEM_MODE_INPUT_FORMAT_YUYV                     (0x005 << 4)
+#define MEM_MODE_INPUT_SWAP                            BIT(8)
+#define DISP_RDMA_MEM_SRC_PITCH                        0x002c
+#define DISP_RDMA_MEM_GMC_SETTING_0            0x0030
 #define DISP_REG_RDMA_FIFO_CON                 0x0040
 #define RDMA_FIFO_UNDERFLOW_EN                         BIT(31)
 #define RDMA_FIFO_PSEUDO_SIZE(bytes)                   (((bytes) / 16) << 16)
 #define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes)                ((bytes) / 16)
 #define RDMA_FIFO_SIZE(rdma)                   ((rdma)->data->fifo_size)
+#define DISP_RDMA_MEM_START_ADDR               0x0f00
+
+#define RDMA_MEM_GMC                           0x40402020
 
 struct mtk_disp_rdma_data {
        unsigned int fifo_size;
@@ -138,12 +155,87 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
        writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON);
 }
 
+static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma,
+                                    unsigned int fmt)
+{
+       /* The MEM_MODE_INPUT_FORMAT_XXX value returned by this switch
+        * is defined in the MediaTek hardware data sheet.
+        * The letter order in XXX bears no relation to the data
+        * arrangement in memory.
+        */
+       switch (fmt) {
+       default:
+       case DRM_FORMAT_RGB565:
+               return MEM_MODE_INPUT_FORMAT_RGB565;
+       case DRM_FORMAT_BGR565:
+               return MEM_MODE_INPUT_FORMAT_RGB565 | MEM_MODE_INPUT_SWAP;
+       case DRM_FORMAT_RGB888:
+               return MEM_MODE_INPUT_FORMAT_RGB888;
+       case DRM_FORMAT_BGR888:
+               return MEM_MODE_INPUT_FORMAT_RGB888 | MEM_MODE_INPUT_SWAP;
+       case DRM_FORMAT_RGBX8888:
+       case DRM_FORMAT_RGBA8888:
+               return MEM_MODE_INPUT_FORMAT_ARGB8888;
+       case DRM_FORMAT_BGRX8888:
+       case DRM_FORMAT_BGRA8888:
+               return MEM_MODE_INPUT_FORMAT_ARGB8888 | MEM_MODE_INPUT_SWAP;
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_ARGB8888:
+               return MEM_MODE_INPUT_FORMAT_RGBA8888;
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_ABGR8888:
+               return MEM_MODE_INPUT_FORMAT_RGBA8888 | MEM_MODE_INPUT_SWAP;
+       case DRM_FORMAT_UYVY:
+               return MEM_MODE_INPUT_FORMAT_UYVY;
+       case DRM_FORMAT_YUYV:
+               return MEM_MODE_INPUT_FORMAT_YUYV;
+       }
+}
+
+static unsigned int mtk_rdma_layer_nr(struct mtk_ddp_comp *comp)
+{
+       return 1;
+}
+
+static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
+                                 struct mtk_plane_state *state)
+{
+       struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
+       struct mtk_plane_pending_state *pending = &state->pending;
+       unsigned int addr = pending->addr;
+       unsigned int pitch = pending->pitch & 0xffff;
+       unsigned int fmt = pending->format;
+       unsigned int con;
+
+       con = rdma_fmt_convert(rdma, fmt);
+       writel_relaxed(con, comp->regs + DISP_RDMA_MEM_CON);
+
+       if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) {
+               rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
+                                RDMA_MATRIX_ENABLE, RDMA_MATRIX_ENABLE);
+               rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
+                                RDMA_MATRIX_INT_MTX_SEL,
+                                RDMA_MATRIX_INT_MTX_BT601_to_RGB);
+       } else {
+               rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
+                                RDMA_MATRIX_ENABLE, 0);
+       }
+
+       writel_relaxed(addr, comp->regs + DISP_RDMA_MEM_START_ADDR);
+       writel_relaxed(pitch, comp->regs + DISP_RDMA_MEM_SRC_PITCH);
+       writel(RDMA_MEM_GMC, comp->regs + DISP_RDMA_MEM_GMC_SETTING_0);
+       rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON,
+                        RDMA_MODE_MEMORY, RDMA_MODE_MEMORY);
+}
+
 static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = {
        .config = mtk_rdma_config,
        .start = mtk_rdma_start,
        .stop = mtk_rdma_stop,
        .enable_vblank = mtk_rdma_enable_vblank,
        .disable_vblank = mtk_rdma_disable_vblank,
+       .layer_nr = mtk_rdma_layer_nr,
+       .layer_config = mtk_rdma_layer_config,
 };
 
 static int mtk_disp_rdma_bind(struct device *dev, struct device *master,
index 2d6aa150a9ff08f0c0be658593abe1114d44a514..0b976dfd04df0b0c8b71bb0c6b2c471027a2e552 100644 (file)
@@ -45,7 +45,8 @@ struct mtk_drm_crtc {
        bool                            pending_needs_vblank;
        struct drm_pending_vblank_event *event;
 
-       struct drm_plane                planes[OVL_LAYER_NR];
+       struct drm_plane                *planes;
+       unsigned int                    layer_nr;
        bool                            pending_planes;
 
        void __iomem                    *config_regs;
@@ -171,9 +172,9 @@ static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
 static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
 {
        struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
-       struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+       struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
 
-       mtk_ddp_comp_enable_vblank(ovl, &mtk_crtc->base);
+       mtk_ddp_comp_enable_vblank(comp, &mtk_crtc->base);
 
        return 0;
 }
@@ -181,9 +182,9 @@ static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
 static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc)
 {
        struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
-       struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+       struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
 
-       mtk_ddp_comp_disable_vblank(ovl);
+       mtk_ddp_comp_disable_vblank(comp);
 }
 
 static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
@@ -286,7 +287,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
        }
 
        /* Initially configure all planes */
-       for (i = 0; i < OVL_LAYER_NR; i++) {
+       for (i = 0; i < mtk_crtc->layer_nr; i++) {
                struct drm_plane *plane = &mtk_crtc->planes[i];
                struct mtk_plane_state *plane_state;
 
@@ -334,7 +335,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
 {
        struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
        struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
-       struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+       struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
        unsigned int i;
 
        /*
@@ -343,7 +344,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
         * queue update module registers on vblank.
         */
        if (state->pending_config) {
-               mtk_ddp_comp_config(ovl, state->pending_width,
+               mtk_ddp_comp_config(comp, state->pending_width,
                                    state->pending_height,
                                    state->pending_vrefresh, 0);
 
@@ -351,14 +352,14 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
        }
 
        if (mtk_crtc->pending_planes) {
-               for (i = 0; i < OVL_LAYER_NR; i++) {
+               for (i = 0; i < mtk_crtc->layer_nr; i++) {
                        struct drm_plane *plane = &mtk_crtc->planes[i];
                        struct mtk_plane_state *plane_state;
 
                        plane_state = to_mtk_plane_state(plane->state);
 
                        if (plane_state->pending.config) {
-                               mtk_ddp_comp_layer_config(ovl, i, plane_state);
+                               mtk_ddp_comp_layer_config(comp, i, plane_state);
                                plane_state->pending.config = false;
                        }
                }
@@ -370,12 +371,12 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
                                       struct drm_crtc_state *old_state)
 {
        struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
-       struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+       struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
        int ret;
 
        DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
 
-       ret = mtk_smi_larb_get(ovl->larb_dev);
+       ret = mtk_smi_larb_get(comp->larb_dev);
        if (ret) {
                DRM_ERROR("Failed to get larb: %d\n", ret);
                return;
@@ -383,7 +384,7 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
 
        ret = mtk_crtc_ddp_hw_init(mtk_crtc);
        if (ret) {
-               mtk_smi_larb_put(ovl->larb_dev);
+               mtk_smi_larb_put(comp->larb_dev);
                return;
        }
 
@@ -395,7 +396,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
                                        struct drm_crtc_state *old_state)
 {
        struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
-       struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+       struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
        int i;
 
        DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
@@ -403,7 +404,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
                return;
 
        /* Set all pending plane state to disabled */
-       for (i = 0; i < OVL_LAYER_NR; i++) {
+       for (i = 0; i < mtk_crtc->layer_nr; i++) {
                struct drm_plane *plane = &mtk_crtc->planes[i];
                struct mtk_plane_state *plane_state;
 
@@ -418,7 +419,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
 
        drm_crtc_vblank_off(crtc);
        mtk_crtc_ddp_hw_fini(mtk_crtc);
-       mtk_smi_larb_put(ovl->larb_dev);
+       mtk_smi_larb_put(comp->larb_dev);
 
        mtk_crtc->enabled = false;
 }
@@ -450,7 +451,7 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
 
        if (mtk_crtc->event)
                mtk_crtc->pending_needs_vblank = true;
-       for (i = 0; i < OVL_LAYER_NR; i++) {
+       for (i = 0; i < mtk_crtc->layer_nr; i++) {
                struct drm_plane *plane = &mtk_crtc->planes[i];
                struct mtk_plane_state *plane_state;
 
@@ -516,7 +517,7 @@ err_cleanup_crtc:
        return ret;
 }
 
-void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl)
+void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp)
 {
        struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
        struct mtk_drm_private *priv = crtc->dev->dev_private;
@@ -598,7 +599,12 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
                mtk_crtc->ddp_comp[i] = comp;
        }
 
-       for (zpos = 0; zpos < OVL_LAYER_NR; zpos++) {
+       mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
+       mtk_crtc->planes = devm_kzalloc(dev, mtk_crtc->layer_nr *
+                                       sizeof(struct drm_plane),
+                                       GFP_KERNEL);
+
+       for (zpos = 0; zpos < mtk_crtc->layer_nr; zpos++) {
                type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY :
                                (zpos == 1) ? DRM_PLANE_TYPE_CURSOR :
                                                DRM_PLANE_TYPE_OVERLAY;
@@ -609,7 +615,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
        }
 
        ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0],
-                               &mtk_crtc->planes[1], pipe);
+                               mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] :
+                               NULL, pipe);
        if (ret < 0)
                goto unprepare;
        drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);
index 9d9410c67ae9eb2026181b4a361e5c4e5707028d..091adb2087ebad132742195a4490e09efae4f427 100644 (file)
 #include "mtk_drm_ddp_comp.h"
 #include "mtk_drm_plane.h"
 
-#define OVL_LAYER_NR   4
 #define MTK_LUT_SIZE   512
 #define MTK_MAX_BPC    10
 #define MTK_MIN_BPC    3
 
 void mtk_drm_crtc_commit(struct drm_crtc *crtc);
-void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl);
+void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp);
 int mtk_drm_crtc_create(struct drm_device *drm_dev,
                        const enum mtk_ddp_comp_id *path,
                        unsigned int path_len);
index 87e4191c250ebea7f3f0acb3ca23d3fcb9a9639c..546b3e3b300ba8c6b8a4dc67bfe8c400d5b48de9 100644 (file)
 #define OVL1_MOUT_EN_COLOR1            0x1
 #define GAMMA_MOUT_EN_RDMA1            0x1
 #define RDMA0_SOUT_DPI0                        0x2
+#define RDMA0_SOUT_DPI1                        0x3
+#define RDMA0_SOUT_DSI1                        0x1
 #define RDMA0_SOUT_DSI2                        0x4
 #define RDMA0_SOUT_DSI3                        0x5
 #define RDMA1_SOUT_DPI0                        0x2
 #define DPI0_SEL_IN_RDMA2              0x3
 #define DPI1_SEL_IN_RDMA1              (0x1 << 8)
 #define DPI1_SEL_IN_RDMA2              (0x3 << 8)
+#define DSI0_SEL_IN_RDMA1              0x1
+#define DSI0_SEL_IN_RDMA2              0x4
 #define DSI1_SEL_IN_RDMA1              0x1
 #define DSI1_SEL_IN_RDMA2              0x4
 #define DSI2_SEL_IN_RDMA1              (0x1 << 16)
@@ -224,6 +228,12 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
        } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) {
                *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
                value = RDMA0_SOUT_DPI0;
+       } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI1) {
+               *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+               value = RDMA0_SOUT_DPI1;
+       } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI1) {
+               *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+               value = RDMA0_SOUT_DSI1;
        } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) {
                *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
                value = RDMA0_SOUT_DSI2;
@@ -282,6 +292,9 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
        } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
                *addr = DISP_REG_CONFIG_DPI_SEL_IN;
                value = DPI1_SEL_IN_RDMA1;
+       } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI0) {
+               *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+               value = DSI0_SEL_IN_RDMA1;
        } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
                *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
                value = DSI1_SEL_IN_RDMA1;
@@ -297,8 +310,11 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
        } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
                *addr = DISP_REG_CONFIG_DPI_SEL_IN;
                value = DPI1_SEL_IN_RDMA2;
-       } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+       } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI0) {
                *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+               value = DSI0_SEL_IN_RDMA2;
+       } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+               *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
                value = DSI1_SEL_IN_RDMA2;
        } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
                *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
index 7413ffeb3c9d8bee1ca6bb825eb7a54391c7d013..8399229e6ad2661b77a52a5d9f30eb7502ee2879 100644 (file)
@@ -78,6 +78,7 @@ struct mtk_ddp_comp_funcs {
        void (*stop)(struct mtk_ddp_comp *comp);
        void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc);
        void (*disable_vblank)(struct mtk_ddp_comp *comp);
+       unsigned int (*layer_nr)(struct mtk_ddp_comp *comp);
        void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx);
        void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx);
        void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx,
@@ -128,6 +129,14 @@ static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp)
                comp->funcs->disable_vblank(comp);
 }
 
+static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp)
+{
+       if (comp->funcs && comp->funcs->layer_nr)
+               return comp->funcs->layer_nr(comp);
+
+       return 0;
+}
+
 static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp,
                                         unsigned int idx)
 {
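
The layer_nr hook added above is optional per component: the inline wrapper returns 0 when a component does not implement it, which lets the CRTC code size its plane array from whatever component sits first in the pipeline (4 for OVL, 1 for RDMA). A compact sketch of that optional-callback dispatch, with made-up component types standing in for the real mtk_ddp_comp structures:

	#include <stdio.h>

	struct comp;

	/* Per-component ops table; layer_nr is optional. */
	struct comp_funcs {
		unsigned int (*layer_nr)(struct comp *comp);
	};

	struct comp {
		const struct comp_funcs *funcs;
	};

	/* Wrapper falls back to 0 layers when the hook is not implemented. */
	static unsigned int comp_layer_nr(struct comp *comp)
	{
		if (comp->funcs && comp->funcs->layer_nr)
			return comp->funcs->layer_nr(comp);
		return 0;
	}

	static unsigned int ovl_layer_nr(struct comp *comp)  { (void)comp; return 4; }
	static unsigned int rdma_layer_nr(struct comp *comp) { (void)comp; return 1; }

	int main(void)
	{
		const struct comp_funcs ovl = { .layer_nr = ovl_layer_nr };
		const struct comp_funcs rdma = { .layer_nr = rdma_layer_nr };
		struct comp a = { &ovl }, b = { &rdma }, c = { NULL };

		printf("%u %u %u\n", comp_layer_nr(&a), comp_layer_nr(&b),
		       comp_layer_nr(&c));
		return 0;
	}
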
index 39721119713bc29d6cba63e2947d6be1ddc439cd..47ec604289b712148fdfa740a2d6c64ca4d0810f 100644 (file)
@@ -381,7 +381,7 @@ static int mtk_drm_bind(struct device *dev)
 err_deinit:
        mtk_drm_kms_deinit(drm);
 err_free:
-       drm_dev_unref(drm);
+       drm_dev_put(drm);
        return ret;
 }
 
@@ -390,7 +390,7 @@ static void mtk_drm_unbind(struct device *dev)
        struct mtk_drm_private *private = dev_get_drvdata(dev);
 
        drm_dev_unregister(private->drm);
-       drm_dev_unref(private->drm);
+       drm_dev_put(private->drm);
        private->drm = NULL;
 }
 
@@ -564,7 +564,7 @@ static int mtk_drm_remove(struct platform_device *pdev)
 
        drm_dev_unregister(drm);
        mtk_drm_kms_deinit(drm);
-       drm_dev_unref(drm);
+       drm_dev_put(drm);
 
        component_master_del(&pdev->dev, &mtk_drm_ops);
        pm_runtime_disable(&pdev->dev);
@@ -580,29 +580,24 @@ static int mtk_drm_sys_suspend(struct device *dev)
 {
        struct mtk_drm_private *private = dev_get_drvdata(dev);
        struct drm_device *drm = private->drm;
+       int ret;
 
-       drm_kms_helper_poll_disable(drm);
-
-       private->suspend_state = drm_atomic_helper_suspend(drm);
-       if (IS_ERR(private->suspend_state)) {
-               drm_kms_helper_poll_enable(drm);
-               return PTR_ERR(private->suspend_state);
-       }
-
+       ret = drm_mode_config_helper_suspend(drm);
        DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n");
-       return 0;
+
+       return ret;
 }
 
 static int mtk_drm_sys_resume(struct device *dev)
 {
        struct mtk_drm_private *private = dev_get_drvdata(dev);
        struct drm_device *drm = private->drm;
+       int ret;
 
-       drm_atomic_helper_resume(drm, private->suspend_state);
-       drm_kms_helper_poll_enable(drm);
-
+       ret = drm_mode_config_helper_resume(drm);
        DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n");
-       return 0;
+
+       return ret;
 }
 #endif
 
index 8412119bd94058b7197c531c49fc7ad88a9e6fab..5691dfa1db6fe388bcf50b2db79c3aa5a05a7696 100644 (file)
@@ -1123,17 +1123,21 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
        int ret;
 
        if (dpcd >= 0x12) {
-               ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CTRL, &dpcd);
+               /* Even if we're enabling MST, start with disabling the
+                * branching unit to clear any sink-side MST topology state
+                * that wasn't set by us.
+                */
+               ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, 0);
                if (ret < 0)
                        return ret;
 
-               dpcd &= ~DP_MST_EN;
-               if (state)
-                       dpcd |= DP_MST_EN;
-
-               ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, dpcd);
-               if (ret < 0)
-                       return ret;
+               if (state) {
+                       /* Now, start initializing */
+                       ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL,
+                                                DP_MST_EN);
+                       if (ret < 0)
+                               return ret;
+               }
        }
 
        return nvif_mthd(disp, 0, &args, sizeof(args));
@@ -1142,31 +1146,58 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
 int
 nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
 {
-       int ret, state = 0;
+       struct drm_dp_aux *aux;
+       int ret;
+       bool old_state, new_state;
+       u8 mstm_ctrl;
 
        if (!mstm)
                return 0;
 
-       if (dpcd[0] >= 0x12) {
-               ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]);
+       mutex_lock(&mstm->mgr.lock);
+
+       old_state = mstm->mgr.mst_state;
+       new_state = old_state;
+       aux = mstm->mgr.aux;
+
+       if (old_state) {
+               /* Just check that the MST hub is still as we expect it */
+               ret = drm_dp_dpcd_readb(aux, DP_MSTM_CTRL, &mstm_ctrl);
+               if (ret < 0 || !(mstm_ctrl & DP_MST_EN)) {
+                       DRM_DEBUG_KMS("Hub gone, disabling MST topology\n");
+                       new_state = false;
+               }
+       } else if (dpcd[0] >= 0x12) {
+               ret = drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &dpcd[1]);
                if (ret < 0)
-                       return ret;
+                       goto probe_error;
 
                if (!(dpcd[1] & DP_MST_CAP))
                        dpcd[0] = 0x11;
                else
-                       state = allow;
+                       new_state = allow;
+       }
+
+       if (new_state == old_state) {
+               mutex_unlock(&mstm->mgr.lock);
+               return new_state;
        }
 
-       ret = nv50_mstm_enable(mstm, dpcd[0], state);
+       ret = nv50_mstm_enable(mstm, dpcd[0], new_state);
        if (ret)
-               return ret;
+               goto probe_error;
+
+       mutex_unlock(&mstm->mgr.lock);
 
-       ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, state);
+       ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, new_state);
        if (ret)
                return nv50_mstm_enable(mstm, dpcd[0], 0);
 
-       return mstm->mgr.mst_state;
+       return new_state;
+
+probe_error:
+       mutex_unlock(&mstm->mgr.lock);
+       return ret;
 }
 
 static void
@@ -2074,7 +2105,7 @@ nv50_disp_atomic_state_alloc(struct drm_device *dev)
 static const struct drm_mode_config_funcs
 nv50_disp_func = {
        .fb_create = nouveau_user_framebuffer_create,
-       .output_poll_changed = drm_fb_helper_output_poll_changed,
+       .output_poll_changed = nouveau_fbcon_output_poll_changed,
        .atomic_check = nv50_disp_atomic_check,
        .atomic_commit = nv50_disp_atomic_commit,
        .atomic_state_alloc = nv50_disp_atomic_state_alloc,
index 51932c72334ef6529abb18bc44762cc01ba2d176..247f72cc4d10a4547309effb6b8904e9c0d60d49 100644 (file)
@@ -409,59 +409,45 @@ static struct nouveau_encoder *
 nouveau_connector_ddc_detect(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
-       struct nouveau_connector *nv_connector = nouveau_connector(connector);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
-       struct nouveau_encoder *nv_encoder = NULL;
+       struct nouveau_encoder *nv_encoder = NULL, *found = NULL;
        struct drm_encoder *encoder;
-       int i, panel = -ENODEV;
-
-       /* eDP panels need powering on by us (if the VBIOS doesn't default it
-        * to on) before doing any AUX channel transactions.  LVDS panel power
-        * is handled by the SOR itself, and not required for LVDS DDC.
-        */
-       if (nv_connector->type == DCB_CONNECTOR_eDP) {
-               panel = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
-               if (panel == 0) {
-                       nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
-                       msleep(300);
-               }
-       }
+       int i, ret;
+       bool switcheroo_ddc = false;
 
        drm_connector_for_each_possible_encoder(connector, encoder, i) {
                nv_encoder = nouveau_encoder(encoder);
 
-               if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
-                       int ret = nouveau_dp_detect(nv_encoder);
+               switch (nv_encoder->dcb->type) {
+               case DCB_OUTPUT_DP:
+                       ret = nouveau_dp_detect(nv_encoder);
                        if (ret == NOUVEAU_DP_MST)
                                return NULL;
-                       if (ret == NOUVEAU_DP_SST)
-                               break;
-               } else
-               if ((vga_switcheroo_handler_flags() &
-                    VGA_SWITCHEROO_CAN_SWITCH_DDC) &&
-                   nv_encoder->dcb->type == DCB_OUTPUT_LVDS &&
-                   nv_encoder->i2c) {
-                       int ret;
-                       vga_switcheroo_lock_ddc(dev->pdev);
-                       ret = nvkm_probe_i2c(nv_encoder->i2c, 0x50);
-                       vga_switcheroo_unlock_ddc(dev->pdev);
-                       if (ret)
+                       else if (ret == NOUVEAU_DP_SST)
+                               found = nv_encoder;
+
+                       break;
+               case DCB_OUTPUT_LVDS:
+                       switcheroo_ddc = !!(vga_switcheroo_handler_flags() &
+                                           VGA_SWITCHEROO_CAN_SWITCH_DDC);
+               /* fall-through */
+               default:
+                       if (!nv_encoder->i2c)
                                break;
-               } else
-               if (nv_encoder->i2c) {
+
+                       if (switcheroo_ddc)
+                               vga_switcheroo_lock_ddc(dev->pdev);
                        if (nvkm_probe_i2c(nv_encoder->i2c, 0x50))
-                               break;
+                               found = nv_encoder;
+                       if (switcheroo_ddc)
+                               vga_switcheroo_unlock_ddc(dev->pdev);
+
+                       break;
                }
+               if (found)
+                       break;
        }
 
-       /* eDP panel not detected, restore panel power GPIO to previous
-        * state to avoid confusing the SOR for other output types.
-        */
-       if (!nv_encoder && panel == 0)
-               nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
-
-       return nv_encoder;
+       return found;
 }
 
 static struct nouveau_encoder *
@@ -555,12 +541,16 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
                nv_connector->edid = NULL;
        }
 
-       /* Outputs are only polled while runtime active, so acquiring a
-        * runtime PM ref here is unnecessary (and would deadlock upon
-        * runtime suspend because it waits for polling to finish).
+       /* Outputs are only polled while runtime active, so resuming the
+        * device here is unnecessary (and would deadlock upon runtime suspend
+        * because it waits for polling to finish). We do, however, want to
+        * prevent the autosuspend timer from elapsing during this operation
+        * if possible.
         */
-       if (!drm_kms_helper_is_poll_worker()) {
-               ret = pm_runtime_get_sync(connector->dev->dev);
+       if (drm_kms_helper_is_poll_worker()) {
+               pm_runtime_get_noresume(dev->dev);
+       } else {
+               ret = pm_runtime_get_sync(dev->dev);
                if (ret < 0 && ret != -EACCES)
                        return conn_status;
        }
@@ -638,10 +628,8 @@ detect_analog:
 
  out:
 
-       if (!drm_kms_helper_is_poll_worker()) {
-               pm_runtime_mark_last_busy(connector->dev->dev);
-               pm_runtime_put_autosuspend(connector->dev->dev);
-       }
+       pm_runtime_mark_last_busy(dev->dev);
+       pm_runtime_put_autosuspend(dev->dev);
 
        return conn_status;
 }
@@ -1105,6 +1093,26 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
        const struct nvif_notify_conn_rep_v0 *rep = notify->data;
        const char *name = connector->name;
        struct nouveau_encoder *nv_encoder;
+       int ret;
+
+       ret = pm_runtime_get(drm->dev->dev);
+       if (ret == 0) {
+               /* We can't block here if there's a pending PM request
+                * running, as we'll deadlock nouveau_display_fini() when it
+                * calls nvif_put() on our nvif_notify struct. So, simply
+                * defer the hotplug event until the device finishes resuming
+                */
+               NV_DEBUG(drm, "Deferring HPD on %s until runtime resume\n",
+                        name);
+               schedule_work(&drm->hpd_work);
+
+               pm_runtime_put_noidle(drm->dev->dev);
+               return NVIF_NOTIFY_KEEP;
+       } else if (ret != 1 && ret != -EACCES) {
+               NV_WARN(drm, "HPD on %s dropped due to RPM failure: %d\n",
+                       name, ret);
+               return NVIF_NOTIFY_DROP;
+       }
 
        if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
                NV_DEBUG(drm, "service %s\n", name);
@@ -1122,6 +1130,8 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
                drm_helper_hpd_irq_event(connector->dev);
        }
 
+       pm_runtime_mark_last_busy(drm->dev->dev);
+       pm_runtime_put_autosuspend(drm->dev->dev);
        return NVIF_NOTIFY_KEEP;
 }
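
A short note on the runtime-PM pattern the hunk above applies: pm_runtime_get() is asynchronous, so a return of 1 means the device is already active, -EACCES means runtime PM is disabled, and 0 only means a resume request was queued, in which case the event must be deferred rather than handled while blocking. The following stand-alone sketch condenses that pattern; the helper name and the deferred work item are illustrative, not part of nouveau.

    #include <linux/pm_runtime.h>
    #include <linux/workqueue.h>

    /* Returns true if the hardware may be touched now; the caller must then
     * balance the reference with pm_runtime_put_autosuspend().  Returns false
     * if the event was deferred to the work item (or dropped on an RPM error).
     */
    static bool example_rpm_get_or_defer(struct device *dev,
                                         struct work_struct *deferred)
    {
            int ret = pm_runtime_get(dev);          /* asynchronous, never waits */

            if (ret == 1 || ret == -EACCES)
                    return true;                    /* active, or RPM disabled */

            if (ret == 0) {
                    schedule_work(deferred);        /* replay after runtime resume */
                    pm_runtime_put_noidle(dev);
            }
            return false;
    }
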
 
index 139368b31916b0f5a680916f0bc261ece493ec8f..540c0cbbfcee41bf3fdae2fa339d71373e58a133 100644 (file)
@@ -293,7 +293,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
 
 static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
        .fb_create = nouveau_user_framebuffer_create,
-       .output_poll_changed = drm_fb_helper_output_poll_changed,
+       .output_poll_changed = nouveau_fbcon_output_poll_changed,
 };
 
 
@@ -355,8 +355,6 @@ nouveau_display_hpd_work(struct work_struct *work)
        pm_runtime_get_sync(drm->dev->dev);
 
        drm_helper_hpd_irq_event(drm->dev);
-       /* enable polling for external displays */
-       drm_kms_helper_poll_enable(drm->dev);
 
        pm_runtime_mark_last_busy(drm->dev->dev);
        pm_runtime_put_sync(drm->dev->dev);
@@ -379,15 +377,29 @@ nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
 {
        struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb);
        struct acpi_bus_event *info = data;
+       int ret;
 
        if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {
                if (info->type == ACPI_VIDEO_NOTIFY_PROBE) {
-                       /*
-                        * This may be the only indication we receive of a
-                        * connector hotplug on a runtime suspended GPU,
-                        * schedule hpd_work to check.
-                        */
-                       schedule_work(&drm->hpd_work);
+                       ret = pm_runtime_get(drm->dev->dev);
+                       if (ret == 1 || ret == -EACCES) {
+                               /* If the GPU is already awake, or in a state
+                                * where we can't wake it up, it can handle
+                                * its own hotplug events.
+                                */
+                               pm_runtime_put_autosuspend(drm->dev->dev);
+                       } else if (ret == 0) {
+                               /* This may be the only indication we receive
+                                * of a connector hotplug on a runtime
+                                * suspended GPU, schedule hpd_work to check.
+                                */
+                               NV_DEBUG(drm, "ACPI requested connector reprobe\n");
+                               schedule_work(&drm->hpd_work);
+                               pm_runtime_put_noidle(drm->dev->dev);
+                       } else {
+                               NV_WARN(drm, "Dropped ACPI reprobe event due to RPM error: %d\n",
+                                       ret);
+                       }
 
                        /* acpi-video should not generate keypresses for this */
                        return NOTIFY_BAD;
@@ -411,6 +423,11 @@ nouveau_display_init(struct drm_device *dev)
        if (ret)
                return ret;
 
+       /* enable connector detection and polling for connectors without HPD
+        * support
+        */
+       drm_kms_helper_poll_enable(dev);
+
        /* enable hotplug interrupts */
        drm_connector_list_iter_begin(dev, &conn_iter);
        nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
@@ -425,7 +442,7 @@ nouveau_display_init(struct drm_device *dev)
 }
 
 void
-nouveau_display_fini(struct drm_device *dev, bool suspend)
+nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime)
 {
        struct nouveau_display *disp = nouveau_display(dev);
        struct nouveau_drm *drm = nouveau_drm(dev);
@@ -450,6 +467,9 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
        }
        drm_connector_list_iter_end(&conn_iter);
 
+       if (!runtime)
+               cancel_work_sync(&drm->hpd_work);
+
        drm_kms_helper_poll_disable(dev);
        disp->fini(dev);
 }
@@ -618,11 +638,11 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime)
                        }
                }
 
-               nouveau_display_fini(dev, true);
+               nouveau_display_fini(dev, true, runtime);
                return 0;
        }
 
-       nouveau_display_fini(dev, true);
+       nouveau_display_fini(dev, true, runtime);
 
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct nouveau_framebuffer *nouveau_fb;
index 54aa7c3fa42dddfa96b684d308def526da9c4d1d..ff92b54ce448467b290a0c61f59d3e259fed19df 100644 (file)
@@ -62,7 +62,7 @@ nouveau_display(struct drm_device *dev)
 int  nouveau_display_create(struct drm_device *dev);
 void nouveau_display_destroy(struct drm_device *dev);
 int  nouveau_display_init(struct drm_device *dev);
-void nouveau_display_fini(struct drm_device *dev, bool suspend);
+void nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime);
 int  nouveau_display_suspend(struct drm_device *dev, bool runtime);
 void nouveau_display_resume(struct drm_device *dev, bool runtime);
 int  nouveau_display_vblank_enable(struct drm_device *, unsigned int);
index c7ec86d6c3c910fdb73a7774eb921a414c156363..74d2283f2c28e7bb06f97856f93b610d12522a04 100644 (file)
@@ -230,7 +230,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
                mutex_unlock(&drm->master.lock);
        }
        if (ret) {
-               NV_ERROR(drm, "Client allocation failed: %d\n", ret);
+               NV_PRINTK(err, cli, "Client allocation failed: %d\n", ret);
                goto done;
        }
 
@@ -240,37 +240,37 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
                               }, sizeof(struct nv_device_v0),
                               &cli->device);
        if (ret) {
-               NV_ERROR(drm, "Device allocation failed: %d\n", ret);
+               NV_PRINTK(err, cli, "Device allocation failed: %d\n", ret);
                goto done;
        }
 
        ret = nvif_mclass(&cli->device.object, mmus);
        if (ret < 0) {
-               NV_ERROR(drm, "No supported MMU class\n");
+               NV_PRINTK(err, cli, "No supported MMU class\n");
                goto done;
        }
 
        ret = nvif_mmu_init(&cli->device.object, mmus[ret].oclass, &cli->mmu);
        if (ret) {
-               NV_ERROR(drm, "MMU allocation failed: %d\n", ret);
+               NV_PRINTK(err, cli, "MMU allocation failed: %d\n", ret);
                goto done;
        }
 
        ret = nvif_mclass(&cli->mmu.object, vmms);
        if (ret < 0) {
-               NV_ERROR(drm, "No supported VMM class\n");
+               NV_PRINTK(err, cli, "No supported VMM class\n");
                goto done;
        }
 
        ret = nouveau_vmm_init(cli, vmms[ret].oclass, &cli->vmm);
        if (ret) {
-               NV_ERROR(drm, "VMM allocation failed: %d\n", ret);
+               NV_PRINTK(err, cli, "VMM allocation failed: %d\n", ret);
                goto done;
        }
 
        ret = nvif_mclass(&cli->mmu.object, mems);
        if (ret < 0) {
-               NV_ERROR(drm, "No supported MEM class\n");
+               NV_PRINTK(err, cli, "No supported MEM class\n");
                goto done;
        }
 
@@ -592,10 +592,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
                pm_runtime_allow(dev->dev);
                pm_runtime_mark_last_busy(dev->dev);
                pm_runtime_put(dev->dev);
-       } else {
-               /* enable polling for external displays */
-               drm_kms_helper_poll_enable(dev);
        }
+
        return 0;
 
 fail_dispinit:
@@ -629,7 +627,7 @@ nouveau_drm_unload(struct drm_device *dev)
        nouveau_debugfs_fini(drm);
 
        if (dev->mode_config.num_crtc)
-               nouveau_display_fini(dev, false);
+               nouveau_display_fini(dev, false, false);
        nouveau_display_destroy(dev);
 
        nouveau_bios_takedown(dev);
@@ -835,7 +833,6 @@ nouveau_pmops_runtime_suspend(struct device *dev)
                return -EBUSY;
        }
 
-       drm_kms_helper_poll_disable(drm_dev);
        nouveau_switcheroo_optimus_dsm();
        ret = nouveau_do_suspend(drm_dev, true);
        pci_save_state(pdev);
index 844498c4267cb691ecde39083740ca6d08a86390..0f64c0a1d4b30236243e6229afa19d990592f86c 100644 (file)
@@ -466,6 +466,7 @@ nouveau_fbcon_set_suspend_work(struct work_struct *work)
        console_unlock();
 
        if (state == FBINFO_STATE_RUNNING) {
+               nouveau_fbcon_hotplug_resume(drm->fbcon);
                pm_runtime_mark_last_busy(drm->dev->dev);
                pm_runtime_put_sync(drm->dev->dev);
        }
@@ -487,6 +488,61 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
        schedule_work(&drm->fbcon_work);
 }
 
+void
+nouveau_fbcon_output_poll_changed(struct drm_device *dev)
+{
+       struct nouveau_drm *drm = nouveau_drm(dev);
+       struct nouveau_fbdev *fbcon = drm->fbcon;
+       int ret;
+
+       if (!fbcon)
+               return;
+
+       mutex_lock(&fbcon->hotplug_lock);
+
+       ret = pm_runtime_get(dev->dev);
+       if (ret == 1 || ret == -EACCES) {
+               drm_fb_helper_hotplug_event(&fbcon->helper);
+
+               pm_runtime_mark_last_busy(dev->dev);
+               pm_runtime_put_autosuspend(dev->dev);
+       } else if (ret == 0) {
+               /* If the GPU was already in the process of suspending before
+                * this event happened, then we can't block here as we'll
+                * deadlock the runtime pmops since they wait for us to
+                * finish. So, just defer this event until we runtime
+                * resume again. It will be handled by fbcon_work.
+                */
+               NV_DEBUG(drm, "fbcon HPD event deferred until runtime resume\n");
+               fbcon->hotplug_waiting = true;
+               pm_runtime_put_noidle(drm->dev->dev);
+       } else {
+               DRM_WARN("fbcon HPD event lost due to RPM failure: %d\n",
+                        ret);
+       }
+
+       mutex_unlock(&fbcon->hotplug_lock);
+}
+
+void
+nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon)
+{
+       struct nouveau_drm *drm;
+
+       if (!fbcon)
+               return;
+       drm = nouveau_drm(fbcon->helper.dev);
+
+       mutex_lock(&fbcon->hotplug_lock);
+       if (fbcon->hotplug_waiting) {
+               fbcon->hotplug_waiting = false;
+
+               NV_DEBUG(drm, "Handling deferred fbcon HPD events\n");
+               drm_fb_helper_hotplug_event(&fbcon->helper);
+       }
+       mutex_unlock(&fbcon->hotplug_lock);
+}
+
 int
 nouveau_fbcon_init(struct drm_device *dev)
 {
@@ -505,6 +561,7 @@ nouveau_fbcon_init(struct drm_device *dev)
 
        drm->fbcon = fbcon;
        INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
+       mutex_init(&fbcon->hotplug_lock);
 
        drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
 
index a6f192ea3fa6758934f2a54054f10708d2c73f14..db9d52047ef8dfbfc77fa96fcdb250c16f18e951 100644 (file)
@@ -41,6 +41,9 @@ struct nouveau_fbdev {
        struct nvif_object gdi;
        struct nvif_object blit;
        struct nvif_object twod;
+
+       struct mutex hotplug_lock;
+       bool hotplug_waiting;
 };
 
 void nouveau_fbcon_restore(void);
@@ -68,6 +71,8 @@ void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
 void nouveau_fbcon_accel_save_disable(struct drm_device *dev);
 void nouveau_fbcon_accel_restore(struct drm_device *dev);
 
+void nouveau_fbcon_output_poll_changed(struct drm_device *dev);
+void nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon);
 extern int nouveau_nofbaccel;
 
 #endif /* __NV50_FBCON_H__ */
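
A short note on the hotplug_lock/hotplug_waiting pair introduced above: it implements a defer-and-replay scheme in which an event arriving while the GPU is suspending is only recorded under the mutex, and nouveau_fbcon_hotplug_resume() replays it once the device is back. The following is a minimal sketch of that scheme with purely illustrative names, not nouveau's API.

    #include <linux/mutex.h>

    struct example_deferred_event {
            struct mutex lock;
            bool pending;
    };

    /* Record the event when it cannot be handled immediately. */
    static void example_event_defer(struct example_deferred_event *ev)
    {
            mutex_lock(&ev->lock);
            ev->pending = true;
            mutex_unlock(&ev->lock);
    }

    /* Replay it exactly once when the device is usable again. */
    static void example_event_resume(struct example_deferred_event *ev,
                                     void (*handle)(void *), void *arg)
    {
            mutex_lock(&ev->lock);
            if (ev->pending) {
                    ev->pending = false;
                    handle(arg);
            }
            mutex_unlock(&ev->lock);
    }
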
index 3da5a4305aa4847a32c0abf398a50e7a0dc90b7e..8f1ce4833230a1e693f0a76387022289fc1317d3 100644 (file)
@@ -46,12 +46,10 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
                pr_err("VGA switcheroo: switched nouveau on\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                nouveau_pmops_resume(&pdev->dev);
-               drm_kms_helper_poll_enable(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_ON;
        } else {
                pr_err("VGA switcheroo: switched nouveau off\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-               drm_kms_helper_poll_disable(dev);
                nouveau_switcheroo_optimus_dsm();
                nouveau_pmops_suspend(&pdev->dev);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
index 32fa94a9773f558686cf8a698ac3de53305a6007..cbd33e87b799a70b6085707de74bafaefe047e24 100644 (file)
@@ -275,6 +275,7 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
        struct nvkm_outp *outp, *outt, *pair;
        struct nvkm_conn *conn;
        struct nvkm_head *head;
+       struct nvkm_ior *ior;
        struct nvbios_connE connE;
        struct dcb_output dcbE;
        u8  hpd = 0, ver, hdr;
@@ -399,6 +400,19 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
                        return ret;
        }
 
+       /* Enforce identity-mapped SOR assignment for panels, which have
+        * certain bits (i.e. backlight controls) wired to a specific SOR.
+        */
+       list_for_each_entry(outp, &disp->outp, head) {
+               if (outp->conn->info.type == DCB_CONNECTOR_LVDS ||
+                   outp->conn->info.type == DCB_CONNECTOR_eDP) {
+                       ior = nvkm_ior_find(disp, SOR, ffs(outp->info.or) - 1);
+                       if (!WARN_ON(!ior))
+                               ior->identity = true;
+                       outp->identity = true;
+               }
+       }
+
        i = 0;
        list_for_each_entry(head, &disp->head, head)
                i = max(i, head->id + 1);
index 7c5bed29ffef164224825a3f9e0eaac7aa786239..5f301e632599b471c54291797b59c0d51e18be9a 100644 (file)
@@ -28,6 +28,7 @@
 
 #include <subdev/bios.h>
 #include <subdev/bios/init.h>
+#include <subdev/gpio.h>
 #include <subdev/i2c.h>
 
 #include <nvif/event.h>
@@ -412,14 +413,10 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
 }
 
 static void
-nvkm_dp_release(struct nvkm_outp *outp, struct nvkm_ior *ior)
+nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
 {
        struct nvkm_dp *dp = nvkm_dp(outp);
 
-       /* Prevent link from being retrained if sink sends an IRQ. */
-       atomic_set(&dp->lt.done, 0);
-       ior->dp.nr = 0;
-
        /* Execute DisableLT script from DP Info Table. */
        nvbios_init(&ior->disp->engine.subdev, dp->info.script[4],
                init.outp = &dp->outp.info;
@@ -428,6 +425,16 @@ nvkm_dp_release(struct nvkm_outp *outp, struct nvkm_ior *ior)
        );
 }
 
+static void
+nvkm_dp_release(struct nvkm_outp *outp)
+{
+       struct nvkm_dp *dp = nvkm_dp(outp);
+
+       /* Prevent link from being retrained if sink sends an IRQ. */
+       atomic_set(&dp->lt.done, 0);
+       dp->outp.ior->dp.nr = 0;
+}
+
 static int
 nvkm_dp_acquire(struct nvkm_outp *outp)
 {
@@ -491,7 +498,7 @@ done:
        return ret;
 }
 
-static void
+static bool
 nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
 {
        struct nvkm_i2c_aux *aux = dp->aux;
@@ -505,7 +512,7 @@ nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
 
                if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, dp->dpcd,
                                sizeof(dp->dpcd)))
-                       return;
+                       return true;
        }
 
        if (dp->present) {
@@ -515,6 +522,7 @@ nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
        }
 
        atomic_set(&dp->lt.done, 0);
+       return false;
 }
 
 static int
@@ -555,9 +563,38 @@ nvkm_dp_fini(struct nvkm_outp *outp)
 static void
 nvkm_dp_init(struct nvkm_outp *outp)
 {
+       struct nvkm_gpio *gpio = outp->disp->engine.subdev.device->gpio;
        struct nvkm_dp *dp = nvkm_dp(outp);
+
        nvkm_notify_put(&dp->outp.conn->hpd);
-       nvkm_dp_enable(dp, true);
+
+       /* eDP panels need powering on by us (if the VBIOS doesn't default it
+        * to on) before doing any AUX channel transactions.  LVDS panel power
+        * is handled by the SOR itself, and not required for LVDS DDC.
+        */
+       if (dp->outp.conn->info.type == DCB_CONNECTOR_eDP) {
+               int power = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
+               if (power == 0)
+                       nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
+
+               /* We delay here unconditionally, even if already powered,
+                * because some laptop panels have a significant resume
+                * delay before the panel begins responding.
+                *
+                * This is likely a bit of a hack, but there's no better way to
+                * handle this at the moment.
+                */
+               msleep(300);
+
+               /* If the eDP panel can't be detected, we need to restore
+                * the panel power GPIO to avoid breaking another output.
+                */
+               if (!nvkm_dp_enable(dp, true) && power == 0)
+                       nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 0);
+       } else {
+               nvkm_dp_enable(dp, true);
+       }
+
        nvkm_notify_get(&dp->hpd);
 }
 
@@ -576,6 +613,7 @@ nvkm_dp_func = {
        .fini = nvkm_dp_fini,
        .acquire = nvkm_dp_acquire,
        .release = nvkm_dp_release,
+       .disable = nvkm_dp_disable,
 };
 
 static int
index e0b4e0c5704ee8560a5584832da069ae59f699e1..19911211a12aa55a200ac75bc2f88958598d96bd 100644 (file)
@@ -16,6 +16,7 @@ struct nvkm_ior {
        char name[8];
 
        struct list_head head;
+       bool identity;
 
        struct nvkm_ior_state {
                struct nvkm_outp *outp;
index f89c7b977aa5d269b3a91a0026610eb036c0463e..def005dd5fdaae020fd3ef7404e9f58b5369e082 100644 (file)
@@ -501,11 +501,11 @@ nv50_disp_super_2_0(struct nv50_disp *disp, struct nvkm_head *head)
        nv50_disp_super_ied_off(head, ior, 2);
 
        /* If we're shutting down the OR's only active head, execute
-        * the output path's release function.
+        * the output path's disable function.
         */
        if (ior->arm.head == (1 << head->id)) {
-               if ((outp = ior->arm.outp) && outp->func->release)
-                       outp->func->release(outp, ior);
+               if ((outp = ior->arm.outp) && outp->func->disable)
+                       outp->func->disable(outp, ior);
        }
 }
 
index be9e7f8c3b2392fa96643f91391e606196ad7a67..c62030c96fba0932decd996fd46f3f9a308864d9 100644 (file)
@@ -93,6 +93,8 @@ nvkm_outp_release(struct nvkm_outp *outp, u8 user)
        if (ior) {
                outp->acquired &= ~user;
                if (!outp->acquired) {
+                       if (outp->func->release && outp->ior)
+                               outp->func->release(outp);
                        outp->ior->asy.outp = NULL;
                        outp->ior = NULL;
                }
@@ -127,17 +129,26 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user)
        if (proto == UNKNOWN)
                return -ENOSYS;
 
+       /* Deal with panels requiring identity-mapped SOR assignment. */
+       if (outp->identity) {
+               ior = nvkm_ior_find(outp->disp, SOR, ffs(outp->info.or) - 1);
+               if (WARN_ON(!ior))
+                       return -ENOSPC;
+               return nvkm_outp_acquire_ior(outp, user, ior);
+       }
+
        /* First preference is to reuse the OR that is currently armed
         * on HW, if any, in order to prevent unnecessary switching.
         */
        list_for_each_entry(ior, &outp->disp->ior, head) {
-               if (!ior->asy.outp && ior->arm.outp == outp)
+               if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp)
                        return nvkm_outp_acquire_ior(outp, user, ior);
        }
 
        /* Failing that, a completely unused OR is the next best thing. */
        list_for_each_entry(ior, &outp->disp->ior, head) {
-               if (!ior->asy.outp && ior->type == type && !ior->arm.outp &&
+               if (!ior->identity &&
+                   !ior->asy.outp && ior->type == type && !ior->arm.outp &&
                    (ior->func->route.set || ior->id == __ffs(outp->info.or)))
                        return nvkm_outp_acquire_ior(outp, user, ior);
        }
@@ -146,7 +157,7 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user)
         * but will be released during the next modeset.
         */
        list_for_each_entry(ior, &outp->disp->ior, head) {
-               if (!ior->asy.outp && ior->type == type &&
+               if (!ior->identity && !ior->asy.outp && ior->type == type &&
                    (ior->func->route.set || ior->id == __ffs(outp->info.or)))
                        return nvkm_outp_acquire_ior(outp, user, ior);
        }
@@ -245,7 +256,6 @@ nvkm_outp_ctor(const struct nvkm_outp_func *func, struct nvkm_disp *disp,
        outp->index = index;
        outp->info = *dcbE;
        outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index);
-       outp->or = ffs(outp->info.or) - 1;
 
        OUTP_DBG(outp, "type %02x loc %d or %d link %d con %x "
                       "edid %x bus %d head %x",
index ea84d7d5741ad5a5e771483a225f6fea8a663223..6c8aa5cfed9d839b561a473e0883df827d16eb3c 100644 (file)
@@ -13,10 +13,10 @@ struct nvkm_outp {
        struct dcb_output info;
 
        struct nvkm_i2c_bus *i2c;
-       int or;
 
        struct list_head head;
        struct nvkm_conn *conn;
+       bool identity;
 
        /* Assembly state. */
 #define NVKM_OUTP_PRIV 1
@@ -41,7 +41,8 @@ struct nvkm_outp_func {
        void (*init)(struct nvkm_outp *);
        void (*fini)(struct nvkm_outp *);
        int (*acquire)(struct nvkm_outp *);
-       void (*release)(struct nvkm_outp *, struct nvkm_ior *);
+       void (*release)(struct nvkm_outp *);
+       void (*disable)(struct nvkm_outp *, struct nvkm_ior *);
 };
 
 #define OUTP_MSG(o,l,f,a...) do {                                              \
index b80618e354919b11b00b6c88b12dbc648ba4680f..17235e940ca9e354226836b4bb9e8582a48e1f90 100644 (file)
@@ -86,10 +86,8 @@ pmu_load(struct nv50_devinit *init, u8 type, bool post,
        struct nvkm_bios *bios = subdev->device->bios;
        struct nvbios_pmuR pmu;
 
-       if (!nvbios_pmuRm(bios, type, &pmu)) {
-               nvkm_error(subdev, "VBIOS PMU fuc %02x not found\n", type);
+       if (!nvbios_pmuRm(bios, type, &pmu))
                return -EINVAL;
-       }
 
        if (!post)
                return 0;
@@ -124,29 +122,30 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
                return -EINVAL;
        }
 
+       /* Upload DEVINIT application from VBIOS onto PMU. */
        ret = pmu_load(init, 0x04, post, &exec, &args);
-       if (ret)
+       if (ret) {
+               nvkm_error(subdev, "VBIOS PMU/DEVINIT not found\n");
                return ret;
+       }
 
-       /* upload first chunk of init data */
+       /* Upload tables required by opcodes in boot scripts. */
        if (post) {
-               // devinit tables
                u32 pmu = pmu_args(init, args + 0x08, 0x08);
                u32 img = nvbios_rd16(bios, bit_I.offset + 0x14);
                u32 len = nvbios_rd16(bios, bit_I.offset + 0x16);
                pmu_data(init, pmu, img, len);
        }
 
-       /* upload second chunk of init data */
+       /* Upload boot scripts. */
        if (post) {
-               // devinit boot scripts
                u32 pmu = pmu_args(init, args + 0x08, 0x10);
                u32 img = nvbios_rd16(bios, bit_I.offset + 0x18);
                u32 len = nvbios_rd16(bios, bit_I.offset + 0x1a);
                pmu_data(init, pmu, img, len);
        }
 
-       /* execute init tables */
+       /* Execute DEVINIT. */
        if (post) {
                nvkm_wr32(device, 0x10a040, 0x00005000);
                pmu_exec(init, exec);
@@ -157,8 +156,11 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
                        return -ETIMEDOUT;
        }
 
-       /* load and execute some other ucode image (bios therm?) */
-       return pmu_load(init, 0x01, post, NULL, NULL);
+       /* Optional: Execute PRE_OS application on PMU, which should at
+        * least take care of fans until a full PMU has been loaded.
+        */
+       pmu_load(init, 0x01, post, NULL, NULL);
+       return 0;
 }
 
 static const struct nvkm_devinit_func
index de269eb482dd03cc7c2fd8b5a707d1566b1360e5..7459def78d504f006a2f7f0b625ba3372238b7ec 100644 (file)
@@ -1423,7 +1423,7 @@ nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
 void
 nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
 {
-       if (vmm->func->part && inst) {
+       if (inst && vmm->func->part) {
                mutex_lock(&vmm->mutex);
                vmm->func->part(vmm, inst);
                mutex_unlock(&vmm->mutex);
index a534b225e31b33b20184dab5c72fd7d98970f9f4..5fa0441bb6df8fea7fd54433a7f7b68cfa28d3be 100644 (file)
@@ -111,7 +111,8 @@ static int vexpress_muxfpga_probe(struct platform_device *pdev)
 }
 
 static const struct of_device_id vexpress_muxfpga_match[] = {
-       { .compatible = "arm,vexpress-muxfpga", }
+       { .compatible = "arm,vexpress-muxfpga", },
+       {}
 };
 
 static struct platform_driver vexpress_muxfpga_driver = {
index dd19d674055c625c20e4e78cfdc61b50fefa6743..8b0cd08034e0c74bb80f57fca43710228077b29c 100644 (file)
@@ -418,7 +418,6 @@ static const struct of_device_id sun4i_drv_of_table[] = {
        { .compatible = "allwinner,sun8i-a33-display-engine" },
        { .compatible = "allwinner,sun8i-a83t-display-engine" },
        { .compatible = "allwinner,sun8i-h3-display-engine" },
-       { .compatible = "allwinner,sun8i-r40-display-engine" },
        { .compatible = "allwinner,sun8i-v3s-display-engine" },
        { .compatible = "allwinner,sun9i-a80-display-engine" },
        { }
index 82502b351aec8b6a9983b0fa8f88e6bb3106be15..a564b5dfe082839896833db50ffbabbcfbe014ee 100644 (file)
@@ -398,7 +398,6 @@ static struct regmap_config sun8i_hdmi_phy_regmap_config = {
 
 static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = {
        .has_phy_clk = true,
-       .has_second_pll = true,
        .phy_init = &sun8i_hdmi_phy_init_h3,
        .phy_disable = &sun8i_hdmi_phy_disable_h3,
        .phy_config = &sun8i_hdmi_phy_config_h3,
index fc3713608f78d9742bb0364f7d81f2e3db10bd55..cb65b0ed53fd0e520a77327e185d0011e619b915 100644 (file)
@@ -545,22 +545,6 @@ static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = {
        .vi_num         = 1,
 };
 
-static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = {
-       .ccsc           = 0,
-       .mod_rate       = 297000000,
-       .scaler_mask    = 0xf,
-       .ui_num         = 3,
-       .vi_num         = 1,
-};
-
-static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = {
-       .ccsc           = 1,
-       .mod_rate       = 297000000,
-       .scaler_mask    = 0x3,
-       .ui_num         = 1,
-       .vi_num         = 1,
-};
-
 static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
        .vi_num = 2,
        .ui_num = 1,
@@ -582,14 +566,6 @@ static const struct of_device_id sun8i_mixer_of_table[] = {
                .compatible = "allwinner,sun8i-h3-de2-mixer-0",
                .data = &sun8i_h3_mixer0_cfg,
        },
-       {
-               .compatible = "allwinner,sun8i-r40-de2-mixer-0",
-               .data = &sun8i_r40_mixer0_cfg,
-       },
-       {
-               .compatible = "allwinner,sun8i-r40-de2-mixer-1",
-               .data = &sun8i_r40_mixer1_cfg,
-       },
        {
                .compatible = "allwinner,sun8i-v3s-de2-mixer",
                .data = &sun8i_v3s_mixer_cfg,
index 55fe398d8290d9107e9f803ad583122c5eb12522..d5240b777a8fdfc6da3aa5b9f57acab5597985ab 100644 (file)
@@ -253,7 +253,6 @@ static int sun8i_tcon_top_remove(struct platform_device *pdev)
 
 /* sun4i_drv uses this list to check if a device node is a TCON TOP */
 const struct of_device_id sun8i_tcon_top_of_table[] = {
-       { .compatible = "allwinner,sun8i-r40-tcon-top" },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, sun8i_tcon_top_of_table);
index dbb62f6eb48a5fba107acd9981b3cfae45f8f6a4..dd9ffded223b5fb09c025d518d5090b27716b560 100644 (file)
@@ -432,9 +432,11 @@ static void udl_fbdev_destroy(struct drm_device *dev,
 {
        drm_fb_helper_unregister_fbi(&ufbdev->helper);
        drm_fb_helper_fini(&ufbdev->helper);
-       drm_framebuffer_unregister_private(&ufbdev->ufb.base);
-       drm_framebuffer_cleanup(&ufbdev->ufb.base);
-       drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
+       if (ufbdev->ufb.obj) {
+               drm_framebuffer_unregister_private(&ufbdev->ufb.base);
+               drm_framebuffer_cleanup(&ufbdev->ufb.base);
+               drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
+       }
 }
 
 int udl_fbdev_init(struct drm_device *dev)
index cfb50fedfa2b3a49bd37198f093eff841ecd3603..a3275fa66b7b9754c32580857e5b23fba0c58ec5 100644 (file)
@@ -297,6 +297,9 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
        vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0],
                                                       vc4_state->crtc_h);
 
+       vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
+                              vc4_state->y_scaling[0] == VC4_SCALING_NONE);
+
        if (num_planes > 1) {
                vc4_state->is_yuv = true;
 
@@ -312,24 +315,17 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
                        vc4_get_scaling_mode(vc4_state->src_h[1],
                                             vc4_state->crtc_h);
 
-               /* YUV conversion requires that scaling be enabled,
-                * even on a plane that's otherwise 1:1.  Choose TPZ
-                * for simplicity.
+               /* YUV conversion requires that horizontal scaling be enabled,
+                * even on a plane that's otherwise 1:1. Looks like only PPF
+                * works in that case, so let's pick that one.
                 */
-               if (vc4_state->x_scaling[0] == VC4_SCALING_NONE)
-                       vc4_state->x_scaling[0] = VC4_SCALING_TPZ;
-               if (vc4_state->y_scaling[0] == VC4_SCALING_NONE)
-                       vc4_state->y_scaling[0] = VC4_SCALING_TPZ;
+               if (vc4_state->is_unity)
+                       vc4_state->x_scaling[0] = VC4_SCALING_PPF;
        } else {
                vc4_state->x_scaling[1] = VC4_SCALING_NONE;
                vc4_state->y_scaling[1] = VC4_SCALING_NONE;
        }
 
-       vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
-                              vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
-                              vc4_state->x_scaling[1] == VC4_SCALING_NONE &&
-                              vc4_state->y_scaling[1] == VC4_SCALING_NONE);
-
        /* No configuring scaling on the cursor plane, since it gets
           non-vblank-synced updates, and scaling requires
           LBM changes which have to be vblank-synced.
@@ -672,7 +668,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
                vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5);
        }
 
-       if (!vc4_state->is_unity) {
+       if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
+           vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
+           vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
+           vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
                /* LBM Base Address. */
                if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
                    vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
index 1f134570b7599483e2768cfcbe785409f2b7ba04..f0ab6b2313bbed89f5879fd27a6e2c268fc8c613 100644 (file)
@@ -3729,7 +3729,7 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 {
        struct vmw_buffer_object *vbo =
                container_of(bo, struct vmw_buffer_object, base);
-       struct ttm_operation_ctx ctx = { interruptible, true };
+       struct ttm_operation_ctx ctx = { interruptible, false };
        int ret;
 
        if (vbo->pin_count > 0)
index ddb1e9365a3e52ffc36465708ce676dff5158bec..b93c558dd86e0121741284becc87434a27b39d2a 100644 (file)
@@ -51,51 +51,34 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
 {
        struct vmwgfx_gmrid_man *gman =
                (struct vmwgfx_gmrid_man *)man->priv;
-       int ret = 0;
        int id;
 
        mem->mm_node = NULL;
 
+       id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
+       if (id < 0)
+               return id;
+
        spin_lock(&gman->lock);
 
        if (gman->max_gmr_pages > 0) {
                gman->used_gmr_pages += bo->num_pages;
                if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
-                       goto out_err_locked;
+                       goto nospace;
        }
 
-       do {
-               spin_unlock(&gman->lock);
-               if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) {
-                       ret = -ENOMEM;
-                       goto out_err;
-               }
-               spin_lock(&gman->lock);
-
-               ret = ida_get_new(&gman->gmr_ida, &id);
-               if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
-                       ida_remove(&gman->gmr_ida, id);
-                       ret = 0;
-                       goto out_err_locked;
-               }
-       } while (ret == -EAGAIN);
-
-       if (likely(ret == 0)) {
-               mem->mm_node = gman;
-               mem->start = id;
-               mem->num_pages = bo->num_pages;
-       } else
-               goto out_err_locked;
+       mem->mm_node = gman;
+       mem->start = id;
+       mem->num_pages = bo->num_pages;
 
        spin_unlock(&gman->lock);
        return 0;
 
-out_err:
-       spin_lock(&gman->lock);
-out_err_locked:
+nospace:
        gman->used_gmr_pages -= bo->num_pages;
        spin_unlock(&gman->lock);
-       return ret;
+       ida_free(&gman->gmr_ida, id);
+       return 0;
 }
 
 static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
@@ -105,8 +88,8 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
                (struct vmwgfx_gmrid_man *)man->priv;
 
        if (mem->mm_node) {
+               ida_free(&gman->gmr_ida, mem->start);
                spin_lock(&gman->lock);
-               ida_remove(&gman->gmr_ida, mem->start);
                gman->used_gmr_pages -= mem->num_pages;
                spin_unlock(&gman->lock);
                mem->mm_node = NULL;
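
A short note on the API change above: the old ida_pre_get()/ida_get_new()/ida_remove() loop is replaced by the simpler ida_alloc_max()/ida_free() interface. ida_alloc_max() may sleep with GFP_KERNEL, so it is called before the spinlock is taken, and the ID is handed back with ida_free() on failure paths. A minimal stand-alone sketch of that allocate/account/roll-back pattern follows; the resource names are hypothetical.

    #include <linux/idr.h>
    #include <linux/spinlock.h>

    static DEFINE_IDA(example_ida);
    static DEFINE_SPINLOCK(example_lock);
    static unsigned long example_used_pages;

    /* Allocate an ID in [0, max_ids - 1] and account the page usage. */
    static int example_alloc(unsigned int max_ids, unsigned long pages,
                             unsigned long max_pages)
    {
            int id = ida_alloc_max(&example_ida, max_ids - 1, GFP_KERNEL);

            if (id < 0)
                    return id;                      /* -ENOMEM or -ENOSPC */

            spin_lock(&example_lock);
            if (example_used_pages + pages > max_pages) {
                    spin_unlock(&example_lock);
                    ida_free(&example_ida, id);     /* roll back the ID */
                    return -ENOSPC;
            }
            example_used_pages += pages;
            spin_unlock(&example_lock);

            return id;
    }
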
index 23beff5d8e3c37e6904314db295cc9896657a38f..6a712a8d59e93b68fb68c98358af37b5e20d7680 100644 (file)
@@ -1512,21 +1512,19 @@ static int vmw_kms_check_display_memory(struct drm_device *dev,
                                        struct drm_rect *rects)
 {
        struct vmw_private *dev_priv = vmw_priv(dev);
-       struct drm_mode_config *mode_config = &dev->mode_config;
        struct drm_rect bounding_box = {0};
        u64 total_pixels = 0, pixel_mem, bb_mem;
        int i;
 
        for (i = 0; i < num_rects; i++) {
                /*
-                * Currently this check is limiting the topology within max
-                * texture/screentarget size. This should change in future when
-                * user-space support multiple fb with topology.
+                * For STDU, only the individual screen (screen target) size is
+                * limited by the SCREENTARGET_MAX_WIDTH/HEIGHT registers.
                 */
-               if (rects[i].x1 < 0 ||  rects[i].y1 < 0 ||
-                   rects[i].x2 > mode_config->max_width ||
-                   rects[i].y2 > mode_config->max_height) {
-                       DRM_ERROR("Invalid GUI layout.\n");
+               if (dev_priv->active_display_unit == vmw_du_screen_target &&
+                   (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
+                    drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
+                       DRM_ERROR("Screen size not supported.\n");
                        return -EINVAL;
                }
 
@@ -1615,7 +1613,7 @@ static int vmw_kms_check_topology(struct drm_device *dev,
                struct drm_connector_state *conn_state;
                struct vmw_connector_state *vmw_conn_state;
 
-               if (!new_crtc_state->enable && old_crtc_state->enable) {
+               if (!new_crtc_state->enable) {
                        rects[i].x1 = 0;
                        rects[i].y1 = 0;
                        rects[i].x2 = 0;
@@ -2216,12 +2214,16 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
        if (dev_priv->assume_16bpp)
                assumed_bpp = 2;
 
+       max_width  = min(max_width,  dev_priv->texture_max_width);
+       max_height = min(max_height, dev_priv->texture_max_height);
+
+       /*
+        * For STDU, a mode is additionally limited by the
+        * SVGA_REG_SCREENTARGET_MAX_WIDTH/HEIGHT registers.
+        */
        if (dev_priv->active_display_unit == vmw_du_screen_target) {
                max_width  = min(max_width,  dev_priv->stdu_max_width);
-               max_width  = min(max_width,  dev_priv->texture_max_width);
-
                max_height = min(max_height, dev_priv->stdu_max_height);
-               max_height = min(max_height, dev_priv->texture_max_height);
        }
 
        /* Add preferred mode */
@@ -2376,6 +2378,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
 {
        struct vmw_private *dev_priv = vmw_priv(dev);
+       struct drm_mode_config *mode_config = &dev->mode_config;
        struct drm_vmw_update_layout_arg *arg =
                (struct drm_vmw_update_layout_arg *)data;
        void __user *user_rects;
@@ -2421,6 +2424,21 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
                drm_rects[i].y1 = curr_rect.y;
                drm_rects[i].x2 = curr_rect.x + curr_rect.w;
                drm_rects[i].y2 = curr_rect.y + curr_rect.h;
+
+               /*
+                * Currently this check limits the topology to
+                * mode_config->max (which is actually the maximum texture size
+                * supported by the virtual device). This limit is here to address
+                * window managers that create a big framebuffer for the whole
+                * topology.
+                */
+               if (drm_rects[i].x1 < 0 ||  drm_rects[i].y1 < 0 ||
+                   drm_rects[i].x2 > mode_config->max_width ||
+                   drm_rects[i].y2 > mode_config->max_height) {
+                       DRM_ERROR("Invalid GUI layout.\n");
+                       ret = -EINVAL;
+                       goto out_free;
+               }
        }
 
        ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
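
A short note on the split validation above: each rect is now checked against the STDU screen-target limits in vmw_kms_check_display_memory(), while the whole topology is bounded by mode_config->max_width/height at update_layout ioctl time. The helper below condenses both checks into one illustrative function; its name and parameters are assumptions for the sketch, not vmwgfx API.

    #include <drm/drm_rect.h>

    static int example_check_rect(const struct drm_rect *r,
                                  int stdu_max_w, int stdu_max_h,
                                  int topo_max_w, int topo_max_h)
    {
            /* Per screen-target size limit (STDU only). */
            if (drm_rect_width(r) > stdu_max_w ||
                drm_rect_height(r) > stdu_max_h)
                    return -EINVAL;

            /* Whole-topology bound, as checked in the update_layout ioctl. */
            if (r->x1 < 0 || r->y1 < 0 ||
                r->x2 > topo_max_w || r->y2 > topo_max_h)
                    return -EINVAL;

            return 0;
    }
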
index 93f6b96ca7bbbe179dfa74957dd854c04b06b17c..f30e839f7bfd2d946bbe908e20211719646ff8b5 100644 (file)
@@ -1600,31 +1600,6 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
 
        dev_priv->active_display_unit = vmw_du_screen_target;
 
-       if (dev_priv->capabilities & SVGA_CAP_3D) {
-               /*
-                * For 3D VMs, display (scanout) buffer size is the smaller of
-                * max texture and max STDU
-                */
-               uint32_t max_width, max_height;
-
-               max_width = min(dev_priv->texture_max_width,
-                               dev_priv->stdu_max_width);
-               max_height = min(dev_priv->texture_max_height,
-                                dev_priv->stdu_max_height);
-
-               dev->mode_config.max_width = max_width;
-               dev->mode_config.max_height = max_height;
-       } else {
-               /*
-                * Given various display aspect ratios, there's no way to
-                * estimate these using prim_bb_mem.  So just set these to
-                * something arbitrarily large and we will reject any layout
-                * that doesn't fit prim_bb_mem later
-                */
-               dev->mode_config.max_width = 8192;
-               dev->mode_config.max_height = 8192;
-       }
-
        vmw_kms_create_implicit_placement_property(dev_priv, false);
 
        for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
index e125233e074bf82a1128543ca5c8f9fd2b28604c..80a01cd4c051338654b629f3205b32259dd53ba3 100644 (file)
@@ -1404,22 +1404,17 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
        *srf_out = NULL;
 
        if (for_scanout) {
-               uint32_t max_width, max_height;
-
                if (!svga3dsurface_is_screen_target_format(format)) {
                        DRM_ERROR("Invalid Screen Target surface format.");
                        return -EINVAL;
                }
 
-               max_width = min(dev_priv->texture_max_width,
-                               dev_priv->stdu_max_width);
-               max_height = min(dev_priv->texture_max_height,
-                                dev_priv->stdu_max_height);
-
-               if (size.width > max_width || size.height > max_height) {
+               if (size.width > dev_priv->texture_max_width ||
+                   size.height > dev_priv->texture_max_height) {
                        DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u",
                                  size.width, size.height,
-                                 max_width, max_height);
+                                 dev_priv->texture_max_width,
+                                 dev_priv->texture_max_height);
                        return -EINVAL;
                }
        } else {
@@ -1495,8 +1490,17 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
        if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
                srf->res.backup_size += sizeof(SVGA3dDXSOState);
 
+       /*
+        * Don't set the SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface
+        * whose size exceeds the STDU max width/height. This is really a
+        * workaround to support creation of a big framebuffer requested by some
+        * user-space for the whole topology. That big framebuffer won't really
+        * be used for binding to a screen target, since a separate surface is
+        * created during prepare_fb, so it's safe to ignore the flag here.
+        */
        if (dev_priv->active_display_unit == vmw_du_screen_target &&
-           for_scanout)
+           for_scanout && size.width <= dev_priv->stdu_max_width &&
+           size.height <= dev_priv->stdu_max_height)
                srf->flags |= SVGA3D_SURFACE_SCREENTARGET;
 
        /*
index a96bf46bc483fa42e2cd2e185a345ed24356b606..cf2a18571d484d078dc1eabc59a3d6ff0f11ab07 100644 (file)
@@ -215,6 +215,8 @@ static void vga_switcheroo_enable(void)
                        return;
 
                client->id = ret | ID_BIT_AUDIO;
+               if (client->ops->gpu_bound)
+                       client->ops->gpu_bound(client->pdev, ret);
        }
 
        vga_switcheroo_debugfs_init(&vgasr_priv);
index 25b7bd56ae1156aa209980f65a00d2c5a6efebf2..1cb41992aaa1f650f89cbf5ace72bd46786d7abc 100644 (file)
@@ -335,7 +335,8 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                struct hid_field *field, struct hid_usage *usage,
                unsigned long **bit, int *max)
 {
-       if (usage->hid == (HID_UP_CUSTOM | 0x0003)) {
+       if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
+                       usage->hid == (HID_UP_MSVENDOR | 0x0003)) {
                /* The fn key on Apple USB keyboards */
                set_bit(EV_REP, hi->input->evbit);
                hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
@@ -472,6 +473,12 @@ static const struct hid_device_id apple_devices[] = {
                .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
                .driver_data = APPLE_HAS_FN },
+       { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
+               .driver_data = APPLE_HAS_FN },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
+               .driver_data = APPLE_HAS_FN },
+       { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
+               .driver_data = APPLE_HAS_FN },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
                .driver_data = APPLE_HAS_FN },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO),
index 3da354af7a0aac6ca129124eb6e511ea32c9535b..44564f61e9cc3c85250e2e6d1e5ff47ed95dcd4d 100644 (file)
@@ -1000,7 +1000,7 @@ int hid_open_report(struct hid_device *device)
        parser = vzalloc(sizeof(struct hid_parser));
        if (!parser) {
                ret = -ENOMEM;
-               goto err;
+               goto alloc_err;
        }
 
        parser->device = device;
@@ -1039,6 +1039,7 @@ int hid_open_report(struct hid_device *device)
                                hid_err(device, "unbalanced delimiter at end of report description\n");
                                goto err;
                        }
+                       kfree(parser->collection_stack);
                        vfree(parser);
                        device->status |= HID_STAT_PARSED;
                        return 0;
@@ -1047,6 +1048,8 @@ int hid_open_report(struct hid_device *device)
 
        hid_err(device, "item fetching failed at offset %d\n", (int)(end - start));
 err:
+       kfree(parser->collection_stack);
+alloc_err:
        vfree(parser);
        hid_close_report(device);
        return ret;
index 79bdf0c7e3516bc0d5055309cac268145507f9d2..5146ee029db4bd6c35bb3a15cdd25b6caf54c64e 100644 (file)
@@ -88,6 +88,7 @@
 #define USB_DEVICE_ID_ANTON_TOUCH_PAD  0x3101
 
 #define USB_VENDOR_ID_APPLE            0x05ac
+#define BT_VENDOR_ID_APPLE             0x004c
 #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE        0x0304
 #define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d
 #define USB_DEVICE_ID_APPLE_MAGICTRACKPAD      0x030e
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO   0x0256
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS   0x0257
 #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI   0x0267
+#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI   0x026c
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI   0x0290
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO    0x0291
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS    0x0292
 #define I2C_VENDOR_ID_HANTICK          0x0911
 #define I2C_PRODUCT_ID_HANTICK_5288    0x5288
 
-#define I2C_VENDOR_ID_RAYD             0x2386
-#define I2C_PRODUCT_ID_RAYD_3118       0x3118
-
 #define USB_VENDOR_ID_HANWANG          0x0b57
 #define USB_DEVICE_ID_HANWANG_TABLET_FIRST     0x5000
 #define USB_DEVICE_ID_HANWANG_TABLET_LAST      0x8fff
 #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
 #define USB_DEVICE_ID_SAITEK_PS1000    0x0621
 #define USB_DEVICE_ID_SAITEK_RAT7_OLD  0x0ccb
+#define USB_DEVICE_ID_SAITEK_RAT7_CONTAGION    0x0ccd
 #define USB_DEVICE_ID_SAITEK_RAT7      0x0cd7
 #define USB_DEVICE_ID_SAITEK_RAT9      0x0cfa
 #define USB_DEVICE_ID_SAITEK_MMO7      0x0cd0
index 4e94ea3e280a3c66a00cdf0709569a131bf4ee49..a481eaf39e887bad41d89bf251c0dc4a8f2a2096 100644 (file)
@@ -1582,6 +1582,7 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid,
        input_dev->dev.parent = &hid->dev;
 
        hidinput->input = input_dev;
+       hidinput->application = application;
        list_add_tail(&hidinput->list, &hid->inputs);
 
        INIT_LIST_HEAD(&hidinput->reports);
@@ -1677,8 +1678,7 @@ static struct hid_input *hidinput_match_application(struct hid_report *report)
        struct hid_input *hidinput;
 
        list_for_each_entry(hidinput, &hid->inputs, list) {
-               if (hidinput->report &&
-                   hidinput->report->application == report->application)
+               if (hidinput->application == report->application)
                        return hidinput;
        }
 
@@ -1815,6 +1815,7 @@ void hidinput_disconnect(struct hid_device *hid)
                        input_unregister_device(hidinput->input);
                else
                        input_free_device(hidinput->input);
+               kfree(hidinput->name);
                kfree(hidinput);
        }
 
index 40fbb7c52723378eaf32d351f9541dd7ef0d61e3..da954f3f4da7fcf5071522eec3a6b5b62dd46eed 100644 (file)
@@ -1375,7 +1375,8 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
                                     struct hid_usage *usage,
                                     enum latency_mode latency,
                                     bool surface_switch,
-                                    bool button_switch)
+                                    bool button_switch,
+                                    bool *inputmode_found)
 {
        struct mt_device *td = hid_get_drvdata(hdev);
        struct mt_class *cls = &td->mtclass;
@@ -1387,6 +1388,14 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
 
        switch (usage->hid) {
        case HID_DG_INPUTMODE:
+               /*
+                * Some elan panels wrongly declare 2 input mode features,
+                * and silently ignore it when we set the value in the second
+                * field. Skip the second feature and hope for the best.
+                */
+               if (*inputmode_found)
+                       return false;
+
                if (cls->quirks & MT_QUIRK_FORCE_GET_FEATURE) {
                        report_len = hid_report_len(report);
                        buf = hid_alloc_report_buf(report, GFP_KERNEL);
@@ -1402,6 +1411,7 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
                }
 
                field->value[index] = td->inputmode_value;
+               *inputmode_found = true;
                return true;
 
        case HID_DG_CONTACTMAX:
@@ -1439,6 +1449,7 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
        struct hid_usage *usage;
        int i, j;
        bool update_report;
+       bool inputmode_found = false;
 
        rep_enum = &hdev->report_enum[HID_FEATURE_REPORT];
        list_for_each_entry(rep, &rep_enum->report_list, list) {
@@ -1457,7 +1468,8 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
                                                             usage,
                                                             latency,
                                                             surface_switch,
-                                                            button_switch))
+                                                            button_switch,
+                                                            &inputmode_found))
                                        update_report = true;
                        }
                }
@@ -1685,6 +1697,9 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
         */
        hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
 
+       if (id->group != HID_GROUP_MULTITOUCH_WIN_8)
+               hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+
        timer_setup(&td->release_timer, mt_expired_timeout, 0);
 
        ret = hid_parse(hdev);
index 39e642686ff0466322cdacf7ea410fd6306932e5..683861f324e3cd6842f30016f4911b4b088d8a2a 100644 (file)
@@ -183,6 +183,8 @@ static const struct hid_device_id saitek_devices[] = {
                .driver_data = SAITEK_RELEASE_MODE_RAT7 },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7),
                .driver_data = SAITEK_RELEASE_MODE_RAT7 },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_CONTAGION),
+               .driver_data = SAITEK_RELEASE_MODE_RAT7 },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9),
                .driver_data = SAITEK_RELEASE_MODE_RAT7 },
        { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),
index 50af72baa5ca9a6dbf37505cda39fbd4290668c4..2b63487057c25b7fb931b8823db31d15f69667be 100644 (file)
@@ -579,6 +579,28 @@ void sensor_hub_device_close(struct hid_sensor_hub_device *hsdev)
 }
 EXPORT_SYMBOL_GPL(sensor_hub_device_close);
 
+static __u8 *sensor_hub_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+               unsigned int *rsize)
+{
+       /*
+        * Checks whether the report descriptor of the Thinkpad Helix 2 has a
+        * logical minimum for the magnetic flux axes greater than the maximum.
+        */
+       if (hdev->product == USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA &&
+               *rsize == 2558 && rdesc[913] == 0x17 && rdesc[914] == 0x40 &&
+               rdesc[915] == 0x81 && rdesc[916] == 0x08 &&
+               rdesc[917] == 0x00 && rdesc[918] == 0x27 &&
+               rdesc[921] == 0x07 && rdesc[922] == 0x00) {
+               /* Sets negative logical minimum for mag x, y and z */
+               rdesc[914] = rdesc[935] = rdesc[956] = 0xc0;
+               rdesc[915] = rdesc[936] = rdesc[957] = 0x7e;
+               rdesc[916] = rdesc[937] = rdesc[958] = 0xf7;
+               rdesc[917] = rdesc[938] = rdesc[959] = 0xff;
+       }
+
+       return rdesc;
+}
+
 static int sensor_hub_probe(struct hid_device *hdev,
                                const struct hid_device_id *id)
 {
@@ -743,6 +765,7 @@ static struct hid_driver sensor_hub_driver = {
        .probe = sensor_hub_probe,
        .remove = sensor_hub_remove,
        .raw_event = sensor_hub_raw_event,
+       .report_fixup = sensor_hub_report_fixup,
 #ifdef CONFIG_PM
        .suspend = sensor_hub_suspend,
        .resume = sensor_hub_resume,
index 2ce194a84868e0fe5e133e241a0de368dfe3becf..f3076659361abcb0567804c298af25e278de8fa9 100644 (file)
@@ -170,8 +170,6 @@ static const struct i2c_hid_quirks {
                I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
        { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
                I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
-       { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_3118,
-               I2C_HID_QUIRK_RESEND_REPORT_DESCR },
        { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH,
                I2C_HID_QUIRK_RESEND_REPORT_DESCR },
        { 0, 0 }
@@ -1235,11 +1233,16 @@ static int i2c_hid_resume(struct device *dev)
        pm_runtime_enable(dev);
 
        enable_irq(client->irq);
-       ret = i2c_hid_hwreset(client);
+
+       /* Instead of resetting the device, simply power it on. This
+        * solves "incomplete reports" on Raydium devices 2386:3118 and
+        * 2386:4B33.
+        */
+       ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
        if (ret)
                return ret;
 
-       /* RAYDIUM device (2386:3118) need to re-send report descr cmd
+       /* Some devices need to re-send report descr cmd
         * after resume; after this it will be back to normal,
         * otherwise it issues too many incomplete reports.
         */
index 97869b7410ebb83ec78c31f2c9a1b65b6597a613..da133716bed05b63dadef22e072f05b4e7b0f5da 100644 (file)
@@ -29,6 +29,7 @@
 #define CNL_Ax_DEVICE_ID       0x9DFC
 #define GLK_Ax_DEVICE_ID       0x31A2
 #define CNL_H_DEVICE_ID                0xA37C
+#define SPT_H_DEVICE_ID                0xA135
 
 #define        REVISION_ID_CHT_A0      0x6
 #define        REVISION_ID_CHT_Ax_SI   0x0
index 050f9872f5c0dc7d465c71f505dcb995b53c4321..a1125a5c7965a255f8b5480f47cc8a53b534b76f 100644 (file)
@@ -38,6 +38,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
        {0, }
 };
 MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
index b1b548a21f919b31f8e5d7f670352ee6f7d25f56..c71cc857b649ddc23f3289e23ec43e898901fa1a 100644 (file)
@@ -1291,6 +1291,9 @@ static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
        if (!attribute->show)
                return -EIO;
 
+       if (chan->state != CHANNEL_OPENED_STATE)
+               return -EINVAL;
+
        return attribute->show(chan, buf);
 }
 
index 90837f7c7d0f3203ee64de86dcccd8a1e7cc26ad..f4c7516eb9891fa923a2a736bfcc45ef9b84a6b9 100644 (file)
@@ -302,14 +302,18 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
        return clamp_val(reg, 0, 1023) & (0xff << 2);
 }
 
-static u16 adt7475_read_word(struct i2c_client *client, int reg)
+static int adt7475_read_word(struct i2c_client *client, int reg)
 {
-       u16 val;
+       int val1, val2;
 
-       val = i2c_smbus_read_byte_data(client, reg);
-       val |= (i2c_smbus_read_byte_data(client, reg + 1) << 8);
+       val1 = i2c_smbus_read_byte_data(client, reg);
+       if (val1 < 0)
+               return val1;
+       val2 = i2c_smbus_read_byte_data(client, reg + 1);
+       if (val2 < 0)
+               return val2;
 
-       return val;
+       return val1 | (val2 << 8);
 }
 
 static void adt7475_write_word(struct i2c_client *client, int reg, u16 val)
@@ -962,13 +966,14 @@ static ssize_t show_pwmfreq(struct device *dev, struct device_attribute *attr,
 {
        struct adt7475_data *data = adt7475_update_device(dev);
        struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
-       int i = clamp_val(data->range[sattr->index] & 0xf, 0,
-                         ARRAY_SIZE(pwmfreq_table) - 1);
+       int idx;
 
        if (IS_ERR(data))
                return PTR_ERR(data);
+       idx = clamp_val(data->range[sattr->index] & 0xf, 0,
+                       ARRAY_SIZE(pwmfreq_table) - 1);
 
-       return sprintf(buf, "%d\n", pwmfreq_table[i]);
+       return sprintf(buf, "%d\n", pwmfreq_table[idx]);
 }
 
 static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr,
@@ -1004,6 +1009,10 @@ static ssize_t pwm_use_point2_pwm_at_crit_show(struct device *dev,
                                        char *buf)
 {
        struct adt7475_data *data = adt7475_update_device(dev);
+
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
        return sprintf(buf, "%d\n", !!(data->config4 & CONFIG4_MAXDUTY));
 }
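
The adt7475_read_word() change above makes the helper return a negative errno when either SMBus byte read fails, instead of OR-ing error codes into the 16-bit value. A minimal standalone sketch of the same pattern (plain C, not kernel code; read_byte() and the register numbers are made up for illustration):

/* Standalone sketch: combine two 8-bit reads into a 16-bit word while
 * propagating errors, as adt7475_read_word() now does. */
#include <stdio.h>

static int read_byte(int reg)
{
        if (reg == 0x10)
                return 0x34;            /* low byte */
        if (reg == 0x11)
                return 0x12;            /* high byte */
        return -5;                      /* -EIO: anything else fails */
}

static int read_word(int reg)
{
        int lo = read_byte(reg);
        if (lo < 0)
                return lo;              /* propagate the errno */
        int hi = read_byte(reg + 1);
        if (hi < 0)
                return hi;
        return lo | (hi << 8);          /* 16-bit value, always >= 0 */
}

int main(void)
{
        printf("0x%04x\n", read_word(0x10));    /* 0x1234 */
        printf("%d\n", read_word(0x20));        /* -5 */
        return 0;
}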
 
index e9e6aeabbf842ffd941e0476a185884adac85147..71d3445ba869c85654ae3dcaf3a5460e8fadb268 100644 (file)
@@ -17,7 +17,7 @@
  * Bi-directional Current/Power Monitor with I2C Interface
  * Datasheet: http://www.ti.com/product/ina230
  *
- * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
+ * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
  * Thanks to Jan Volkering
  *
  * This program is free software; you can redistribute it and/or modify
@@ -329,6 +329,15 @@ static int ina2xx_set_shunt(struct ina2xx_data *data, long val)
        return 0;
 }
 
+static ssize_t ina2xx_show_shunt(struct device *dev,
+                             struct device_attribute *da,
+                             char *buf)
+{
+       struct ina2xx_data *data = dev_get_drvdata(dev);
+
+       return snprintf(buf, PAGE_SIZE, "%li\n", data->rshunt);
+}
+
 static ssize_t ina2xx_store_shunt(struct device *dev,
                                  struct device_attribute *da,
                                  const char *buf, size_t count)
@@ -403,7 +412,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
 
 /* shunt resistance */
 static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
-                         ina2xx_show_value, ina2xx_store_shunt,
+                         ina2xx_show_shunt, ina2xx_store_shunt,
                          INA2XX_CALIBRATION);
 
 /* update interval (ina226 only) */
index c6bd61e4695abc01a7ed909f1d27f6944c45e246..78603b78cf410de903aa22d55147e6b600ab0398 100644 (file)
@@ -63,6 +63,7 @@
 #include <linux/bitops.h>
 #include <linux/dmi.h>
 #include <linux/io.h>
+#include <linux/nospec.h>
 #include "lm75.h"
 
 #define USE_ALTERNATE
@@ -206,8 +207,6 @@ superio_exit(int ioreg)
 
 #define NUM_FAN                7
 
-#define TEMP_SOURCE_VIRTUAL    0x1f
-
 /* Common and NCT6775 specific data */
 
 /* Voltage min/max registers for nr=7..14 are in bank 5 */
@@ -298,8 +297,9 @@ static const u16 NCT6775_REG_PWM_READ[] = {
 
 static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 };
 static const u16 NCT6775_REG_FAN_MIN[] = { 0x3b, 0x3c, 0x3d };
-static const u16 NCT6775_REG_FAN_PULSES[] = { 0x641, 0x642, 0x643, 0x644, 0 };
-static const u16 NCT6775_FAN_PULSE_SHIFT[] = { 0, 0, 0, 0, 0, 0 };
+static const u16 NCT6775_REG_FAN_PULSES[NUM_FAN] = {
+       0x641, 0x642, 0x643, 0x644 };
+static const u16 NCT6775_FAN_PULSE_SHIFT[NUM_FAN] = { };
 
 static const u16 NCT6775_REG_TEMP[] = {
        0x27, 0x150, 0x250, 0x62b, 0x62c, 0x62d };
@@ -372,6 +372,7 @@ static const char *const nct6775_temp_label[] = {
 };
 
 #define NCT6775_TEMP_MASK      0x001ffffe
+#define NCT6775_VIRT_TEMP_MASK 0x00000000
 
 static const u16 NCT6775_REG_TEMP_ALTERNATE[32] = {
        [13] = 0x661,
@@ -424,8 +425,8 @@ static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0 };
 
 static const u16 NCT6776_REG_FAN_MIN[] = {
        0x63a, 0x63c, 0x63e, 0x640, 0x642, 0x64a, 0x64c };
-static const u16 NCT6776_REG_FAN_PULSES[] = {
-       0x644, 0x645, 0x646, 0x647, 0x648, 0x649, 0 };
+static const u16 NCT6776_REG_FAN_PULSES[NUM_FAN] = {
+       0x644, 0x645, 0x646, 0x647, 0x648, 0x649 };
 
 static const u16 NCT6776_REG_WEIGHT_DUTY_BASE[] = {
        0x13e, 0x23e, 0x33e, 0x83e, 0x93e, 0xa3e };
@@ -460,6 +461,7 @@ static const char *const nct6776_temp_label[] = {
 };
 
 #define NCT6776_TEMP_MASK      0x007ffffe
+#define NCT6776_VIRT_TEMP_MASK 0x00000000
 
 static const u16 NCT6776_REG_TEMP_ALTERNATE[32] = {
        [14] = 0x401,
@@ -500,9 +502,9 @@ static const s8 NCT6779_BEEP_BITS[] = {
        30, 31 };                       /* intrusion0, intrusion1 */
 
 static const u16 NCT6779_REG_FAN[] = {
-       0x4b0, 0x4b2, 0x4b4, 0x4b6, 0x4b8, 0x4ba, 0x660 };
-static const u16 NCT6779_REG_FAN_PULSES[] = {
-       0x644, 0x645, 0x646, 0x647, 0x648, 0x649, 0 };
+       0x4c0, 0x4c2, 0x4c4, 0x4c6, 0x4c8, 0x4ca, 0x4ce };
+static const u16 NCT6779_REG_FAN_PULSES[NUM_FAN] = {
+       0x644, 0x645, 0x646, 0x647, 0x648, 0x649 };
 
 static const u16 NCT6779_REG_CRITICAL_PWM_ENABLE[] = {
        0x136, 0x236, 0x336, 0x836, 0x936, 0xa36, 0xb36 };
@@ -558,7 +560,9 @@ static const char *const nct6779_temp_label[] = {
 };
 
 #define NCT6779_TEMP_MASK      0x07ffff7e
+#define NCT6779_VIRT_TEMP_MASK 0x00000000
 #define NCT6791_TEMP_MASK      0x87ffff7e
+#define NCT6791_VIRT_TEMP_MASK 0x80000000
 
 static const u16 NCT6779_REG_TEMP_ALTERNATE[32]
        = { 0x490, 0x491, 0x492, 0x493, 0x494, 0x495, 0, 0,
@@ -637,6 +641,7 @@ static const char *const nct6792_temp_label[] = {
 };
 
 #define NCT6792_TEMP_MASK      0x9fffff7e
+#define NCT6792_VIRT_TEMP_MASK 0x80000000
 
 static const char *const nct6793_temp_label[] = {
        "",
@@ -674,6 +679,7 @@ static const char *const nct6793_temp_label[] = {
 };
 
 #define NCT6793_TEMP_MASK      0xbfff037e
+#define NCT6793_VIRT_TEMP_MASK 0x80000000
 
 static const char *const nct6795_temp_label[] = {
        "",
@@ -711,6 +717,7 @@ static const char *const nct6795_temp_label[] = {
 };
 
 #define NCT6795_TEMP_MASK      0xbfffff7e
+#define NCT6795_VIRT_TEMP_MASK 0x80000000
 
 static const char *const nct6796_temp_label[] = {
        "",
@@ -723,8 +730,8 @@ static const char *const nct6796_temp_label[] = {
        "AUXTIN4",
        "SMBUSMASTER 0",
        "SMBUSMASTER 1",
-       "",
-       "",
+       "Virtual_TEMP",
+       "Virtual_TEMP",
        "",
        "",
        "",
@@ -747,7 +754,8 @@ static const char *const nct6796_temp_label[] = {
        "Virtual_TEMP"
 };
 
-#define NCT6796_TEMP_MASK      0xbfff03fe
+#define NCT6796_TEMP_MASK      0xbfff0ffe
+#define NCT6796_VIRT_TEMP_MASK 0x80000c00
 
 /* NCT6102D/NCT6106D specific data */
 
@@ -778,8 +786,8 @@ static const u16 NCT6106_REG_TEMP_CONFIG[] = {
 
 static const u16 NCT6106_REG_FAN[] = { 0x20, 0x22, 0x24 };
 static const u16 NCT6106_REG_FAN_MIN[] = { 0xe0, 0xe2, 0xe4 };
-static const u16 NCT6106_REG_FAN_PULSES[] = { 0xf6, 0xf6, 0xf6, 0, 0 };
-static const u16 NCT6106_FAN_PULSE_SHIFT[] = { 0, 2, 4, 0, 0 };
+static const u16 NCT6106_REG_FAN_PULSES[] = { 0xf6, 0xf6, 0xf6 };
+static const u16 NCT6106_FAN_PULSE_SHIFT[] = { 0, 2, 4 };
 
 static const u8 NCT6106_REG_PWM_MODE[] = { 0xf3, 0xf3, 0xf3 };
 static const u8 NCT6106_PWM_MODE_MASK[] = { 0x01, 0x02, 0x04 };
@@ -916,6 +924,11 @@ static unsigned int fan_from_reg16(u16 reg, unsigned int divreg)
        return 1350000U / (reg << divreg);
 }
 
+static unsigned int fan_from_reg_rpm(u16 reg, unsigned int divreg)
+{
+       return reg;
+}
+
 static u16 fan_to_reg(u32 fan, unsigned int divreg)
 {
        if (!fan)
@@ -968,6 +981,7 @@ struct nct6775_data {
        u16 reg_temp_config[NUM_TEMP];
        const char * const *temp_label;
        u32 temp_mask;
+       u32 virt_temp_mask;
 
        u16 REG_CONFIG;
        u16 REG_VBAT;
@@ -1275,11 +1289,11 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg)
        case nct6795:
        case nct6796:
                return reg == 0x150 || reg == 0x153 || reg == 0x155 ||
-                 ((reg & 0xfff0) == 0x4b0 && (reg & 0x000f) < 0x0b) ||
+                 (reg & 0xfff0) == 0x4c0 ||
                  reg == 0x402 ||
                  reg == 0x63a || reg == 0x63c || reg == 0x63e ||
                  reg == 0x640 || reg == 0x642 || reg == 0x64a ||
-                 reg == 0x64c || reg == 0x660 ||
+                 reg == 0x64c ||
                  reg == 0x73 || reg == 0x75 || reg == 0x77 || reg == 0x79 ||
                  reg == 0x7b || reg == 0x7d;
        }
@@ -1557,7 +1571,7 @@ static void nct6775_update_pwm(struct device *dev)
                reg = nct6775_read_value(data, data->REG_WEIGHT_TEMP_SEL[i]);
                data->pwm_weight_temp_sel[i] = reg & 0x1f;
                /* If weight is disabled, report weight source as 0 */
-               if (j == 1 && !(reg & 0x80))
+               if (!(reg & 0x80))
                        data->pwm_weight_temp_sel[i] = 0;
 
                /* Weight temp data */
@@ -1681,9 +1695,13 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
                        if (data->has_fan_min & BIT(i))
                                data->fan_min[i] = nct6775_read_value(data,
                                           data->REG_FAN_MIN[i]);
-                       data->fan_pulses[i] =
-                         (nct6775_read_value(data, data->REG_FAN_PULSES[i])
-                               >> data->FAN_PULSE_SHIFT[i]) & 0x03;
+
+                       if (data->REG_FAN_PULSES[i]) {
+                               data->fan_pulses[i] =
+                                 (nct6775_read_value(data,
+                                                     data->REG_FAN_PULSES[i])
+                                  >> data->FAN_PULSE_SHIFT[i]) & 0x03;
+                       }
 
                        nct6775_select_fan_div(dev, data, i, reg);
                }
@@ -2689,6 +2707,7 @@ store_pwm_weight_temp_sel(struct device *dev, struct device_attribute *attr,
                return err;
        if (val > NUM_TEMP)
                return -EINVAL;
+       val = array_index_nospec(val, NUM_TEMP + 1);
        if (val && (!(data->have_temp & BIT(val - 1)) ||
                    !data->temp_src[val - 1]))
                return -EINVAL;
@@ -3637,6 +3656,7 @@ static int nct6775_probe(struct platform_device *pdev)
 
                data->temp_label = nct6776_temp_label;
                data->temp_mask = NCT6776_TEMP_MASK;
+               data->virt_temp_mask = NCT6776_VIRT_TEMP_MASK;
 
                data->REG_VBAT = NCT6106_REG_VBAT;
                data->REG_DIODE = NCT6106_REG_DIODE;
@@ -3715,6 +3735,7 @@ static int nct6775_probe(struct platform_device *pdev)
 
                data->temp_label = nct6775_temp_label;
                data->temp_mask = NCT6775_TEMP_MASK;
+               data->virt_temp_mask = NCT6775_VIRT_TEMP_MASK;
 
                data->REG_CONFIG = NCT6775_REG_CONFIG;
                data->REG_VBAT = NCT6775_REG_VBAT;
@@ -3787,6 +3808,7 @@ static int nct6775_probe(struct platform_device *pdev)
 
                data->temp_label = nct6776_temp_label;
                data->temp_mask = NCT6776_TEMP_MASK;
+               data->virt_temp_mask = NCT6776_VIRT_TEMP_MASK;
 
                data->REG_CONFIG = NCT6775_REG_CONFIG;
                data->REG_VBAT = NCT6775_REG_VBAT;
@@ -3851,7 +3873,7 @@ static int nct6775_probe(struct platform_device *pdev)
                data->ALARM_BITS = NCT6779_ALARM_BITS;
                data->BEEP_BITS = NCT6779_BEEP_BITS;
 
-               data->fan_from_reg = fan_from_reg13;
+               data->fan_from_reg = fan_from_reg_rpm;
                data->fan_from_reg_min = fan_from_reg13;
                data->target_temp_mask = 0xff;
                data->tolerance_mask = 0x07;
@@ -3859,6 +3881,7 @@ static int nct6775_probe(struct platform_device *pdev)
 
                data->temp_label = nct6779_temp_label;
                data->temp_mask = NCT6779_TEMP_MASK;
+               data->virt_temp_mask = NCT6779_VIRT_TEMP_MASK;
 
                data->REG_CONFIG = NCT6775_REG_CONFIG;
                data->REG_VBAT = NCT6775_REG_VBAT;
@@ -3931,7 +3954,7 @@ static int nct6775_probe(struct platform_device *pdev)
                data->ALARM_BITS = NCT6791_ALARM_BITS;
                data->BEEP_BITS = NCT6779_BEEP_BITS;
 
-               data->fan_from_reg = fan_from_reg13;
+               data->fan_from_reg = fan_from_reg_rpm;
                data->fan_from_reg_min = fan_from_reg13;
                data->target_temp_mask = 0xff;
                data->tolerance_mask = 0x07;
@@ -3942,22 +3965,27 @@ static int nct6775_probe(struct platform_device *pdev)
                case nct6791:
                        data->temp_label = nct6779_temp_label;
                        data->temp_mask = NCT6791_TEMP_MASK;
+                       data->virt_temp_mask = NCT6791_VIRT_TEMP_MASK;
                        break;
                case nct6792:
                        data->temp_label = nct6792_temp_label;
                        data->temp_mask = NCT6792_TEMP_MASK;
+                       data->virt_temp_mask = NCT6792_VIRT_TEMP_MASK;
                        break;
                case nct6793:
                        data->temp_label = nct6793_temp_label;
                        data->temp_mask = NCT6793_TEMP_MASK;
+                       data->virt_temp_mask = NCT6793_VIRT_TEMP_MASK;
                        break;
                case nct6795:
                        data->temp_label = nct6795_temp_label;
                        data->temp_mask = NCT6795_TEMP_MASK;
+                       data->virt_temp_mask = NCT6795_VIRT_TEMP_MASK;
                        break;
                case nct6796:
                        data->temp_label = nct6796_temp_label;
                        data->temp_mask = NCT6796_TEMP_MASK;
+                       data->virt_temp_mask = NCT6796_VIRT_TEMP_MASK;
                        break;
                }
 
@@ -4141,7 +4169,7 @@ static int nct6775_probe(struct platform_device *pdev)
                 * for each fan reflects a different temperature, and there
                 * are no duplicates.
                 */
-               if (src != TEMP_SOURCE_VIRTUAL) {
+               if (!(data->virt_temp_mask & BIT(src))) {
                        if (mask & BIT(src))
                                continue;
                        mask |= BIT(src);
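
The store_pwm_weight_temp_sel() hunk above adds array_index_nospec() right after the bounds check so a user-supplied index cannot be used speculatively out of range. A standalone sketch of the masking idea (plain C; index_nospec() is a simplified analogue written for illustration, not the kernel implementation, and as written a compiler may still emit a branch for the comparison):

/* Standalone sketch: bounds check followed by index sanitisation. */
#include <stdio.h>
#include <stddef.h>

static size_t index_nospec(size_t index, size_t size)
{
        /* all-ones mask when index < size, zero otherwise */
        size_t mask = (size_t)0 - (size_t)(index < size);
        return index & mask;
}

int main(void)
{
        static const char *const table[4] = { "a", "b", "c", "d" };
        size_t val = 3;                 /* pretend this came from userspace */

        if (val > 3)                    /* architectural bounds check */
                return 1;
        val = index_nospec(val, 4);     /* keep speculation in range too */
        printf("%s\n", table[val]);     /* "d" */
        return 0;
}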
index fb4e4a6bb1f63c2b34749a31ad4b18105f297d5b..be5ba469089531b26b0ba2d0b1866c20f3e5312b 100644 (file)
@@ -164,3 +164,4 @@ module_platform_driver(rpi_hwmon_driver);
 MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
 MODULE_DESCRIPTION("Raspberry Pi voltage sensor driver");
 MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:raspberrypi-hwmon");
index 6ec65adaba49569ab7b9775f856859a0fcfbd967..c33dcfb87993b531d40c0f2cdb4954b08f47668a 100644 (file)
@@ -110,8 +110,8 @@ static int sclhi(struct i2c_algo_bit_data *adap)
        }
 #ifdef DEBUG
        if (jiffies != start && i2c_debug >= 3)
-               pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go "
-                        "high\n", jiffies - start);
+               pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go high\n",
+                        jiffies - start);
 #endif
 
 done:
@@ -171,8 +171,9 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c)
                setsda(adap, sb);
                udelay((adap->udelay + 1) / 2);
                if (sclhi(adap) < 0) { /* timed out */
-                       bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, "
-                               "timeout at bit #%d\n", (int)c, i);
+                       bit_dbg(1, &i2c_adap->dev,
+                               "i2c_outb: 0x%02x, timeout at bit #%d\n",
+                               (int)c, i);
                        return -ETIMEDOUT;
                }
                /* FIXME do arbitration here:
@@ -185,8 +186,8 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c)
        }
        sdahi(adap);
        if (sclhi(adap) < 0) { /* timeout */
-               bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, "
-                       "timeout at ack\n", (int)c);
+               bit_dbg(1, &i2c_adap->dev,
+                       "i2c_outb: 0x%02x, timeout at ack\n", (int)c);
                return -ETIMEDOUT;
        }
 
@@ -215,8 +216,9 @@ static int i2c_inb(struct i2c_adapter *i2c_adap)
        sdahi(adap);
        for (i = 0; i < 8; i++) {
                if (sclhi(adap) < 0) { /* timeout */
-                       bit_dbg(1, &i2c_adap->dev, "i2c_inb: timeout at bit "
-                               "#%d\n", 7 - i);
+                       bit_dbg(1, &i2c_adap->dev,
+                               "i2c_inb: timeout at bit #%d\n",
+                               7 - i);
                        return -ETIMEDOUT;
                }
                indata *= 2;
@@ -265,8 +267,9 @@ static int test_bus(struct i2c_adapter *i2c_adap)
                goto bailout;
        }
        if (!scl) {
-               printk(KERN_WARNING "%s: SCL unexpected low "
-                      "while pulling SDA low!\n", name);
+               printk(KERN_WARNING
+                      "%s: SCL unexpected low while pulling SDA low!\n",
+                      name);
                goto bailout;
        }
 
@@ -278,8 +281,9 @@ static int test_bus(struct i2c_adapter *i2c_adap)
                goto bailout;
        }
        if (!scl) {
-               printk(KERN_WARNING "%s: SCL unexpected low "
-                      "while pulling SDA high!\n", name);
+               printk(KERN_WARNING
+                      "%s: SCL unexpected low while pulling SDA high!\n",
+                      name);
                goto bailout;
        }
 
@@ -291,8 +295,9 @@ static int test_bus(struct i2c_adapter *i2c_adap)
                goto bailout;
        }
        if (!sda) {
-               printk(KERN_WARNING "%s: SDA unexpected low "
-                      "while pulling SCL low!\n", name);
+               printk(KERN_WARNING
+                      "%s: SDA unexpected low while pulling SCL low!\n",
+                      name);
                goto bailout;
        }
 
@@ -304,8 +309,9 @@ static int test_bus(struct i2c_adapter *i2c_adap)
                goto bailout;
        }
        if (!sda) {
-               printk(KERN_WARNING "%s: SDA unexpected low "
-                      "while pulling SCL high!\n", name);
+               printk(KERN_WARNING
+                      "%s: SDA unexpected low while pulling SCL high!\n",
+                      name);
                goto bailout;
        }
 
@@ -352,8 +358,8 @@ static int try_address(struct i2c_adapter *i2c_adap,
                i2c_start(adap);
        }
        if (i && ret)
-               bit_dbg(1, &i2c_adap->dev, "Used %d tries to %s client at "
-                       "0x%02x: %s\n", i + 1,
+               bit_dbg(1, &i2c_adap->dev,
+                       "Used %d tries to %s client at 0x%02x: %s\n", i + 1,
                        addr & 1 ? "read from" : "write to", addr >> 1,
                        ret == 1 ? "success" : "failed, timeout?");
        return ret;
@@ -442,8 +448,9 @@ static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
                        if (inval <= 0 || inval > I2C_SMBUS_BLOCK_MAX) {
                                if (!(flags & I2C_M_NO_RD_ACK))
                                        acknak(i2c_adap, 0);
-                               dev_err(&i2c_adap->dev, "readbytes: invalid "
-                                       "block length (%d)\n", inval);
+                               dev_err(&i2c_adap->dev,
+                                       "readbytes: invalid block length (%d)\n",
+                                       inval);
                                return -EPROTO;
                        }
                        /* The original count value accounts for the extra
@@ -506,8 +513,8 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
                        return -ENXIO;
                }
                if (flags & I2C_M_RD) {
-                       bit_dbg(3, &i2c_adap->dev, "emitting repeated "
-                               "start condition\n");
+                       bit_dbg(3, &i2c_adap->dev,
+                               "emitting repeated start condition\n");
                        i2c_repstart(adap);
                        /* okay, now switch into reading mode */
                        addr |= 0x01;
@@ -564,8 +571,8 @@ static int bit_xfer(struct i2c_adapter *i2c_adap,
                        }
                        ret = bit_doAddress(i2c_adap, pmsg);
                        if ((ret != 0) && !nak_ok) {
-                               bit_dbg(1, &i2c_adap->dev, "NAK from "
-                                       "device addr 0x%02x msg #%d\n",
+                               bit_dbg(1, &i2c_adap->dev,
+                                       "NAK from device addr 0x%02x msg #%d\n",
                                        msgs[i].addr, i);
                                goto bailout;
                        }
index e18442b9973ae69d27e193478b9b6a9777d6a10b..94d94b4a9a0d989d932101422eb87feeb9cf7525 100644 (file)
@@ -708,7 +708,6 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
        i2c_set_adapdata(adap, dev);
 
        if (dev->pm_disabled) {
-               dev_pm_syscore_device(dev->dev, true);
                irq_flags = IRQF_NO_SUSPEND;
        } else {
                irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND;
index 1a8d2da5b000988c82f8f084f90f9576e7e758d8..b5750fd851251e74b0558576774da4a82d81c757 100644 (file)
@@ -434,6 +434,9 @@ static int dw_i2c_plat_suspend(struct device *dev)
 {
        struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
 
+       if (i_dev->pm_disabled)
+               return 0;
+
        i_dev->disable(i_dev);
        i2c_dw_prepare_clk(i_dev, false);
 
@@ -444,7 +447,9 @@ static int dw_i2c_plat_resume(struct device *dev)
 {
        struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
 
-       i2c_dw_prepare_clk(i_dev, true);
+       if (!i_dev->pm_disabled)
+               i2c_dw_prepare_clk(i_dev, true);
+
        i_dev->init(i_dev);
 
        return 0;
index 941c223f64914450fc2e2d76a293cd6202441b0b..c91e145ef5a56dbb1a512c23611f06ad7d22aa05 100644 (file)
 
 #define SBREG_BAR              0x10
 #define SBREG_SMBCTRL          0xc6000c
+#define SBREG_SMBCTRL_DNV      0xcf000c
 
 /* Host status bits for SMBPCISTS */
 #define SMBPCISTS_INTS         BIT(3)
@@ -1399,7 +1400,11 @@ static void i801_add_tco(struct i801_priv *priv)
        spin_unlock(&p2sb_spinlock);
 
        res = &tco_res[ICH_RES_MEM_OFF];
-       res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+       if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS)
+               res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV;
+       else
+               res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+
        res->end = res->start + 3;
        res->flags = IORESOURCE_MEM;
 
@@ -1415,6 +1420,13 @@ static void i801_add_tco(struct i801_priv *priv)
 }
 
 #ifdef CONFIG_ACPI
+static bool i801_acpi_is_smbus_ioport(const struct i801_priv *priv,
+                                     acpi_physical_address address)
+{
+       return address >= priv->smba &&
+              address <= pci_resource_end(priv->pci_dev, SMBBAR);
+}
+
 static acpi_status
 i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
                     u64 *value, void *handler_context, void *region_context)
@@ -1430,7 +1442,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
         */
        mutex_lock(&priv->acpi_lock);
 
-       if (!priv->acpi_reserved) {
+       if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) {
                priv->acpi_reserved = true;
 
                dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n");
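
The i801_acpi_is_smbus_ioport() helper added above means only ACPI opregion accesses that fall inside the driver's own SMBus I/O window are treated as a conflict and warned about. A standalone sketch of the range test with made-up addresses (plain C, not driver code):

/* Standalone sketch of the address-in-window test. */
#include <stdbool.h>
#include <stdio.h>

static bool in_smbus_range(unsigned long addr, unsigned long start,
                           unsigned long end)
{
        return addr >= start && addr <= end;
}

int main(void)
{
        unsigned long smba = 0xf040, smba_end = 0xf05f; /* example window */

        printf("%d\n", in_smbus_range(0xf044, smba, smba_end)); /* 1: ours */
        printf("%d\n", in_smbus_range(0x0cf8, smba, smba_end)); /* 0: ignore */
        return 0;
}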
index 6d975f5221ca0e37410c9d2e770f7d78bfe270b2..06c4c767af322aa2704d5ae51a59e5c103579fc5 100644 (file)
@@ -538,7 +538,6 @@ static const struct i2c_algorithm lpi2c_imx_algo = {
 
 static const struct of_device_id lpi2c_imx_of_match[] = {
        { .compatible = "fsl,imx7ulp-lpi2c" },
-       { .compatible = "fsl,imx8dv-lpi2c" },
        { },
 };
 MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match);
index 439e8778f849852f1a38cbf274502c4710e0065e..818cab14e87c5ea47e5c6daaa7b76c818465df52 100644 (file)
@@ -507,8 +507,6 @@ static void sh_mobile_i2c_dma_callback(void *data)
        pd->pos = pd->msg->len;
        pd->stop_after_dma = true;
 
-       i2c_release_dma_safe_msg_buf(pd->msg, pd->dma_buf);
-
        iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE);
 }
 
@@ -602,8 +600,8 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd)
        dma_async_issue_pending(chan);
 }
 
-static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg,
-                   bool do_init)
+static void start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg,
+                    bool do_init)
 {
        if (do_init) {
                /* Initialize channel registers */
@@ -627,7 +625,6 @@ static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg,
 
        /* Enable all interrupts to begin with */
        iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
-       return 0;
 }
 
 static int poll_dte(struct sh_mobile_i2c_data *pd)
@@ -698,9 +695,7 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
                pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP;
                pd->stop_after_dma = false;
 
-               err = start_ch(pd, msg, do_start);
-               if (err)
-                       break;
+               start_ch(pd, msg, do_start);
 
                if (do_start)
                        i2c_op(pd, OP_START, 0);
@@ -709,6 +704,10 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
                timeout = wait_event_timeout(pd->wait,
                                       pd->sr & (ICSR_TACK | SW_DONE),
                                       adapter->timeout);
+
+               /* 'stop_after_dma' tells if DMA transfer was complete */
+               i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg, pd->stop_after_dma);
+
                if (!timeout) {
                        dev_err(pd->dev, "Transfer request timed out\n");
                        if (pd->dma_direction != DMA_NONE)
index 9918bdd816196281ae6c64d364d34ecaf89a65ef..a403e8579b652b6e2486f746b91b9357c3ab7381 100644 (file)
@@ -401,11 +401,8 @@ static int uniphier_fi2c_master_xfer(struct i2c_adapter *adap,
                return ret;
 
        for (msg = msgs; msg < emsg; msg++) {
-               /* If next message is read, skip the stop condition */
-               bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
-               /* but, force it if I2C_M_STOP is set */
-               if (msg->flags & I2C_M_STOP)
-                       stop = true;
+               /* Emit STOP if it is the last message or I2C_M_STOP is set. */
+               bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
 
                ret = uniphier_fi2c_master_xfer_one(adap, msg, stop);
                if (ret)
index bb181b0882919acef145d27ec236b705b8f2e7f0..454f914ae66dbd49931575122bb7c7dea662b11b 100644 (file)
@@ -248,11 +248,8 @@ static int uniphier_i2c_master_xfer(struct i2c_adapter *adap,
                return ret;
 
        for (msg = msgs; msg < emsg; msg++) {
-               /* If next message is read, skip the stop condition */
-               bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
-               /* but, force it if I2C_M_STOP is set */
-               if (msg->flags & I2C_M_STOP)
-                       stop = true;
+               /* Emit STOP if it is the last message or I2C_M_STOP is set. */
+               bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
 
                ret = uniphier_i2c_master_xfer_one(adap, msg, stop);
                if (ret)
index 9a71e50d21f1fbae53ebdb2b1831e7f6067a67b9..0c51c0ffdda9d99d03f28c5503ef1cffaa5316c2 100644 (file)
@@ -532,6 +532,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
 {
        u8 rx_watermark;
        struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
+       unsigned long flags;
 
        /* Clear and enable Rx full interrupt. */
        xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK);
@@ -547,6 +548,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
                rx_watermark = IIC_RX_FIFO_DEPTH;
        xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);
 
+       local_irq_save(flags);
        if (!(msg->flags & I2C_M_NOSTART))
                /* write the address */
                xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
@@ -556,6 +558,8 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
 
        xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
                msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));
+       local_irq_restore(flags);
+
        if (i2c->nmsgs == 1)
                /* very last, enable bus not busy as well */
                xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);
index f15737763608b0d6c0473fc11f29991f7bfc692a..9ee9a15e71347629d024709a17b9a6bb04fca8aa 100644 (file)
@@ -2293,21 +2293,22 @@ u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold)
 EXPORT_SYMBOL_GPL(i2c_get_dma_safe_msg_buf);
 
 /**
- * i2c_release_dma_safe_msg_buf - release DMA safe buffer and sync with i2c_msg
- * @msg: the message to be synced with
+ * i2c_put_dma_safe_msg_buf - release DMA safe buffer and sync with i2c_msg
  * @buf: the buffer obtained from i2c_get_dma_safe_msg_buf(). May be NULL.
+ * @msg: the message which the buffer corresponds to
+ * @xferred: bool saying if the message was transferred
  */
-void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf)
+void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred)
 {
        if (!buf || buf == msg->buf)
                return;
 
-       if (msg->flags & I2C_M_RD)
+       if (xferred && msg->flags & I2C_M_RD)
                memcpy(msg->buf, buf, msg->len);
 
        kfree(buf);
 }
-EXPORT_SYMBOL_GPL(i2c_release_dma_safe_msg_buf);
+EXPORT_SYMBOL_GPL(i2c_put_dma_safe_msg_buf);
 
 MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>");
 MODULE_DESCRIPTION("I2C-Bus main module");
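
The rename above also adds an 'xferred' argument so the copy-back to msg->buf happens only when the transfer actually completed; the sh_mobile hunk earlier in this series shows a caller passing stop_after_dma as that flag. A hedged sketch of how a bus driver would pair the two helpers; do_dma_transfer() and do_pio_transfer() are made-up placeholders, not real APIs:

/* Hypothetical driver path, for illustration only. */
#include <linux/i2c.h>
#include <linux/errno.h>

/* Made-up helpers, assumed to exist only for this sketch. */
extern int do_dma_transfer(struct i2c_adapter *adap, u8 *buf, u16 len);
extern int do_pio_transfer(struct i2c_adapter *adap, struct i2c_msg *msg);

static int example_xfer_one(struct i2c_adapter *adap, struct i2c_msg *msg)
{
        bool xferred;
        u8 *buf;

        /* Bounce into a DMA-safe buffer; NULL means too short or no memory. */
        buf = i2c_get_dma_safe_msg_buf(msg, 8);
        if (!buf)
                return do_pio_transfer(adap, msg);

        xferred = do_dma_transfer(adap, buf, msg->len) == 0;

        /* Copies back to msg->buf (reads only) when xferred is true, then frees. */
        i2c_put_dma_safe_msg_buf(buf, msg, xferred);

        return xferred ? 0 : -EIO;
}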
index 7589f2ad1dae7de9fe04b5448b233fdda786db70..631360b14ca71c398e67a9425dff363de1eb255a 100644 (file)
@@ -187,12 +187,15 @@ static int st_lsm6dsx_set_fifo_odr(struct st_lsm6dsx_sensor *sensor,
 
 int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
 {
-       u16 fifo_watermark = ~0, cur_watermark, sip = 0, fifo_th_mask;
+       u16 fifo_watermark = ~0, cur_watermark, fifo_th_mask;
        struct st_lsm6dsx_hw *hw = sensor->hw;
        struct st_lsm6dsx_sensor *cur_sensor;
        int i, err, data;
        __le16 wdata;
 
+       if (!hw->sip)
+               return 0;
+
        for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
                cur_sensor = iio_priv(hw->iio_devs[i]);
 
@@ -203,14 +206,10 @@ int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
                                                       : cur_sensor->watermark;
 
                fifo_watermark = min_t(u16, fifo_watermark, cur_watermark);
-               sip += cur_sensor->sip;
        }
 
-       if (!sip)
-               return 0;
-
-       fifo_watermark = max_t(u16, fifo_watermark, sip);
-       fifo_watermark = (fifo_watermark / sip) * sip;
+       fifo_watermark = max_t(u16, fifo_watermark, hw->sip);
+       fifo_watermark = (fifo_watermark / hw->sip) * hw->sip;
        fifo_watermark = fifo_watermark * hw->settings->fifo_ops.th_wl;
 
        err = regmap_read(hw->regmap, hw->settings->fifo_ops.fifo_th.addr + 1,
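
The hunk above drops the per-sensor 'sip' accumulation in favour of the hw->sip total while keeping the same arithmetic: clamp the requested watermark to at least one full sample pattern, then round it down to a whole number of patterns. A standalone sketch of that arithmetic with made-up numbers (plain C, not driver code):

/* Standalone sketch of the watermark rounding. */
#include <stdio.h>

int main(void)
{
        unsigned int sip = 3;           /* samples per pattern (hw->sip) */
        unsigned int watermark = 10;    /* requested threshold */

        if (watermark < sip)            /* max_t() step */
                watermark = sip;
        watermark = (watermark / sip) * sip;    /* round down: 10 -> 9 */

        printf("%u\n", watermark);
        return 0;
}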
index 54e383231d1edef07abf150793656fa0cb9e92e5..c31b9633f32d9b8bcac8d05d3822230d7725f5dc 100644 (file)
@@ -258,7 +258,6 @@ static int maxim_thermocouple_remove(struct spi_device *spi)
 static const struct spi_device_id maxim_thermocouple_id[] = {
        {"max6675", MAX6675},
        {"max31855", MAX31855},
-       {"max31856", MAX31855},
        {},
 };
 MODULE_DEVICE_TABLE(spi, maxim_thermocouple_id);
index f72677291b692511ce7abcf1e9e8da9bf8f06326..a36c94930c31de8a03652cbf07844c10bcc09877 100644 (file)
@@ -724,6 +724,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
        dgid = (union ib_gid *) &addr->sib_addr;
        pkey = ntohs(addr->sib_pkey);
 
+       mutex_lock(&lock);
        list_for_each_entry(cur_dev, &dev_list, list) {
                for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
                        if (!rdma_cap_af_ib(cur_dev->device, p))
@@ -750,18 +751,19 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
                                        cma_dev = cur_dev;
                                        sgid = gid;
                                        id_priv->id.port_num = p;
+                                       goto found;
                                }
                        }
                }
        }
-
-       if (!cma_dev)
-               return -ENODEV;
+       mutex_unlock(&lock);
+       return -ENODEV;
 
 found:
        cma_attach_to_dev(id_priv, cma_dev);
-       addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
-       memcpy(&addr->sib_addr, &sgid, sizeof sgid);
+       mutex_unlock(&lock);
+       addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
+       memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
        cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
        return 0;
 }
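
The cma_resolve_ib_dev() hunk above now holds 'lock' across the whole device-list walk and releases it on both the not-found and the found paths. A standalone pthread analogue of that shape (illustrative only; the device table and values are made up):

/* Standalone sketch: lock held across the search, released on both exits. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static const int devices[] = { 3, 7, 42 };

static int find_device(int wanted, int *out)
{
        size_t i;

        pthread_mutex_lock(&lock);
        for (i = 0; i < sizeof(devices) / sizeof(devices[0]); i++) {
                if (devices[i] == wanted)
                        goto found;
        }
        pthread_mutex_unlock(&lock);
        return -1;                      /* -ENODEV in the kernel code */

found:
        *out = devices[i];              /* still under the lock */
        pthread_mutex_unlock(&lock);
        return 0;
}

int main(void)
{
        int dev = 0;

        if (find_device(42, &dev) == 0)
                printf("found %d\n", dev);
        return 0;
}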
index 6eb64c6f08028b6c4a92407435bb2339128fe48b..c4118bcd5103565e3b20b6a970e67322061d195f 100644 (file)
@@ -882,6 +882,8 @@ static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
                WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE));
                if (!uverbs_destroy_uobject(obj, reason))
                        ret = 0;
+               else
+                       atomic_set(&obj->usecnt, 0);
        }
        return ret;
 }
index ec8fb289621fb7590dd3e3f4000967fa2b6c9aae..5f437d1570fb02d516b1ee60f0a4cee42053da50 100644 (file)
@@ -124,6 +124,8 @@ static DEFINE_MUTEX(mut);
 static DEFINE_IDR(ctx_idr);
 static DEFINE_IDR(multicast_idr);
 
+static const struct file_operations ucma_fops;
+
 static inline struct ucma_context *_ucma_find_context(int id,
                                                      struct ucma_file *file)
 {
@@ -1581,6 +1583,10 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
        f = fdget(cmd.fd);
        if (!f.file)
                return -ENOENT;
+       if (f.file->f_op != &ucma_fops) {
+               ret = -EINVAL;
+               goto file_put;
+       }
 
        /* Validate current fd and prevent destruction of id. */
        ctx = ucma_get_ctx(f.file->private_data, cmd.id);
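
The f_op comparison added above makes ucma_migrate_id() reject file descriptors that do not belong to this driver before it dereferences their private_data. A standalone sketch of the same "check the handle type before trusting its payload" pattern (plain C; the tag field stands in for file->f_op and the values are illustrative):

/* Standalone sketch of a type check guarding private data. */
#include <stdio.h>

enum handle_type { HANDLE_UCMA, HANDLE_OTHER };

struct handle {
        enum handle_type type;
        void *private_data;
};

static int migrate_id(const struct handle *h)
{
        if (h->type != HANDLE_UCMA)
                return -22;     /* -EINVAL: not one of our files */
        /* only now is it safe to interpret h->private_data */
        return 0;
}

int main(void)
{
        struct handle good = { HANDLE_UCMA, NULL };
        struct handle bad = { HANDLE_OTHER, NULL };

        printf("%d %d\n", migrate_id(&good), migrate_id(&bad));
        return 0;
}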
index 823beca448e10937d290b9ebaf0bf3aed444f05b..6d974e2363df249c4291a3331d9d16f0a14232a6 100644 (file)
@@ -1050,7 +1050,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
        uverbs_dev->num_comp_vectors = device->num_comp_vectors;
 
        if (ib_uverbs_create_uapi(device, uverbs_dev))
-               goto err;
+               goto err_uapi;
 
        cdev_init(&uverbs_dev->cdev, NULL);
        uverbs_dev->cdev.owner = THIS_MODULE;
@@ -1077,11 +1077,10 @@ static void ib_uverbs_add_one(struct ib_device *device)
 
 err_class:
        device_destroy(uverbs_class, uverbs_dev->cdev.dev);
-
 err_cdev:
        cdev_del(&uverbs_dev->cdev);
+err_uapi:
        clear_bit(devnum, dev_map);
-
 err:
        if (atomic_dec_and_test(&uverbs_dev->refcount))
                ib_uverbs_comp_dev(uverbs_dev);
index bbfb86eb2d24204565c78187faa6eb23984f24bf..bc2b9e03843903750aba02c0994430cd024cf4e3 100644 (file)
@@ -833,6 +833,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
                                "Failed to destroy Shadow QP");
                        return rc;
                }
+               bnxt_qplib_free_qp_res(&rdev->qplib_res,
+                                      &rdev->qp1_sqp->qplib_qp);
                mutex_lock(&rdev->qp_lock);
                list_del(&rdev->qp1_sqp->list);
                atomic_dec(&rdev->qp_count);
index e426b990c1dd5bf3fa08966f41ccd08e3f3c4e44..6ad0d46ab879a6a1bf6d231e22b6e766b796efd2 100644 (file)
@@ -196,7 +196,7 @@ static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
                                       struct bnxt_qplib_qp *qp)
 {
        struct bnxt_qplib_q *rq = &qp->rq;
-       struct bnxt_qplib_q *sq = &qp->rq;
+       struct bnxt_qplib_q *sq = &qp->sq;
        int rc = 0;
 
        if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
index b3203afa3b1de705c74da5aaec9ca55a3172b0d2..347fe18b1a41c0990c753aa29f2efaaea0cd119b 100644 (file)
@@ -1685,6 +1685,12 @@ static void flush_qp(struct c4iw_qp *qhp)
        schp = to_c4iw_cq(qhp->ibqp.send_cq);
 
        if (qhp->ibqp.uobject) {
+
+               /* for user qps, qhp->wq.flushed is protected by qhp->mutex */
+               if (qhp->wq.flushed)
+                       return;
+
+               qhp->wq.flushed = 1;
                t4_set_wq_in_error(&qhp->wq, 0);
                t4_set_cq_in_error(&rchp->cq);
                spin_lock_irqsave(&rchp->comp_handler_lock, flag);
index eec83757d55f94331936518804672a72b8e29898..6c967dde58e702c228835a84c3b1ea19abc1ea64 100644 (file)
@@ -893,14 +893,11 @@ static int trigger_sbr(struct hfi1_devdata *dd)
                }
 
        /*
-        * A secondary bus reset (SBR) issues a hot reset to our device.
-        * The following routine does a 1s wait after the reset is dropped
-        * per PCI Trhfa (recovery time).  PCIe 3.0 section 6.6.1 -
-        * Conventional Reset, paragraph 3, line 35 also says that a 1s
-        * delay after a reset is required.  Per spec requirements,
-        * the link is either working or not after that point.
+        * This is an end-around to do an SBR during probe time. A new API needs
+        * to be implemented to have a cleaner interface, but this fixes the
+        * current brokenness.
         */
-       return pci_reset_bus(dev);
+       return pci_bridge_secondary_bus_reset(dev->bus->self);
 }
 
 /*
index ca0f1ee26091b38200b2447ccfe92324a3c0bd32..0bbeaaae47e07d2e99529925bb6236ab98be377b 100644 (file)
@@ -517,9 +517,11 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
        props->page_size_cap       = dev->dev->caps.page_size_cap;
        props->max_qp              = dev->dev->quotas.qp;
        props->max_qp_wr           = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
-       props->max_send_sge        = dev->dev->caps.max_sq_sg;
-       props->max_recv_sge        = dev->dev->caps.max_rq_sg;
-       props->max_sge_rd          = MLX4_MAX_SGE_RD;
+       props->max_send_sge =
+               min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
+       props->max_recv_sge =
+               min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
+       props->max_sge_rd = MLX4_MAX_SGE_RD;
        props->max_cq              = dev->dev->quotas.cq;
        props->max_cqe             = dev->dev->caps.max_cqes;
        props->max_mr              = dev->dev->quotas.mpt;
index ea01b8dd2be606206193408ca7bf9c42e4ea4e39..3d5424f335cb06e1dcaad16a4f4c3c7910d3295e 100644 (file)
@@ -1027,12 +1027,14 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id,
 
        skb_queue_head_init(&skqueue);
 
+       netif_tx_lock_bh(p->dev);
        spin_lock_irq(&priv->lock);
        set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
        if (p->neigh)
                while ((skb = __skb_dequeue(&p->neigh->queue)))
                        __skb_queue_tail(&skqueue, skb);
        spin_unlock_irq(&priv->lock);
+       netif_tx_unlock_bh(p->dev);
 
        while ((skb = __skb_dequeue(&skqueue))) {
                skb->dev = p->dev;
index d393160e3e384a454e01c70c5e41e49508d13745..258115b10fa9e448129d84a0663ab99d51c64ee5 100644 (file)
@@ -521,10 +521,11 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
        u32 int_status;
        dma_addr_t iova;
        irqreturn_t ret = IRQ_NONE;
-       int i;
+       int i, err;
 
-       if (WARN_ON(!pm_runtime_get_if_in_use(iommu->dev)))
-               return 0;
+       err = pm_runtime_get_if_in_use(iommu->dev);
+       if (WARN_ON_ONCE(err <= 0))
+               return ret;
 
        if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
                goto out;
@@ -620,11 +621,15 @@ static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
        spin_lock_irqsave(&rk_domain->iommus_lock, flags);
        list_for_each(pos, &rk_domain->iommus) {
                struct rk_iommu *iommu;
+               int ret;
 
                iommu = list_entry(pos, struct rk_iommu, node);
 
                /* Only zap TLBs of IOMMUs that are powered on. */
-               if (pm_runtime_get_if_in_use(iommu->dev)) {
+               ret = pm_runtime_get_if_in_use(iommu->dev);
+               if (WARN_ON_ONCE(ret < 0))
+                       continue;
+               if (ret) {
                        WARN_ON(clk_bulk_enable(iommu->num_clocks,
                                                iommu->clocks));
                        rk_iommu_zap_lines(iommu, iova, size);
@@ -891,6 +896,7 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
        struct rk_iommu *iommu;
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        unsigned long flags;
+       int ret;
 
        /* Allow 'virtual devices' (eg drm) to detach from domain */
        iommu = rk_iommu_from_dev(dev);
@@ -909,7 +915,9 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
        list_del_init(&iommu->node);
        spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
 
-       if (pm_runtime_get_if_in_use(iommu->dev)) {
+       ret = pm_runtime_get_if_in_use(iommu->dev);
+       WARN_ON_ONCE(ret < 0);
+       if (ret > 0) {
                rk_iommu_disable(iommu);
                pm_runtime_put(iommu->dev);
        }
@@ -946,7 +954,8 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
        list_add_tail(&iommu->node, &rk_domain->iommus);
        spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
 
-       if (!pm_runtime_get_if_in_use(iommu->dev))
+       ret = pm_runtime_get_if_in_use(iommu->dev);
+       if (!ret || WARN_ON_ONCE(ret < 0))
                return 0;
 
        ret = rk_iommu_enable(iommu);
@@ -1151,17 +1160,6 @@ static int rk_iommu_probe(struct platform_device *pdev)
        if (iommu->num_mmu == 0)
                return PTR_ERR(iommu->bases[0]);
 
-       i = 0;
-       while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
-               if (irq < 0)
-                       return irq;
-
-               err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
-                                      IRQF_SHARED, dev_name(dev), iommu);
-               if (err)
-                       return err;
-       }
-
        iommu->reset_disabled = device_property_read_bool(dev,
                                        "rockchip,disable-mmu-reset");
 
@@ -1218,6 +1216,19 @@ static int rk_iommu_probe(struct platform_device *pdev)
 
        pm_runtime_enable(dev);
 
+       i = 0;
+       while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
+               if (irq < 0)
+                       return irq;
+
+               err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
+                                      IRQF_SHARED, dev_name(dev), iommu);
+               if (err) {
+                       pm_runtime_disable(dev);
+                       goto err_remove_sysfs;
+               }
+       }
+
        return 0;
 err_remove_sysfs:
        iommu_device_sysfs_remove(&iommu->iommu);
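
The rk_iommu changes above all follow from pm_runtime_get_if_in_use() having three possible outcomes: negative on error, 0 when the device is not powered (nothing to do), and positive when a usage reference was taken and must later be balanced with a put. A standalone sketch of handling such a tri-state return (plain C; get_if_in_use() and put() are stand-ins, not the runtime-PM API):

/* Standalone sketch of tri-state return handling. */
#include <stdio.h>

static int powered = 1;

static int get_if_in_use(void)
{
        return powered ? 1 : 0;         /* <0 would mean an error */
}

static void put(void)
{
}

int main(void)
{
        int ret = get_if_in_use();

        if (ret < 0)
                return 1;               /* unexpected: warn and bail */
        if (ret == 0) {
                printf("not powered, nothing to do\n");
                return 0;
        }

        printf("powered, doing the work\n");
        put();                          /* balance the reference */
        return 0;
}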
index faf734ff4cf3bc69c2a27d8b0ef1555530584fd1..0f6e30e9009da533c6a4bc6c3cf5e79fe2ae748c 100644 (file)
@@ -217,6 +217,7 @@ static int bcm7038_l1_set_affinity(struct irq_data *d,
        return 0;
 }
 
+#ifdef CONFIG_SMP
 static void bcm7038_l1_cpu_offline(struct irq_data *d)
 {
        struct cpumask *mask = irq_data_get_affinity_mask(d);
@@ -241,6 +242,7 @@ static void bcm7038_l1_cpu_offline(struct irq_data *d)
        }
        irq_set_affinity_locked(d, &new_affinity, false);
 }
+#endif
 
 static int __init bcm7038_l1_init_one(struct device_node *dn,
                                      unsigned int idx,
@@ -293,7 +295,9 @@ static struct irq_chip bcm7038_l1_irq_chip = {
        .irq_mask               = bcm7038_l1_mask,
        .irq_unmask             = bcm7038_l1_unmask,
        .irq_set_affinity       = bcm7038_l1_set_affinity,
+#ifdef CONFIG_SMP
        .irq_cpu_offline        = bcm7038_l1_cpu_offline,
+#endif
 };
 
 static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
index 316a57530f6d108ace5345f0e4077e8f20771924..c2df341ff6fafd83df6463669c9ad6a1ceac56c9 100644 (file)
@@ -1439,6 +1439,7 @@ static struct irq_chip its_irq_chip = {
  * The consequence of the above is that allocation cost is low, but
  * freeing is expensive. We assume that freeing rarely occurs.
  */
+#define ITS_MAX_LPI_NRBITS     16 /* 64K LPIs */
 
 static DEFINE_MUTEX(lpi_range_lock);
 static LIST_HEAD(lpi_range_list);
@@ -1625,7 +1626,8 @@ static int __init its_alloc_lpi_tables(void)
 {
        phys_addr_t paddr;
 
-       lpi_id_bits = GICD_TYPER_ID_BITS(gic_rdists->gicd_typer);
+       lpi_id_bits = min_t(u32, GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
+                               ITS_MAX_LPI_NRBITS);
        gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
        if (!gic_rdists->prop_page) {
                pr_err("Failed to allocate PROPBASE\n");
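
ITS_MAX_LPI_NRBITS caps the number of LPI ID bits the driver will back with memory, so the LPI tables are sized for at most 64K LPIs even when GICD_TYPER advertises more. A standalone sketch of the min() step with an illustrative hardware value (plain C, not driver code):

/* Standalone sketch of capping the advertised ID bits. */
#include <stdio.h>

int main(void)
{
        unsigned int hw_id_bits = 24;   /* example GICD_TYPER value */
        unsigned int max_bits = 16;     /* ITS_MAX_LPI_NRBITS */
        unsigned int id_bits = hw_id_bits < max_bits ? hw_id_bits : max_bits;

        printf("LPI IDs covered: %u\n", 1u << id_bits); /* 65536 */
        return 0;
}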
index e214181b77b7720568c072c0b0cb07bc81766565..d5912f1ec8848d19ff93161102d6690eb7bb43d2 100644 (file)
@@ -861,7 +861,9 @@ static struct irq_chip gic_chip = {
        .irq_set_affinity       = gic_set_affinity,
        .irq_get_irqchip_state  = gic_irq_get_irqchip_state,
        .irq_set_irqchip_state  = gic_irq_set_irqchip_state,
-       .flags                  = IRQCHIP_SET_TYPE_MASKED,
+       .flags                  = IRQCHIP_SET_TYPE_MASKED |
+                                 IRQCHIP_SKIP_SET_WAKE |
+                                 IRQCHIP_MASK_ON_SUSPEND,
 };
 
 static struct irq_chip gic_eoimode1_chip = {
@@ -874,7 +876,9 @@ static struct irq_chip gic_eoimode1_chip = {
        .irq_get_irqchip_state  = gic_irq_get_irqchip_state,
        .irq_set_irqchip_state  = gic_irq_set_irqchip_state,
        .irq_set_vcpu_affinity  = gic_irq_set_vcpu_affinity,
-       .flags                  = IRQCHIP_SET_TYPE_MASKED,
+       .flags                  = IRQCHIP_SET_TYPE_MASKED |
+                                 IRQCHIP_SKIP_SET_WAKE |
+                                 IRQCHIP_MASK_ON_SUSPEND,
 };
 
 #define GIC_ID_NR      (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
index f6fd57ebe6e6468f3ce98ec90cb1b7de5b9e6bfc..c19766fe8a1ae9c8a1573d304119fe36a5540f90 100644 (file)
@@ -250,7 +250,7 @@ static int s3c_irqext0_type(struct irq_data *data, unsigned int type)
        void __iomem *gpcon_reg;
        unsigned long gpcon_offset, extint_offset;
 
-       if ((data->hwirq >= 0) && (data->hwirq <= 3)) {
+       if (data->hwirq <= 3) {
                gpcon_reg = S3C2410_GPFCON;
                extint_reg = S3C24XX_EXTINT0;
                gpcon_offset = (data->hwirq) * 2;
index 3df527fcf4e154b44bed2cc3c65787fef503db6c..0a2088e12d96fdf5525b48613ac818e5061064c9 100644 (file)
@@ -603,17 +603,24 @@ stm32_exti_host_data *stm32_exti_host_init(const struct stm32_exti_drv_data *dd,
                                        sizeof(struct stm32_exti_chip_data),
                                        GFP_KERNEL);
        if (!host_data->chips_data)
-               return NULL;
+               goto free_host_data;
 
        host_data->base = of_iomap(node, 0);
        if (!host_data->base) {
                pr_err("%pOF: Unable to map registers\n", node);
-               return NULL;
+               goto free_chips_data;
        }
 
        stm32_host_data = host_data;
 
        return host_data;
+
+free_chips_data:
+       kfree(host_data->chips_data);
+free_host_data:
+       kfree(host_data);
+
+       return NULL;
 }
 
 static struct
@@ -665,10 +672,8 @@ static int __init stm32_exti_init(const struct stm32_exti_drv_data *drv_data,
        struct irq_domain *domain;
 
        host_data = stm32_exti_host_init(drv_data, node);
-       if (!host_data) {
-               ret = -ENOMEM;
-               goto out_free_mem;
-       }
+       if (!host_data)
+               return -ENOMEM;
 
        domain = irq_domain_add_linear(node, drv_data->bank_nr * IRQS_PER_BANK,
                                       &irq_exti_domain_ops, NULL);
@@ -725,7 +730,6 @@ out_free_domain:
        irq_domain_remove(domain);
 out_unmap:
        iounmap(host_data->base);
-out_free_mem:
        kfree(host_data->chips_data);
        kfree(host_data);
        return ret;
@@ -752,10 +756,8 @@ __init stm32_exti_hierarchy_init(const struct stm32_exti_drv_data *drv_data,
        }
 
        host_data = stm32_exti_host_init(drv_data, node);
-       if (!host_data) {
-               ret = -ENOMEM;
-               goto out_free_mem;
-       }
+       if (!host_data)
+               return -ENOMEM;
 
        for (i = 0; i < drv_data->bank_nr; i++)
                stm32_exti_chip_init(host_data, i, node);
@@ -777,7 +779,6 @@ __init stm32_exti_hierarchy_init(const struct stm32_exti_drv_data *drv_data,
 
 out_unmap:
        iounmap(host_data->base);
-out_free_mem:
        kfree(host_data->chips_data);
        kfree(host_data);
        return ret;
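
The stm32_exti_host_init() hunks above switch to the usual goto-based unwinding: each failure point jumps to a label that frees everything allocated before it, in reverse order. A minimal standalone sketch of that shape (plain C; struct host and host_init() are made up for illustration):

/* Standalone sketch of goto-based error unwinding. */
#include <stdio.h>
#include <stdlib.h>

struct host {
        int *chips;
};

static struct host *host_init(size_t n)
{
        struct host *h = malloc(sizeof(*h));

        if (!h)
                return NULL;

        h->chips = calloc(n, sizeof(*h->chips));
        if (!h->chips)
                goto free_host;

        return h;

free_host:
        free(h);
        return NULL;
}

int main(void)
{
        struct host *h = host_init(8);

        printf("%s\n", h ? "ok" : "alloc failed");
        if (h) {
                free(h->chips);
                free(h);
        }
        return 0;
}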
index 0c085303a58302cc982f69e52b0a5ea8e5c60c25..580e2d72b9ba42e9e5887296a73d0d5279a496a0 100644 (file)
@@ -205,8 +205,7 @@ static int __init tangox_irq_init(void __iomem *base, struct resource *baseres,
 
        tangox_irq_domain_init(dom);
 
-       irq_set_chained_handler(irq, tangox_irq_handler);
-       irq_set_handler_data(irq, dom);
+       irq_set_chained_handler_and_data(irq, tangox_irq_handler, dom);
 
        return 0;
 }
index f266c81f396fe7a82032827c92c95896f168deba..0481223b1deb826af43fa8f0da0b81e5b6654009 100644 (file)
@@ -332,7 +332,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
        int err;
 
        desc->tfm = essiv->hash_tfm;
-       desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       desc->flags = 0;
 
        err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt);
        shash_desc_zero(desc);
@@ -606,7 +606,7 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
        int i, r;
 
        desc->tfm = lmk->hash_tfm;
-       desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       desc->flags = 0;
 
        r = crypto_shash_init(desc);
        if (r)
@@ -768,7 +768,7 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
 
        /* calculate crc32 for every 32bit part and xor it */
        desc->tfm = tcw->crc32_tfm;
-       desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       desc->flags = 0;
        for (i = 0; i < 4; i++) {
                r = crypto_shash_init(desc);
                if (r)
@@ -1251,7 +1251,7 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc,
         * requests if driver request queue is full.
         */
        skcipher_request_set_callback(ctx->r.req,
-           CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+           CRYPTO_TFM_REQ_MAY_BACKLOG,
            kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
 }
 
@@ -1268,7 +1268,7 @@ static void crypt_alloc_req_aead(struct crypt_config *cc,
         * requests if driver request queue is full.
         */
        aead_request_set_callback(ctx->r.req_aead,
-           CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+           CRYPTO_TFM_REQ_MAY_BACKLOG,
            kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
 }
 
index 37887859946631cb40c42256f0ba3d4d5a471132..89ccb64342de7a4fa8e03d528f66ab9b726e0539 100644 (file)
@@ -532,7 +532,7 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
        unsigned j, size;
 
        desc->tfm = ic->journal_mac;
-       desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       desc->flags = 0;
 
        r = crypto_shash_init(desc);
        if (unlikely(r)) {
@@ -676,7 +676,7 @@ static void complete_journal_encrypt(struct crypto_async_request *req, int err)
 static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
 {
        int r;
-       skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+       skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      complete_journal_encrypt, comp);
        if (likely(encrypt))
                r = crypto_skcipher_encrypt(req);
index cae689de75fd5a719c406f1093db076c201fcdef..5ba067fa0c729bc89b7789bd35648b1189003b27 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2010-2011 Neil Brown
- * Copyright (C) 2010-2017 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2010-2018 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the GPL.
  */
@@ -29,9 +29,6 @@
  */
 #define        MIN_RAID456_JOURNAL_SPACE (4*2048)
 
-/* Global list of all raid sets */
-static LIST_HEAD(raid_sets);
-
 static bool devices_handle_discard_safely = false;
 
 /*
@@ -227,7 +224,6 @@ struct rs_layout {
 
 struct raid_set {
        struct dm_target *ti;
-       struct list_head list;
 
        uint32_t stripe_cache_entries;
        unsigned long ctr_flags;
@@ -273,19 +269,6 @@ static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
        mddev->new_chunk_sectors = l->new_chunk_sectors;
 }
 
-/* Find any raid_set in active slot for @rs on global list */
-static struct raid_set *rs_find_active(struct raid_set *rs)
-{
-       struct raid_set *r;
-       struct mapped_device *md = dm_table_get_md(rs->ti->table);
-
-       list_for_each_entry(r, &raid_sets, list)
-               if (r != rs && dm_table_get_md(r->ti->table) == md)
-                       return r;
-
-       return NULL;
-}
-
 /* raid10 algorithms (i.e. formats) */
 #define        ALGORITHM_RAID10_DEFAULT        0
 #define        ALGORITHM_RAID10_NEAR           1
@@ -764,7 +747,6 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
 
        mddev_init(&rs->md);
 
-       INIT_LIST_HEAD(&rs->list);
        rs->raid_disks = raid_devs;
        rs->delta_disks = 0;
 
@@ -782,9 +764,6 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
        for (i = 0; i < raid_devs; i++)
                md_rdev_init(&rs->dev[i].rdev);
 
-       /* Add @rs to global list. */
-       list_add(&rs->list, &raid_sets);
-
        /*
         * Remaining items to be initialized by further RAID params:
         *  rs->md.persistent
@@ -797,7 +776,7 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
        return rs;
 }
 
-/* Free all @rs allocations and remove it from global list. */
+/* Free all @rs allocations */
 static void raid_set_free(struct raid_set *rs)
 {
        int i;
@@ -815,8 +794,6 @@ static void raid_set_free(struct raid_set *rs)
                        dm_put_device(rs->ti, rs->dev[i].data_dev);
        }
 
-       list_del(&rs->list);
-
        kfree(rs);
 }
 
@@ -2649,7 +2626,7 @@ static int rs_adjust_data_offsets(struct raid_set *rs)
                return 0;
        }
 
-       /* HM FIXME: get InSync raid_dev? */
+       /* HM FIXME: get In_Sync raid_dev? */
        rdev = &rs->dev[0].rdev;
 
        if (rs->delta_disks < 0) {
@@ -3149,6 +3126,11 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
                rs_set_new(rs);
        } else if (rs_is_recovering(rs)) {
+               /* Rebuild particular devices */
+               if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
+                       set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
+                       rs_setup_recovery(rs, MaxSector);
+               }
                /* A recovering raid set may be resized */
                ; /* skip setup rs */
        } else if (rs_is_reshaping(rs)) {
@@ -3242,6 +3224,8 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        /* Start raid set read-only and assumed clean to change in raid_resume() */
        rs->md.ro = 1;
        rs->md.in_sync = 1;
+
+       /* Keep array frozen */
        set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
 
        /* Has to be held on running the array */
@@ -3265,7 +3249,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        rs->callbacks.congested_fn = raid_is_congested;
        dm_table_add_target_callbacks(ti->table, &rs->callbacks);
 
-       /* If raid4/5/6 journal mode explictely requested (only possible with journal dev) -> set it */
+       /* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */
        if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
                r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
                if (r) {
@@ -3350,32 +3334,53 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
        return DM_MAPIO_SUBMITTED;
 }
 
-/* Return string describing the current sync action of @mddev */
-static const char *decipher_sync_action(struct mddev *mddev, unsigned long recovery)
+/* Return sync state string for @state */
+enum sync_state { st_frozen, st_reshape, st_resync, st_check, st_repair, st_recover, st_idle };
+static const char *sync_str(enum sync_state state)
+{
+       /* Has to be in above sync_state order! */
+       static const char *sync_strs[] = {
+               "frozen",
+               "reshape",
+               "resync",
+               "check",
+               "repair",
+               "recover",
+               "idle"
+       };
+
+       return __within_range(state, 0, ARRAY_SIZE(sync_strs) - 1) ? sync_strs[state] : "undef";
+};
+
+/* Return enum sync_state for @mddev derived from @recovery flags */
+static const enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
 {
        if (test_bit(MD_RECOVERY_FROZEN, &recovery))
-               return "frozen";
+               return st_frozen;
 
-       /* The MD sync thread can be done with io but still be running */
+       /* The MD sync thread can be done with io or be interrupted but still be running */
        if (!test_bit(MD_RECOVERY_DONE, &recovery) &&
            (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
             (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) {
                if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
-                       return "reshape";
+                       return st_reshape;
 
                if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
                        if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
-                               return "resync";
-                       else if (test_bit(MD_RECOVERY_CHECK, &recovery))
-                               return "check";
-                       return "repair";
+                               return st_resync;
+                       if (test_bit(MD_RECOVERY_CHECK, &recovery))
+                               return st_check;
+                       return st_repair;
                }
 
                if (test_bit(MD_RECOVERY_RECOVER, &recovery))
-                       return "recover";
+                       return st_recover;
+
+               if (mddev->reshape_position != MaxSector)
+                       return st_reshape;
        }
 
-       return "idle";
+       return st_idle;
 }
 
 /*
@@ -3409,6 +3414,7 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
                                sector_t resync_max_sectors)
 {
        sector_t r;
+       enum sync_state state;
        struct mddev *mddev = &rs->md;
 
        clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
@@ -3419,20 +3425,14 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
                set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
 
        } else {
-               if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags) &&
-                   !test_bit(MD_RECOVERY_INTR, &recovery) &&
-                   (test_bit(MD_RECOVERY_NEEDED, &recovery) ||
-                    test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
-                    test_bit(MD_RECOVERY_RUNNING, &recovery)))
-                       r = mddev->curr_resync_completed;
-               else
+               state = decipher_sync_action(mddev, recovery);
+
+               if (state == st_idle && !test_bit(MD_RECOVERY_INTR, &recovery))
                        r = mddev->recovery_cp;
+               else
+                       r = mddev->curr_resync_completed;
 
-               if (r >= resync_max_sectors &&
-                   (!test_bit(MD_RECOVERY_REQUESTED, &recovery) ||
-                    (!test_bit(MD_RECOVERY_FROZEN, &recovery) &&
-                     !test_bit(MD_RECOVERY_NEEDED, &recovery) &&
-                     !test_bit(MD_RECOVERY_RUNNING, &recovery)))) {
+               if (state == st_idle && r >= resync_max_sectors) {
                        /*
                         * Sync complete.
                         */
@@ -3440,24 +3440,20 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
                        if (test_bit(MD_RECOVERY_RECOVER, &recovery))
                                set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
 
-               } else if (test_bit(MD_RECOVERY_RECOVER, &recovery)) {
+               } else if (state == st_recover)
                        /*
                         * In case we are recovering, the array is not in sync
                         * and health chars should show the recovering legs.
                         */
                        ;
-
-               } else if (test_bit(MD_RECOVERY_SYNC, &recovery) &&
-                          !test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
+               else if (state == st_resync)
                        /*
                         * If "resync" is occurring, the raid set
                         * is or may be out of sync hence the health
                         * characters shall be 'a'.
                         */
                        set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
-
-               } else if (test_bit(MD_RECOVERY_RESHAPE, &recovery) &&
-                          !test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
+               else if (state == st_reshape)
                        /*
                         * If "reshape" is occurring, the raid set
                         * is or may be out of sync hence the health
@@ -3465,7 +3461,7 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
                         */
                        set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
 
-               } else if (test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
+               else if (state == st_check || state == st_repair)
                        /*
                         * If "check" or "repair" is occurring, the raid set has
                         * undergone an initial sync and the health characters
@@ -3473,12 +3469,12 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
                         */
                        set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
 
-               else {
+               else {
                        struct md_rdev *rdev;
 
                        /*
                         * We are idle and recovery is needed, prevent 'A' chars race
-                        * caused by components still set to in-sync by constrcuctor.
+                        * caused by components still set to in-sync by constructor.
                         */
                        if (test_bit(MD_RECOVERY_NEEDED, &recovery))
                                set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
@@ -3542,7 +3538,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
                progress = rs_get_progress(rs, recovery, resync_max_sectors);
                resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
                                    atomic64_read(&mddev->resync_mismatches) : 0;
-               sync_action = decipher_sync_action(&rs->md, recovery);
+               sync_action = sync_str(decipher_sync_action(&rs->md, recovery));
 
                /* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */
                for (i = 0; i < rs->raid_disks; i++)
@@ -3892,14 +3888,13 @@ static int rs_start_reshape(struct raid_set *rs)
        struct mddev *mddev = &rs->md;
        struct md_personality *pers = mddev->pers;
 
+       /* Don't allow the sync thread to work until the table gets reloaded. */
+       set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
+
        r = rs_setup_reshape(rs);
        if (r)
                return r;
 
-       /* Need to be resumed to be able to start reshape, recovery is frozen until raid_resume() though */
-       if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
-               mddev_resume(mddev);
-
        /*
         * Check any reshape constraints enforced by the personality
         *
@@ -3923,10 +3918,6 @@ static int rs_start_reshape(struct raid_set *rs)
                }
        }
 
-       /* Suspend because a resume will happen in raid_resume() */
-       set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
-       mddev_suspend(mddev);
-
        /*
         * Now reshape got set up, update superblocks to
         * reflect the fact so that a table reload will
@@ -3947,29 +3938,6 @@ static int raid_preresume(struct dm_target *ti)
        if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
                return 0;
 
-       if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
-               struct raid_set *rs_active = rs_find_active(rs);
-
-               if (rs_active) {
-                       /*
-                        * In case no rebuilds have been requested
-                        * and an active table slot exists, copy
-                        * current resynchonization completed and
-                        * reshape position pointers across from
-                        * suspended raid set in the active slot.
-                        *
-                        * This resumes the new mapping at current
-                        * offsets to continue recover/reshape without
-                        * necessarily redoing a raid set partially or
-                        * causing data corruption in case of a reshape.
-                        */
-                       if (rs_active->md.curr_resync_completed != MaxSector)
-                               mddev->curr_resync_completed = rs_active->md.curr_resync_completed;
-                       if (rs_active->md.reshape_position != MaxSector)
-                               mddev->reshape_position = rs_active->md.reshape_position;
-               }
-       }
-
        /*
         * The superblocks need to be updated on disk if the
         * array is new or new devices got added (thus zeroed
@@ -4046,7 +4014,7 @@ static void raid_resume(struct dm_target *ti)
 
 static struct target_type raid_target = {
        .name = "raid",
-       .version = {1, 13, 2},
+       .version = {1, 14, 0},
        .module = THIS_MODULE,
        .ctr = raid_ctr,
        .dtr = raid_dtr,
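The dm-raid hunks above replace ad-hoc status strings with an enum sync_state plus a lookup table that is bounds-checked before indexing. A minimal user-space sketch of that enum-to-string pattern, assuming an open-coded range check in place of the kernel's __within_range()/ARRAY_SIZE() helpers:

    #include <stdio.h>

    enum sync_state { st_frozen, st_reshape, st_resync, st_check,
                      st_repair, st_recover, st_idle };

    /* Table order must match the enum above. */
    static const char *const sync_strs[] = {
            "frozen", "reshape", "resync", "check", "repair", "recover", "idle",
    };

    static const char *sync_str(enum sync_state state)
    {
            /* Reject out-of-range values instead of indexing blindly. */
            if ((int)state < 0 ||
                (size_t)state >= sizeof(sync_strs) / sizeof(sync_strs[0]))
                    return "undef";
            return sync_strs[state];
    }

    int main(void)
    {
            printf("%s %s\n", sync_str(st_resync),
                   sync_str((enum sync_state)42));   /* prints "resync undef" */
            return 0;
    }

Keeping the string table behind sync_str() means state handling and status reporting cannot drift apart: callers such as raid_status() now pass the enum around and only convert to text at the last moment.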
index 72142021b5c9a0410cfb6ccb04a93d613376fb53..74f6770c70b12404e965345ec561318bf2225fcb 100644 (file)
@@ -188,6 +188,12 @@ struct dm_pool_metadata {
        unsigned long flags;
        sector_t data_block_size;
 
+       /*
+        * We reserve a section of the metadata for commit overhead.
+        * All reported space does *not* include this.
+        */
+       dm_block_t metadata_reserve;
+
        /*
         * Set if a transaction has to be aborted but the attempt to roll back
         * to the previous (good) transaction failed.  The only pool metadata
@@ -816,6 +822,22 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
        return dm_tm_commit(pmd->tm, sblock);
 }
 
+static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
+{
+       int r;
+       dm_block_t total;
+       dm_block_t max_blocks = 4096; /* 16M */
+
+       r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total);
+       if (r) {
+               DMERR("could not get size of metadata device");
+               pmd->metadata_reserve = max_blocks;
+       } else {
+               sector_div(total, 10);
+               pmd->metadata_reserve = min(max_blocks, total);
+       }
+}
+
 struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
                                               sector_t data_block_size,
                                               bool format_device)
@@ -849,6 +871,8 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
                return ERR_PTR(r);
        }
 
+       __set_metadata_reserve(pmd);
+
        return pmd;
 }
 
@@ -1820,6 +1844,13 @@ int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
        down_read(&pmd->root_lock);
        if (!pmd->fail_io)
                r = dm_sm_get_nr_free(pmd->metadata_sm, result);
+
+       if (!r) {
+               if (*result < pmd->metadata_reserve)
+                       *result = 0;
+               else
+                       *result -= pmd->metadata_reserve;
+       }
        up_read(&pmd->root_lock);
 
        return r;
@@ -1932,8 +1963,11 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_cou
        int r = -EINVAL;
 
        down_write(&pmd->root_lock);
-       if (!pmd->fail_io)
+       if (!pmd->fail_io) {
                r = __resize_space_map(pmd->metadata_sm, new_count);
+               if (!r)
+                       __set_metadata_reserve(pmd);
+       }
        up_write(&pmd->root_lock);
 
        return r;
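The metadata reserve introduced above is capped at 4096 blocks (16M with the 4K metadata block size) or one tenth of the metadata device, whichever is smaller. A stand-alone sketch of just that arithmetic, where metadata_reserve() is an illustrative name (the kernel's __set_metadata_reserve() uses sector_div() and stores the result in the pmd):

    #include <stdio.h>
    #include <stdint.h>

    /* Reserve the smaller of 4096 blocks and 10% of the device. */
    static uint64_t metadata_reserve(uint64_t total_blocks)
    {
            const uint64_t max_blocks = 4096;
            uint64_t tenth = total_blocks / 10;

            return tenth < max_blocks ? tenth : max_blocks;
    }

    int main(void)
    {
            printf("%llu\n", (unsigned long long)metadata_reserve(16384));    /* 1638 */
            printf("%llu\n", (unsigned long long)metadata_reserve(1 << 20));  /* 4096 */
            return 0;
    }

dm_pool_get_free_metadata_block_count() then subtracts this reserve from the reported free count, so user space sees zero free blocks while the pool still keeps headroom for its own commit overhead.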
index 7bd60a150f8faec85071cf1d984bd35d27b08391..aaf1ad481ee88e59b90273133b7a7bd90c69d3fb 100644 (file)
@@ -200,7 +200,13 @@ struct dm_thin_new_mapping;
 enum pool_mode {
        PM_WRITE,               /* metadata may be changed */
        PM_OUT_OF_DATA_SPACE,   /* metadata may be changed, though data may not be allocated */
+
+       /*
+        * Like READ_ONLY, except may switch back to WRITE on metadata resize. Reported as READ_ONLY.
+        */
+       PM_OUT_OF_METADATA_SPACE,
        PM_READ_ONLY,           /* metadata may not be changed */
+
        PM_FAIL,                /* all I/O fails */
 };
 
@@ -1371,7 +1377,35 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
 
 static void requeue_bios(struct pool *pool);
 
-static void check_for_space(struct pool *pool)
+static bool is_read_only_pool_mode(enum pool_mode mode)
+{
+       return (mode == PM_OUT_OF_METADATA_SPACE || mode == PM_READ_ONLY);
+}
+
+static bool is_read_only(struct pool *pool)
+{
+       return is_read_only_pool_mode(get_pool_mode(pool));
+}
+
+static void check_for_metadata_space(struct pool *pool)
+{
+       int r;
+       const char *ooms_reason = NULL;
+       dm_block_t nr_free;
+
+       r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free);
+       if (r)
+               ooms_reason = "Could not get free metadata blocks";
+       else if (!nr_free)
+               ooms_reason = "No free metadata blocks";
+
+       if (ooms_reason && !is_read_only(pool)) {
+               DMERR("%s", ooms_reason);
+               set_pool_mode(pool, PM_OUT_OF_METADATA_SPACE);
+       }
+}
+
+static void check_for_data_space(struct pool *pool)
 {
        int r;
        dm_block_t nr_free;
@@ -1397,14 +1431,16 @@ static int commit(struct pool *pool)
 {
        int r;
 
-       if (get_pool_mode(pool) >= PM_READ_ONLY)
+       if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE)
                return -EINVAL;
 
        r = dm_pool_commit_metadata(pool->pmd);
        if (r)
                metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
-       else
-               check_for_space(pool);
+       else {
+               check_for_metadata_space(pool);
+               check_for_data_space(pool);
+       }
 
        return r;
 }
@@ -1470,6 +1506,19 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
                return r;
        }
 
+       r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks);
+       if (r) {
+               metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r);
+               return r;
+       }
+
+       if (!free_blocks) {
+               /* Let's commit before we use up the metadata reserve. */
+               r = commit(pool);
+               if (r)
+                       return r;
+       }
+
        return 0;
 }
 
@@ -1501,6 +1550,7 @@ static blk_status_t should_error_unserviceable_bio(struct pool *pool)
        case PM_OUT_OF_DATA_SPACE:
                return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0;
 
+       case PM_OUT_OF_METADATA_SPACE:
        case PM_READ_ONLY:
        case PM_FAIL:
                return BLK_STS_IOERR;
@@ -2464,8 +2514,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
                error_retry_list(pool);
                break;
 
+       case PM_OUT_OF_METADATA_SPACE:
        case PM_READ_ONLY:
-               if (old_mode != new_mode)
+               if (!is_read_only_pool_mode(old_mode))
                        notify_of_pool_mode_change(pool, "read-only");
                dm_pool_metadata_read_only(pool->pmd);
                pool->process_bio = process_bio_read_only;
@@ -3403,6 +3454,10 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
                DMINFO("%s: growing the metadata device from %llu to %llu blocks",
                       dm_device_name(pool->pool_md),
                       sb_metadata_dev_size, metadata_dev_size);
+
+               if (get_pool_mode(pool) == PM_OUT_OF_METADATA_SPACE)
+                       set_pool_mode(pool, PM_WRITE);
+
                r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
                if (r) {
                        metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
@@ -3707,7 +3762,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv,
        struct pool_c *pt = ti->private;
        struct pool *pool = pt->pool;
 
-       if (get_pool_mode(pool) >= PM_READ_ONLY) {
+       if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) {
                DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
                      dm_device_name(pool->pool_md));
                return -EOPNOTSUPP;
@@ -3781,6 +3836,7 @@ static void pool_status(struct dm_target *ti, status_type_t type,
        dm_block_t nr_blocks_data;
        dm_block_t nr_blocks_metadata;
        dm_block_t held_root;
+       enum pool_mode mode;
        char buf[BDEVNAME_SIZE];
        char buf2[BDEVNAME_SIZE];
        struct pool_c *pt = ti->private;
@@ -3851,9 +3907,10 @@ static void pool_status(struct dm_target *ti, status_type_t type,
                else
                        DMEMIT("- ");
 
-               if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
+               mode = get_pool_mode(pool);
+               if (mode == PM_OUT_OF_DATA_SPACE)
                        DMEMIT("out_of_data_space ");
-               else if (pool->pf.mode == PM_READ_ONLY)
+               else if (is_read_only_pool_mode(mode))
                        DMEMIT("ro ");
                else
                        DMEMIT("rw ");
index 12decdbd722d866ed48a0a6a8d8b8f2e0a6aefbc..fc65f0dedf7f702b31d6adf21b8a2b26238c8b43 100644 (file)
@@ -99,10 +99,26 @@ static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
 {
        struct scatterlist sg;
 
-       sg_init_one(&sg, data, len);
-       ahash_request_set_crypt(req, &sg, NULL, len);
-
-       return crypto_wait_req(crypto_ahash_update(req), wait);
+       if (likely(!is_vmalloc_addr(data))) {
+               sg_init_one(&sg, data, len);
+               ahash_request_set_crypt(req, &sg, NULL, len);
+               return crypto_wait_req(crypto_ahash_update(req), wait);
+       } else {
+               do {
+                       int r;
+                       size_t this_step = min_t(size_t, len, PAGE_SIZE - offset_in_page(data));
+                       flush_kernel_vmap_range((void *)data, this_step);
+                       sg_init_table(&sg, 1);
+                       sg_set_page(&sg, vmalloc_to_page(data), this_step, offset_in_page(data));
+                       ahash_request_set_crypt(req, &sg, NULL, this_step);
+                       r = crypto_wait_req(crypto_ahash_update(req), wait);
+                       if (unlikely(r))
+                               return r;
+                       data += this_step;
+                       len -= this_step;
+               } while (len);
+               return 0;
+       }
 }
 
 /*
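The dm-verity change above hashes vmalloc'd buffers by slicing them into chunks that never cross a page boundary, so each chunk can be mapped with vmalloc_to_page(). A user-space sketch of just that slicing, assuming a 4K page and a stand-in offset_in_page() helper:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096UL

    static size_t offset_in_page(const void *p)
    {
            return (uintptr_t)p & (PAGE_SIZE - 1);
    }

    /* Visit a buffer in page-bounded steps, mirroring the loop above. */
    static void for_each_page_chunk(const char *data, size_t len)
    {
            while (len) {
                    size_t this_step = PAGE_SIZE - offset_in_page(data);

                    if (this_step > len)
                            this_step = len;
                    printf("offset-in-page %4zu, %4zu bytes\n",
                           offset_in_page(data), this_step);
                    data += this_step;
                    len -= this_step;
            }
    }

    int main(void)
    {
            static char buf[3 * PAGE_SIZE];

            for_each_page_chunk(buf + 100, 2 * PAGE_SIZE);
            return 0;
    }

In the kernel loop each chunk additionally gets flush_kernel_vmap_range() and its own scatterlist entry before crypto_ahash_update() is called.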
index 3a28a68f184ca5baccaa541ad68fd60eade09df7..5f1f80d424dd370f306e15033e12bb77de72f3b7 100644 (file)
@@ -268,9 +268,8 @@ static int persistent_memory_claim(struct dm_writecache *wc)
                i = 0;
                do {
                        long daa;
-                       void *dummy_addr;
                        daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i,
-                                               &dummy_addr, &pfn);
+                                               NULL, &pfn);
                        if (daa <= 0) {
                                r = daa ? daa : -EINVAL;
                                goto err3;
index 94329e03001ec8d0ca15ff4ad0d04e7fdebd0b5d..0b2af6e74fc375ed163824fa9cdaf84d1b9ffd95 100644 (file)
@@ -1276,18 +1276,18 @@ static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
 static int resync_finish(struct mddev *mddev)
 {
        struct md_cluster_info *cinfo = mddev->cluster_info;
+       int ret = 0;
 
        clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery);
-       dlm_unlock_sync(cinfo->resync_lockres);
 
        /*
         * If resync thread is interrupted so we can't say resync is finished,
         * another node will launch resync thread to continue.
         */
-       if (test_bit(MD_CLOSING, &mddev->flags))
-               return 0;
-       else
-               return resync_info_update(mddev, 0, 0);
+       if (!test_bit(MD_CLOSING, &mddev->flags))
+               ret = resync_info_update(mddev, 0, 0);
+       dlm_unlock_sync(cinfo->resync_lockres);
+       return ret;
 }
 
 static int area_resyncing(struct mddev *mddev, int direction,
index 9818980494914eb72e085173fc015e012afb5dce..d6f7978b4449e92aba522035941ff51137e2ce38 100644 (file)
@@ -4529,11 +4529,12 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
                allow_barrier(conf);
        }
 
+       raise_barrier(conf, 0);
 read_more:
        /* Now schedule reads for blocks from sector_nr to last */
        r10_bio = raid10_alloc_init_r10buf(conf);
        r10_bio->state = 0;
-       raise_barrier(conf, sectors_done != 0);
+       raise_barrier(conf, 1);
        atomic_set(&r10_bio->remaining, 0);
        r10_bio->mddev = mddev;
        r10_bio->sector = sector_nr;
@@ -4629,6 +4630,8 @@ read_more:
        if (sector_nr <= last)
                goto read_more;
 
+       lower_barrier(conf);
+
        /* Now that we have done the whole section we can
         * update reshape_progress
         */
index a001808a2b77da16bc2ae3c1d4aa32b5f944a939..bfb81140706140a53af24ccf90420e6cc107cef1 100644 (file)
@@ -46,6 +46,11 @@ extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add);
 extern void ppl_quiesce(struct r5conf *conf, int quiesce);
 extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio);
 
+static inline bool raid5_has_log(struct r5conf *conf)
+{
+       return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
+}
+
 static inline bool raid5_has_ppl(struct r5conf *conf)
 {
        return test_bit(MD_HAS_PPL, &conf->mddev->flags);
index 4ce0d7502fad84ac9422b6fc00404136340397ac..e4e98f47865def0449979058c6c7e51228b9b42b 100644 (file)
@@ -733,7 +733,7 @@ static bool stripe_can_batch(struct stripe_head *sh)
 {
        struct r5conf *conf = sh->raid_conf;
 
-       if (conf->log || raid5_has_ppl(conf))
+       if (raid5_has_log(conf) || raid5_has_ppl(conf))
                return false;
        return test_bit(STRIPE_BATCH_READY, &sh->state) &&
                !test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
@@ -7737,7 +7737,7 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
        sector_t newsize;
        struct r5conf *conf = mddev->private;
 
-       if (conf->log || raid5_has_ppl(conf))
+       if (raid5_has_log(conf) || raid5_has_ppl(conf))
                return -EINVAL;
        sectors &= ~((sector_t)conf->chunk_sectors - 1);
        newsize = raid5_size(mddev, sectors, mddev->raid_disks);
@@ -7788,7 +7788,7 @@ static int check_reshape(struct mddev *mddev)
 {
        struct r5conf *conf = mddev->private;
 
-       if (conf->log || raid5_has_ppl(conf))
+       if (raid5_has_log(conf) || raid5_has_ppl(conf))
                return -EINVAL;
        if (mddev->delta_disks == 0 &&
            mddev->new_layout == mddev->layout &&
index fcdf3d5dc4b6c0722bdd3481ad2536529982605a..3bae24b15eaa4cbd5e25485121e50ddcb5991af2 100644 (file)
@@ -585,18 +585,12 @@ int __must_check media_device_register_entity(struct media_device *mdev,
        entity->num_links = 0;
        entity->num_backlinks = 0;
 
-       if (!ida_pre_get(&mdev->entity_internal_idx, GFP_KERNEL))
-               return -ENOMEM;
-
-       mutex_lock(&mdev->graph_mutex);
-
-       ret = ida_get_new_above(&mdev->entity_internal_idx, 1,
-                               &entity->internal_idx);
-       if (ret < 0) {
-               mutex_unlock(&mdev->graph_mutex);
+       ret = ida_alloc_min(&mdev->entity_internal_idx, 1, GFP_KERNEL);
+       if (ret < 0)
                return ret;
-       }
+       entity->internal_idx = ret;
 
+       mutex_lock(&mdev->graph_mutex);
        mdev->entity_internal_idx_max =
                max(mdev->entity_internal_idx_max, entity->internal_idx);
 
@@ -642,7 +636,7 @@ static void __media_device_unregister_entity(struct media_entity *entity)
        struct media_interface *intf;
        unsigned int i;
 
-       ida_simple_remove(&mdev->entity_internal_idx, entity->internal_idx);
+       ida_free(&mdev->entity_internal_idx, entity->internal_idx);
 
        /* Remove all interface links pointing to this entity */
        list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) {
index 31112f622b884f2577d46caec86b2b63370e81de..475e5b3790edb4085055fcd38df2f6d77d98388d 100644 (file)
@@ -411,7 +411,7 @@ static int aemif_probe(struct platform_device *pdev)
                        if (ret < 0)
                                goto error;
                }
-       } else {
+       } else if (pdata) {
                for (i = 0; i < pdata->num_sub_devices; i++) {
                        pdata->sub_devices[i].dev.parent = dev;
                        ret = platform_device_register(&pdata->sub_devices[i]);
index 4d4acf763b655c72353ce4e9d2bbff9d962dd957..2c43fd09d602fd0abddf3996213eb1446a894b46 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/gfp.h>
 
 static DEFINE_IDA(cb710_ida);
-static DEFINE_SPINLOCK(cb710_ida_lock);
 
 void cb710_pci_update_config_reg(struct pci_dev *pdev,
        int reg, uint32_t mask, uint32_t xor)
@@ -205,7 +204,6 @@ static int cb710_probe(struct pci_dev *pdev,
        const struct pci_device_id *ent)
 {
        struct cb710_chip *chip;
-       unsigned long flags;
        u32 val;
        int err;
        int n = 0;
@@ -256,18 +254,10 @@ static int cb710_probe(struct pci_dev *pdev,
        if (err)
                return err;
 
-       do {
-               if (!ida_pre_get(&cb710_ida, GFP_KERNEL))
-                       return -ENOMEM;
-
-               spin_lock_irqsave(&cb710_ida_lock, flags);
-               err = ida_get_new(&cb710_ida, &chip->platform_id);
-               spin_unlock_irqrestore(&cb710_ida_lock, flags);
-
-               if (err && err != -EAGAIN)
-                       return err;
-       } while (err);
-
+       err = ida_alloc(&cb710_ida, GFP_KERNEL);
+       if (err < 0)
+               return err;
+       chip->platform_id = err;
 
        dev_info(&pdev->dev, "id %d, IO 0x%p, IRQ %d\n",
                chip->platform_id, chip->iobase, pdev->irq);
@@ -308,7 +298,6 @@ unreg_mmc:
 static void cb710_remove_one(struct pci_dev *pdev)
 {
        struct cb710_chip *chip = pci_get_drvdata(pdev);
-       unsigned long flags;
 
        cb710_unregister_slot(chip, CB710_SLOT_SM);
        cb710_unregister_slot(chip, CB710_SLOT_MS);
@@ -317,9 +306,7 @@ static void cb710_remove_one(struct pci_dev *pdev)
        BUG_ON(atomic_read(&chip->slot_refs_count) != 0);
 #endif
 
-       spin_lock_irqsave(&cb710_ida_lock, flags);
-       ida_remove(&cb710_ida, chip->platform_id);
-       spin_unlock_irqrestore(&cb710_ida_lock, flags);
+       ida_free(&cb710_ida, chip->platform_id);
 }
 
 static const struct pci_device_id cb710_pci_tbl[] = {
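Both the media-controller and cb710 hunks above convert from the old ida_pre_get()/ida_get_new*()/ida_remove() retry loop, plus an external spinlock, to the self-locking ida_alloc*()/ida_free() helpers. A kernel-style sketch of the new pattern, with the example_* names being illustrative only:

    #include <linux/idr.h>

    static DEFINE_IDA(example_ida);

    /* Allocate the smallest free id >= 1; the ida locks internally and
     * allocates its own memory, so no pre-get or retry loop is needed. */
    static int example_get_id(void)
    {
            return ida_alloc_min(&example_ida, 1, GFP_KERNEL);  /* < 0 on error */
    }

    static void example_put_id(int id)
    {
            ida_free(&example_ida, id);
    }

cb710 uses plain ida_alloc() (minimum 0), while media_device_register_entity() keeps 0 reserved by allocating from 1, which is why it calls ida_alloc_min().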
index eeb7eef62174c4dfb5c8854efbef2d798f161f1e..38f90e17992717d7cd0f12ad904490abbaa2d1a5 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/err.h>
 #include <linux/delay.h>
 #include <linux/sysfs.h>
+#include <linux/nospec.h>
 
 static DEFINE_MUTEX(compass_mutex);
 
@@ -50,6 +51,7 @@ static int compass_store(struct device *dev, const char *buf, size_t count,
                return ret;
        if (val >= strlen(map))
                return -EINVAL;
+       val = array_index_nospec(val, strlen(map));
        mutex_lock(&compass_mutex);
        ret = compass_command(c, map[val]);
        mutex_unlock(&compass_mutex);
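The compass hunk above applies the standard Spectre-v1 hardening pattern: even after the bounds check, the index is clamped with array_index_nospec() so a mispredicted branch cannot be used to read past the array under speculation. A kernel-style sketch of the pattern, where bounded_lookup() is a hypothetical helper:

    #include <linux/nospec.h>
    #include <linux/errno.h>

    static int bounded_lookup(const char *table, size_t table_len, size_t idx)
    {
            if (idx >= table_len)
                    return -EINVAL;
            /* Clamp the index under speculation as well. */
            idx = array_index_nospec(idx, table_len);
            return table[idx];
    }

array_index_nospec(index, size) evaluates to index when it is below size and to 0 otherwise, without a branch the CPU could mispredict.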
index 8f82bb9d11e2e3bfb6fbbf3764180a1b7fb0d0a2..b8aaa684c397b0b8be8fe0c5ae00a37b087b6997 100644 (file)
@@ -2131,7 +2131,7 @@ static int ibmvmc_init_crq_queue(struct crq_server_adapter *adapter)
        retrc = plpar_hcall_norets(H_REG_CRQ,
                                   vdev->unit_address,
                                   queue->msg_token, PAGE_SIZE);
-       retrc = rc;
+       rc = retrc;
 
        if (rc == H_RESOURCE)
                rc = ibmvmc_reset_crq_queue(adapter);
index 7bba62a72921ebe405f36306e9233afc910351b4..fc3872fe7b2510e1fa862ea508be53ad377a4bfd 100644 (file)
@@ -521,17 +521,15 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
 
        cl = cldev->cl;
 
+       mutex_lock(&bus->device_lock);
        if (cl->state == MEI_FILE_UNINITIALIZED) {
-               mutex_lock(&bus->device_lock);
                ret = mei_cl_link(cl);
-               mutex_unlock(&bus->device_lock);
                if (ret)
-                       return ret;
+                       goto out;
                /* update pointers */
                cl->cldev = cldev;
        }
 
-       mutex_lock(&bus->device_lock);
        if (mei_cl_is_connected(cl)) {
                ret = 0;
                goto out;
@@ -616,9 +614,8 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
        if (err < 0)
                dev_err(bus->dev, "Could not disconnect from the ME client\n");
 
-out:
        mei_cl_bus_module_put(cldev);
-
+out:
        /* Flush queues and remove any pending read */
        mei_cl_flush_queues(cl, NULL);
        mei_cl_unlink(cl);
@@ -876,12 +873,13 @@ static void mei_cl_bus_dev_release(struct device *dev)
 
        mei_me_cl_put(cldev->me_cl);
        mei_dev_bus_put(cldev->bus);
+       mei_cl_unlink(cldev->cl);
        kfree(cldev->cl);
        kfree(cldev);
 }
 
 static const struct device_type mei_cl_device_type = {
-       .release        = mei_cl_bus_dev_release,
+       .release = mei_cl_bus_dev_release,
 };
 
 /**
index 4ab6251d418ecf4fac7ec93a4b8101db313f5249..ebdcf0b450e25bb00be1db564fd09f806e87edad 100644 (file)
@@ -1767,7 +1767,7 @@ out:
                }
        }
 
-       rets = buf->size;
+       rets = len;
 err:
        cl_dbg(dev, cl, "rpm: autosuspend\n");
        pm_runtime_mark_last_busy(dev->dev);
index 09e233d4c0de386eab9ea2f2ce178dedf4ebb480..e56f3e72d57a06cc4a9c2e8358bd3359113524c8 100644 (file)
@@ -1161,15 +1161,18 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
 
                props_res = (struct hbm_props_response *)mei_msg;
 
-               if (props_res->status) {
+               if (props_res->status == MEI_HBMS_CLIENT_NOT_FOUND) {
+                       dev_dbg(dev->dev, "hbm: properties response: %d CLIENT_NOT_FOUND\n",
+                               props_res->me_addr);
+               } else if (props_res->status) {
                        dev_err(dev->dev, "hbm: properties response: wrong status = %d %s\n",
                                props_res->status,
                                mei_hbm_status_str(props_res->status));
                        return -EPROTO;
+               } else {
+                       mei_hbm_me_cl_add(dev, props_res);
                }
 
-               mei_hbm_me_cl_add(dev, props_res);
-
                /* request property for the next client */
                if (mei_hbm_prop_req(dev, props_res->me_addr + 1))
                        return -EIO;
index 648eb6743ed58890f356212ed60a81ca5e6dd43c..6edffeed99534935f320b5b3d6e941dad15a968d 100644 (file)
@@ -238,10 +238,6 @@ static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
        mmc_exit_request(mq->queue, req);
 }
 
-/*
- * We use BLK_MQ_F_BLOCKING and have only 1 hardware queue, which means requests
- * will not be dispatched in parallel.
- */
 static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                                    const struct blk_mq_queue_data *bd)
 {
@@ -264,7 +260,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        spin_lock_irq(q->queue_lock);
 
-       if (mq->recovery_needed) {
+       if (mq->recovery_needed || mq->busy) {
                spin_unlock_irq(q->queue_lock);
                return BLK_STS_RESOURCE;
        }
@@ -291,6 +287,9 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                break;
        }
 
+       /* Parallel dispatch of requests is not supported at the moment */
+       mq->busy = true;
+
        mq->in_flight[issue_type] += 1;
        get_card = (mmc_tot_in_flight(mq) == 1);
        cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);
@@ -333,9 +332,12 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                mq->in_flight[issue_type] -= 1;
                if (mmc_tot_in_flight(mq) == 0)
                        put_card = true;
+               mq->busy = false;
                spin_unlock_irq(q->queue_lock);
                if (put_card)
                        mmc_put_card(card, &mq->ctx);
+       } else {
+               WRITE_ONCE(mq->busy, false);
        }
 
        return ret;
index 17e59d50b4960298630404e7dbe4a687eb8e9a19..9bf3c924507552caf0bf50a3118944e0390bc044 100644 (file)
@@ -81,6 +81,7 @@ struct mmc_queue {
        unsigned int            cqe_busy;
 #define MMC_CQE_DCMD_BUSY      BIT(0)
 #define MMC_CQE_QUEUE_FULL     BIT(1)
+       bool                    busy;
        bool                    use_cqe;
        bool                    recovery_needed;
        bool                    in_recovery;
index 294de177632c6e25a2024d42f8fbdd926043a80f..61e4e2a213c9637f73faeda61dc585c7922faab8 100644 (file)
@@ -217,7 +217,7 @@ static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host,
                         * We don't really have DMA, so we need
                         * to copy from our platform driver buffer
                         */
-                       sg_copy_to_buffer(data->sg, 1, host->virt_base,
+                       sg_copy_from_buffer(data->sg, 1, host->virt_base,
                                        data->sg->length);
                }
                host->data->bytes_xfered += data->sg->length;
@@ -393,7 +393,7 @@ static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host,
                 * We don't really have DMA, so we need to copy to our
                 * platform driver buffer
                 */
-               sg_copy_from_buffer(data->sg, 1, host->virt_base,
+               sg_copy_to_buffer(data->sg, 1, host->virt_base,
                                data->sg->length);
        }
 }
index 5aa2c9404e926db1af2e17452a64cf5a08e4156b..be53044086c76f7291224c33ed10c734ece7e897 100644 (file)
@@ -1976,7 +1976,7 @@ static void atmci_read_data_pio(struct atmel_mci *host)
        do {
                value = atmci_readl(host, ATMCI_RDR);
                if (likely(offset + 4 <= sg->length)) {
-                       sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset);
+                       sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset);
 
                        offset += 4;
                        nbytes += 4;
@@ -1993,7 +1993,7 @@ static void atmci_read_data_pio(struct atmel_mci *host)
                } else {
                        unsigned int remaining = sg->length - offset;
 
-                       sg_pcopy_to_buffer(sg, 1, &value, remaining, offset);
+                       sg_pcopy_from_buffer(sg, 1, &value, remaining, offset);
                        nbytes += remaining;
 
                        flush_dcache_page(sg_page(sg));
@@ -2003,7 +2003,7 @@ static void atmci_read_data_pio(struct atmel_mci *host)
                                goto done;
 
                        offset = 4 - remaining;
-                       sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining,
+                       sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining,
                                        offset, 0);
                        nbytes += offset;
                }
@@ -2042,7 +2042,7 @@ static void atmci_write_data_pio(struct atmel_mci *host)
 
        do {
                if (likely(offset + 4 <= sg->length)) {
-                       sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset);
+                       sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset);
                        atmci_writel(host, ATMCI_TDR, value);
 
                        offset += 4;
@@ -2059,7 +2059,7 @@ static void atmci_write_data_pio(struct atmel_mci *host)
                        unsigned int remaining = sg->length - offset;
 
                        value = 0;
-                       sg_pcopy_from_buffer(sg, 1, &value, remaining, offset);
+                       sg_pcopy_to_buffer(sg, 1, &value, remaining, offset);
                        nbytes += remaining;
 
                        host->sg = sg = sg_next(sg);
@@ -2070,7 +2070,7 @@ static void atmci_write_data_pio(struct atmel_mci *host)
                        }
 
                        offset = 4 - remaining;
-                       sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining,
+                       sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining,
                                        offset, 0);
                        atmci_writel(host, ATMCI_TDR, value);
                        nbytes += offset;
index 09cb89645d06ebc97c77b886ff3c6a1be9b62c5c..2cfec33178c1fa20532270c04a1f634414a4ec19 100644 (file)
@@ -517,19 +517,23 @@ static struct mmc_host_ops meson_mx_mmc_ops = {
 static struct platform_device *meson_mx_mmc_slot_pdev(struct device *parent)
 {
        struct device_node *slot_node;
+       struct platform_device *pdev;
 
        /*
         * TODO: the MMC core framework currently does not support
         * controllers with multiple slots properly. So we only register
         * the first slot for now
         */
-       slot_node = of_find_compatible_node(parent->of_node, NULL, "mmc-slot");
+       slot_node = of_get_compatible_child(parent->of_node, "mmc-slot");
        if (!slot_node) {
                dev_warn(parent, "no 'mmc-slot' sub-node found\n");
                return ERR_PTR(-ENOENT);
        }
 
-       return of_platform_device_create(slot_node, NULL, parent);
+       pdev = of_platform_device_create(slot_node, NULL, parent);
+       of_node_put(slot_node);
+
+       return pdev;
 }
 
 static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)
index 071693ebfe18914fe799b7da1d91df1e6f8b99b9..68760d4a5d3da1a53c655d1b33f1c2237273d136 100644 (file)
@@ -2177,6 +2177,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
        dma_release_channel(host->tx_chan);
        dma_release_channel(host->rx_chan);
 
+       dev_pm_clear_wake_irq(host->dev);
        pm_runtime_dont_use_autosuspend(host->dev);
        pm_runtime_put_sync(host->dev);
        pm_runtime_disable(host->dev);
index 35cc0de6be67a5159a572612ab028ae43d1a84bc..ca0b43973769c9f80b4771914b5b74016b54bf84 100644 (file)
 /* DM_CM_RST */
 #define RST_DTRANRST1          BIT(9)
 #define RST_DTRANRST0          BIT(8)
-#define RST_RESERVED_BITS      GENMASK_ULL(32, 0)
+#define RST_RESERVED_BITS      GENMASK_ULL(31, 0)
 
 /* DM_CM_INFO1 and DM_CM_INFO1_MASK */
 #define INFO1_CLEAR            0
+#define INFO1_MASK_CLEAR       GENMASK_ULL(31, 0)
 #define INFO1_DTRANEND1                BIT(17)
 #define INFO1_DTRANEND0                BIT(16)
 
 /* DM_CM_INFO2 and DM_CM_INFO2_MASK */
+#define INFO2_MASK_CLEAR       GENMASK_ULL(31, 0)
 #define INFO2_DTRANERR1                BIT(17)
 #define INFO2_DTRANERR0                BIT(16)
 
@@ -252,6 +254,12 @@ renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host,
 {
        struct renesas_sdhi *priv = host_to_priv(host);
 
+       /* Disable DMAC interrupts, we don't use them */
+       renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1_MASK,
+                                           INFO1_MASK_CLEAR);
+       renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO2_MASK,
+                                           INFO2_MASK_CLEAR);
+
        /* Each value is set to non-zero to assume "enabling" each DMA */
        host->chan_rx = host->chan_tx = (void *)0xdeadbeaf;
 
index cbfafc453274aa2bef7aee285c5109601516d5e7..270d3c9580c51195ccb6b05e3719e98fb1836031 100644 (file)
@@ -39,13 +39,23 @@ static int m25p80_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len)
        struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(code, 1),
                                          SPI_MEM_OP_NO_ADDR,
                                          SPI_MEM_OP_NO_DUMMY,
-                                         SPI_MEM_OP_DATA_IN(len, val, 1));
+                                         SPI_MEM_OP_DATA_IN(len, NULL, 1));
+       void *scratchbuf;
        int ret;
 
+       scratchbuf = kmalloc(len, GFP_KERNEL);
+       if (!scratchbuf)
+               return -ENOMEM;
+
+       op.data.buf.in = scratchbuf;
        ret = spi_mem_exec_op(flash->spimem, &op);
        if (ret < 0)
                dev_err(&flash->spimem->spi->dev, "error %d reading %x\n", ret,
                        code);
+       else
+               memcpy(val, scratchbuf, len);
+
+       kfree(scratchbuf);
 
        return ret;
 }
@@ -56,9 +66,19 @@ static int m25p80_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
        struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 1),
                                          SPI_MEM_OP_NO_ADDR,
                                          SPI_MEM_OP_NO_DUMMY,
-                                         SPI_MEM_OP_DATA_OUT(len, buf, 1));
+                                         SPI_MEM_OP_DATA_OUT(len, NULL, 1));
+       void *scratchbuf;
+       int ret;
 
-       return spi_mem_exec_op(flash->spimem, &op);
+       scratchbuf = kmemdup(buf, len, GFP_KERNEL);
+       if (!scratchbuf)
+               return -ENOMEM;
+
+       op.data.buf.out = scratchbuf;
+       ret = spi_mem_exec_op(flash->spimem, &op);
+       kfree(scratchbuf);
+
+       return ret;
 }
 
 static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
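The m25p80 change above bounces register reads and writes through a kmalloc'd scratch buffer because spi-mem data buffers must be DMA-able, while the small register buffers callers pass in may live on the stack. A sketch of that bounce-buffer shape, where do_transfer() is a hypothetical stand-in for spi_mem_exec_op():

    #include <linux/slab.h>
    #include <linux/string.h>

    static int read_reg_bounced(void *val, size_t len,
                                int (*do_transfer)(void *buf, size_t len))
    {
            void *scratch = kmalloc(len, GFP_KERNEL);
            int ret;

            if (!scratch)
                    return -ENOMEM;

            ret = do_transfer(scratch, len);        /* DMA-safe heap buffer */
            if (!ret)
                    memcpy(val, scratch, len);

            kfree(scratch);
            return ret;
    }

The write path does the mirror image with kmemdup(), copying the caller's buffer into the scratch allocation before issuing the transfer.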
index 52e2cb35fc7902b1fce895d46e9944a6d908946d..99c460facd5e97702896aae893d4df258320c0f2 100644 (file)
@@ -873,8 +873,11 @@ static int mtd_part_of_parse(struct mtd_info *master,
        int ret, err = 0;
 
        np = mtd_get_of_node(master);
-       if (!mtd_is_partition(master))
+       if (mtd_is_partition(master))
+               of_node_get(np);
+       else
                np = of_get_child_by_name(np, "partitions");
+
        of_property_for_each_string(np, "compatible", prop, compat) {
                parser = mtd_part_get_compatible_parser(compat);
                if (!parser)
index ca18612c42014288d48c9ac8180a4b5be88e9a06..b864b93dd289ed6eda8b2591006a6ab57922c1b7 100644 (file)
@@ -596,6 +596,12 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
        }
 
        iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
+       /*
+        * The ->setup_dma() hook kicks DMA by using the data/command
+        * interface, which belongs to a different AXI port from the
+        * register interface.  Read back the register to avoid a race.
+        */
+       ioread32(denali->reg + DMA_ENABLE);
 
        denali_reset_irq(denali);
        denali->setup_dma(denali, dma_addr, page, write);
@@ -1338,6 +1344,11 @@ int denali_init(struct denali_nand_info *denali)
 
        denali_enable_irq(denali);
        denali_reset_banks(denali);
+       if (!denali->max_banks) {
+               /* Error out earlier if no chip is found for some reasons. */
+               ret = -ENODEV;
+               goto disable_irq;
+       }
 
        denali->active_bank = DENALI_INVALID_BANK;
 
index a3f04315c05c54e6332a269603184cac0fc89dd4..427fcbc1b71c0551a107b0d8734bdec808a792c4 100644 (file)
@@ -1218,7 +1218,7 @@ static int docg4_resume(struct platform_device *pdev)
        return 0;
 }
 
-static void __init init_mtd_structs(struct mtd_info *mtd)
+static void init_mtd_structs(struct mtd_info *mtd)
 {
        /* initialize mtd and nand data structures */
 
@@ -1290,7 +1290,7 @@ static void __init init_mtd_structs(struct mtd_info *mtd)
 
 }
 
-static int __init read_id_reg(struct mtd_info *mtd)
+static int read_id_reg(struct mtd_info *mtd)
 {
        struct nand_chip *nand = mtd_to_nand(mtd);
        struct docg4_priv *doc = nand_get_controller_data(nand);
index 7af4d6213ee56e23d2aaeb63decbfd5cd42a00ae..bc2ef52097834f7c43194835a5177d102e6e2c88 100644 (file)
@@ -1547,7 +1547,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip,
        for (op_id = 0; op_id < subop->ninstrs; op_id++) {
                unsigned int offset, naddrs;
                const u8 *addrs;
-               int len = nand_subop_get_data_len(subop, op_id);
+               int len;
 
                instr = &subop->instrs[op_id];
 
@@ -1593,6 +1593,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip,
                                nfc_op->ndcb[0] |=
                                        NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
                                        NDCB0_LEN_OVRD;
+                               len = nand_subop_get_data_len(subop, op_id);
                                nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
                        }
                        nfc_op->data_delay_ns = instr->delay_ns;
@@ -1606,6 +1607,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip,
                                nfc_op->ndcb[0] |=
                                        NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
                                        NDCB0_LEN_OVRD;
+                               len = nand_subop_get_data_len(subop, op_id);
                                nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
                        }
                        nfc_op->data_delay_ns = instr->delay_ns;
index 9375cef2242053c1f1ade3323d04d7dd4bea77af..3d27616d9c85540304a8d78c4a2f050c0866b9a2 100644 (file)
@@ -283,8 +283,12 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                 case SIOCFINDIPDDPRT:
                        spin_lock_bh(&ipddp_route_lock);
                        rp = __ipddp_find_route(&rcp);
-                       if (rp)
-                               memcpy(&rcp2, rp, sizeof(rcp2));
+                       if (rp) {
+                               memset(&rcp2, 0, sizeof(rcp2));
+                               rcp2.ip    = rp->ip;
+                               rcp2.at    = rp->at;
+                               rcp2.flags = rp->flags;
+                       }
                        spin_unlock_bh(&ipddp_route_lock);
 
                        if (rp) {
index 7c791c1da4b98af8cdd2885f91ce18ae119ae1d2..bef01331266f40776db98937c13d4e468158fa72 100644 (file)
 #define MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION          0x7000
 #define MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION          BIT(7)
 #define MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION           BIT(6)
-#define MV88E6XXX_G1_ATU_OP_MISS_VIOLTATION            BIT(5)
+#define MV88E6XXX_G1_ATU_OP_MISS_VIOLATION             BIT(5)
 #define MV88E6XXX_G1_ATU_OP_FULL_VIOLATION             BIT(4)
 
 /* Offset 0x0C: ATU Data Register */
index 307410898fc98e69575879489a259943ce090b50..5200e4bdce93d19f3a3938f3fabbe74743b1352d 100644 (file)
@@ -349,7 +349,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
                chip->ports[entry.portvec].atu_member_violation++;
        }
 
-       if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) {
+       if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) {
                dev_err_ratelimited(chip->dev,
                                    "ATU miss violation for %pM portvec %x\n",
                                    entry.mac, entry.portvec);
index 17f12c18d225a50a21bba97eabf0670ac9f135e5..7635c38e77dd0ce43a960ffef0fdbaabc4f1422d 100644 (file)
@@ -459,12 +459,12 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
        cqe = &admin_queue->cq.entries[head_masked];
 
        /* Go over all the completions */
-       while ((cqe->acq_common_descriptor.flags &
+       while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
                        ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
                /* Do not read the rest of the completion entry before the
                 * phase bit was validated
                 */
-               rmb();
+               dma_rmb();
                ena_com_handle_single_admin_completion(admin_queue, cqe);
 
                head_masked++;
@@ -627,17 +627,10 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
        mmio_read_reg |= mmio_read->seq_num &
                        ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
 
-       /* make sure read_resp->req_id get updated before the hw can write
-        * there
-        */
-       wmb();
-
-       writel_relaxed(mmio_read_reg,
-                      ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
+       writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
 
-       mmiowb();
        for (i = 0; i < timeout; i++) {
-               if (read_resp->req_id == mmio_read->seq_num)
+               if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
                        break;
 
                udelay(1);
@@ -1796,8 +1789,13 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
        aenq_common = &aenq_e->aenq_common_desc;
 
        /* Go over all the events */
-       while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
-              phase) {
+       while ((READ_ONCE(aenq_common->flags) &
+               ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
+               /* Make sure the phase bit (ownership) is as expected before
+                * reading the rest of the descriptor.
+                */
+               dma_rmb();
+
                pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
                         aenq_common->group, aenq_common->syndrom,
                         (u64)aenq_common->timestamp_low +
index ea149c134e1538d6c888db712d9b6cdc2b01412f..1c682b76190f9eb9ecbe6e428735ae21c4ed8b71 100644 (file)
@@ -51,6 +51,11 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
        if (desc_phase != expected_phase)
                return NULL;
 
+       /* Make sure we read the rest of the descriptor after the phase bit
+        * has been read
+        */
+       dma_rmb();
+
        return cdesc;
 }
 
@@ -493,6 +498,7 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
        if (cdesc_phase != expected_phase)
                return -EAGAIN;
 
+       dma_rmb();
        if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
                pr_err("Invalid req id %d\n", cdesc->req_id);
                return -EINVAL;
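The ena hunks above and below tighten how completion descriptors written by the device are consumed: the phase/ownership flag is loaded with READ_ONCE(), and dma_rmb() orders that load before the reads of the rest of the DMA'd descriptor. A kernel-style sketch of the pattern; struct example_desc and desc_ready() are illustrative, not ena structures:

    #include <linux/types.h>
    #include <linux/compiler.h>
    #include <asm/barrier.h>

    struct example_desc {
            u8 flags;               /* bit 0: phase (ownership) bit */
            u8 payload[15];
    };

    static bool desc_ready(const struct example_desc *d, u8 expected_phase)
    {
            /* Fresh load of the flags word on every poll. */
            if ((READ_ONCE(d->flags) & 1) != expected_phase)
                    return false;

            /* Phase bit validated; order it before reading the payload. */
            dma_rmb();
            return true;
    }

On the submission side the explicit wmb()/mmiowb() pairs are dropped because writel() already orders prior stores before the doorbell write, which is also why ena_com_write_sq_doorbell() loses its 'relaxed' parameter.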
index 6fdc753d948382e7a56aaeac801cf16ab3b8f833..2f7657227cfe9c60d77482c98df22fdb2f89e3c9 100644 (file)
@@ -107,8 +107,7 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
        return io_sq->q_depth - 1 - cnt;
 }
 
-static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq,
-                                           bool relaxed)
+static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
 {
        u16 tail;
 
@@ -117,10 +116,7 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq,
        pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
                 io_sq->qid, tail);
 
-       if (relaxed)
-               writel_relaxed(tail, io_sq->db_addr);
-       else
-               writel(tail, io_sq->db_addr);
+       writel(tail, io_sq->db_addr);
 
        return 0;
 }
index c673ac2df65bdf3f9b4d03403be705b581505657..29b5774dd32d47e5ad2c43e78b599493f011d129 100644 (file)
@@ -76,7 +76,7 @@ MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
 
 static int ena_rss_init_default(struct ena_adapter *adapter);
 static void check_for_admin_com_state(struct ena_adapter *adapter);
-static void ena_destroy_device(struct ena_adapter *adapter);
+static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
 static int ena_restore_device(struct ena_adapter *adapter);
 
 static void ena_tx_timeout(struct net_device *dev)
@@ -461,7 +461,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
                return -ENOMEM;
        }
 
-       dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
+       dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
                           DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
                u64_stats_update_begin(&rx_ring->syncp);
@@ -478,7 +478,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
        rx_info->page_offset = 0;
        ena_buf = &rx_info->ena_buf;
        ena_buf->paddr = dma;
-       ena_buf->len = PAGE_SIZE;
+       ena_buf->len = ENA_PAGE_SIZE;
 
        return 0;
 }
@@ -495,7 +495,7 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
                return;
        }
 
-       dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,
+       dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
                       DMA_FROM_DEVICE);
 
        __free_page(page);
@@ -551,14 +551,9 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
                            rx_ring->qid, i, num);
        }
 
-       if (likely(i)) {
-               /* Add memory barrier to make sure the desc were written before
-                * issue a doorbell
-                */
-               wmb();
-               ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq, true);
-               mmiowb();
-       }
+       /* ena_com_write_sq_doorbell issues a wmb() */
+       if (likely(i))
+               ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
 
        rx_ring->next_to_use = next_to_use;
 
@@ -916,10 +911,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
        do {
                dma_unmap_page(rx_ring->dev,
                               dma_unmap_addr(&rx_info->ena_buf, paddr),
-                              PAGE_SIZE, DMA_FROM_DEVICE);
+                              ENA_PAGE_SIZE, DMA_FROM_DEVICE);
 
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
-                               rx_info->page_offset, len, PAGE_SIZE);
+                               rx_info->page_offset, len, ENA_PAGE_SIZE);
 
                netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
                          "rx skb updated. len %d. data_len %d\n",
@@ -1900,7 +1895,7 @@ static int ena_close(struct net_device *netdev)
                          "Destroy failure, restarting device\n");
                ena_dump_stats_to_dmesg(adapter);
                /* rtnl lock already obtained in dev_ioctl() layer */
-               ena_destroy_device(adapter);
+               ena_destroy_device(adapter, false);
                ena_restore_device(adapter);
        }
 
@@ -2112,12 +2107,6 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
        tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
                tx_ring->ring_size);
 
-       /* This WMB is aimed to:
-        * 1 - perform smp barrier before reading next_to_completion
-        * 2 - make sure the desc were written before trigger DB
-        */
-       wmb();
-
        /* stop the queue when no more space available, the packet can have up
         * to sgl_size + 2. one for the meta descriptor and one for header
         * (if the header is larger than tx_max_header_size).
@@ -2136,10 +2125,11 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 * stop the queue but meanwhile clean_tx_irq updates
                 * next_to_completion and terminates.
                 * The queue will remain stopped forever.
-                * To solve this issue this function perform rmb, check
-                * the wakeup condition and wake up the queue if needed.
+                * To solve this issue add an mb() to make sure that the
+                * netif_tx_stop_queue() write is visible before checking if
+                * there is additional space in the queue.
                 */
-               smp_rmb();
+               smp_mb();
 
                if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
                                > ENA_TX_WAKEUP_THRESH) {
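
A hedged sketch of the stop/wake pairing the smp_mb() above enforces (the ring type, free-space helper and thresholds below are hypothetical): the full barrier makes the queue-stop visible before the free-space counter is re-read, so a concurrent completion cannot observe the old state and skip the wake-up.

    #include <linux/netdevice.h>

    /* Hypothetical ring type and free-space helper, for illustration only. */
    struct my_tx_ring;
    static unsigned int my_tx_free_space(struct my_tx_ring *ring);

    #define MY_MAX_DESCS_PER_SKB    19
    #define MY_TX_WAKEUP_THRESH     128

    static void my_xmit_tail(struct my_tx_ring *ring, struct netdev_queue *txq)
    {
            if (my_tx_free_space(ring) < MY_MAX_DESCS_PER_SKB) {
                    netif_tx_stop_queue(txq);
                    /* Make the stop visible before re-reading free space;
                     * pairs with the barrier in the completion path.
                     */
                    smp_mb();
                    if (my_tx_free_space(ring) > MY_TX_WAKEUP_THRESH)
                            netif_tx_wake_queue(txq);
            }
    }
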
@@ -2151,8 +2141,10 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        if (netif_xmit_stopped(txq) || !skb->xmit_more) {
-               /* trigger the dma engine */
-               ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq, false);
+               /* trigger the dma engine. ena_com_write_sq_doorbell()
+                * issues a memory barrier
+                */
+               ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
                u64_stats_update_begin(&tx_ring->syncp);
                tx_ring->tx_stats.doorbells++;
                u64_stats_update_end(&tx_ring->syncp);
@@ -2550,12 +2542,15 @@ err_disable_msix:
        return rc;
 }
 
-static void ena_destroy_device(struct ena_adapter *adapter)
+static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
 {
        struct net_device *netdev = adapter->netdev;
        struct ena_com_dev *ena_dev = adapter->ena_dev;
        bool dev_up;
 
+       if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
+               return;
+
        netif_carrier_off(netdev);
 
        del_timer_sync(&adapter->timer_service);
@@ -2563,7 +2558,8 @@ static void ena_destroy_device(struct ena_adapter *adapter)
        dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
        adapter->dev_up_before_reset = dev_up;
 
-       ena_com_set_admin_running_state(ena_dev, false);
+       if (!graceful)
+               ena_com_set_admin_running_state(ena_dev, false);
 
        if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
                ena_down(adapter);
@@ -2591,6 +2587,7 @@ static void ena_destroy_device(struct ena_adapter *adapter)
        adapter->reset_reason = ENA_REGS_RESET_NORMAL;
 
        clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+       clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
 }
 
 static int ena_restore_device(struct ena_adapter *adapter)
@@ -2635,6 +2632,7 @@ static int ena_restore_device(struct ena_adapter *adapter)
                }
        }
 
+       set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
        mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
        dev_err(&pdev->dev, "Device reset completed successfully\n");
 
@@ -2665,7 +2663,7 @@ static void ena_fw_reset_device(struct work_struct *work)
                return;
        }
        rtnl_lock();
-       ena_destroy_device(adapter);
+       ena_destroy_device(adapter, false);
        ena_restore_device(adapter);
        rtnl_unlock();
 }
@@ -3409,30 +3407,24 @@ static void ena_remove(struct pci_dev *pdev)
                netdev->rx_cpu_rmap = NULL;
        }
 #endif /* CONFIG_RFS_ACCEL */
-
-       unregister_netdev(netdev);
        del_timer_sync(&adapter->timer_service);
 
        cancel_work_sync(&adapter->reset_task);
 
-       /* Reset the device only if the device is running. */
-       if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
-               ena_com_dev_reset(ena_dev, adapter->reset_reason);
+       unregister_netdev(netdev);
 
-       ena_free_mgmnt_irq(adapter);
+       /* If the device is running then reset it, to make sure no further
+        * events will be issued by the device.
+        */
+       if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
+               set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
 
-       ena_disable_msix(adapter);
+       rtnl_lock();
+       ena_destroy_device(adapter, true);
+       rtnl_unlock();
 
        free_netdev(netdev);
 
-       ena_com_mmio_reg_read_request_destroy(ena_dev);
-
-       ena_com_abort_admin_commands(ena_dev);
-
-       ena_com_wait_for_abort_completion(ena_dev);
-
-       ena_com_admin_destroy(ena_dev);
-
        ena_com_rss_destroy(ena_dev);
 
        ena_com_delete_debug_area(ena_dev);
@@ -3467,7 +3459,7 @@ static int ena_suspend(struct pci_dev *pdev,  pm_message_t state)
                        "ignoring device reset request as the device is being suspended\n");
                clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
        }
-       ena_destroy_device(adapter);
+       ena_destroy_device(adapter, true);
        rtnl_unlock();
        return 0;
 }
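
A short sketch of the teardown policy these hunks introduce (the adapter type, flag and helper names below are hypothetical, not the driver's): the RUNNING flag guards against a double teardown, and only a non-graceful (error-path) teardown marks the admin queue as dead before the common cleanup runs.

    #include <linux/bitops.h>
    #include <linux/types.h>

    /* Hypothetical adapter type and helpers, for illustration only. */
    struct my_adapter { unsigned long flags; };
    #define MY_FLAG_DEVICE_RUNNING  0
    static void my_set_admin_running_state(struct my_adapter *adap, bool on);

    static void my_destroy_device(struct my_adapter *adap, bool graceful)
    {
            /* Already torn down (e.g. remove after a failed reset): bail out
             * instead of running the teardown twice.
             */
            if (!test_bit(MY_FLAG_DEVICE_RUNNING, &adap->flags))
                    return;

            /* Only an error-path teardown marks the admin queue as dead up
             * front; a graceful teardown (remove/suspend) keeps it usable so
             * a final reset command can still be sent.
             */
            if (!graceful)
                    my_set_admin_running_state(adap, false);

            /* ... common queue, IRQ and napi teardown would go here ... */

            clear_bit(MY_FLAG_DEVICE_RUNNING, &adap->flags);
    }
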
index f1972b5ab650332f9fd7c6aedbb949756785c644..7c7ae56c52cfddb11a8e1b096738dfc499ea57dc 100644 (file)
@@ -355,4 +355,15 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
 
 int ena_get_sset_count(struct net_device *netdev, int sset);
 
+/* The ENA buffer length field is 16 bits long. So when PAGE_SIZE == 64kB the
+ * driver passes 0.
+ * Since the max packet size the ENA handles is ~9kB, limit the buffer length
+ * to 16kB.
+ */
+ */
+#if PAGE_SIZE > SZ_16K
+#define ENA_PAGE_SIZE SZ_16K
+#else
+#define ENA_PAGE_SIZE PAGE_SIZE
+#endif
+
 #endif /* !(ENA_H) */
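
A small illustration of the truncation the cap avoids (the helper name is hypothetical): casting a 64 kB page size into the 16-bit descriptor length field wraps to 0, while anything up to SZ_16K stays representable.

    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <linux/sizes.h>

    static u16 ena_buf_len_sketch(void)
    {
            /* (u16)SZ_64K == 0; capping at SZ_16K keeps the value non-zero
             * and still large enough for the ~9kB maximum packet.
             */
            return (u16)min_t(unsigned long, PAGE_SIZE, SZ_16K);
    }
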
index 8bb1e38b1681a32f2175c649b756791694f08105..177587f9c3f1e8560c16e2fc4e05b7edff4e0011 100644 (file)
@@ -5913,12 +5913,12 @@ unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
        return bp->hw_resc.max_cp_rings;
 }
 
-void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
+unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
 {
-       bp->hw_resc.max_cp_rings = max;
+       return bp->hw_resc.max_cp_rings - bnxt_get_ulp_msix_num(bp);
 }
 
-unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
+static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
 {
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 
@@ -6684,6 +6684,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
                hw_resc->resv_rx_rings = 0;
                hw_resc->resv_hw_ring_grps = 0;
                hw_resc->resv_vnics = 0;
+               bp->tx_nr_rings = 0;
+               bp->rx_nr_rings = 0;
        }
        return rc;
 }
@@ -8025,7 +8027,7 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
        if (ether_addr_equal(addr->sa_data, dev->dev_addr))
                return 0;
 
-       rc = bnxt_approve_mac(bp, addr->sa_data);
+       rc = bnxt_approve_mac(bp, addr->sa_data, true);
        if (rc)
                return rc;
 
@@ -8629,7 +8631,8 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
 
        *max_tx = hw_resc->max_tx_rings;
        *max_rx = hw_resc->max_rx_rings;
-       *max_cp = min_t(int, hw_resc->max_irqs, hw_resc->max_cp_rings);
+       *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp),
+                       hw_resc->max_irqs);
        *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
        max_ring_grps = hw_resc->max_hw_ring_grps;
        if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
@@ -8769,20 +8772,25 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
        if (bp->tx_nr_rings)
                return 0;
 
+       bnxt_ulp_irq_stop(bp);
+       bnxt_clear_int_mode(bp);
        rc = bnxt_set_dflt_rings(bp, true);
        if (rc) {
                netdev_err(bp->dev, "Not enough rings available.\n");
-               return rc;
+               goto init_dflt_ring_err;
        }
        rc = bnxt_init_int_mode(bp);
        if (rc)
-               return rc;
+               goto init_dflt_ring_err;
+
        bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
        if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
                bp->flags |= BNXT_FLAG_RFS;
                bp->dev->features |= NETIF_F_NTUPLE;
        }
-       return 0;
+init_dflt_ring_err:
+       bnxt_ulp_irq_restart(bp, rc);
+       return rc;
 }
 
 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
@@ -8819,14 +8827,19 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
        } else {
 #ifdef CONFIG_BNXT_SRIOV
                struct bnxt_vf_info *vf = &bp->vf;
+               bool strict_approval = true;
 
                if (is_valid_ether_addr(vf->mac_addr)) {
                        /* overwrite netdev dev_addr with admin VF MAC */
                        memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+                       /* Older PF driver or firmware may not approve this
+                        * correctly.
+                        */
+                       strict_approval = false;
                } else {
                        eth_hw_addr_random(bp->dev);
                }
-               rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
+               rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
 #endif
        }
        return rc;
index fefa011320e0f461047cd397e363d220b5401357..bde384630a75f9aaa723a6976ba1b891465cd9e2 100644 (file)
@@ -1481,8 +1481,7 @@ int bnxt_hwrm_set_coal(struct bnxt *);
 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
 void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
-void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max);
-unsigned int bnxt_get_max_func_irqs(struct bnxt *bp);
+unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp);
 int bnxt_get_avail_msix(struct bnxt *bp, int num);
 int bnxt_reserve_rings(struct bnxt *bp);
 void bnxt_tx_disable(struct bnxt *bp);
index 6d583bcd2a81b2f813128ac08b2f55c19feb8545..3962f6fd543c96f6322ca531c9ebdb3eb1cd2a23 100644 (file)
@@ -451,7 +451,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
 
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
 
-       vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings;
+       vf_cp_rings = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
        vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
@@ -549,7 +549,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
        max_stat_ctxs = hw_resc->max_stat_ctxs;
 
        /* Remaining rings are distributed equally amongs VF's for now */
-       vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs;
+       vf_cp_rings = (bnxt_get_max_func_cp_rings_for_en(bp) -
+                      bp->cp_nr_rings) / num_vfs;
        vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
@@ -643,7 +644,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
         */
        vfs_supported = *num_vfs;
 
-       avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings;
+       avail_cp = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
        avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
        avail_cp = min_t(int, avail_cp, avail_stat);
 
@@ -1103,7 +1104,7 @@ update_vf_mac_exit:
        mutex_unlock(&bp->hwrm_cmd_lock);
 }
 
-int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
+int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
 {
        struct hwrm_func_vf_cfg_input req = {0};
        int rc = 0;
@@ -1121,12 +1122,13 @@ int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
        memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 mac_done:
-       if (rc) {
+       if (rc && strict) {
                rc = -EADDRNOTAVAIL;
                netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
                            mac);
+               return rc;
        }
-       return rc;
+       return 0;
 }
 #else
 
@@ -1143,7 +1145,7 @@ void bnxt_update_vf_mac(struct bnxt *bp)
 {
 }
 
-int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
+int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
 {
        return 0;
 }
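
The return-value policy above distils to the sketch below (a hypothetical helper, not driver code): a firmware rejection is fatal only for strict callers, while non-strict callers (admin-assigned VF MACs that an older PF driver or firmware may not approve correctly) treat it as advisory.

    #include <linux/errno.h>
    #include <linux/types.h>

    /* Hypothetical distillation of the strict/non-strict approval policy. */
    static int approve_mac_policy(int fw_rc, bool strict)
    {
            if (fw_rc && strict)
                    return -EADDRNOTAVAIL;  /* reject: PF did not approve the MAC */
            return 0;                       /* tolerate the failure otherwise */
    }
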
index e9b20cd1988194c265e311ac61ca5ea7b738c118..2eed9eda1195c0abc21b1bc2374a91e99141d6f8 100644 (file)
@@ -39,5 +39,5 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
 void bnxt_sriov_disable(struct bnxt *);
 void bnxt_hwrm_exec_fwd_req(struct bnxt *);
 void bnxt_update_vf_mac(struct bnxt *);
-int bnxt_approve_mac(struct bnxt *, u8 *);
+int bnxt_approve_mac(struct bnxt *, u8 *, bool);
 #endif
index 139d96c5a02355f557d586f5edcab1712c048036..092c817f8f11cdda48fe8233fb04b7de2a76586a 100644 (file)
@@ -110,16 +110,14 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
                                 struct tcf_exts *tc_exts)
 {
        const struct tc_action *tc_act;
-       LIST_HEAD(tc_actions);
-       int rc;
+       int i, rc;
 
        if (!tcf_exts_has_actions(tc_exts)) {
                netdev_info(bp->dev, "no actions");
                return -EINVAL;
        }
 
-       tcf_exts_to_list(tc_exts, &tc_actions);
-       list_for_each_entry(tc_act, &tc_actions, list) {
+       tcf_exts_for_each_action(i, tc_act, tc_exts) {
                /* Drop action */
                if (is_tcf_gact_shot(tc_act)) {
                        actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
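
A minimal sketch of the new iteration style used above (the counting helper is hypothetical): tcf_exts_for_each_action() walks the filter's action array by index, so the temporary LIST_HEAD and tcf_exts_to_list() call are no longer needed.

    #include <net/pkt_cls.h>
    #include <net/tc_act/tc_gact.h>

    /* Hypothetical example: count drop actions attached to a filter. */
    static int count_drop_actions(struct tcf_exts *exts)
    {
            const struct tc_action *act;
            int i, drops = 0;

            tcf_exts_for_each_action(i, act, exts)
                    if (is_tcf_gact_shot(act))
                            drops++;

            return drops;
    }
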
index c37b2842f972ced2fb363513368c94b3e42b6815..beee61292d5e522bae0842e5e76253eeb4e1d1f4 100644 (file)
@@ -169,7 +169,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
                edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
        }
        bnxt_fill_msix_vecs(bp, ent);
-       bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
        edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
        return avail_msix;
 }
@@ -178,7 +177,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
 {
        struct net_device *dev = edev->net;
        struct bnxt *bp = netdev_priv(dev);
-       int max_cp_rings, msix_requested;
 
        ASSERT_RTNL();
        if (ulp_id != BNXT_ROCE_ULP)
@@ -187,9 +185,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
        if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
                return 0;
 
-       max_cp_rings = bnxt_get_max_func_cp_rings(bp);
-       msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
-       bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
        edev->ulp_tbl[ulp_id].msix_requested = 0;
        edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
        if (netif_running(dev)) {
@@ -220,21 +215,6 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp)
        return 0;
 }
 
-void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id)
-{
-       ASSERT_RTNL();
-       if (bnxt_ulp_registered(bp->edev, ulp_id)) {
-               struct bnxt_en_dev *edev = bp->edev;
-               unsigned int msix_req, max;
-
-               msix_req = edev->ulp_tbl[ulp_id].msix_requested;
-               max = bnxt_get_max_func_cp_rings(bp);
-               bnxt_set_max_func_cp_rings(bp, max - msix_req);
-               max = bnxt_get_max_func_stat_ctxs(bp);
-               bnxt_set_max_func_stat_ctxs(bp, max - 1);
-       }
-}
-
 static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
                         struct bnxt_fw_msg *fw_msg)
 {
index df48ac71729f51d897da9a134d0a1f923b13aeaa..d9bea37cd211f5ce85642f7c6a2f489f51f909b5 100644 (file)
@@ -90,7 +90,6 @@ static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id)
 
 int bnxt_get_ulp_msix_num(struct bnxt *bp);
 int bnxt_get_ulp_msix_base(struct bnxt *bp);
-void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id);
 void bnxt_ulp_stop(struct bnxt *bp);
 void bnxt_ulp_start(struct bnxt *bp);
 void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
index b773bc07edf7cf23154f883a96724bb0407ad303..14b49612aa8639816c1d4b58bdbf5d9ff1995248 100644 (file)
@@ -186,6 +186,9 @@ struct bcmgenet_mib_counters {
 #define UMAC_MAC1                      0x010
 #define UMAC_MAX_FRAME_LEN             0x014
 
+#define UMAC_MODE                      0x44
+#define  MODE_LINK_STATUS              (1 << 5)
+
 #define UMAC_EEE_CTRL                  0x064
 #define  EN_LPI_RX_PAUSE               (1 << 0)
 #define  EN_LPI_TX_PFC                 (1 << 1)
index 5333274a283cbf3d3e1f20c9e9a1905b4d3aa967..4241ae928d4abb4f61d39344e93d08603a2d2c86 100644 (file)
@@ -115,8 +115,14 @@ void bcmgenet_mii_setup(struct net_device *dev)
 static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
                                          struct fixed_phy_status *status)
 {
-       if (dev && dev->phydev && status)
-               status->link = dev->phydev->link;
+       struct bcmgenet_priv *priv;
+       u32 reg;
+
+       if (dev && dev->phydev && status) {
+               priv = netdev_priv(dev);
+               reg = bcmgenet_umac_readl(priv, UMAC_MODE);
+               status->link = !!(reg & MODE_LINK_STATUS);
+       }
 
        return 0;
 }
index dc09f9a8a49bb160e29a829cfaa740a36fde2223..f1a86b42261796865fc659bc042c6763a2393a6f 100644 (file)
@@ -482,11 +482,6 @@ static int macb_mii_probe(struct net_device *dev)
 
        if (np) {
                if (of_phy_is_fixed_link(np)) {
-                       if (of_phy_register_fixed_link(np) < 0) {
-                               dev_err(&bp->pdev->dev,
-                                       "broken fixed-link specification\n");
-                               return -ENODEV;
-                       }
                        bp->phy_node = of_node_get(np);
                } else {
                        bp->phy_node = of_parse_phandle(np, "phy-handle", 0);
@@ -569,7 +564,7 @@ static int macb_mii_init(struct macb *bp)
 {
        struct macb_platform_data *pdata;
        struct device_node *np;
-       int err;
+       int err = -ENXIO;
 
        /* Enable management port */
        macb_writel(bp, NCR, MACB_BIT(MPE));
@@ -592,12 +587,23 @@ static int macb_mii_init(struct macb *bp)
        dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
 
        np = bp->pdev->dev.of_node;
-       if (pdata)
-               bp->mii_bus->phy_mask = pdata->phy_mask;
+       if (np && of_phy_is_fixed_link(np)) {
+               if (of_phy_register_fixed_link(np) < 0) {
+                       dev_err(&bp->pdev->dev,
+                               "broken fixed-link specification %pOF\n", np);
+                       goto err_out_free_mdiobus;
+               }
+
+               err = mdiobus_register(bp->mii_bus);
+       } else {
+               if (pdata)
+                       bp->mii_bus->phy_mask = pdata->phy_mask;
+
+               err = of_mdiobus_register(bp->mii_bus, np);
+       }
 
-       err = of_mdiobus_register(bp->mii_bus, np);
        if (err)
-               goto err_out_free_mdiobus;
+               goto err_out_free_fixed_link;
 
        err = macb_mii_probe(bp->dev);
        if (err)
@@ -607,6 +613,7 @@ static int macb_mii_init(struct macb *bp)
 
 err_out_unregister_bus:
        mdiobus_unregister(bp->mii_bus);
+err_out_free_fixed_link:
        if (np && of_phy_is_fixed_link(np))
                of_phy_deregister_fixed_link(np);
 err_out_free_mdiobus:
@@ -642,7 +649,7 @@ static int macb_halt_tx(struct macb *bp)
                if (!(status & MACB_BIT(TGO)))
                        return 0;
 
-               usleep_range(10, 250);
+               udelay(250);
        } while (time_before(halt_time, timeout));
 
        return -ETIMEDOUT;
@@ -2028,14 +2035,17 @@ static void macb_reset_hw(struct macb *bp)
 {
        struct macb_queue *queue;
        unsigned int q;
+       u32 ctrl = macb_readl(bp, NCR);
 
        /* Disable RX and TX (XXX: Should we halt the transmission
         * more gracefully?)
         */
-       macb_writel(bp, NCR, 0);
+       ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
 
        /* Clear the stats registers (XXX: Update stats first?) */
-       macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
+       ctrl |= MACB_BIT(CLRSTAT);
+
+       macb_writel(bp, NCR, ctrl);
 
        /* Clear all status flags */
        macb_writel(bp, TSR, -1);
@@ -2223,7 +2233,7 @@ static void macb_init_hw(struct macb *bp)
        }
 
        /* Enable TX and RX */
-       macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
+       macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
 }
 
 /* The hash address register is 64 bits long and takes up two
@@ -3827,6 +3837,13 @@ static const struct macb_config at91sam9260_config = {
        .init = macb_init,
 };
 
+static const struct macb_config sama5d3macb_config = {
+       .caps = MACB_CAPS_SG_DISABLED
+             | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
+       .clk_init = macb_clk_init,
+       .init = macb_init,
+};
+
 static const struct macb_config pc302gem_config = {
        .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
        .dma_burst_length = 16,
@@ -3894,6 +3911,7 @@ static const struct of_device_id macb_dt_ids[] = {
        { .compatible = "cdns,gem", .data = &pc302gem_config },
        { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
        { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
+       { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
        { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
        { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
        { .compatible = "cdns,emac", .data = &emac_config },
index 623f73dd7738dbbb01b8f49649a4507d641b1d9a..c116f96956fe9370c7927070ee6c9196ba26582b 100644 (file)
@@ -417,10 +417,9 @@ static void cxgb4_process_flow_actions(struct net_device *in,
                                       struct ch_filter_specification *fs)
 {
        const struct tc_action *a;
-       LIST_HEAD(actions);
+       int i;
 
-       tcf_exts_to_list(cls->exts, &actions);
-       list_for_each_entry(a, &actions, list) {
+       tcf_exts_for_each_action(i, a, cls->exts) {
                if (is_tcf_gact_ok(a)) {
                        fs->action = FILTER_PASS;
                } else if (is_tcf_gact_shot(a)) {
@@ -591,10 +590,9 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
        bool act_redir = false;
        bool act_pedit = false;
        bool act_vlan = false;
-       LIST_HEAD(actions);
+       int i;
 
-       tcf_exts_to_list(cls->exts, &actions);
-       list_for_each_entry(a, &actions, list) {
+       tcf_exts_for_each_action(i, a, cls->exts) {
                if (is_tcf_gact_ok(a)) {
                        /* Do nothing */
                } else if (is_tcf_gact_shot(a)) {
index 18eb2aedd4cb0a2659a479f7f50c62f13f32c78d..c7d2b4dc7568e72e56a393171a42769ac46e6177 100644 (file)
@@ -93,14 +93,13 @@ static int fill_action_fields(struct adapter *adap,
        unsigned int num_actions = 0;
        const struct tc_action *a;
        struct tcf_exts *exts;
-       LIST_HEAD(actions);
+       int i;
 
        exts = cls->knode.exts;
        if (!tcf_exts_has_actions(exts))
                return -EINVAL;
 
-       tcf_exts_to_list(exts, &actions);
-       list_for_each_entry(a, &actions, list) {
+       tcf_exts_for_each_action(i, a, exts) {
                /* Don't allow more than one action per rule. */
                if (num_actions)
                        return -EINVAL;
index ff92ab1daeb80cec7779e1f00faa5f0174fcf727..1e9d882c04ef1a8d989c56f14f0db271d6c4027d 100644 (file)
@@ -4500,7 +4500,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
                                port_res->max_vfs += le16_to_cpu(pcie->num_vfs);
                        }
                }
-               return status;
+               goto err;
        }
 
        pcie = be_get_pcie_desc(resp->func_param, desc_count,
index fa5b30f547f6620a6e761860be6afa341dc185c8..08a750fb60c49d397c61845130e153fe1e3b0b3e 100644 (file)
@@ -220,10 +220,10 @@ struct hnae_desc_cb {
 
        /* priv data for the desc, e.g. skb when use with ip stack*/
        void *priv;
-       u16 page_offset;
-       u16 reuse_flag;
+       u32 page_offset;
+       u32 length;     /* length of the buffer */
 
-       u16 length;     /* length of the buffer */
+       u16 reuse_flag;
 
        /* desc type, used by the ring user to mark the type of the priv data */
        u16 type;
@@ -486,6 +486,8 @@ struct hnae_ae_ops {
                        u8 *auto_neg, u16 *speed, u8 *duplex);
        void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val);
        void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex);
+       bool (*need_adjust_link)(struct hnae_handle *handle,
+                                int speed, int duplex);
        int (*set_loopback)(struct hnae_handle *handle,
                            enum hnae_loop loop_mode, int en);
        void (*get_ring_bdnum_limit)(struct hnae_queue *queue,
index e6aad30e7e69cd27c904542b29abaaec908bb9c7..b52029e26d15323b98811c5180a3d78ac288bf52 100644 (file)
@@ -155,6 +155,41 @@ static void hns_ae_put_handle(struct hnae_handle *handle)
                hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
 }
 
+static int hns_ae_wait_flow_down(struct hnae_handle *handle)
+{
+       struct dsaf_device *dsaf_dev;
+       struct hns_ppe_cb *ppe_cb;
+       struct hnae_vf_cb *vf_cb;
+       int ret;
+       int i;
+
+       for (i = 0; i < handle->q_num; i++) {
+               ret = hns_rcb_wait_tx_ring_clean(handle->qs[i]);
+               if (ret)
+                       return ret;
+       }
+
+       ppe_cb = hns_get_ppe_cb(handle);
+       ret = hns_ppe_wait_tx_fifo_clean(ppe_cb);
+       if (ret)
+               return ret;
+
+       dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
+       if (!dsaf_dev)
+               return -EINVAL;
+       ret = hns_dsaf_wait_pkt_clean(dsaf_dev, handle->dport_id);
+       if (ret)
+               return ret;
+
+       vf_cb = hns_ae_get_vf_cb(handle);
+       ret = hns_mac_wait_fifo_clean(vf_cb->mac_cb);
+       if (ret)
+               return ret;
+
+       mdelay(10);
+       return 0;
+}
+
 static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val)
 {
        int q_num = handle->q_num;
@@ -399,12 +434,41 @@ static int hns_ae_get_mac_info(struct hnae_handle *handle,
        return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex);
 }
 
+static bool hns_ae_need_adjust_link(struct hnae_handle *handle, int speed,
+                                   int duplex)
+{
+       struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+       return hns_mac_need_adjust_link(mac_cb, speed, duplex);
+}
+
 static void hns_ae_adjust_link(struct hnae_handle *handle, int speed,
                               int duplex)
 {
        struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
 
-       hns_mac_adjust_link(mac_cb, speed, duplex);
+       switch (mac_cb->dsaf_dev->dsaf_ver) {
+       case AE_VERSION_1:
+               hns_mac_adjust_link(mac_cb, speed, duplex);
+               break;
+
+       case AE_VERSION_2:
+               /* chip needs to clear all packets inside */
+               hns_mac_disable(mac_cb, MAC_COMM_MODE_RX);
+               if (hns_ae_wait_flow_down(handle)) {
+                       hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
+                       break;
+               }
+
+               hns_mac_adjust_link(mac_cb, speed, duplex);
+               hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
+               break;
+
+       default:
+               break;
+       }
+
+       return;
 }
 
 static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue,
@@ -902,6 +966,7 @@ static struct hnae_ae_ops hns_dsaf_ops = {
        .get_status = hns_ae_get_link_status,
        .get_info = hns_ae_get_mac_info,
        .adjust_link = hns_ae_adjust_link,
+       .need_adjust_link = hns_ae_need_adjust_link,
        .set_loopback = hns_ae_config_loopback,
        .get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit,
        .get_pauseparam = hns_ae_get_pauseparam,
index 5488c6e89f211d355ab33e0cda4033b84dcdccb3..09e4061d1fa60a80584b47cea3725374606a429d 100644 (file)
@@ -257,6 +257,16 @@ static void hns_gmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_pause_en,
        *tx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B);
 }
 
+static bool hns_gmac_need_adjust_link(void *mac_drv, enum mac_speed speed,
+                                     int duplex)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+       struct hns_mac_cb *mac_cb = drv->mac_cb;
+
+       return (mac_cb->speed != speed) ||
+               (mac_cb->half_duplex == duplex);
+}
+
 static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed,
                                u32 full_duplex)
 {
@@ -309,6 +319,30 @@ static void hns_gmac_set_promisc(void *mac_drv, u8 en)
                hns_gmac_set_uc_match(mac_drv, en);
 }
 
+int hns_gmac_wait_fifo_clean(void *mac_drv)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+       int wait_cnt;
+       u32 val;
+
+       wait_cnt = 0;
+       while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
+               val = dsaf_read_dev(drv, GMAC_FIFO_STATE_REG);
+               /* bits [5:0]: packets not yet completely sent */
+               if ((val & 0x3f) == 0)
+                       break;
+               usleep_range(100, 200);
+       }
+
+       if (wait_cnt >= HNS_MAX_WAIT_CNT) {
+               dev_err(drv->dev,
+                       "hns ge %d fifo was not idle.\n", drv->mac_id);
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
 static void hns_gmac_init(void *mac_drv)
 {
        u32 port;
@@ -690,6 +724,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
        mac_drv->mac_disable = hns_gmac_disable;
        mac_drv->mac_free = hns_gmac_free;
        mac_drv->adjust_link = hns_gmac_adjust_link;
+       mac_drv->need_adjust_link = hns_gmac_need_adjust_link;
        mac_drv->set_tx_auto_pause_frames = hns_gmac_set_tx_auto_pause_frames;
        mac_drv->config_max_frame_length = hns_gmac_config_max_frame_length;
        mac_drv->mac_pausefrm_cfg = hns_gmac_pause_frm_cfg;
@@ -717,6 +752,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
        mac_drv->get_strings = hns_gmac_get_strings;
        mac_drv->update_stats = hns_gmac_update_stats;
        mac_drv->set_promiscuous = hns_gmac_set_promisc;
+       mac_drv->wait_fifo_clean = hns_gmac_wait_fifo_clean;
 
        return (void *)mac_drv;
 }
index 1c2326bd76e24d301d5689dd163c588d811dc4e8..6ed6f142427e4b68434df612568a7fea2f0433bc 100644 (file)
@@ -114,6 +114,26 @@ int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
        return 0;
 }
 
+/**
+ *hns_mac_need_adjust_link - check whether the mac speed and duplex registers need to change
+ *@mac_cb: mac device
+ *@speed: phy device speed
+ *@duplex:phy device duplex
+ *
+ */
+bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
+{
+       struct mac_driver *mac_ctrl_drv;
+
+       mac_ctrl_drv = (struct mac_driver *)(mac_cb->priv.mac);
+
+       if (mac_ctrl_drv->need_adjust_link)
+               return mac_ctrl_drv->need_adjust_link(mac_ctrl_drv,
+                       (enum mac_speed)speed, duplex);
+       else
+               return true;
+}
+
 void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
 {
        int ret;
@@ -430,6 +450,16 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable)
        return 0;
 }
 
+int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb)
+{
+       struct mac_driver *drv = hns_mac_get_drv(mac_cb);
+
+       if (drv->wait_fifo_clean)
+               return drv->wait_fifo_clean(drv);
+
+       return 0;
+}
+
 void hns_mac_reset(struct hns_mac_cb *mac_cb)
 {
        struct mac_driver *drv = hns_mac_get_drv(mac_cb);
@@ -998,6 +1028,20 @@ static int hns_mac_get_max_port_num(struct dsaf_device *dsaf_dev)
                return  DSAF_MAX_PORT_NUM;
 }
 
+void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode)
+{
+       struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+       mac_ctrl_drv->mac_enable(mac_cb->priv.mac, mode);
+}
+
+void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode)
+{
+       struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+       mac_ctrl_drv->mac_disable(mac_cb->priv.mac, mode);
+}
+
 /**
  * hns_mac_init - init mac
  * @dsaf_dev: dsa fabric device struct pointer
index bbc0a98e7ca3260c7f8b1b8cb7f7f203860bfdce..fbc75341bef760b82a1d7a10469d5649db91e366 100644 (file)
@@ -356,6 +356,9 @@ struct mac_driver {
        /*adjust mac mode of port,include speed and duplex*/
        int (*adjust_link)(void *mac_drv, enum mac_speed speed,
                           u32 full_duplex);
+       /* check whether the link needs adjusting */
+       bool (*need_adjust_link)(void *mac_drv, enum mac_speed speed,
+                                int duplex);
        /* config autoegotaite mode of port*/
        void (*set_an_mode)(void *mac_drv, u8 enable);
        /* config loopbank mode */
@@ -394,6 +397,7 @@ struct mac_driver {
        void (*get_info)(void *mac_drv, struct mac_info *mac_info);
 
        void (*update_stats)(void *mac_drv);
+       int (*wait_fifo_clean)(void *mac_drv);
 
        enum mac_mode mac_mode;
        u8 mac_id;
@@ -427,6 +431,7 @@ void *hns_xgmac_config(struct hns_mac_cb *mac_cb,
 
 int hns_mac_init(struct dsaf_device *dsaf_dev);
 void mac_adjust_link(struct net_device *net_dev);
+bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex);
 void hns_mac_get_link_status(struct hns_mac_cb *mac_cb,        u32 *link_status);
 int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr);
 int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
@@ -463,5 +468,8 @@ int hns_mac_add_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
 int hns_mac_rm_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
                       const unsigned char *addr);
 int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn);
+void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode);
+void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode);
+int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb);
 
 #endif /* _HNS_DSAF_MAC_H */
index ca50c2553a9cb16c92e2d1dbfeaac7e03808fc71..e557a4ef5996c6772804ca746830af633a4adca9 100644 (file)
@@ -2727,6 +2727,35 @@ void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
        soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX;
 }
 
+int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port)
+{
+       u32 val, val_tmp;
+       int wait_cnt;
+
+       if (port >= DSAF_SERVICE_NW_NUM)
+               return 0;
+
+       wait_cnt = 0;
+       while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
+               val = dsaf_read_dev(dsaf_dev, DSAF_VOQ_IN_PKT_NUM_0_REG +
+                       (port + DSAF_XGE_NUM) * 0x40);
+               val_tmp = dsaf_read_dev(dsaf_dev, DSAF_VOQ_OUT_PKT_NUM_0_REG +
+                       (port + DSAF_XGE_NUM) * 0x40);
+               if (val == val_tmp)
+                       break;
+
+               usleep_range(100, 200);
+       }
+
+       if (wait_cnt >= HNS_MAX_WAIT_CNT) {
+               dev_err(dsaf_dev->dev, "hns dsaf clean wait timeout(%u - %u).\n",
+                       val, val_tmp);
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
 /**
  * dsaf_probe - probo dsaf dev
  * @pdev: dasf platform device
index 4507e8222683c112c05eeca4633e65990b789b8f..0e1cd99831a6083faa790aa80be1f6c635b15a50 100644 (file)
@@ -44,6 +44,8 @@ struct hns_mac_cb;
 #define DSAF_ROCE_CREDIT_CHN   8
 #define DSAF_ROCE_CHAN_MODE    3
 
+#define HNS_MAX_WAIT_CNT 10000
+
 enum dsaf_roce_port_mode {
        DSAF_ROCE_6PORT_MODE,
        DSAF_ROCE_4PORT_MODE,
@@ -463,5 +465,6 @@ int hns_dsaf_rm_mac_addr(
 
 int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
                             u8 mac_id, u8 port_num);
+int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
 
 #endif /* __HNS_DSAF_MAIN_H__ */
index d160d8c9e45ba60b3543643c13748272baada910..0942e4916d9d0d1b0b78958481ea099e66e59639 100644 (file)
@@ -275,6 +275,29 @@ static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en)
        dsaf_write_dev(ppe_cb, PPE_INTEN_REG, msk_vlue & vld_msk);
 }
 
+int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb)
+{
+       int wait_cnt;
+       u32 val;
+
+       wait_cnt = 0;
+       while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
+               val = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO0_REG) & 0x3ffU;
+               if (!val)
+                       break;
+
+               usleep_range(100, 200);
+       }
+
+       if (wait_cnt >= HNS_MAX_WAIT_CNT) {
+               dev_err(ppe_cb->dev, "hns ppe tx fifo clean wait timeout, still has %u pkt.\n",
+                       val);
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
 /**
  * ppe_init_hw - init ppe
  * @ppe_cb: ppe device
index 9d8e643e8aa6ff518ab68d4af4c42d292a37ae47..f670e63a5a018cd5b48b4a62093c104905aa4463 100644 (file)
@@ -100,6 +100,7 @@ struct ppe_common_cb {
 
 };
 
+int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb);
 int hns_ppe_init(struct dsaf_device *dsaf_dev);
 
 void hns_ppe_uninit(struct dsaf_device *dsaf_dev);
index 9d76e2e54f9df576b65702f720a79ab2a612dbb3..5d64519b9b1dc3cfd6e2c403126aa4ffa5832dfc 100644 (file)
@@ -66,6 +66,29 @@ void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
                        "queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num);
 }
 
+int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs)
+{
+       u32 head, tail;
+       int wait_cnt;
+
+       tail = dsaf_read_dev(&qs->tx_ring, RCB_REG_TAIL);
+       wait_cnt = 0;
+       while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
+               head = dsaf_read_dev(&qs->tx_ring, RCB_REG_HEAD);
+               if (tail == head)
+                       break;
+
+               usleep_range(100, 200);
+       }
+
+       if (wait_cnt >= HNS_MAX_WAIT_CNT) {
+               dev_err(qs->dev->dev, "rcb wait timeout, head not equal to tail.\n");
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
 /**
  *hns_rcb_reset_ring_hw - ring reset
  *@q: ring struct pointer
index 602816498c8dd0c4aecd20ae9f5df196d7944821..2319b772a271e519d6a69a0976713a19c60f4a8a 100644 (file)
@@ -136,6 +136,7 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag);
 void hns_rcb_init_hw(struct ring_pair_cb *ring);
 void hns_rcb_reset_ring_hw(struct hnae_queue *q);
 void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
+int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs);
 u32 hns_rcb_get_rx_coalesced_frames(
        struct rcb_common_cb *rcb_common, u32 port_idx);
 u32 hns_rcb_get_tx_coalesced_frames(
index 886cbbf25761aadc04c18573e9536ecd22e8095b..74d935d82cbc6050a287a07532024675ce75254e 100644 (file)
 #define RCB_RING_INTMSK_TX_OVERTIME_REG                0x000C4
 #define RCB_RING_INTSTS_TX_OVERTIME_REG                0x000C8
 
+#define GMAC_FIFO_STATE_REG                    0x0000UL
 #define GMAC_DUPLEX_TYPE_REG                   0x0008UL
 #define GMAC_FD_FC_TYPE_REG                    0x000CUL
 #define GMAC_TX_WATER_LINE_REG                 0x0010UL
index 9f2b552aee33998680c2dc2c72e81c28f99209f5..f56855e63c961333f20f842a3558a920d201ccc9 100644 (file)
@@ -406,113 +406,13 @@ out_net_tx_busy:
        return NETDEV_TX_BUSY;
 }
 
-/**
- * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
- * @data: pointer to the start of the headers
- * @max: total length of section to find headers in
- *
- * This function is meant to determine the length of headers that will
- * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
- * motivation of doing this is to only perform one pull for IPv4 TCP
- * packets so that we can do basic things like calculating the gso_size
- * based on the average data per packet.
- **/
-static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
-                                       unsigned int max_size)
-{
-       unsigned char *network;
-       u8 hlen;
-
-       /* this should never happen, but better safe than sorry */
-       if (max_size < ETH_HLEN)
-               return max_size;
-
-       /* initialize network frame pointer */
-       network = data;
-
-       /* set first protocol and move network header forward */
-       network += ETH_HLEN;
-
-       /* handle any vlan tag if present */
-       if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
-               == HNS_RX_FLAG_VLAN_PRESENT) {
-               if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
-                       return max_size;
-
-               network += VLAN_HLEN;
-       }
-
-       /* handle L3 protocols */
-       if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
-               == HNS_RX_FLAG_L3ID_IPV4) {
-               if ((typeof(max_size))(network - data) >
-                   (max_size - sizeof(struct iphdr)))
-                       return max_size;
-
-               /* access ihl as a u8 to avoid unaligned access on ia64 */
-               hlen = (network[0] & 0x0F) << 2;
-
-               /* verify hlen meets minimum size requirements */
-               if (hlen < sizeof(struct iphdr))
-                       return network - data;
-
-               /* record next protocol if header is present */
-       } else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
-               == HNS_RX_FLAG_L3ID_IPV6) {
-               if ((typeof(max_size))(network - data) >
-                   (max_size - sizeof(struct ipv6hdr)))
-                       return max_size;
-
-               /* record next protocol */
-               hlen = sizeof(struct ipv6hdr);
-       } else {
-               return network - data;
-       }
-
-       /* relocate pointer to start of L4 header */
-       network += hlen;
-
-       /* finally sort out TCP/UDP */
-       if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
-               == HNS_RX_FLAG_L4ID_TCP) {
-               if ((typeof(max_size))(network - data) >
-                   (max_size - sizeof(struct tcphdr)))
-                       return max_size;
-
-               /* access doff as a u8 to avoid unaligned access on ia64 */
-               hlen = (network[12] & 0xF0) >> 2;
-
-               /* verify hlen meets minimum size requirements */
-               if (hlen < sizeof(struct tcphdr))
-                       return network - data;
-
-               network += hlen;
-       } else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
-               == HNS_RX_FLAG_L4ID_UDP) {
-               if ((typeof(max_size))(network - data) >
-                   (max_size - sizeof(struct udphdr)))
-                       return max_size;
-
-               network += sizeof(struct udphdr);
-       }
-
-       /* If everything has gone correctly network should be the
-        * data section of the packet and will be the end of the header.
-        * If not then it probably represents the end of the last recognized
-        * header.
-        */
-       if ((typeof(max_size))(network - data) < max_size)
-               return network - data;
-       else
-               return max_size;
-}
-
 static void hns_nic_reuse_page(struct sk_buff *skb, int i,
                               struct hnae_ring *ring, int pull_len,
                               struct hnae_desc_cb *desc_cb)
 {
        struct hnae_desc *desc;
-       int truesize, size;
+       u32 truesize;
+       int size;
        int last_offset;
        bool twobufs;
 
@@ -530,7 +430,7 @@ static void hns_nic_reuse_page(struct sk_buff *skb, int i,
        }
 
        skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
-                       size - pull_len, truesize - pull_len);
+                       size - pull_len, truesize);
 
         /* avoid re-using remote pages,flag default unreuse */
        if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
@@ -695,7 +595,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
        } else {
                ring->stats.seg_pkt_cnt++;
 
-               pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE);
+               pull_len = eth_get_headlen(va, HNS_RX_HEAD_SIZE);
                memcpy(__skb_put(skb, pull_len), va,
                       ALIGN(pull_len, sizeof(long)));
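
A brief hedged sketch of the replacement helper (the function, buffer and length names below are hypothetical): eth_get_headlen() from the core Ethernet code computes how many header bytes to pull into the linear area, replacing the driver's open-coded parser.

    #include <linux/etherdevice.h>
    #include <linux/skbuff.h>

    static void pull_headers_sketch(struct sk_buff *skb, unsigned char *va,
                                    unsigned int max_head)
    {
            /* In this kernel the helper takes (data, max length) and returns
             * the recognised L2/L3/L4 header length, capped at max_head.
             */
            unsigned int pull_len = eth_get_headlen(va, max_head);

            memcpy(__skb_put(skb, pull_len), va, pull_len);
    }
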
 
@@ -1212,11 +1112,26 @@ static void hns_nic_adjust_link(struct net_device *ndev)
        struct hnae_handle *h = priv->ae_handle;
        int state = 1;
 
+       /* If there is no phy, there is no need to adjust the link */
        if (ndev->phydev) {
-               h->dev->ops->adjust_link(h, ndev->phydev->speed,
-                                        ndev->phydev->duplex);
-               state = ndev->phydev->link;
+               /* When the phy link is down, do nothing */
+               if (ndev->phydev->link == 0)
+                       return;
+
+               if (h->dev->ops->need_adjust_link(h, ndev->phydev->speed,
+                                                 ndev->phydev->duplex)) {
+                       /* The Hi161X chip cannot change gmac speed and
+                        * duplex while traffic is flowing. Delay 200ms to
+                        * make sure there is no more data in the chip FIFO.
+                        */
+                       netif_carrier_off(ndev);
+                       msleep(200);
+                       h->dev->ops->adjust_link(h, ndev->phydev->speed,
+                                                ndev->phydev->duplex);
+                       netif_carrier_on(ndev);
+               }
        }
+
        state = state && h->dev->ops->get_status(h);
 
        if (state != priv->link) {
index 08f3c4743f747c7cbe020503d330821fe5427b68..774beda040a16a93a80db08e9383a2fe69a85eba 100644 (file)
@@ -243,7 +243,9 @@ static int hns_nic_set_link_ksettings(struct net_device *net_dev,
        }
 
        if (h->dev->ops->adjust_link) {
+               netif_carrier_off(net_dev);
                h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex);
+               netif_carrier_on(net_dev);
                return 0;
        }
 
index 3554dca7a680a2c970b37313222e517199007e82..955c4ab18b03bb300ec7beb8f8bcf953e817b2fc 100644 (file)
@@ -2019,7 +2019,8 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
                                struct hns3_desc_cb *desc_cb)
 {
        struct hns3_desc *desc;
-       int truesize, size;
+       u32 truesize;
+       int size;
        int last_offset;
        bool twobufs;
 
index a02a96aee2a2bfdcb9ebb49ea56b40546df83ae8..cb450d7ec8c1665a9ae690a3e3a9b78843976cf4 100644 (file)
@@ -284,11 +284,11 @@ struct hns3_desc_cb {
 
        /* priv data for the desc, e.g. skb when use with ip stack*/
        void *priv;
-       u16 page_offset;
-       u16 reuse_flag;
-
+       u32 page_offset;
        u32 length;     /* length of the buffer */
 
+       u16 reuse_flag;
+
        /* desc type, used by the ring user to mark the type of the priv data */
        u16 type;
 };
index c8c7ad2eff77ecb5f2bd4ee5012a41b3ac1afcc8..9b5a68b6543287a93107ae2a923ee3bf99a38a82 100644 (file)
@@ -2634,7 +2634,7 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin)
                /* Wait for link to drop */
                time = jiffies + (HZ / 10);
                do {
-                       if (~(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
+                       if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
                                break;
                        if (!in_interrupt())
                                schedule_timeout_interruptible(1);
index 354c0982847b8f1cc478e598f8bd068225aa1241..3726646863095dc145e8df1ccc5d082f9fd802bd 100644 (file)
@@ -494,9 +494,6 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s
        case 16384:
                ret |= EMAC_MR1_RFS_16K;
                break;
-       case 8192:
-               ret |= EMAC4_MR1_RFS_8K;
-               break;
        case 4096:
                ret |= EMAC_MR1_RFS_4K;
                break;
@@ -537,6 +534,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_
        case 16384:
                ret |= EMAC4_MR1_RFS_16K;
                break;
+       case 8192:
+               ret |= EMAC4_MR1_RFS_8K;
+               break;
        case 4096:
                ret |= EMAC4_MR1_RFS_4K;
                break;
index dafdd4ade705b346349ce4671910424fa7b28d15..4f0daf67b18df2dcf11d7a406ebc1982e0fee466 100644 (file)
@@ -1823,11 +1823,17 @@ static int do_reset(struct ibmvnic_adapter *adapter,
                        adapter->map_id = 1;
                        release_rx_pools(adapter);
                        release_tx_pools(adapter);
-                       init_rx_pools(netdev);
-                       init_tx_pools(netdev);
+                       rc = init_rx_pools(netdev);
+                       if (rc)
+                               return rc;
+                       rc = init_tx_pools(netdev);
+                       if (rc)
+                               return rc;
 
                        release_napi(adapter);
-                       init_napi(adapter);
+                       rc = init_napi(adapter);
+                       if (rc)
+                               return rc;
                } else {
                        rc = reset_tx_pools(adapter);
                        if (rc)
index bdb3f8e65ed470e314bf6b4f9b3b3b4e41b93e0f..2569a168334cbc6785f9e2909f5a68ac6450c9d6 100644 (file)
@@ -624,14 +624,14 @@ static int e1000_set_ringparam(struct net_device *netdev,
                adapter->tx_ring = tx_old;
                e1000_free_all_rx_resources(adapter);
                e1000_free_all_tx_resources(adapter);
-               kfree(tx_old);
-               kfree(rx_old);
                adapter->rx_ring = rxdr;
                adapter->tx_ring = txdr;
                err = e1000_up(adapter);
                if (err)
                        goto err_setup;
        }
+       kfree(tx_old);
+       kfree(rx_old);
 
        clear_bit(__E1000_RESETTING, &adapter->flags);
        return 0;
@@ -644,7 +644,8 @@ err_setup_rx:
 err_alloc_rx:
        kfree(txdr);
 err_alloc_tx:
-       e1000_up(adapter);
+       if (netif_running(adapter->netdev))
+               e1000_up(adapter);
 err_setup:
        clear_bit(__E1000_RESETTING, &adapter->flags);
        return err;
index abcd096ede14022ac6e022536a2db02de7e023da..5ff6caa83948c2c14a6b85070ddaba54e0c50c3a 100644 (file)
@@ -2013,7 +2013,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
        for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
                i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i);
 
-       WARN_ONCE(p - data != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
+       WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
                  "stat strings count mismatch!");
 }
 
index f2c622e78802a751dc549e03b811825596d1a587..ac685ad4d8773125b059f1209d2b60747996b39b 100644 (file)
@@ -5122,15 +5122,17 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
                                       u8 *bw_share)
 {
        struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
+       struct i40e_pf *pf = vsi->back;
        i40e_status ret;
        int i;
 
-       if (vsi->back->flags & I40E_FLAG_TC_MQPRIO)
+       /* There is no need to reset BW when mqprio mode is on.  */
+       if (pf->flags & I40E_FLAG_TC_MQPRIO)
                return 0;
-       if (!vsi->mqprio_qopt.qopt.hw) {
+       if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
                ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
                if (ret)
-                       dev_info(&vsi->back->pdev->dev,
+                       dev_info(&pf->pdev->dev,
                                 "Failed to reset tx rate for vsi->seid %u\n",
                                 vsi->seid);
                return ret;
@@ -5139,12 +5141,11 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
                bw_data.tc_bw_credits[i] = bw_share[i];
 
-       ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
-                                      NULL);
+       ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
        if (ret) {
-               dev_info(&vsi->back->pdev->dev,
+               dev_info(&pf->pdev->dev,
                         "AQ command Config VSI BW allocation per TC failed = %d\n",
-                        vsi->back->hw.aq.asq_last_status);
+                        pf->hw.aq.asq_last_status);
                return -EINVAL;
        }
 
index d8b5fff581e717e189ffe8f50dff4215ce8ec805..868f4a1d0f724379534791f74127261bba0c9682 100644 (file)
@@ -89,6 +89,13 @@ extern const char ice_drv_ver[];
 #define ice_for_each_rxq(vsi, i) \
        for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
 
+/* Macros to iterate over all allocated tx/rx rings of a VSI, whether in use or not */
+#define ice_for_each_alloc_txq(vsi, i) \
+       for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)
+
+#define ice_for_each_alloc_rxq(vsi, i) \
+       for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++)
+
 struct ice_tc_info {
        u16 qoffset;
        u16 qcount;
@@ -189,9 +196,9 @@ struct ice_vsi {
        struct list_head tmp_sync_list;         /* MAC filters to be synced */
        struct list_head tmp_unsync_list;       /* MAC filters to be unsynced */
 
-       bool irqs_ready;
-       bool current_isup;               /* Sync 'link up' logging */
-       bool stat_offsets_loaded;
+       u8 irqs_ready;
+       u8 current_isup;                 /* Sync 'link up' logging */
+       u8 stat_offsets_loaded;
 
        /* queue information */
        u8 tx_mapping_mode;              /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -262,7 +269,7 @@ struct ice_pf {
        struct ice_hw_port_stats stats;
        struct ice_hw_port_stats stats_prev;
        struct ice_hw hw;
-       bool stat_prev_loaded;  /* has previous stats been loaded */
+       u8 stat_prev_loaded;    /* has previous stats been loaded */
        char int_name[ICE_INT_NAME_STR_LEN];
 };
 
index 7541ec2270b3708d4e5e9362d0ae8a1f5e09318d..a0614f472658ac5305c6aff1e589d99dbdd1634a 100644 (file)
@@ -329,19 +329,19 @@ struct ice_aqc_vsi_props {
        /* VLAN section */
        __le16 pvid; /* VLANS include priority bits */
        u8 pvlan_reserved[2];
-       u8 port_vlan_flags;
-#define ICE_AQ_VSI_PVLAN_MODE_S        0
-#define ICE_AQ_VSI_PVLAN_MODE_M        (0x3 << ICE_AQ_VSI_PVLAN_MODE_S)
-#define ICE_AQ_VSI_PVLAN_MODE_UNTAGGED 0x1
-#define ICE_AQ_VSI_PVLAN_MODE_TAGGED   0x2
-#define ICE_AQ_VSI_PVLAN_MODE_ALL      0x3
+       u8 vlan_flags;
+#define ICE_AQ_VSI_VLAN_MODE_S 0
+#define ICE_AQ_VSI_VLAN_MODE_M (0x3 << ICE_AQ_VSI_VLAN_MODE_S)
+#define ICE_AQ_VSI_VLAN_MODE_UNTAGGED  0x1
+#define ICE_AQ_VSI_VLAN_MODE_TAGGED    0x2
+#define ICE_AQ_VSI_VLAN_MODE_ALL       0x3
 #define ICE_AQ_VSI_PVLAN_INSERT_PVID   BIT(2)
-#define ICE_AQ_VSI_PVLAN_EMOD_S        3
-#define ICE_AQ_VSI_PVLAN_EMOD_M        (0x3 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_STR_UP   (0x1 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_STR      (0x2 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_NOTHING  (0x3 << ICE_AQ_VSI_PVLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_S         3
+#define ICE_AQ_VSI_VLAN_EMOD_M         (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR_BOTH  (0x0 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR_UP    (0x1 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR       (0x2 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_NOTHING   (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
        u8 pvlan_reserved2[3];
        /* ingress egress up sections */
        __le32 ingress_table; /* bitmap, 3 bits per up */
@@ -594,6 +594,7 @@ struct ice_sw_rule_lg_act {
 #define ICE_LG_ACT_GENERIC_OFFSET_M    (0x7 << ICE_LG_ACT_GENERIC_OFFSET_S)
 #define ICE_LG_ACT_GENERIC_PRIORITY_S  22
 #define ICE_LG_ACT_GENERIC_PRIORITY_M  (0x7 << ICE_LG_ACT_GENERIC_PRIORITY_S)
+#define ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX        7
 
        /* Action = 7 - Set Stat count */
 #define ICE_LG_ACT_STAT_COUNT          0x7
index 71d032cc5fa7d7ee8d6af579ca3d0e7813de05b5..661beea6af795cd72abf3e609347c89b21d9902d 100644 (file)
@@ -45,6 +45,9 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
 /**
  * ice_clear_pf_cfg - Clear PF configuration
  * @hw: pointer to the hardware structure
+ *
+ * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
+ * configuration, flow director filters, etc.).
  */
 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
 {
@@ -1483,7 +1486,7 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
        struct ice_phy_info *phy_info;
        enum ice_status status = 0;
 
-       if (!pi)
+       if (!pi || !link_up)
                return ICE_ERR_PARAM;
 
        phy_info = &pi->phy;
@@ -1619,20 +1622,23 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
        }
 
        /* LUT size is only valid for Global and PF table types */
-       if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128) {
-               flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
-                         ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
-                        ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
-       } else if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512) {
+       switch (lut_size) {
+       case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
+               break;
+       case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
                flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
                          ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
                         ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
-       } else if ((lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) &&
-                  (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF)) {
-               flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
-                         ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
-                        ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
-       } else {
+               break;
+       case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
+               if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
+                       flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
+                                 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
+                                ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
+                       break;
+               }
+               /* fall-through */
+       default:
                status = ICE_ERR_PARAM;
                goto ice_aq_get_set_rss_lut_exit;
        }
index 7c511f144ed60d92c16f0f526cfb5b03a3c2a59e..62be72fdc8f30c283b2385216f41ff15b87dc628 100644 (file)
@@ -597,10 +597,14 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
        return 0;
 
 init_ctrlq_free_rq:
-       ice_shutdown_rq(hw, cq);
-       ice_shutdown_sq(hw, cq);
-       mutex_destroy(&cq->sq_lock);
-       mutex_destroy(&cq->rq_lock);
+       if (cq->rq.head) {
+               ice_shutdown_rq(hw, cq);
+               mutex_destroy(&cq->rq_lock);
+       }
+       if (cq->sq.head) {
+               ice_shutdown_sq(hw, cq);
+               mutex_destroy(&cq->sq_lock);
+       }
        return status;
 }
 
@@ -706,10 +710,14 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
                return;
        }
 
-       ice_shutdown_sq(hw, cq);
-       ice_shutdown_rq(hw, cq);
-       mutex_destroy(&cq->sq_lock);
-       mutex_destroy(&cq->rq_lock);
+       if (cq->sq.head) {
+               ice_shutdown_sq(hw, cq);
+               mutex_destroy(&cq->sq_lock);
+       }
+       if (cq->rq.head) {
+               ice_shutdown_rq(hw, cq);
+               mutex_destroy(&cq->rq_lock);
+       }
 }
 
 /**
@@ -1057,8 +1065,11 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 
 clean_rq_elem_out:
        /* Set pending if needed, unlock and return */
-       if (pending)
+       if (pending) {
+               /* re-read HW head to calculate actual pending messages */
+               ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
                *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
+       }
 clean_rq_elem_err:
        mutex_unlock(&cq->rq_lock);
 
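
The hunk above re-reads the hardware head so the pending count reflects messages that arrived while the queue was being cleaned; the count itself comes from the wrap-aware expression (ntc > ntu ? count : 0) + (ntu - ntc). A small stand-alone C sketch of that arithmetic, with purely illustrative values and a made-up helper name:

#include <stdio.h>

/* Same ring-distance formula as above: ntc is the consumer index
 * (next_to_clean), ntu the freshly read hardware head. */
static unsigned int ring_pending(unsigned int count, unsigned int ntc,
				 unsigned int ntu)
{
	return (ntc > ntu ? count : 0) + (ntu - ntc);
}

int main(void)
{
	/* wrapped case: 64-entry ring, ntc = 60, ntu = 3 -> 64 + (3 - 60) = 7 */
	printf("%u\n", ring_pending(64, 60, 3));
	/* linear case: ntc = 10, ntu = 14 -> 4 messages outstanding */
	printf("%u\n", ring_pending(64, 10, 14));
	return 0;
}
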
index 1db304c01d100604560a5d4ac8216b093a7f92d6..c71a9b528d6d558eca4a97a4511e445b00aa5bf1 100644 (file)
@@ -26,7 +26,7 @@ static int ice_q_stats_len(struct net_device *netdev)
 {
        struct ice_netdev_priv *np = netdev_priv(netdev);
 
-       return ((np->vsi->num_txq + np->vsi->num_rxq) *
+       return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) *
                (sizeof(struct ice_q_stats) / sizeof(u64)));
 }
 
@@ -218,7 +218,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
                        p += ETH_GSTRING_LEN;
                }
 
-               ice_for_each_txq(vsi, i) {
+               ice_for_each_alloc_txq(vsi, i) {
                        snprintf(p, ETH_GSTRING_LEN,
                                 "tx-queue-%u.tx_packets", i);
                        p += ETH_GSTRING_LEN;
@@ -226,7 +226,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
                        p += ETH_GSTRING_LEN;
                }
 
-               ice_for_each_rxq(vsi, i) {
+               ice_for_each_alloc_rxq(vsi, i) {
                        snprintf(p, ETH_GSTRING_LEN,
                                 "rx-queue-%u.rx_packets", i);
                        p += ETH_GSTRING_LEN;
@@ -253,6 +253,24 @@ static int ice_get_sset_count(struct net_device *netdev, int sset)
 {
        switch (sset) {
        case ETH_SS_STATS:
+               /* The number (and order) of strings reported *must* remain
+                * constant for a given netdevice. This function must not
+                * report a different number based on run time parameters
+                * (such as the number of queues in use, or the setting of
+                * a private ethtool flag). This is due to the nature of the
+                * ethtool stats API.
+                *
+                * User space programs such as ethtool must make 3 separate
+                * ioctl requests, one for size, one for the strings, and
+                * finally one for the stats. Since these cross into
+                * user space, changes to the number or size could result in
+                * undefined memory access or incorrect string<->value
+                * correlations for statistics.
+                *
+                * Even if it appears to be safe, changes to the size or
+                * order of strings will suffer from race conditions and are
+                * not safe.
+                */
                return ICE_ALL_STATS_LEN(netdev);
        default:
                return -EOPNOTSUPP;
@@ -280,18 +298,26 @@ ice_get_ethtool_stats(struct net_device *netdev,
        /* populate per queue stats */
        rcu_read_lock();
 
-       ice_for_each_txq(vsi, j) {
+       ice_for_each_alloc_txq(vsi, j) {
                ring = READ_ONCE(vsi->tx_rings[j]);
-               if (!ring)
-                       continue;
-               data[i++] = ring->stats.pkts;
-               data[i++] = ring->stats.bytes;
+               if (ring) {
+                       data[i++] = ring->stats.pkts;
+                       data[i++] = ring->stats.bytes;
+               } else {
+                       data[i++] = 0;
+                       data[i++] = 0;
+               }
        }
 
-       ice_for_each_rxq(vsi, j) {
+       ice_for_each_alloc_rxq(vsi, j) {
                ring = READ_ONCE(vsi->rx_rings[j]);
-               data[i++] = ring->stats.pkts;
-               data[i++] = ring->stats.bytes;
+               if (ring) {
+                       data[i++] = ring->stats.pkts;
+                       data[i++] = ring->stats.bytes;
+               } else {
+                       data[i++] = 0;
+                       data[i++] = 0;
+               }
        }
 
        rcu_read_unlock();
@@ -519,7 +545,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
                goto done;
        }
 
-       for (i = 0; i < vsi->num_txq; i++) {
+       for (i = 0; i < vsi->alloc_txq; i++) {
                /* clone ring and setup updated count */
                tx_rings[i] = *vsi->tx_rings[i];
                tx_rings[i].count = new_tx_cnt;
@@ -551,7 +577,7 @@ process_rx:
                goto done;
        }
 
-       for (i = 0; i < vsi->num_rxq; i++) {
+       for (i = 0; i < vsi->alloc_rxq; i++) {
                /* clone ring and setup updated count */
                rx_rings[i] = *vsi->rx_rings[i];
                rx_rings[i].count = new_rx_cnt;
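
The new comment in ice_get_sset_count spells out why the string count must stay constant: tools make three separate requests (count, strings, values) and correlate them by index. Below is a rough user-space sketch of that sequence, assuming the standard linux/ethtool.h UAPI; the program, its error handling, and the assumption that the device supports ETH_SS_STATS are illustrative only:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int ethtool_ioctl(int fd, const char *ifname, void *data)
{
	struct ifreq ifr = { 0 };

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = data;
	return ioctl(fd, SIOCETHTOOL, &ifr);
}

int main(int argc, char **argv)
{
	const char *ifname = argc > 1 ? argv[1] : "eth0";
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct ethtool_sset_info *sset;
	struct ethtool_gstrings *strings;
	struct ethtool_stats *stats;
	__u32 i, n;

	if (fd < 0)
		return 1;

	/* 1) how many ETH_SS_STATS strings does the device report? */
	sset = calloc(1, sizeof(*sset) + sizeof(__u32));
	sset->cmd = ETHTOOL_GSSET_INFO;
	sset->sset_mask = 1ULL << ETH_SS_STATS;
	if (ethtool_ioctl(fd, ifname, sset) < 0)
		return 1;
	n = sset->data[0];

	/* 2) fetch the strings; their order defines the layout */
	strings = calloc(1, sizeof(*strings) + (size_t)n * ETH_GSTRING_LEN);
	strings->cmd = ETHTOOL_GSTRINGS;
	strings->string_set = ETH_SS_STATS;
	strings->len = n;
	ethtool_ioctl(fd, ifname, strings);

	/* 3) fetch the values; entry i pairs with string i */
	stats = calloc(1, sizeof(*stats) + (size_t)n * sizeof(__u64));
	stats->cmd = ETHTOOL_GSTATS;
	stats->n_stats = n;
	ethtool_ioctl(fd, ifname, stats);

	for (i = 0; i < n; i++)
		printf("%-32.32s %llu\n",
		       (char *)&strings->data[i * ETH_GSTRING_LEN],
		       (unsigned long long)stats->data[i]);
	return 0;
}
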
index 499904874b3ff863554c315add28a03f3a260eeb..6076fc87df9d28d81d3cc40d6b1ee7f042667b7f 100644 (file)
 #define PFINT_FW_CTL_CAUSE_ENA_S       30
 #define PFINT_FW_CTL_CAUSE_ENA_M       BIT(PFINT_FW_CTL_CAUSE_ENA_S)
 #define PFINT_OICR                     0x0016CA00
-#define PFINT_OICR_HLP_RDY_S           14
-#define PFINT_OICR_HLP_RDY_M           BIT(PFINT_OICR_HLP_RDY_S)
-#define PFINT_OICR_CPM_RDY_S           15
-#define PFINT_OICR_CPM_RDY_M           BIT(PFINT_OICR_CPM_RDY_S)
 #define PFINT_OICR_ECC_ERR_S           16
 #define PFINT_OICR_ECC_ERR_M           BIT(PFINT_OICR_ECC_ERR_S)
 #define PFINT_OICR_MAL_DETECT_S                19
 #define PFINT_OICR_GRST_M              BIT(PFINT_OICR_GRST_S)
 #define PFINT_OICR_PCI_EXCEPTION_S     21
 #define PFINT_OICR_PCI_EXCEPTION_M     BIT(PFINT_OICR_PCI_EXCEPTION_S)
-#define PFINT_OICR_GPIO_S              22
-#define PFINT_OICR_GPIO_M              BIT(PFINT_OICR_GPIO_S)
-#define PFINT_OICR_STORM_DETECT_S      24
-#define PFINT_OICR_STORM_DETECT_M      BIT(PFINT_OICR_STORM_DETECT_S)
 #define PFINT_OICR_HMC_ERR_S           26
 #define PFINT_OICR_HMC_ERR_M           BIT(PFINT_OICR_HMC_ERR_S)
 #define PFINT_OICR_PE_CRITERR_S                28
index d23a91665b463799a6a3e4445d3c681a2400fb15..068dbc740b7667ce98541921a57108503e825594 100644 (file)
@@ -265,6 +265,7 @@ enum ice_rx_flex_desc_status_error_0_bits {
 struct ice_rlan_ctx {
        u16 head;
        u16 cpuid; /* bigger than needed, see above for reason */
+#define ICE_RLAN_BASE_S 7
        u64 base;
        u16 qlen;
 #define ICE_RLAN_CTX_DBUF_S 7
index 5299caf55a7f2b44772988d9f4eeb70b27c51cc4..f1e80eed2fd6d9f94eab163acb4d178d83ba4af2 100644 (file)
@@ -901,7 +901,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
                case ice_aqc_opc_get_link_status:
                        if (ice_handle_link_event(pf))
                                dev_err(&pf->pdev->dev,
-                                       "Could not handle link event");
+                                       "Could not handle link event\n");
                        break;
                default:
                        dev_dbg(&pf->pdev->dev,
@@ -916,6 +916,21 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
        return pending && (i == ICE_DFLT_IRQ_WORK);
 }
 
+/**
+ * ice_ctrlq_pending - check if there is a difference between ntc and ntu
+ * @hw: pointer to hardware info
+ * @cq: control queue information
+ *
+ * Returns true if there are pending messages in the queue, false otherwise.
+ */
+static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+       u16 ntu;
+
+       ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
+       return cq->rq.next_to_clean != ntu;
+}
+
 /**
  * ice_clean_adminq_subtask - clean the AdminQ rings
  * @pf: board private structure
@@ -923,7 +938,6 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
 static void ice_clean_adminq_subtask(struct ice_pf *pf)
 {
        struct ice_hw *hw = &pf->hw;
-       u32 val;
 
        if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
                return;
@@ -933,9 +947,13 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf)
 
        clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
 
-       /* re-enable Admin queue interrupt causes */
-       val = rd32(hw, PFINT_FW_CTL);
-       wr32(hw, PFINT_FW_CTL, (val | PFINT_FW_CTL_CAUSE_ENA_M));
+       /* There might be a situation where new messages arrive on a control
+        * queue between processing the last message and clearing the
+        * EVENT_PENDING bit. So before exiting, check queue head again (using
+        * ice_ctrlq_pending) and process new messages if any.
+        */
+       if (ice_ctrlq_pending(hw, &hw->adminq))
+               __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
 
        ice_flush(hw);
 }
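
The comment just above captures the pattern: drain the queue, clear the pending flag, then look at the hardware head one more time so a message that slipped in during the window is not stranded until the next interrupt. A minimal user-space model of that ordering, with invented names and a trivial single-shot main():

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

struct cq_model {
	atomic_bool event_pending;	/* set by the "interrupt handler" */
	_Atomic unsigned int head;	/* producer index written by "HW" */
	unsigned int next_to_clean;	/* consumer index */
};

static void drain(struct cq_model *cq)
{
	while (cq->next_to_clean != atomic_load(&cq->head))
		cq->next_to_clean++;	/* process one message */
}

static void clean_subtask(struct cq_model *cq)
{
	if (!atomic_load(&cq->event_pending))
		return;

	drain(cq);
	atomic_store(&cq->event_pending, false);

	/* A message posted after drain() sampled 'head' but before the flag
	 * was cleared would otherwise sit until the next interrupt; re-check
	 * the head and drain again if needed. */
	if (cq->next_to_clean != atomic_load(&cq->head))
		drain(cq);
}

int main(void)
{
	struct cq_model cq = { .next_to_clean = 0 };

	atomic_store(&cq.head, 3);
	atomic_store(&cq.event_pending, true);
	clean_subtask(&cq);
	assert(cq.next_to_clean == 3);
	return 0;
}
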
@@ -1295,11 +1313,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
                qcount = numq_tc;
        }
 
-       /* find higher power-of-2 of qcount */
-       pow = ilog2(qcount);
-
-       if (!is_power_of_2(qcount))
-               pow++;
+       /* find the (rounded up) power-of-2 of qcount */
+       pow = order_base_2(qcount);
 
        for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
                if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
@@ -1352,14 +1367,15 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
        ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
        /* Traffic from VSI can be sent to LAN */
        ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
-       /* Allow all packets untagged/tagged */
-       ctxt->info.port_vlan_flags = ((ICE_AQ_VSI_PVLAN_MODE_ALL &
-                                      ICE_AQ_VSI_PVLAN_MODE_M) >>
-                                     ICE_AQ_VSI_PVLAN_MODE_S);
-       /* Show VLAN/UP from packets in Rx descriptors */
-       ctxt->info.port_vlan_flags |= ((ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH &
-                                       ICE_AQ_VSI_PVLAN_EMOD_M) >>
-                                      ICE_AQ_VSI_PVLAN_EMOD_S);
+
+       /* By default, bits 3 and 4 in vlan_flags are 0, which results in legacy
+        * behavior (show VLAN, DEI, and UP) in the descriptor. Also, allow all
+        * packets untagged/tagged.
+        */
+       ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
+                                 ICE_AQ_VSI_VLAN_MODE_M) >>
+                                ICE_AQ_VSI_VLAN_MODE_S);
+
        /* Have 1:1 UP mapping for both ingress/egress tables */
        table |= ICE_UP_TABLE_TRANSLATE(0, 0);
        table |= ICE_UP_TABLE_TRANSLATE(1, 1);
@@ -1688,15 +1704,12 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
        wr32(hw, PFINT_OICR_ENA, 0);    /* disable all */
        rd32(hw, PFINT_OICR);           /* read to clear */
 
-       val = (PFINT_OICR_HLP_RDY_M |
-              PFINT_OICR_CPM_RDY_M |
-              PFINT_OICR_ECC_ERR_M |
+       val = (PFINT_OICR_ECC_ERR_M |
               PFINT_OICR_MAL_DETECT_M |
               PFINT_OICR_GRST_M |
               PFINT_OICR_PCI_EXCEPTION_M |
-              PFINT_OICR_GPIO_M |
-              PFINT_OICR_STORM_DETECT_M |
-              PFINT_OICR_HMC_ERR_M);
+              PFINT_OICR_HMC_ERR_M |
+              PFINT_OICR_PE_CRITERR_M);
 
        wr32(hw, PFINT_OICR_ENA, val);
 
@@ -2058,15 +2071,13 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
 skip_req_irq:
        ice_ena_misc_vector(pf);
 
-       val = (pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
-             (ICE_RX_ITR & PFINT_OICR_CTL_ITR_INDX_M) |
-             PFINT_OICR_CTL_CAUSE_ENA_M;
+       val = ((pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
+              PFINT_OICR_CTL_CAUSE_ENA_M);
        wr32(hw, PFINT_OICR_CTL, val);
 
        /* This enables Admin queue Interrupt causes */
-       val = (pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
-             (ICE_RX_ITR & PFINT_FW_CTL_ITR_INDX_M) |
-             PFINT_FW_CTL_CAUSE_ENA_M;
+       val = ((pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
+              PFINT_FW_CTL_CAUSE_ENA_M);
        wr32(hw, PFINT_FW_CTL, val);
 
        itr_gran = hw->itr_gran_200;
@@ -3246,8 +3257,10 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf)
        if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
                ice_dis_msix(pf);
 
-       devm_kfree(&pf->pdev->dev, pf->irq_tracker);
-       pf->irq_tracker = NULL;
+       if (pf->irq_tracker) {
+               devm_kfree(&pf->pdev->dev, pf->irq_tracker);
+               pf->irq_tracker = NULL;
+       }
 }
 
 /**
@@ -3271,7 +3284,7 @@ static int ice_probe(struct pci_dev *pdev,
 
        err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
        if (err) {
-               dev_err(&pdev->dev, "I/O map error %d\n", err);
+               dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err);
                return err;
        }
 
@@ -3720,10 +3733,10 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
        enum ice_status status;
 
        /* Here we are configuring the VSI to let the driver add VLAN tags by
-        * setting port_vlan_flags to ICE_AQ_VSI_PVLAN_MODE_ALL. The actual VLAN
-        * tag insertion happens in the Tx hot path, in ice_tx_map.
+        * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
+        * insertion happens in the Tx hot path, in ice_tx_map.
         */
-       ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_MODE_ALL;
+       ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
 
        ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
        ctxt.vsi_num = vsi->vsi_num;
@@ -3735,7 +3748,7 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
                return -EIO;
        }
 
-       vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags;
+       vsi->info.vlan_flags = ctxt.info.vlan_flags;
        return 0;
 }
 
@@ -3757,12 +3770,15 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
         */
        if (ena) {
                /* Strip VLAN tag from Rx packet and put it in the desc */
-               ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+               ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
        } else {
                /* Disable stripping. Leave tag in packet */
-               ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_NOTHING;
+               ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
        }
 
+       /* Allow all packets untagged/tagged */
+       ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
+
        ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
        ctxt.vsi_num = vsi->vsi_num;
 
@@ -3773,7 +3789,7 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
                return -EIO;
        }
 
-       vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags;
+       vsi->info.vlan_flags = ctxt.info.vlan_flags;
        return 0;
 }
 
@@ -3986,7 +4002,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
        /* clear the context structure first */
        memset(&rlan_ctx, 0, sizeof(rlan_ctx));
 
-       rlan_ctx.base = ring->dma >> 7;
+       rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;
 
        rlan_ctx.qlen = ring->count;
 
@@ -4098,11 +4114,12 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
 {
        int err;
 
-       ice_set_rx_mode(vsi->netdev);
-
-       err = ice_restore_vlan(vsi);
-       if (err)
-               return err;
+       if (vsi->netdev) {
+               ice_set_rx_mode(vsi->netdev);
+               err = ice_restore_vlan(vsi);
+               if (err)
+                       return err;
+       }
 
        err = ice_vsi_cfg_txqs(vsi);
        if (!err)
@@ -4868,7 +4885,7 @@ int ice_down(struct ice_vsi *vsi)
  */
 static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
 {
-       int i, err;
+       int i, err = 0;
 
        if (!vsi->num_txq) {
                dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n",
@@ -4893,7 +4910,7 @@ static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
  */
 static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
 {
-       int i, err;
+       int i, err = 0;
 
        if (!vsi->num_rxq) {
                dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n",
@@ -5235,7 +5252,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
        u8 count = 0;
 
        if (new_mtu == netdev->mtu) {
-               netdev_warn(netdev, "mtu is already %d\n", netdev->mtu);
+               netdev_warn(netdev, "mtu is already %u\n", netdev->mtu);
                return 0;
        }
 
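
One of the hunks above replaces the manual ilog2()/is_power_of_2() adjustment with order_base_2(), i.e. the smallest order whose power of two is at least qcount. A tiny user-space stand-in (the helper name is invented) showing the values it yields:

#include <stdio.h>

/* Mimics the kernel's order_base_2(): smallest 'order' with (1 << order) >= n,
 * so 5 queues round up to order 3 (8 slots) while exact powers of two keep
 * their own log2 (8 -> 3). */
static unsigned int order_base_2_sketch(unsigned int n)
{
	unsigned int order = 0;

	while ((1U << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned int qcount;

	for (qcount = 1; qcount <= 9; qcount++)
		printf("qcount=%u -> pow=%u\n", qcount, order_base_2_sketch(qcount));
	return 0;
}
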
index 92da0a626ce0b38395633ff90df504f8f88f40c8..295a8cd87fc16565148bf6cbb7713c02a71454f9 100644 (file)
@@ -131,9 +131,8 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
  *
  * This function will request NVM ownership.
  */
-static enum
-ice_status ice_acquire_nvm(struct ice_hw *hw,
-                          enum ice_aq_res_access_type access)
+static enum ice_status
+ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
 {
        if (hw->nvm.blank_nvm_mode)
                return 0;
index 2e6c1d92cc8884b2bb9755483f82c421b5d9e6f1..eeae199469b6e7f17f680276bb83f77534cbe555 100644 (file)
@@ -1576,8 +1576,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
                        return status;
        }
 
-       if (owner == ICE_SCHED_NODE_OWNER_LAN)
-               vsi->max_lanq[tc] = new_numqs;
+       vsi->max_lanq[tc] = new_numqs;
 
        return status;
 }
index 723d15f1e90b4abbb4db585734940bfc874d4712..6b7ec2ae5ad6798818a9d5947e8071aba410ffff 100644 (file)
@@ -645,14 +645,14 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
        act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
        lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);
 
-       act = (7 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M;
+       act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
+              ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
 
        /* Third action Marker value */
        act |= ICE_LG_ACT_GENERIC;
        act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
                ICE_LG_ACT_GENERIC_VALUE_M;
 
-       act |= (0 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M;
        lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
 
        /* call the fill switch rule to fill the lookup tx rx structure */
index 6f4a0d159dbfdc2bfb1fe5fbd5fa355dc5503832..9b8ec128ee31f57ea2c2498a0fa55bde139ca7b9 100644 (file)
@@ -17,7 +17,7 @@ struct ice_vsi_ctx {
        u16 vsis_unallocated;
        u16 flags;
        struct ice_aqc_vsi_props info;
-       bool alloc_from_pool;
+       u8 alloc_from_pool;
 };
 
 enum ice_sw_fwd_act_type {
@@ -94,8 +94,8 @@ struct ice_fltr_info {
        u8 qgrp_size;
 
        /* Rule creations populate these indicators basing on the switch type */
-       bool lb_en;     /* Indicate if packet can be looped back */
-       bool lan_en;    /* Indicate if packet can be forwarded to the uplink */
+       u8 lb_en;       /* Indicate if packet can be looped back */
+       u8 lan_en;      /* Indicate if packet can be forwarded to the uplink */
 };
 
 /* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */
index 567067b650c4129acafbea94388731f7ad315ba6..31bc998fe2006f7fa515a4b05917abdae50cb7c0 100644 (file)
@@ -143,7 +143,7 @@ struct ice_ring {
        u16 next_to_use;
        u16 next_to_clean;
 
-       bool ring_active;               /* is ring online or not */
+       u8 ring_active;                 /* is ring online or not */
 
        /* stats structs */
        struct ice_q_stats      stats;
index 99c8a9a71b5e4f433a89d575c92fcd34e59ab6ae..97c366e0ca596facaec70c7f1bb4ddc8b81e8774 100644 (file)
@@ -83,7 +83,7 @@ struct ice_link_status {
        u64 phy_type_low;
        u16 max_frame_size;
        u16 link_speed;
-       bool lse_ena;   /* Link Status Event notification */
+       u8 lse_ena;     /* Link Status Event notification */
        u8 link_info;
        u8 an_info;
        u8 ext_info;
@@ -101,7 +101,7 @@ struct ice_phy_info {
        struct ice_link_status link_info_old;
        u64 phy_type_low;
        enum ice_media_type media_type;
-       bool get_link_info;
+       u8 get_link_info;
 };
 
 /* Common HW capabilities for SW use */
@@ -167,7 +167,7 @@ struct ice_nvm_info {
        u32 oem_ver;              /* OEM version info */
        u16 sr_words;             /* Shadow RAM size in words */
        u16 ver;                  /* NVM package version */
-       bool blank_nvm_mode;      /* is NVM empty (no FW present) */
+       u8 blank_nvm_mode;        /* is NVM empty (no FW present) */
 };
 
 /* Max number of port to queue branches w.r.t topology */
@@ -181,7 +181,7 @@ struct ice_sched_node {
        struct ice_aqc_txsched_elem_data info;
        u32 agg_id;                     /* aggregator group id */
        u16 vsi_id;
-       bool in_use;                    /* suspended or in use */
+       u8 in_use;                      /* suspended or in use */
        u8 tx_sched_layer;              /* Logical Layer (1-9) */
        u8 num_children;
        u8 tc_num;
@@ -218,7 +218,7 @@ struct ice_sched_vsi_info {
 struct ice_sched_tx_policy {
        u16 max_num_vsis;
        u8 max_num_lan_qs_per_tc[ICE_MAX_TRAFFIC_CLASS];
-       bool rdma_ena;
+       u8 rdma_ena;
 };
 
 struct ice_port_info {
@@ -243,7 +243,7 @@ struct ice_port_info {
        struct list_head agg_list;      /* lists all aggregator */
        u8 lport;
 #define ICE_LPORT_MASK         0xff
-       bool is_vf;
+       u8 is_vf;
 };
 
 struct ice_switch_info {
@@ -287,7 +287,7 @@ struct ice_hw {
        u8 max_cgds;
        u8 sw_entry_point_layer;
 
-       bool evb_veb;           /* true for VEB, false for VEPA */
+       u8 evb_veb;             /* true for VEB, false for VEPA */
        struct ice_bus_info bus;
        struct ice_nvm_info nvm;
        struct ice_hw_dev_caps dev_caps;        /* device capabilities */
@@ -318,7 +318,7 @@ struct ice_hw {
        u8 itr_gran_100;
        u8 itr_gran_50;
        u8 itr_gran_25;
-       bool ucast_shared;      /* true if VSIs can share unicast addr */
+       u8 ucast_shared;        /* true if VSIs can share unicast addr */
 
 };
 
index f92f7918112de063700f12de3462a482999c8bb3..5acf3b743876a485f61002658dccba47a0ca3d59 100644 (file)
@@ -1649,7 +1649,7 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
        if (hw->phy.type == e1000_phy_m88)
                igb_phy_disable_receiver(adapter);
 
-       mdelay(500);
+       msleep(500);
        return 0;
 }
 
index d03c2f0d759260df50e9d71da5758912326e804f..a32c576c1e656c0102989413cad114d1d8f03771 100644 (file)
@@ -3873,7 +3873,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
 
        adapter->mac_table = kcalloc(hw->mac.rar_entry_count,
                                     sizeof(struct igb_mac_addr),
-                                    GFP_ATOMIC);
+                                    GFP_KERNEL);
        if (!adapter->mac_table)
                return -ENOMEM;
 
@@ -3883,7 +3883,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
 
        /* Setup and initialize a copy of the hw vlan table array */
        adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
-                                      GFP_ATOMIC);
+                                      GFP_KERNEL);
        if (!adapter->shadow_vfta)
                return -ENOMEM;
 
@@ -5816,7 +5816,8 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
 
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
 csum_failed:
-               if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
+               if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) &&
+                   !tx_ring->launchtime_enable)
                        return;
                goto no_csum;
        }
index 43664adf7a3c120d024889ac59f42e2da5d5b1ae..d3e72d0f66ef428b08e4bd88508e05b734bc43a4 100644 (file)
@@ -771,14 +771,13 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
        rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
        rxdr->size = ALIGN(rxdr->size, 4096);
 
-       rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
-                                       GFP_KERNEL);
+       rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+                                        GFP_KERNEL);
 
        if (!rxdr->desc) {
                vfree(rxdr->buffer_info);
                return -ENOMEM;
        }
-       memset(rxdr->desc, 0, rxdr->size);
 
        rxdr->next_to_clean = 0;
        rxdr->next_to_use = 0;
index 94b3165ff543055621326783d330da1f7b4272e7..ccd852ad62a4b109ff26ea40e7587024f8b0d198 100644 (file)
@@ -192,7 +192,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
        }
 
        /* alloc the udl from per cpu ddp pool */
-       ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
+       ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp);
        if (!ddp->udl) {
                e_err(drv, "failed allocated ddp context\n");
                goto out_noddp_unmap;
@@ -760,7 +760,7 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
                return 0;
 
        /* Extra buffer to be shared by all DDPs for HW work around */
-       buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+       buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;
 
index 447098005490926f67e5fb32f7c84b798e387eab..9a23d33a47ed52bfeb10d79d970e114ee4702d6e 100644 (file)
@@ -6201,7 +6201,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
 
        adapter->mac_table = kcalloc(hw->mac.num_rar_entries,
                                     sizeof(struct ixgbe_mac_addr),
-                                    GFP_ATOMIC);
+                                    GFP_KERNEL);
        if (!adapter->mac_table)
                return -ENOMEM;
 
@@ -6620,8 +6620,18 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
        if (adapter->xdp_prog) {
-               e_warn(probe, "MTU cannot be changed while XDP program is loaded\n");
-               return -EPERM;
+               int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
+                                    VLAN_HLEN;
+               int i;
+
+               for (i = 0; i < adapter->num_rx_queues; i++) {
+                       struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+                       if (new_frame_size > ixgbe_rx_bufsz(ring)) {
+                               e_warn(probe, "Requested MTU size is not supported with XDP\n");
+                               return -EINVAL;
+                       }
+               }
        }
 
        /*
@@ -8983,6 +8993,15 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 
 #ifdef CONFIG_IXGBE_DCB
        if (tc) {
+               if (adapter->xdp_prog) {
+                       e_warn(probe, "DCB is not supported with XDP\n");
+
+                       ixgbe_init_interrupt_scheme(adapter);
+                       if (netif_running(dev))
+                               ixgbe_open(dev);
+                       return -EINVAL;
+               }
+
                netdev_set_num_tc(dev, tc);
                ixgbe_set_prio_tc_map(adapter);
 
@@ -9171,14 +9190,12 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
                            struct tcf_exts *exts, u64 *action, u8 *queue)
 {
        const struct tc_action *a;
-       LIST_HEAD(actions);
+       int i;
 
        if (!tcf_exts_has_actions(exts))
                return -EINVAL;
 
-       tcf_exts_to_list(exts, &actions);
-       list_for_each_entry(a, &actions, list) {
-
+       tcf_exts_for_each_action(i, a, exts) {
                /* Drop action */
                if (is_tcf_gact_shot(a)) {
                        *action = IXGBE_FDIR_DROP_QUEUE;
@@ -9936,6 +9953,11 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
        int tcs = adapter->hw_tcs ? : 1;
        int pool, err;
 
+       if (adapter->xdp_prog) {
+               e_warn(probe, "L2FW offload is not supported with XDP\n");
+               return ERR_PTR(-EINVAL);
+       }
+
        /* The hardware supported by ixgbe only filters on the destination MAC
         * address. In order to avoid issues we only support offloading modes
         * where the hardware can actually provide the functionality.
index 6f59933cdff7d5ff13620ced894036ddeb3052c3..3c6f01c41b788eb45730e49083f2025c7c0683f1 100644 (file)
@@ -53,6 +53,11 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
        struct ixgbe_hw *hw = &adapter->hw;
        int i;
 
+       if (adapter->xdp_prog) {
+               e_warn(probe, "SRIOV is not supported with XDP\n");
+               return -EINVAL;
+       }
+
        /* Enable VMDq flag so device will be set in VM mode */
        adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED |
                          IXGBE_FLAG_VMDQ_ENABLED;
@@ -688,8 +693,13 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
 static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
 {
        struct ixgbe_hw *hw = &adapter->hw;
+       struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
        struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+       u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
        u8 num_tcs = adapter->hw_tcs;
+       u32 reg_val;
+       u32 queue;
+       u32 word;
 
        /* remove VLAN filters belonging to this VF */
        ixgbe_clear_vf_vlans(adapter, vf);
@@ -726,6 +736,27 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
 
        /* reset VF api back to unknown */
        adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
+
+       /* Restart each queue for the given VF */
+       for (queue = 0; queue < q_per_pool; queue++) {
+               unsigned int reg_idx = (vf * q_per_pool) + queue;
+
+               reg_val = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(reg_idx));
+
+               /* Re-enabling only configured queues */
+               if (reg_val) {
+                       reg_val |= IXGBE_TXDCTL_ENABLE;
+                       IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val);
+                       reg_val &= ~IXGBE_TXDCTL_ENABLE;
+                       IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val);
+               }
+       }
+
+       /* Clear VF's mailbox memory */
+       for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++)
+               IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0);
+
+       IXGBE_WRITE_FLUSH(hw);
 }
 
 static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
index 44cfb2021145b9be9284c3862c52abf5c5ba6f82..41bcbb337e837ff1dbeb8e6061ae92e9d050350c 100644 (file)
@@ -2518,6 +2518,7 @@ enum {
 /* Translated register #defines */
 #define IXGBE_PVFTDH(P)                (0x06010 + (0x40 * (P)))
 #define IXGBE_PVFTDT(P)                (0x06018 + (0x40 * (P)))
+#define IXGBE_PVFTXDCTL(P)     (0x06028 + (0x40 * (P)))
 #define IXGBE_PVFTDWBAL(P)     (0x06038 + (0x40 * (P)))
 #define IXGBE_PVFTDWBAH(P)     (0x0603C + (0x40 * (P)))
 
index 7a637b51c7d2302b63f2eb579adea4e0459e7430..e08301d833e2ed91bda977109404175f556f563f 100644 (file)
@@ -274,6 +274,7 @@ ltq_etop_hw_init(struct net_device *dev)
                struct ltq_etop_chan *ch = &priv->ch[i];
 
                ch->idx = ch->dma.nr = i;
+               ch->dma.dev = &priv->pdev->dev;
 
                if (IS_TX(i)) {
                        ltq_dma_alloc_tx(&ch->dma);
index 32d785b616e1e270f2adb47978ce3b3f172f02ac..702fec82d80636660881cfd285f014f3e0be0f1a 100644 (file)
@@ -58,6 +58,8 @@ static struct {
  */
 static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
                             const struct phylink_link_state *state);
+static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
+                             phy_interface_t interface, struct phy_device *phy);
 
 /* Queue modes */
 #define MVPP2_QDIST_SINGLE_MODE        0
@@ -3142,6 +3144,7 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
                mvpp22_mode_reconfigure(port);
 
        if (port->phylink) {
+               netif_carrier_off(port->dev);
                phylink_start(port->phylink);
        } else {
                /* Phylink isn't used as of now for ACPI, so the MAC has to be
@@ -3150,9 +3153,10 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
                 */
                struct phylink_link_state state = {
                        .interface = port->phy_interface,
-                       .link = 1,
                };
                mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state);
+               mvpp2_mac_link_up(port->dev, MLO_AN_INBAND, port->phy_interface,
+                                 NULL);
        }
 
        netif_tx_start_all_queues(port->dev);
@@ -4495,10 +4499,6 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
                return;
        }
 
-       netif_tx_stop_all_queues(port->dev);
-       if (!port->has_phy)
-               netif_carrier_off(port->dev);
-
        /* Make sure the port is disabled when reconfiguring the mode */
        mvpp2_port_disable(port);
 
@@ -4523,16 +4523,7 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
        if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
                mvpp2_port_loopback_set(port, state);
 
-       /* If the port already was up, make sure it's still in the same state */
-       if (state->link || !port->has_phy) {
-               mvpp2_port_enable(port);
-
-               mvpp2_egress_enable(port);
-               mvpp2_ingress_enable(port);
-               if (!port->has_phy)
-                       netif_carrier_on(dev);
-               netif_tx_wake_all_queues(dev);
-       }
+       mvpp2_port_enable(port);
 }
 
 static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
@@ -4803,6 +4794,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
        dev->min_mtu = ETH_MIN_MTU;
        /* 9704 == 9728 - 20 and rounding to 8 */
        dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
+       dev->dev.of_node = port_node;
 
        /* Phylink isn't used w/ ACPI as of now */
        if (port_node) {
index b994b80d5714ad142a3af4711c65d62b7b1d5a6c..37ba7c78859db17aa7ecfa76648ca54adb71790b 100644 (file)
@@ -132,11 +132,11 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
        delayed_event_start(priv);
 
        dev_ctx->context = intf->add(dev);
-       set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
-       if (intf->attach)
-               set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
-
        if (dev_ctx->context) {
+               set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
+               if (intf->attach)
+                       set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
+
                spin_lock_irq(&priv->ctx_lock);
                list_add_tail(&dev_ctx->list, &priv->ctx_list);
 
@@ -211,12 +211,17 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv
        if (intf->attach) {
                if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
                        goto out;
-               intf->attach(dev, dev_ctx->context);
+               if (intf->attach(dev, dev_ctx->context))
+                       goto out;
+
                set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
        } else {
                if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
                        goto out;
                dev_ctx->context = intf->add(dev);
+               if (!dev_ctx->context)
+                       goto out;
+
                set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
        }
 
@@ -391,16 +396,17 @@ void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
                }
 }
 
-static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
+static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
 {
-       return (u16)((dev->pdev->bus->number << 8) |
+       return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
+                    (dev->pdev->bus->number << 8) |
                     PCI_SLOT(dev->pdev->devfn));
 }
 
 /* Must be called with intf_mutex held */
 struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
 {
-       u16 pci_id = mlx5_gen_pci_id(dev);
+       u32 pci_id = mlx5_gen_pci_id(dev);
        struct mlx5_core_dev *res = NULL;
        struct mlx5_core_dev *tmp_dev;
        struct mlx5_priv *priv;
index 75bb981e00b7205ad10b467dc64a821a4f8867a9..41cde926cdab6d3d061d62d424ed1bff8c860c9d 100644 (file)
@@ -191,7 +191,7 @@ set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
 {
        if (psrc_m) {
                MLX5E_FTE_SET(headers_c, udp_sport, 0xffff);
-               MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_v));
+               MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
        }
 
        if (pdst_m) {
index 9131a1376e7dc540cc0238b0cf980bc54c9c19ae..9fed54017659de3b0f58a1287a7eff605c077f6c 100644 (file)
@@ -1982,14 +1982,15 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
                goto out_ok;
 
        modify_ip_header = false;
-       tcf_exts_to_list(exts, &actions);
-       list_for_each_entry(a, &actions, list) {
+       tcf_exts_for_each_action(i, a, exts) {
+               int k;
+
                if (!is_tcf_pedit(a))
                        continue;
 
                nkeys = tcf_pedit_nkeys(a);
-               for (i = 0; i < nkeys; i++) {
-                       htype = tcf_pedit_htype(a, i);
+               for (k = 0; k < nkeys; k++) {
+                       htype = tcf_pedit_htype(a, k);
                        if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
                            htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
                                modify_ip_header = true;
@@ -2053,15 +2054,14 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
        const struct tc_action *a;
        LIST_HEAD(actions);
        u32 action = 0;
-       int err;
+       int err, i;
 
        if (!tcf_exts_has_actions(exts))
                return -EINVAL;
 
        attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
 
-       tcf_exts_to_list(exts, &actions);
-       list_for_each_entry(a, &actions, list) {
+       tcf_exts_for_each_action(i, a, exts) {
                if (is_tcf_gact_shot(a)) {
                        action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
                        if (MLX5_CAP_FLOWTABLE(priv->mdev,
@@ -2666,7 +2666,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
        LIST_HEAD(actions);
        bool encap = false;
        u32 action = 0;
-       int err;
+       int err, i;
 
        if (!tcf_exts_has_actions(exts))
                return -EINVAL;
@@ -2674,8 +2674,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
        attr->in_rep = rpriv->rep;
        attr->in_mdev = priv->mdev;
 
-       tcf_exts_to_list(exts, &actions);
-       list_for_each_entry(a, &actions, list) {
+       tcf_exts_for_each_action(i, a, exts) {
                if (is_tcf_gact_shot(a)) {
                        action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
                                  MLX5_FLOW_CONTEXT_ACTION_COUNT;
index f72b5c9dcfe95f98cc388676462207bf76247504..3028e8d90920e2940cc3247775bd7a498fd906ef 100644 (file)
@@ -663,6 +663,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
        if (err)
                goto miss_rule_err;
 
+       kvfree(flow_group_in);
        return 0;
 
 miss_rule_err:
index f418541af7cf13f96ea24613648150bb727397be..37d114c668b7ba70ca968f76c88c42af84967f25 100644 (file)
@@ -1578,6 +1578,33 @@ static u64 matched_fgs_get_version(struct list_head *match_head)
        return version;
 }
 
+static struct fs_fte *
+lookup_fte_locked(struct mlx5_flow_group *g,
+                 u32 *match_value,
+                 bool take_write)
+{
+       struct fs_fte *fte_tmp;
+
+       if (take_write)
+               nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+       else
+               nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
+       fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
+                                        rhash_fte);
+       if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
+               fte_tmp = NULL;
+               goto out;
+       }
+
+       nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
+out:
+       if (take_write)
+               up_write_ref_node(&g->node);
+       else
+               up_read_ref_node(&g->node);
+       return fte_tmp;
+}
+
 static struct mlx5_flow_handle *
 try_add_to_existing_fg(struct mlx5_flow_table *ft,
                       struct list_head *match_head,
@@ -1600,10 +1627,6 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
        if (IS_ERR(fte))
                return  ERR_PTR(-ENOMEM);
 
-       list_for_each_entry(iter, match_head, list) {
-               nested_down_read_ref_node(&iter->g->node, FS_LOCK_PARENT);
-       }
-
 search_again_locked:
        version = matched_fgs_get_version(match_head);
        /* Try to find a fg that already contains a matching fte */
@@ -1611,20 +1634,9 @@ search_again_locked:
                struct fs_fte *fte_tmp;
 
                g = iter->g;
-               fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value,
-                                                rhash_fte);
-               if (!fte_tmp || !tree_get_node(&fte_tmp->node))
+               fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
+               if (!fte_tmp)
                        continue;
-
-               nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
-               if (!take_write) {
-                       list_for_each_entry(iter, match_head, list)
-                               up_read_ref_node(&iter->g->node);
-               } else {
-                       list_for_each_entry(iter, match_head, list)
-                               up_write_ref_node(&iter->g->node);
-               }
-
                rule = add_rule_fg(g, spec->match_value,
                                   flow_act, dest, dest_num, fte_tmp);
                up_write_ref_node(&fte_tmp->node);
@@ -1633,19 +1645,6 @@ search_again_locked:
                return rule;
        }
 
-       /* No group with matching fte found. Try to add a new fte to any
-        * matching fg.
-        */
-
-       if (!take_write) {
-               list_for_each_entry(iter, match_head, list)
-                       up_read_ref_node(&iter->g->node);
-               list_for_each_entry(iter, match_head, list)
-                       nested_down_write_ref_node(&iter->g->node,
-                                                  FS_LOCK_PARENT);
-               take_write = true;
-       }
-
        /* Check the ft version, for case that new flow group
         * was added while the fgs weren't locked
         */
@@ -1657,27 +1656,30 @@ search_again_locked:
        /* Check the fgs version, for case the new FTE with the
         * same values was added while the fgs weren't locked
         */
-       if (version != matched_fgs_get_version(match_head))
+       if (version != matched_fgs_get_version(match_head)) {
+               take_write = true;
                goto search_again_locked;
+       }
 
        list_for_each_entry(iter, match_head, list) {
                g = iter->g;
 
                if (!g->node.active)
                        continue;
+
+               nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+
                err = insert_fte(g, fte);
                if (err) {
+                       up_write_ref_node(&g->node);
                        if (err == -ENOSPC)
                                continue;
-                       list_for_each_entry(iter, match_head, list)
-                               up_write_ref_node(&iter->g->node);
                        kmem_cache_free(steering->ftes_cache, fte);
                        return ERR_PTR(err);
                }
 
                nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
-               list_for_each_entry(iter, match_head, list)
-                       up_write_ref_node(&iter->g->node);
+               up_write_ref_node(&g->node);
                rule = add_rule_fg(g, spec->match_value,
                                   flow_act, dest, dest_num, fte);
                up_write_ref_node(&fte->node);
@@ -1686,8 +1688,6 @@ search_again_locked:
        }
        rule = ERR_PTR(-ENOENT);
 out:
-       list_for_each_entry(iter, match_head, list)
-               up_write_ref_node(&iter->g->node);
        kmem_cache_free(steering->ftes_cache, fte);
        return rule;
 }
@@ -1726,6 +1726,8 @@ search_again_locked:
        if (err) {
                if (take_write)
                        up_write_ref_node(&ft->node);
+               else
+                       up_read_ref_node(&ft->node);
                return ERR_PTR(err);
        }
 
index d39b0b7011b2d9cf194180813a5a30d9fe226b6d..9f39aeca863f321fe169fb3d69ef1cff98bbaf18 100644 (file)
@@ -331,9 +331,17 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
        add_timer(&health->timer);
 }
 
-void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
+void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
 {
        struct mlx5_core_health *health = &dev->priv.health;
+       unsigned long flags;
+
+       if (disable_health) {
+               spin_lock_irqsave(&health->wq_lock, flags);
+               set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+               set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
+               spin_unlock_irqrestore(&health->wq_lock, flags);
+       }
 
        del_timer_sync(&health->timer);
 }
index cf3e4a6590524e1c9cdd9fb2da7ab512abbb2efc..b5e9f664fc66758d5642b18e2396503baf351415 100644 (file)
@@ -878,8 +878,10 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
        priv->numa_node = dev_to_node(&dev->pdev->dev);
 
        priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
-       if (!priv->dbg_root)
+       if (!priv->dbg_root) {
+               dev_err(&pdev->dev, "Cannot create debugfs dir, aborting\n");
                return -ENOMEM;
+       }
 
        err = mlx5_pci_enable_device(dev);
        if (err) {
@@ -928,7 +930,7 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
        pci_clear_master(dev->pdev);
        release_bar(dev->pdev);
        mlx5_pci_disable_device(dev);
-       debugfs_remove(priv->dbg_root);
+       debugfs_remove_recursive(priv->dbg_root);
 }
 
 static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
@@ -1286,7 +1288,7 @@ err_cleanup_once:
                mlx5_cleanup_once(dev);
 
 err_stop_poll:
-       mlx5_stop_health_poll(dev);
+       mlx5_stop_health_poll(dev, boot);
        if (mlx5_cmd_teardown_hca(dev)) {
                dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
                goto out_err;
@@ -1346,7 +1348,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
        mlx5_free_irq_vectors(dev);
        if (cleanup)
                mlx5_cleanup_once(dev);
-       mlx5_stop_health_poll(dev);
+       mlx5_stop_health_poll(dev, cleanup);
        err = mlx5_cmd_teardown_hca(dev);
        if (err) {
                dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
@@ -1608,7 +1610,7 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
         * with the HCA, so the health poll is no longer needed.
         */
        mlx5_drain_health_wq(dev);
-       mlx5_stop_health_poll(dev);
+       mlx5_stop_health_poll(dev, false);
 
        ret = mlx5_cmd_force_teardown_hca(dev);
        if (ret) {
index 86478a6b99c5068e13688f2556e954ee3b3f9486..68e7f8df2a6d310989a2b6ee9b2e6c488c3b17e0 100644 (file)
@@ -39,9 +39,9 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
        return (u32)wq->fbc.sz_m1 + 1;
 }
 
-u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
+u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
 {
-       return (u32)wq->fbc.frag_sz_m1 + 1;
+       return wq->fbc.frag_sz_m1 + 1;
 }
 
 u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
@@ -138,15 +138,16 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                      void *qpc, struct mlx5_wq_qp *wq,
                      struct mlx5_wq_ctrl *wq_ctrl)
 {
-       u32 sq_strides_offset;
+       u16 sq_strides_offset;
+       u32 rq_pg_remainder;
        int err;
 
        mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
                      MLX5_GET(qpc, qpc, log_rq_size),
                      &wq->rq.fbc);
 
-       sq_strides_offset =
-               ((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB;
+       rq_pg_remainder   = mlx5_wq_cyc_get_byte_size(&wq->rq) % PAGE_SIZE;
+       sq_strides_offset = rq_pg_remainder / MLX5_SEND_WQE_BB;
 
        mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
                             MLX5_GET(qpc, qpc, log_sq_size),
index 2bd4c3184eba21d866ea8df66699a41145e3ec10..3a1a170bb2d7f3244e7761a6acf6c1fb4a3534c3 100644 (file)
@@ -80,7 +80,7 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                       void *wqc, struct mlx5_wq_cyc *wq,
                       struct mlx5_wq_ctrl *wq_ctrl);
 u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
-u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
+u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
 
 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                      void *qpc, struct mlx5_wq_qp *wq,
index 6070d1591d1e77885185fa43e261ec4a80b5ae01..930700413b1d07dd8a62ffbabf47aea995d062cf 100644 (file)
@@ -1346,8 +1346,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
                return -ENOMEM;
        mall_tc_entry->cookie = f->cookie;
 
-       tcf_exts_to_list(f->exts, &actions);
-       a = list_first_entry(&actions, struct tc_action, list);
+       a = tcf_exts_first_action(f->exts);
 
        if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
                struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
index 3ae9301967410bca24e601e888b8b270b25c8e82..3cdb7aca90b72492f02fd4bce2c35ad6de76eaa9 100644 (file)
@@ -414,6 +414,8 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
 void
 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
 void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
+void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
+                                struct net_device *dev);
 
 /* spectrum_kvdl.c */
 enum mlxsw_sp_kvdl_entry_type {
index 4327487553c5338581b200469136cc1077d028bf..3589432d164375240e013d476df8cd57e1c7c0ed 100644 (file)
@@ -337,14 +337,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
        MLXSW_SP_SB_CM(1500, 9, 0),
        MLXSW_SP_SB_CM(1500, 9, 0),
        MLXSW_SP_SB_CM(1500, 9, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
+       MLXSW_SP_SB_CM(0, 140000, 15),
+       MLXSW_SP_SB_CM(0, 140000, 15),
+       MLXSW_SP_SB_CM(0, 140000, 15),
+       MLXSW_SP_SB_CM(0, 140000, 15),
+       MLXSW_SP_SB_CM(0, 140000, 15),
+       MLXSW_SP_SB_CM(0, 140000, 15),
+       MLXSW_SP_SB_CM(0, 140000, 15),
+       MLXSW_SP_SB_CM(0, 140000, 15),
        MLXSW_SP_SB_CM(1, 0xff, 0),
 };
 
index ebd1b24ebaa5dd48df775d9a0704bd81b8ed09df..8d211972c5e90fbe1a24dc32edf6f371ff018bd4 100644 (file)
@@ -21,8 +21,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
                                         struct netlink_ext_ack *extack)
 {
        const struct tc_action *a;
-       LIST_HEAD(actions);
-       int err;
+       int err, i;
 
        if (!tcf_exts_has_actions(exts))
                return 0;
@@ -32,8 +31,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
        if (err)
                return err;
 
-       tcf_exts_to_list(exts, &actions);
-       list_for_each_entry(a, &actions, list) {
+       tcf_exts_for_each_action(i, a, exts) {
                if (is_tcf_gact_ok(a)) {
                        err = mlxsw_sp_acl_rulei_act_terminate(rulei);
                        if (err) {
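The same conversion recurs throughout this series (mlxsw, nfp, qede, stmmac): the temporary action list built by tcf_exts_to_list() — the helper being removed here — gives way to direct indexed iteration with tcf_exts_for_each_action(). A driver-agnostic sketch of the two calling patterns, with the per-action handling elided:

    /* Old pattern (removed by this series): build a temporary list. */
    static int walk_actions_old(struct tcf_exts *exts)
    {
            const struct tc_action *a;
            LIST_HEAD(actions);

            tcf_exts_to_list(exts, &actions);
            list_for_each_entry(a, &actions, list) {
                    /* inspect 'a' (is_tcf_gact_shot(a), is_tcf_mirred_*(), ...) */
            }
            return 0;
    }

    /* New pattern: iterate the exts action array directly by index. */
    static int walk_actions_new(struct tcf_exts *exts)
    {
            const struct tc_action *a;
            int i;

            tcf_exts_for_each_action(i, a, exts) {
                    /* inspect 'a' exactly as before */
            }
            return 0;
    }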
index 3a96307f51b055e4bcf0693704118fd9d4e90995..2ab9cf25a08ae19788d28ffddaa8698ba2213152 100644 (file)
@@ -6234,6 +6234,17 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
        mlxsw_sp_vr_put(mlxsw_sp, vr);
 }
 
+void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
+                                struct net_device *dev)
+{
+       struct mlxsw_sp_rif *rif;
+
+       rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+       if (!rif)
+               return;
+       mlxsw_sp_rif_destroy(rif);
+}
+
 static void
 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
                                 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
index 0d8444aaba01ae7e5eddc58ef7dbc87ee5d1cb7b..db715da7bab7746c58ed23b48048d8a4917ca10d 100644 (file)
@@ -127,6 +127,24 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
        return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
 }
 
+static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
+                                                   void *data)
+{
+       struct mlxsw_sp *mlxsw_sp = data;
+
+       mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
+       return 0;
+}
+
+static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
+                                               struct net_device *dev)
+{
+       mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
+       netdev_walk_all_upper_dev_rcu(dev,
+                                     mlxsw_sp_bridge_device_upper_rif_destroy,
+                                     mlxsw_sp);
+}
+
 static struct mlxsw_sp_bridge_device *
 mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
                              struct net_device *br_dev)
@@ -165,6 +183,8 @@ static void
 mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
                               struct mlxsw_sp_bridge_device *bridge_device)
 {
+       mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
+                                           bridge_device->dev);
        list_del(&bridge_device->list);
        if (bridge_device->vlan_enabled)
                bridge->vlan_enabled_exists = false;
index e7dce79ff2c910a090de7e55bf3e3efae3e0f2b7..001b5f714c1b767e80835aa62fb4afa8be5e06d7 100644 (file)
@@ -2850,7 +2850,7 @@ static void lan743x_pcidev_shutdown(struct pci_dev *pdev)
        lan743x_hardware_cleanup(adapter);
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len)
 {
        return bitrev16(crc16(0xFFFF, buf, len));
@@ -3016,7 +3016,7 @@ static int lan743x_pm_resume(struct device *dev)
 static const struct dev_pm_ops lan743x_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume)
 };
-#endif /*CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
 
 static const struct pci_device_id lan743x_pcidev_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
@@ -3028,7 +3028,7 @@ static struct pci_driver lan743x_pcidev_driver = {
        .id_table = lan743x_pcidev_tbl,
        .probe    = lan743x_pcidev_probe,
        .remove   = lan743x_pcidev_remove,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
        .driver.pm = &lan743x_pm_ops,
 #endif
        .shutdown = lan743x_pcidev_shutdown,
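On the lan743x hunks above: guarding the suspend/resume code with CONFIG_PM_SLEEP rather than CONFIG_PM matches what SET_SYSTEM_SLEEP_PM_OPS() actually compiles in, so the handlers are not left defined-but-unused when only runtime PM is enabled. A minimal sketch of the guard pattern (driver names are placeholders, probe/remove omitted):

    #include <linux/pci.h>
    #include <linux/pm.h>

    #ifdef CONFIG_PM_SLEEP
    static int exdrv_suspend(struct device *dev) { return 0; }
    static int exdrv_resume(struct device *dev)  { return 0; }

    static const struct dev_pm_ops exdrv_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(exdrv_suspend, exdrv_resume)
    };
    #endif

    static struct pci_driver exdrv_driver = {
            .name = "exdrv",
    #ifdef CONFIG_PM_SLEEP
            .driver.pm = &exdrv_pm_ops,
    #endif
    };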
index 0ba0356ec4e6dcd47977436bfe228891c0114b55..46ba0cf257c6d8cdb80b0d84e80e078618d11c36 100644 (file)
@@ -52,6 +52,7 @@
 #define NFP_FL_TUNNEL_CSUM                     cpu_to_be16(0x01)
 #define NFP_FL_TUNNEL_KEY                      cpu_to_be16(0x04)
 #define NFP_FL_TUNNEL_GENEVE_OPT               cpu_to_be16(0x0800)
+#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS     IP_TUNNEL_INFO_TX
 #define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS    (NFP_FL_TUNNEL_CSUM | \
                                                 NFP_FL_TUNNEL_KEY | \
                                                 NFP_FL_TUNNEL_GENEVE_OPT)
@@ -741,11 +742,16 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
                nfp_fl_push_vlan(psh_v, a);
                *a_len += sizeof(struct nfp_fl_push_vlan);
        } else if (is_tcf_tunnel_set(a)) {
+               struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
                struct nfp_repr *repr = netdev_priv(netdev);
+
                *tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
                if (*tun_type == NFP_FL_TUNNEL_NONE)
                        return -EOPNOTSUPP;
 
+               if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS)
+                       return -EOPNOTSUPP;
+
                /* Pre-tunnel action is required for tunnel encap.
                 * This checks for next hop entries on NFP.
                 * If none, the packet falls back before applying other actions.
@@ -796,11 +802,10 @@ int nfp_flower_compile_action(struct nfp_app *app,
                              struct net_device *netdev,
                              struct nfp_fl_payload *nfp_flow)
 {
-       int act_len, act_cnt, err, tun_out_cnt, out_cnt;
+       int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
        enum nfp_flower_tun_type tun_type;
        const struct tc_action *a;
        u32 csum_updated = 0;
-       LIST_HEAD(actions);
 
        memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
        nfp_flow->meta.act_len = 0;
@@ -810,8 +815,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
        tun_out_cnt = 0;
        out_cnt = 0;
 
-       tcf_exts_to_list(flow->exts, &actions);
-       list_for_each_entry(a, &actions, list) {
+       tcf_exts_for_each_action(i, a, flow->exts) {
                err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len,
                                             netdev, &tun_type, &tun_out_cnt,
                                             &out_cnt, &csum_updated);
index 85f8209bf007e8f133fefd1bfbb7c7309bae2e7f..81d941ab895c9f5bd6267d74ca9501c8ef5dff98 100644 (file)
@@ -70,6 +70,7 @@ struct nfp_app;
 #define NFP_FL_FEATS_GENEVE            BIT(0)
 #define NFP_FL_NBI_MTU_SETTING         BIT(1)
 #define NFP_FL_FEATS_GENEVE_OPT                BIT(2)
+#define NFP_FL_FEATS_VLAN_PCP          BIT(3)
 #define NFP_FL_FEATS_LAG               BIT(31)
 
 struct nfp_fl_mask_id {
index a0c72f277faa1b6972b727393c68f23fb79bdb06..17acb8cc60440ee4272b642ccf62b62df90ef019 100644 (file)
@@ -56,7 +56,7 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame,
                                                      FLOW_DISSECTOR_KEY_VLAN,
                                                      target);
                /* Populate the tci field. */
-               if (flow_vlan->vlan_id) {
+               if (flow_vlan->vlan_id || flow_vlan->vlan_priority) {
                        tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
                                             flow_vlan->vlan_priority) |
                                  FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
index 2edab01c3beb6287a545b585b59f5f5b508d86e1..bd19624f10cf48e3d7187f6721ec6b3eecac98da 100644 (file)
@@ -192,6 +192,17 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
                key_size += sizeof(struct nfp_flower_mac_mpls);
        }
 
+       if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+               struct flow_dissector_key_vlan *flow_vlan;
+
+               flow_vlan = skb_flow_dissector_target(flow->dissector,
+                                                     FLOW_DISSECTOR_KEY_VLAN,
+                                                     flow->mask);
+               if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
+                   flow_vlan->vlan_priority)
+                       return -EOPNOTSUPP;
+       }
+
        if (dissector_uses_key(flow->dissector,
                               FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
                struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
index a8b9fbab5f73391eaf20f10afb2c70f44ad44d29..253bdaef150557a7e20ded3de921021a051a9ca7 100644 (file)
@@ -229,29 +229,16 @@ done:
        spin_unlock_bh(&nn->reconfig_lock);
 }
 
-/**
- * nfp_net_reconfig() - Reconfigure the firmware
- * @nn:      NFP Net device to reconfigure
- * @update:  The value for the update field in the BAR config
- *
- * Write the update word to the BAR and ping the reconfig queue.  The
- * poll until the firmware has acknowledged the update by zeroing the
- * update word.
- *
- * Return: Negative errno on error, 0 on success
- */
-int nfp_net_reconfig(struct nfp_net *nn, u32 update)
+static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
 {
        bool cancelled_timer = false;
        u32 pre_posted_requests;
-       int ret;
 
        spin_lock_bh(&nn->reconfig_lock);
 
        nn->reconfig_sync_present = true;
 
        if (nn->reconfig_timer_active) {
-               del_timer(&nn->reconfig_timer);
                nn->reconfig_timer_active = false;
                cancelled_timer = true;
        }
@@ -260,14 +247,43 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
 
        spin_unlock_bh(&nn->reconfig_lock);
 
-       if (cancelled_timer)
+       if (cancelled_timer) {
+               del_timer_sync(&nn->reconfig_timer);
                nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
+       }
 
        /* Run the posted reconfigs which were issued before we started */
        if (pre_posted_requests) {
                nfp_net_reconfig_start(nn, pre_posted_requests);
                nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
        }
+}
+
+static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
+{
+       nfp_net_reconfig_sync_enter(nn);
+
+       spin_lock_bh(&nn->reconfig_lock);
+       nn->reconfig_sync_present = false;
+       spin_unlock_bh(&nn->reconfig_lock);
+}
+
+/**
+ * nfp_net_reconfig() - Reconfigure the firmware
+ * @nn:      NFP Net device to reconfigure
+ * @update:  The value for the update field in the BAR config
+ *
+ * Write the update word to the BAR and ping the reconfig queue.  Then
+ * poll until the firmware has acknowledged the update by zeroing the
+ * update word.
+ *
+ * Return: Negative errno on error, 0 on success
+ */
+int nfp_net_reconfig(struct nfp_net *nn, u32 update)
+{
+       int ret;
+
+       nfp_net_reconfig_sync_enter(nn);
 
        nfp_net_reconfig_start(nn, update);
        ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
@@ -3633,6 +3649,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
  */
 void nfp_net_free(struct nfp_net *nn)
 {
+       WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
        if (nn->dp.netdev)
                free_netdev(nn->dp.netdev);
        else
@@ -3920,4 +3937,5 @@ void nfp_net_clean(struct nfp_net *nn)
                return;
 
        unregister_netdev(nn->dp.netdev);
+       nfp_net_reconfig_wait_posted(nn);
 }
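For context on the nfp_net_common.c hunks above: nfp_net_clean() now drains any posted asynchronous reconfigs (and their timer, via del_timer_sync()) before nfp_net_free() runs its sanity check. A simplified sketch of the resulting teardown ordering — not the literal driver code:

    /* Teardown ordering implied by the hunks above (sketch only). */
    static void example_teardown(struct nfp_net *nn)
    {
            /* 1. Stop the netdev so no new reconfigs are posted. */
            unregister_netdev(nn->dp.netdev);

            /* 2. Flush posted reconfigs: cancel the timer and wait for the
             *    firmware to acknowledge any outstanding updates.
             */
            nfp_net_reconfig_wait_posted(nn);

            /* 3. Only now is it safe to free; the WARN_ON() in nfp_net_free()
             *    catches a timer or posted update that survived step 2.
             */
            nfp_net_free(nn);
    }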
index d9ab5add27a8bf06af92247b60158e6de2e316be..34193c2f169961a37617b84e675ee78df473d085 100644 (file)
@@ -407,7 +407,7 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
 
        if (i == QED_INIT_MAX_POLL_COUNT) {
                DP_ERR(p_hwfn,
-                      "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n",
+                      "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
                       addr, le32_to_cpu(cmd->expected_val),
                       val, le32_to_cpu(cmd->op_data));
        }
index d89a0e22f6e4307896cae0383889c7b5b8f05c22..5d37ec7e9b0b7b2bc3785ec00ce2e23314b7c955 100644 (file)
@@ -48,7 +48,7 @@
 #include "qed_reg_addr.h"
 #include "qed_sriov.h"
 
-#define CHIP_MCP_RESP_ITER_US 10
+#define QED_MCP_RESP_ITER_US   10
 
 #define QED_DRV_MB_MAX_RETRIES (500 * 1000)    /* Account for 5 sec */
 #define QED_MCP_RESET_RETRIES  (50 * 1000)     /* Account for 500 msec */
@@ -183,18 +183,57 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn)
        return 0;
 }
 
+/* Maximum of 1 sec to wait for the SHMEM ready indication */
+#define QED_MCP_SHMEM_RDY_MAX_RETRIES  20
+#define QED_MCP_SHMEM_RDY_ITER_MS      50
+
 static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
        struct qed_mcp_info *p_info = p_hwfn->mcp_info;
+       u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
+       u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
        u32 drv_mb_offsize, mfw_mb_offsize;
        u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
 
        p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
-       if (!p_info->public_base)
-               return 0;
+       if (!p_info->public_base) {
+               DP_NOTICE(p_hwfn,
+                         "The address of the MCP scratch-pad is not configured\n");
+               return -EINVAL;
+       }
 
        p_info->public_base |= GRCBASE_MCP;
 
+       /* Get the MFW MB address and number of supported messages */
+       mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
+                               SECTION_OFFSIZE_ADDR(p_info->public_base,
+                                                    PUBLIC_MFW_MB));
+       p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+       p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
+                                           p_info->mfw_mb_addr +
+                                           offsetof(struct public_mfw_mb,
+                                                    sup_msgs));
+
+       /* The driver can notify that there was an MCP reset, and might read the
+        * SHMEM values before the MFW has completed initializing them.
+        * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
+        * data ready indication.
+        */
+       while (!p_info->mfw_mb_length && --cnt) {
+               msleep(msec);
+               p_info->mfw_mb_length =
+                       (u16)qed_rd(p_hwfn, p_ptt,
+                                   p_info->mfw_mb_addr +
+                                   offsetof(struct public_mfw_mb, sup_msgs));
+       }
+
+       if (!cnt) {
+               DP_NOTICE(p_hwfn,
+                         "Failed to get the SHMEM ready notification after %d msec\n",
+                         QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
+               return -EBUSY;
+       }
+
        /* Calculate the driver and MFW mailbox address */
        drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
                                SECTION_OFFSIZE_ADDR(p_info->public_base,
@@ -204,13 +243,6 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
                   "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
                   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
 
-       /* Set the MFW MB address */
-       mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
-                               SECTION_OFFSIZE_ADDR(p_info->public_base,
-                                                    PUBLIC_MFW_MB));
-       p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
-       p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);
-
        /* Get the current driver mailbox sequence before sending
         * the first command
         */
@@ -285,9 +317,15 @@ static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
 
 int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-       u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
+       u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
        int rc = 0;
 
+       if (p_hwfn->mcp_info->b_block_cmd) {
+               DP_NOTICE(p_hwfn,
+                         "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
+               return -EBUSY;
+       }
+
        /* Ensure that only a single thread is accessing the mailbox */
        spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
 
@@ -413,14 +451,41 @@ static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
                   (p_mb_params->cmd | seq_num), p_mb_params->param);
 }
 
+static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd)
+{
+       p_hwfn->mcp_info->b_block_cmd = block_cmd;
+
+       DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
+               block_cmd ? "Block" : "Unblock");
+}
+
+static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt)
+{
+       u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
+       u32 delay = QED_MCP_RESP_ITER_US;
+
+       cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+       cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+       cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+       udelay(delay);
+       cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+       udelay(delay);
+       cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+
+       DP_NOTICE(p_hwfn,
+                 "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
+                 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
+}
+
 static int
 _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
                       struct qed_ptt *p_ptt,
                       struct qed_mcp_mb_params *p_mb_params,
-                      u32 max_retries, u32 delay)
+                      u32 max_retries, u32 usecs)
 {
+       u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
        struct qed_mcp_cmd_elem *p_cmd_elem;
-       u32 cnt = 0;
        u16 seq_num;
        int rc = 0;
 
@@ -443,7 +508,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
                        goto err;
 
                spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
-               udelay(delay);
+
+               if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
+                       msleep(msecs);
+               else
+                       udelay(usecs);
        } while (++cnt < max_retries);
 
        if (cnt >= max_retries) {
@@ -472,7 +541,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
                 * The spinlock stays locked until the list element is removed.
                 */
 
-               udelay(delay);
+               if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
+                       msleep(msecs);
+               else
+                       udelay(usecs);
+
                spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
 
                if (p_cmd_elem->b_is_completed)
@@ -491,11 +564,15 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
                DP_NOTICE(p_hwfn,
                          "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
                          p_mb_params->cmd, p_mb_params->param);
+               qed_mcp_print_cpu_info(p_hwfn, p_ptt);
 
                spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
                qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
                spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 
+               if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
+                       qed_mcp_cmd_set_blocking(p_hwfn, true);
+
                return -EAGAIN;
        }
 
@@ -507,7 +584,7 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
                   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
                   p_mb_params->mcp_resp,
                   p_mb_params->mcp_param,
-                  (cnt * delay) / 1000, (cnt * delay) % 1000);
+                  (cnt * usecs) / 1000, (cnt * usecs) % 1000);
 
        /* Clear the sequence number from the MFW response */
        p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
@@ -525,7 +602,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 {
        size_t union_data_size = sizeof(union drv_union_data);
        u32 max_retries = QED_DRV_MB_MAX_RETRIES;
-       u32 delay = CHIP_MCP_RESP_ITER_US;
+       u32 usecs = QED_MCP_RESP_ITER_US;
 
        /* MCP not initialized */
        if (!qed_mcp_is_init(p_hwfn)) {
@@ -533,6 +610,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
                return -EBUSY;
        }
 
+       if (p_hwfn->mcp_info->b_block_cmd) {
+               DP_NOTICE(p_hwfn,
+                         "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
+                         p_mb_params->cmd, p_mb_params->param);
+               return -EBUSY;
+       }
+
        if (p_mb_params->data_src_size > union_data_size ||
            p_mb_params->data_dst_size > union_data_size) {
                DP_ERR(p_hwfn,
@@ -542,8 +626,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
                return -EINVAL;
        }
 
+       if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
+               max_retries = DIV_ROUND_UP(max_retries, 1000);
+               usecs *= 1000;
+       }
+
        return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
-                                     delay);
+                                     usecs);
 }
 
 int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
@@ -761,6 +850,7 @@ __qed_mcp_load_req(struct qed_hwfn *p_hwfn,
        mb_params.data_src_size = sizeof(load_req);
        mb_params.p_data_dst = &load_rsp;
        mb_params.data_dst_size = sizeof(load_rsp);
+       mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
 
        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
@@ -982,7 +1072,8 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
 
 int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-       u32 wol_param, mcp_resp, mcp_param;
+       struct qed_mcp_mb_params mb_params;
+       u32 wol_param;
 
        switch (p_hwfn->cdev->wol_config) {
        case QED_OV_WOL_DISABLED:
@@ -1000,8 +1091,12 @@ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
                wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
        }
 
-       return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
-                          &mcp_resp, &mcp_param);
+       memset(&mb_params, 0, sizeof(mb_params));
+       mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
+       mb_params.param = wol_param;
+       mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
+
+       return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 }
 
 int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -2077,31 +2172,65 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
        return rc;
 }
 
+/* Maximum of 100 msec to wait for the MCP to halt */
+#define QED_MCP_HALT_SLEEP_MS          10
+#define QED_MCP_HALT_MAX_RETRIES       10
+
 int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-       u32 resp = 0, param = 0;
+       u32 resp = 0, param = 0, cpu_state, cnt = 0;
        int rc;
 
        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
                         &param);
-       if (rc)
+       if (rc) {
                DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+               return rc;
+       }
 
-       return rc;
+       do {
+               msleep(QED_MCP_HALT_SLEEP_MS);
+               cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+               if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
+                       break;
+       } while (++cnt < QED_MCP_HALT_MAX_RETRIES);
+
+       if (cnt == QED_MCP_HALT_MAX_RETRIES) {
+               DP_NOTICE(p_hwfn,
+                         "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+                         qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
+               return -EBUSY;
+       }
+
+       qed_mcp_cmd_set_blocking(p_hwfn, true);
+
+       return 0;
 }
 
+#define QED_MCP_RESUME_SLEEP_MS        10
+
 int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-       u32 value, cpu_mode;
+       u32 cpu_mode, cpu_state;
 
        qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
 
-       value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
-       value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
-       qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
        cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+       cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
+       qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
+       msleep(QED_MCP_RESUME_SLEEP_MS);
+       cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
 
-       return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
+       if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
+               DP_NOTICE(p_hwfn,
+                         "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+                         cpu_mode, cpu_state);
+               return -EBUSY;
+       }
+
+       qed_mcp_cmd_set_blocking(p_hwfn, false);
+
+       return 0;
 }
 
 int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
index 047976d5c6e962e19323bd5fb6ce99abbd92aa2e..85e6b3989e7a913c7157f27ff27469e6cbf1f3aa 100644 (file)
@@ -635,11 +635,14 @@ struct qed_mcp_info {
         */
        spinlock_t                              cmd_lock;
 
+       /* Flag to indicate whether sending an MFW mailbox command is blocked */
+       bool                                    b_block_cmd;
+
        /* Spinlock used for syncing SW link-changes and link-changes
         * originating from attention context.
         */
        spinlock_t                              link_lock;
-       bool                                    block_mb_sending;
+
        u32                                     public_base;
        u32                                     drv_mb_addr;
        u32                                     mfw_mb_addr;
@@ -660,14 +663,20 @@ struct qed_mcp_info {
 };
 
 struct qed_mcp_mb_params {
-       u32                     cmd;
-       u32                     param;
-       void                    *p_data_src;
-       u8                      data_src_size;
-       void                    *p_data_dst;
-       u8                      data_dst_size;
-       u32                     mcp_resp;
-       u32                     mcp_param;
+       u32 cmd;
+       u32 param;
+       void *p_data_src;
+       void *p_data_dst;
+       u8 data_src_size;
+       u8 data_dst_size;
+       u32 mcp_resp;
+       u32 mcp_param;
+       u32 flags;
+#define QED_MB_FLAG_CAN_SLEEP  (0x1 << 0)
+#define QED_MB_FLAG_AVOID_BLOCK        (0x1 << 1)
+#define QED_MB_FLAGS_IS_SET(params, flag) \
+       ({ typeof(params) __params = (params); \
+          (__params && (__params->flags & QED_MB_FLAG_ ## flag)); })
 };
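The QED_MB_FLAGS_IS_SET() helper above uses a GCC statement expression so the params pointer is evaluated exactly once and NULL-checked before the flag test. A small, self-contained illustration of the same idiom (standalone example, not driver code; builds with gcc, which provides typeof and statement expressions):

    #include <stdio.h>

    struct mb_params {
            unsigned int flags;
    };

    #define MB_FLAG_CAN_SLEEP      (0x1 << 0)
    #define MB_FLAG_AVOID_BLOCK    (0x1 << 1)

    /* Evaluate 'params' once; a NULL pointer simply reads as "flag not set". */
    #define MB_FLAGS_IS_SET(params, flag) \
            ({ typeof(params) __params = (params); \
               (__params && (__params->flags & MB_FLAG_ ## flag)); })

    int main(void)
    {
            struct mb_params mb = { .flags = MB_FLAG_CAN_SLEEP };

            printf("%d\n", MB_FLAGS_IS_SET(&mb, CAN_SLEEP));    /* 1 */
            printf("%d\n", MB_FLAGS_IS_SET(&mb, AVOID_BLOCK));  /* 0 */
            printf("%d\n", MB_FLAGS_IS_SET((struct mb_params *)NULL, CAN_SLEEP)); /* 0 */
            return 0;
    }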
 
 struct qed_drv_tlv_hdr {
index d8ad2dcad8d5ef589252eb797556359ebe89fbcb..f736f70956fd3763f3f94bffa0e009dec28b9516 100644 (file)
        0
 #define MCP_REG_CPU_STATE \
        0xe05004UL
+#define MCP_REG_CPU_STATE_SOFT_HALTED  (0x1UL << 10)
 #define MCP_REG_CPU_EVENT_MASK \
        0xe05008UL
+#define MCP_REG_CPU_PROGRAM_COUNTER    0xe0501cUL
 #define PGLUE_B_REG_PF_BAR0_SIZE \
        0x2aae60UL
 #define PGLUE_B_REG_PF_BAR1_SIZE \
index 9673d19308e65c37a7ad42e5752ac9f8238b7d4f..b16ce7d93caff5802e41b79ecb8e8b8e5fc78ed5 100644 (file)
@@ -2006,18 +2006,16 @@ unlock:
 static int qede_parse_actions(struct qede_dev *edev,
                              struct tcf_exts *exts)
 {
-       int rc = -EINVAL, num_act = 0;
+       int rc = -EINVAL, num_act = 0, i;
        const struct tc_action *a;
        bool is_drop = false;
-       LIST_HEAD(actions);
 
        if (!tcf_exts_has_actions(exts)) {
                DP_NOTICE(edev, "No tc actions received\n");
                return rc;
        }
 
-       tcf_exts_to_list(exts, &actions);
-       list_for_each_entry(a, &actions, list) {
+       tcf_exts_for_each_action(i, a, exts) {
                num_act++;
 
                if (is_tcf_gact_shot(a))
index 353f1c129af1e247e1a8ddaa6d316edfc285682d..059ba9429e51a3193f3af73c16a20ce94ef42844 100644 (file)
@@ -2384,26 +2384,20 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
        return status;
 }
 
-static netdev_features_t qlge_fix_features(struct net_device *ndev,
-       netdev_features_t features)
-{
-       int err;
-
-       /* Update the behavior of vlan accel in the adapter */
-       err = qlge_update_hw_vlan_features(ndev, features);
-       if (err)
-               return err;
-
-       return features;
-}
-
 static int qlge_set_features(struct net_device *ndev,
        netdev_features_t features)
 {
        netdev_features_t changed = ndev->features ^ features;
+       int err;
+
+       if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
+               /* Update the behavior of vlan accel in the adapter */
+               err = qlge_update_hw_vlan_features(ndev, features);
+               if (err)
+                       return err;
 
-       if (changed & NETIF_F_HW_VLAN_CTAG_RX)
                qlge_vlan_mode(ndev, features);
+       }
 
        return 0;
 }
@@ -4719,7 +4713,6 @@ static const struct net_device_ops qlge_netdev_ops = {
        .ndo_set_mac_address    = qlge_set_mac_address,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_tx_timeout         = qlge_tx_timeout,
-       .ndo_fix_features       = qlge_fix_features,
        .ndo_set_features       = qlge_set_features,
        .ndo_vlan_rx_add_vid    = qlge_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = qlge_vlan_rx_kill_vid,
index ffe7a16bdfc840d859292f6939938b75fba15a41..6c8543fb90c0a3ac780edd36aa701b01e7c9d94a 100644 (file)
@@ -45,34 +45,33 @@ qcaspi_read_register(struct qcaspi *qca, u16 reg, u16 *result)
 {
        __be16 rx_data;
        __be16 tx_data;
-       struct spi_transfer *transfer;
-       struct spi_message *msg;
+       struct spi_transfer transfer[2];
+       struct spi_message msg;
        int ret;
 
+       memset(transfer, 0, sizeof(transfer));
+
+       spi_message_init(&msg);
+
        tx_data = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_INTERNAL | reg);
+       *result = 0;
+
+       transfer[0].tx_buf = &tx_data;
+       transfer[0].len = QCASPI_CMD_LEN;
+       transfer[1].rx_buf = &rx_data;
+       transfer[1].len = QCASPI_CMD_LEN;
+
+       spi_message_add_tail(&transfer[0], &msg);
 
        if (qca->legacy_mode) {
-               msg = &qca->spi_msg1;
-               transfer = &qca->spi_xfer1;
-               transfer->tx_buf = &tx_data;
-               transfer->rx_buf = NULL;
-               transfer->len = QCASPI_CMD_LEN;
-               spi_sync(qca->spi_dev, msg);
-       } else {
-               msg = &qca->spi_msg2;
-               transfer = &qca->spi_xfer2[0];
-               transfer->tx_buf = &tx_data;
-               transfer->rx_buf = NULL;
-               transfer->len = QCASPI_CMD_LEN;
-               transfer = &qca->spi_xfer2[1];
+               spi_sync(qca->spi_dev, &msg);
+               spi_message_init(&msg);
        }
-       transfer->tx_buf = NULL;
-       transfer->rx_buf = &rx_data;
-       transfer->len = QCASPI_CMD_LEN;
-       ret = spi_sync(qca->spi_dev, msg);
+       spi_message_add_tail(&transfer[1], &msg);
+       ret = spi_sync(qca->spi_dev, &msg);
 
        if (!ret)
-               ret = msg->status;
+               ret = msg.status;
 
        if (ret)
                qcaspi_spi_error(qca);
@@ -86,35 +85,32 @@ int
 qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value)
 {
        __be16 tx_data[2];
-       struct spi_transfer *transfer;
-       struct spi_message *msg;
+       struct spi_transfer transfer[2];
+       struct spi_message msg;
        int ret;
 
+       memset(&transfer, 0, sizeof(transfer));
+
+       spi_message_init(&msg);
+
        tx_data[0] = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_INTERNAL | reg);
        tx_data[1] = cpu_to_be16(value);
 
+       transfer[0].tx_buf = &tx_data[0];
+       transfer[0].len = QCASPI_CMD_LEN;
+       transfer[1].tx_buf = &tx_data[1];
+       transfer[1].len = QCASPI_CMD_LEN;
+
+       spi_message_add_tail(&transfer[0], &msg);
        if (qca->legacy_mode) {
-               msg = &qca->spi_msg1;
-               transfer = &qca->spi_xfer1;
-               transfer->tx_buf = &tx_data[0];
-               transfer->rx_buf = NULL;
-               transfer->len = QCASPI_CMD_LEN;
-               spi_sync(qca->spi_dev, msg);
-       } else {
-               msg = &qca->spi_msg2;
-               transfer = &qca->spi_xfer2[0];
-               transfer->tx_buf = &tx_data[0];
-               transfer->rx_buf = NULL;
-               transfer->len = QCASPI_CMD_LEN;
-               transfer = &qca->spi_xfer2[1];
+               spi_sync(qca->spi_dev, &msg);
+               spi_message_init(&msg);
        }
-       transfer->tx_buf = &tx_data[1];
-       transfer->rx_buf = NULL;
-       transfer->len = QCASPI_CMD_LEN;
-       ret = spi_sync(qca->spi_dev, msg);
+       spi_message_add_tail(&transfer[1], &msg);
+       ret = spi_sync(qca->spi_dev, &msg);
 
        if (!ret)
-               ret = msg->status;
+               ret = msg.status;
 
        if (ret)
                qcaspi_spi_error(qca);
index 206f0266463e362a0e34fe8ff5b626519500e2ed..66b775d462fd8ed111dbb18e845ad4d9af1b6d2b 100644 (file)
@@ -99,22 +99,24 @@ static u32
 qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len)
 {
        __be16 cmd;
-       struct spi_message *msg = &qca->spi_msg2;
-       struct spi_transfer *transfer = &qca->spi_xfer2[0];
+       struct spi_message msg;
+       struct spi_transfer transfer[2];
        int ret;
 
+       memset(&transfer, 0, sizeof(transfer));
+       spi_message_init(&msg);
+
        cmd = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL);
-       transfer->tx_buf = &cmd;
-       transfer->rx_buf = NULL;
-       transfer->len = QCASPI_CMD_LEN;
-       transfer = &qca->spi_xfer2[1];
-       transfer->tx_buf = src;
-       transfer->rx_buf = NULL;
-       transfer->len = len;
+       transfer[0].tx_buf = &cmd;
+       transfer[0].len = QCASPI_CMD_LEN;
+       transfer[1].tx_buf = src;
+       transfer[1].len = len;
 
-       ret = spi_sync(qca->spi_dev, msg);
+       spi_message_add_tail(&transfer[0], &msg);
+       spi_message_add_tail(&transfer[1], &msg);
+       ret = spi_sync(qca->spi_dev, &msg);
 
-       if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) {
+       if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) {
                qcaspi_spi_error(qca);
                return 0;
        }
@@ -125,17 +127,20 @@ qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len)
 static u32
 qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len)
 {
-       struct spi_message *msg = &qca->spi_msg1;
-       struct spi_transfer *transfer = &qca->spi_xfer1;
+       struct spi_message msg;
+       struct spi_transfer transfer;
        int ret;
 
-       transfer->tx_buf = src;
-       transfer->rx_buf = NULL;
-       transfer->len = len;
+       memset(&transfer, 0, sizeof(transfer));
+       spi_message_init(&msg);
+
+       transfer.tx_buf = src;
+       transfer.len = len;
 
-       ret = spi_sync(qca->spi_dev, msg);
+       spi_message_add_tail(&transfer, &msg);
+       ret = spi_sync(qca->spi_dev, &msg);
 
-       if (ret || (msg->actual_length != len)) {
+       if (ret || (msg.actual_length != len)) {
                qcaspi_spi_error(qca);
                return 0;
        }
@@ -146,23 +151,25 @@ qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len)
 static u32
 qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len)
 {
-       struct spi_message *msg = &qca->spi_msg2;
+       struct spi_message msg;
        __be16 cmd;
-       struct spi_transfer *transfer = &qca->spi_xfer2[0];
+       struct spi_transfer transfer[2];
        int ret;
 
+       memset(&transfer, 0, sizeof(transfer));
+       spi_message_init(&msg);
+
        cmd = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL);
-       transfer->tx_buf = &cmd;
-       transfer->rx_buf = NULL;
-       transfer->len = QCASPI_CMD_LEN;
-       transfer = &qca->spi_xfer2[1];
-       transfer->tx_buf = NULL;
-       transfer->rx_buf = dst;
-       transfer->len = len;
+       transfer[0].tx_buf = &cmd;
+       transfer[0].len = QCASPI_CMD_LEN;
+       transfer[1].rx_buf = dst;
+       transfer[1].len = len;
 
-       ret = spi_sync(qca->spi_dev, msg);
+       spi_message_add_tail(&transfer[0], &msg);
+       spi_message_add_tail(&transfer[1], &msg);
+       ret = spi_sync(qca->spi_dev, &msg);
 
-       if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) {
+       if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) {
                qcaspi_spi_error(qca);
                return 0;
        }
@@ -173,17 +180,20 @@ qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len)
 static u32
 qcaspi_read_legacy(struct qcaspi *qca, u8 *dst, u32 len)
 {
-       struct spi_message *msg = &qca->spi_msg1;
-       struct spi_transfer *transfer = &qca->spi_xfer1;
+       struct spi_message msg;
+       struct spi_transfer transfer;
        int ret;
 
-       transfer->tx_buf = NULL;
-       transfer->rx_buf = dst;
-       transfer->len = len;
+       memset(&transfer, 0, sizeof(transfer));
+       spi_message_init(&msg);
 
-       ret = spi_sync(qca->spi_dev, msg);
+       transfer.rx_buf = dst;
+       transfer.len = len;
 
-       if (ret || (msg->actual_length != len)) {
+       spi_message_add_tail(&transfer, &msg);
+       ret = spi_sync(qca->spi_dev, &msg);
+
+       if (ret || (msg.actual_length != len)) {
                qcaspi_spi_error(qca);
                return 0;
        }
@@ -195,19 +205,23 @@ static int
 qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd)
 {
        __be16 tx_data;
-       struct spi_message *msg = &qca->spi_msg1;
-       struct spi_transfer *transfer = &qca->spi_xfer1;
+       struct spi_message msg;
+       struct spi_transfer transfer;
        int ret;
 
+       memset(&transfer, 0, sizeof(transfer));
+
+       spi_message_init(&msg);
+
        tx_data = cpu_to_be16(cmd);
-       transfer->len = sizeof(tx_data);
-       transfer->tx_buf = &tx_data;
-       transfer->rx_buf = NULL;
+       transfer.len = sizeof(cmd);
+       transfer.tx_buf = &tx_data;
+       spi_message_add_tail(&transfer, &msg);
 
-       ret = spi_sync(qca->spi_dev, msg);
+       ret = spi_sync(qca->spi_dev, &msg);
 
        if (!ret)
-               ret = msg->status;
+               ret = msg.status;
 
        if (ret)
                qcaspi_spi_error(qca);
@@ -835,16 +849,6 @@ qcaspi_netdev_setup(struct net_device *dev)
        qca = netdev_priv(dev);
        memset(qca, 0, sizeof(struct qcaspi));
 
-       memset(&qca->spi_xfer1, 0, sizeof(struct spi_transfer));
-       memset(&qca->spi_xfer2, 0, sizeof(struct spi_transfer) * 2);
-
-       spi_message_init(&qca->spi_msg1);
-       spi_message_add_tail(&qca->spi_xfer1, &qca->spi_msg1);
-
-       spi_message_init(&qca->spi_msg2);
-       spi_message_add_tail(&qca->spi_xfer2[0], &qca->spi_msg2);
-       spi_message_add_tail(&qca->spi_xfer2[1], &qca->spi_msg2);
-
        memset(&qca->txr, 0, sizeof(qca->txr));
        qca->txr.count = TX_RING_MAX_LEN;
 }
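On the qca_spi changes above: the per-device spi_message/spi_transfer fields are dropped in favour of building each message on the stack, so concurrent callers no longer share (and race on) the same transfer structures. A generic sketch of the on-stack two-transfer pattern, assuming a valid struct spi_device *spi — not the driver's exact code:

    #include <linux/spi/spi.h>
    #include <linux/string.h>

    /* Sketch: issue a 2-byte command followed by a 2-byte read, all on-stack. */
    static int example_cmd_then_read(struct spi_device *spi, __be16 cmd,
                                     __be16 *result)
    {
            struct spi_transfer xfer[2];
            struct spi_message msg;
            int ret;

            memset(xfer, 0, sizeof(xfer));
            spi_message_init(&msg);

            xfer[0].tx_buf = &cmd;
            xfer[0].len = sizeof(cmd);
            xfer[1].rx_buf = result;
            xfer[1].len = sizeof(*result);

            spi_message_add_tail(&xfer[0], &msg);
            spi_message_add_tail(&xfer[1], &msg);

            ret = spi_sync(spi, &msg);      /* sleeps; process context only */
            if (!ret)
                    ret = msg.status;
            return ret;
    }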
index fc4beb1b32d1a070a101b3c48cc092bd14561150..fc0e98726b3613ddd3774169fa13aa6099a5c6e5 100644 (file)
@@ -83,11 +83,6 @@ struct qcaspi {
        struct tx_ring txr;
        struct qcaspi_stats stats;
 
-       struct spi_message spi_msg1;
-       struct spi_message spi_msg2;
-       struct spi_transfer spi_xfer1;
-       struct spi_transfer spi_xfer2[2];
-
        u8 *rx_buffer;
        u32 buffer_size;
        u8 sync;
index 0efa977c422dd5a32a8a241a5c32b6fc2cd71664..bb529ff2ca818c9218f3a0ecc52b2f5fdd6af488 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/pci.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/ethtool.h>
 #include <linux/phy.h>
@@ -218,6 +219,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8161), 0, 0, RTL_CFG_1 },
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8167), 0, 0, RTL_CFG_0 },
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8168), 0, 0, RTL_CFG_1 },
+       { PCI_DEVICE(PCI_VENDOR_ID_NCUBE,       0x8168), 0, 0, RTL_CFG_1 },
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8169), 0, 0, RTL_CFG_0 },
        { PCI_VENDOR_ID_DLINK,                  0x4300,
                PCI_VENDOR_ID_DLINK, 0x4b10,             0, 0, RTL_CFG_1 },
@@ -630,7 +632,7 @@ struct rtl8169_tc_offsets {
 };
 
 enum rtl_flag {
-       RTL_FLAG_TASK_ENABLED,
+       RTL_FLAG_TASK_ENABLED = 0,
        RTL_FLAG_TASK_SLOW_PENDING,
        RTL_FLAG_TASK_RESET_PENDING,
        RTL_FLAG_MAX
@@ -664,6 +666,7 @@ struct rtl8169_private {
 
        u16 event_slow;
        const struct rtl_coalesce_info *coalesce_info;
+       struct clk *clk;
 
        struct mdio_ops {
                void (*write)(struct rtl8169_private *, int, int);
@@ -4522,7 +4525,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
        rtl_hw_reset(tp);
 }
 
-static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
+static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
 {
        /* Set DMA burst size and Interframe Gap Time */
        RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) |
@@ -4633,12 +4636,14 @@ static void rtl_hw_start(struct  rtl8169_private *tp)
 
        rtl_set_rx_max_size(tp);
        rtl_set_rx_tx_desc_registers(tp);
-       rtl_set_rx_tx_config_registers(tp);
        RTL_W8(tp, Cfg9346, Cfg9346_Lock);
 
        /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
        RTL_R8(tp, IntrMask);
        RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
+       rtl_init_rxcfg(tp);
+       rtl_set_tx_config_registers(tp);
+
        rtl_set_rx_mode(tp->dev);
        /* no early-rx interrupts */
        RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
@@ -4772,12 +4777,14 @@ static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
 static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
 {
        if (enable) {
-               RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
                RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en);
+               RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
        } else {
                RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
                RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
        }
+
+       udelay(10);
 }
 
 static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
@@ -5622,6 +5629,8 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
 
 static void rtl_hw_start_8106(struct rtl8169_private *tp)
 {
+       rtl_hw_aspm_clkreq_enable(tp, false);
+
        /* Force LAN exit from ASPM if Rx/Tx are not idle */
        RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
 
@@ -5630,6 +5639,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
        RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
 
        rtl_pcie_state_l2l3_enable(tp, false);
+       rtl_hw_aspm_clkreq_enable(tp, true);
 }
 
 static void rtl_hw_start_8101(struct rtl8169_private *tp)
@@ -6652,7 +6662,8 @@ static int rtl8169_close(struct net_device *dev)
        rtl8169_update_counters(tp);
 
        rtl_lock_work(tp);
-       clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+       /* Clear all task flags */
+       bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
 
        rtl8169_down(dev);
        rtl_unlock_work(tp);
@@ -6835,7 +6846,9 @@ static void rtl8169_net_suspend(struct net_device *dev)
 
        rtl_lock_work(tp);
        napi_disable(&tp->napi);
-       clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+       /* Clear all task flags */
+       bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
+
        rtl_unlock_work(tp);
 
        rtl_pll_power_down(tp);
@@ -7251,6 +7264,11 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
        }
 }
 
+static void rtl_disable_clk(void *data)
+{
+       clk_disable_unprepare(data);
+}
+
 static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
@@ -7271,6 +7289,32 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
        tp->supports_gmii = cfg->has_gmii;
 
+       /* Get the *optional* external "ether_clk" used on some boards */
+       tp->clk = devm_clk_get(&pdev->dev, "ether_clk");
+       if (IS_ERR(tp->clk)) {
+               rc = PTR_ERR(tp->clk);
+               if (rc == -ENOENT) {
+                       /* clk-core allows NULL (for suspend / resume) */
+                       tp->clk = NULL;
+               } else if (rc == -EPROBE_DEFER) {
+                       return rc;
+               } else {
+                       dev_err(&pdev->dev, "failed to get clk: %d\n", rc);
+                       return rc;
+               }
+       } else {
+               rc = clk_prepare_enable(tp->clk);
+               if (rc) {
+                       dev_err(&pdev->dev, "failed to enable clk: %d\n", rc);
+                       return rc;
+               }
+
+               rc = devm_add_action_or_reset(&pdev->dev, rtl_disable_clk,
+                                             tp->clk);
+               if (rc)
+                       return rc;
+       }
+
        /* enable device (incl. PCI PM wakeup and hotplug setup) */
        rc = pcim_enable_device(pdev);
        if (rc < 0) {
index f3f7477043ce106155ca30ba7c07fb7d20e968bc..bb0ebdfd4459b5d9849e65989ee1209360e7dca7 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 #
 # Renesas device configuration
 #
index a05102a7df02150cbfae6644eadeb971e35d7a3a..f21ab8c02af0e2263b46db68321192d3cd2850dc 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 #
 # Makefile for the Renesas device drivers.
 #
index b81f4faf7b10114df1f17a0c0d80881e8ea9c5ea..1470fc12282b255181838457ff6a07a362e3e321 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Renesas Ethernet AVB device driver
  *
  * Copyright (C) 2014-2015 Renesas Electronics Corporation
@@ -5,10 +6,6 @@
  * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
  *
  * Based on the SuperH Ethernet driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
  */
 
 #ifndef __RAVB_H__
index c06f2df895c2c3e432fc8341f15cc77b0550db11..aff5516b781e27067efc9c1cca19f11bea19dd19 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Renesas Ethernet AVB device driver
  *
  * Copyright (C) 2014-2015 Renesas Electronics Corporation
@@ -5,10 +6,6 @@
  * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
  *
  * Based on the SuperH Ethernet driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
  */
 
 #include <linux/cache.h>
index eede70ec37f8c02759727ff7e881464adf7fe707..0721b5c35d91d613256159763c7e4430247da3af 100644 (file)
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /* PTP 1588 clock using the Renesas Ethernet AVB
  *
  * Copyright (C) 2013-2015 Renesas Electronics Corporation
  * Copyright (C) 2015 Renesas Solutions Corp.
  * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
  */
 
 #include "ravb.h"
index 5573199c4536c283164351ca17d4377ad5a6c6a2..f27a0dc8c56331db3f7063c251333b04716f0365 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*  SuperH Ethernet device driver
  *
  *  Copyright (C) 2014 Renesas Electronics Corporation
@@ -5,18 +6,6 @@
  *  Copyright (C) 2008-2014 Renesas Solutions Corp.
  *  Copyright (C) 2013-2017 Cogent Embedded, Inc.
  *  Copyright (C) 2014 Codethink Limited
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms and conditions of the GNU General Public License,
- *  version 2, as published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- *  more details.
- *
- *  The full GNU General Public License is included in this distribution in
- *  the file called "COPYING".
  */
 
 #include <linux/module.h>
@@ -809,6 +798,41 @@ static struct sh_eth_cpu_data r8a77980_data = {
        .magic          = 1,
        .cexcr          = 1,
 };
+
+/* R7S9210 */
+static struct sh_eth_cpu_data r7s9210_data = {
+       .soft_reset     = sh_eth_soft_reset,
+
+       .set_duplex     = sh_eth_set_duplex,
+       .set_rate       = sh_eth_set_rate_rcar,
+
+       .register_type  = SH_ETH_REG_FAST_SH4,
+
+       .edtrr_trns     = EDTRR_TRNS_ETHER,
+       .ecsr_value     = ECSR_ICD,
+       .ecsipr_value   = ECSIPR_ICDIP,
+       .eesipr_value   = EESIPR_TWBIP | EESIPR_TABTIP | EESIPR_RABTIP |
+                         EESIPR_RFCOFIP | EESIPR_ECIIP | EESIPR_FTCIP |
+                         EESIPR_TDEIP | EESIPR_TFUFIP | EESIPR_FRIP |
+                         EESIPR_RDEIP | EESIPR_RFOFIP | EESIPR_CNDIP |
+                         EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
+                         EESIPR_RMAFIP | EESIPR_RRFIP | EESIPR_RTLFIP |
+                         EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP,
+
+       .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
+       .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
+                         EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
+
+       .fdr_value      = 0x0000070f,
+
+       .apr            = 1,
+       .mpr            = 1,
+       .tpauser        = 1,
+       .hw_swap        = 1,
+       .rpadir         = 1,
+       .no_ade         = 1,
+       .xdfar_rw       = 1,
+};
 #endif /* CONFIG_OF */
 
 static void sh_eth_set_rate_sh7724(struct net_device *ndev)
@@ -3132,6 +3156,7 @@ static const struct of_device_id sh_eth_match_table[] = {
        { .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data },
        { .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data },
        { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
+       { .compatible = "renesas,ether-r7s9210", .data = &r7s9210_data },
        { .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data },
        { .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data },
        { }
index f94be99cf4002190347014d7643387883556981a..0c18650bbfe69f7c22a78a87b1c263bd4f2e15e5 100644 (file)
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*  SuperH Ethernet device driver
  *
  *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
  *  Copyright (C) 2008-2012 Renesas Solutions Corp.
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms and conditions of the GNU General Public License,
- *  version 2, as published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- *  more details.
- *
- *  The full GNU General Public License is included in this distribution in
- *  the file called "COPYING".
  */
 
 #ifndef __SH_ETH_H__
index edf20361ea5f15c7ddee617f899e31b92d7e261e..324049eebb9b140a1ca9a2c65de1eaf82048d263 100644 (file)
@@ -33,7 +33,7 @@ config DWMAC_DWC_QOS_ETH
        select PHYLIB
        select CRC32
        select MII
-       depends on OF && COMMON_CLK && HAS_DMA
+       depends on OF && HAS_DMA
        help
          Support for chips using the snps,dwc-qos-ethernet.txt DT binding.
 
@@ -57,7 +57,7 @@ config DWMAC_ANARION
 config DWMAC_IPQ806X
        tristate "QCA IPQ806x DWMAC support"
        default ARCH_QCOM
-       depends on OF && COMMON_CLK && (ARCH_QCOM || COMPILE_TEST)
+       depends on OF && (ARCH_QCOM || COMPILE_TEST)
        select MFD_SYSCON
        help
          Support for QCA IPQ806X DWMAC Ethernet.
@@ -100,7 +100,7 @@ config DWMAC_OXNAS
 config DWMAC_ROCKCHIP
        tristate "Rockchip dwmac support"
        default ARCH_ROCKCHIP
-       depends on OF && COMMON_CLK && (ARCH_ROCKCHIP || COMPILE_TEST)
+       depends on OF && (ARCH_ROCKCHIP || COMPILE_TEST)
        select MFD_SYSCON
        help
          Support for Ethernet controller on Rockchip RK3288 SoC.
@@ -110,7 +110,7 @@ config DWMAC_ROCKCHIP
 
 config DWMAC_SOCFPGA
        tristate "SOCFPGA dwmac support"
-       default ARCH_SOCFPGA
+       default (ARCH_SOCFPGA || ARCH_STRATIX10)
        depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST)
        select MFD_SYSCON
        help
@@ -123,7 +123,7 @@ config DWMAC_SOCFPGA
 config DWMAC_STI
        tristate "STi GMAC support"
        default ARCH_STI
-       depends on OF && COMMON_CLK && (ARCH_STI || COMPILE_TEST)
+       depends on OF && (ARCH_STI || COMPILE_TEST)
        select MFD_SYSCON
        ---help---
          Support for ethernet controller on STi SOCs.
@@ -147,7 +147,7 @@ config DWMAC_STM32
 config DWMAC_SUNXI
        tristate "Allwinner GMAC support"
        default ARCH_SUNXI
-       depends on OF && COMMON_CLK && (ARCH_SUNXI || COMPILE_TEST)
+       depends on OF && (ARCH_SUNXI || COMPILE_TEST)
        ---help---
          Support for Allwinner A20/A31 GMAC ethernet controllers.
 
index 76649adf8fb0639e66094cb29f83ed58f124bf2c..c0a855b7ab3b4304a0ba734117ee63d903701df8 100644 (file)
@@ -112,7 +112,6 @@ struct stmmac_priv {
        u32 tx_count_frames;
        u32 tx_coal_frames;
        u32 tx_coal_timer;
-       bool tx_timer_armed;
 
        int tx_coalesce;
        int hwts_tx_en;
index ff1ffb46198a7614bb282e992c8e0a3f58052893..9f458bb16f2a6edb6ab8cca33c00176b1cce1d67 100644 (file)
@@ -3147,16 +3147,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
         * element in case of no SG.
         */
        priv->tx_count_frames += nfrags + 1;
-       if (likely(priv->tx_coal_frames > priv->tx_count_frames) &&
-           !priv->tx_timer_armed) {
+       if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
                mod_timer(&priv->txtimer,
                          STMMAC_COAL_TIMER(priv->tx_coal_timer));
-               priv->tx_timer_armed = true;
        } else {
                priv->tx_count_frames = 0;
                stmmac_set_tx_ic(priv, desc);
                priv->xstats.tx_set_ic_bit++;
-               priv->tx_timer_armed = false;
        }
 
        skb_tx_timestamp(skb);
index 3609c7b696c7a0b22e8c88cc1854495aa80b65fe..2b800ce1d5bf42ca23b378d4b9c8c35addc41da9 100644 (file)
@@ -67,7 +67,7 @@ static int dwmac1000_validate_mcast_bins(int mcast_bins)
  * Description:
  * This function validates the number of Unicast address entries supported
  * by a particular Synopsys 10/100/1000 controller. The Synopsys controller
- * supports 132, 64, or 128 Unicast filter entries for it's Unicast filter
+ * supports 1..32, 64, or 128 Unicast filter entries for its Unicast filter
  * logic. This function validates a valid, supported configuration is
  * selected, and defaults to 1 Unicast address if an unsupported
  * configuration is selected.
@@ -77,8 +77,7 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
        int x = ucast_entries;
 
        switch (x) {
-       case 1:
-       case 32:
+       case 1 ... 32:
        case 64:
        case 128:
                break;
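The dwmac1000 hunk above relies on GCC's case-range extension (case 1 ... 32:), which the kernel uses freely. A tiny standalone illustration with example values, mirroring the "fall back to one unicast entry" behaviour described in the comment:

    #include <stdio.h>

    /* GCC extension: 'case LOW ... HIGH:' matches the whole inclusive range. */
    static int clamp_ucast_entries(int n)
    {
            switch (n) {
            case 1 ... 32:
            case 64:
            case 128:
                    return n;       /* supported configuration, keep it */
            default:
                    return 1;       /* unsupported: default to 1 entry */
            }
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   clamp_ucast_entries(7),      /* 7  */
                   clamp_ucast_entries(64),     /* 64 */
                   clamp_ucast_entries(100));   /* 1  */
            return 0;
    }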
index 1a96dd9c1091e6c515753132c4a3fd0128f41bdf..531294f4978bc42bbb0e3cb0177b1312a33bef61 100644 (file)
@@ -61,7 +61,7 @@ static int tc_fill_actions(struct stmmac_tc_entry *entry,
        struct stmmac_tc_entry *action_entry = entry;
        const struct tc_action *act;
        struct tcf_exts *exts;
-       LIST_HEAD(actions);
+       int i;
 
        exts = cls->knode.exts;
        if (!tcf_exts_has_actions(exts))
@@ -69,8 +69,7 @@ static int tc_fill_actions(struct stmmac_tc_entry *entry,
        if (frag)
                action_entry = frag;
 
-       tcf_exts_to_list(exts, &actions);
-       list_for_each_entry(act, &actions, list) {
+       tcf_exts_for_each_action(i, act, exts) {
                /* Accept */
                if (is_tcf_gact_ok(act)) {
                        action_entry->val.af = 1;
index 9263d638bd6d0ffaec45aa1da2fd57b981faa38d..f932923f7d5619b1d8b2aa7cb66acf15b1c608f5 100644 (file)
@@ -41,6 +41,7 @@ config TI_DAVINCI_MDIO
 config TI_DAVINCI_CPDMA
        tristate "TI DaVinci CPDMA Support"
        depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
+       select GENERIC_ALLOCATOR
        ---help---
          This driver supports TI's DaVinci CPDMA dma engine.
 
index 0c1adad7415da7d9b858925d0ec5715e9ca7dfec..396e1cd1066796b815cedf4ab172215aaf238f33 100644 (file)
@@ -170,10 +170,13 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
        struct device_node *node;
        struct cpsw_phy_sel_priv *priv;
 
-       node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel");
+       node = of_parse_phandle(dev->of_node, "cpsw-phy-sel", 0);
        if (!node) {
-               dev_err(dev, "Phy mode driver DT not found\n");
-               return;
+               node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel");
+               if (!node) {
+                       dev_err(dev, "Phy mode driver DT not found\n");
+                       return;
+               }
        }
 
        dev = bus_find_device(&platform_bus_type, NULL, node, match);
index 31c3d77b4733f0aa9900138b5c49f398d0642db4..fe01e141c8f87d50e42a5cb2670ff5ba4921744a 100644 (file)
@@ -1203,6 +1203,9 @@ static void netvsc_send_vf(struct net_device *ndev,
 
        net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
        net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
+       netdev_info(ndev, "VF slot %u %s\n",
+                   net_device_ctx->vf_serial,
+                   net_device_ctx->vf_alloc ? "added" : "removed");
 }
 
 static  void netvsc_receive_inband(struct net_device *ndev,
index 507f68190cb1b4cbf88b8a441a040f8aacafd1d4..3af6d8d15233756e411500a8753d2789d544f076 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/netdevice.h>
 #include <linux/inetdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/pci.h>
 #include <linux/skbuff.h>
 #include <linux/if_vlan.h>
 #include <linux/in.h>
@@ -1893,20 +1894,6 @@ out_unlock:
        rtnl_unlock();
 }
 
-static struct net_device *get_netvsc_bymac(const u8 *mac)
-{
-       struct net_device_context *ndev_ctx;
-
-       list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
-               struct net_device *dev = hv_get_drvdata(ndev_ctx->device_ctx);
-
-               if (ether_addr_equal(mac, dev->perm_addr))
-                       return dev;
-       }
-
-       return NULL;
-}
-
 static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
 {
        struct net_device_context *net_device_ctx;
@@ -2035,22 +2022,48 @@ static void netvsc_vf_setup(struct work_struct *w)
        rtnl_unlock();
 }
 
+/* Find netvsc by VMBus serial number.
+ * The PCI hyperv controller records the serial number as the slot.
+ */
+static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
+{
+       struct device *parent = vf_netdev->dev.parent;
+       struct net_device_context *ndev_ctx;
+       struct pci_dev *pdev;
+
+       if (!parent || !dev_is_pci(parent))
+               return NULL; /* not a PCI device */
+
+       pdev = to_pci_dev(parent);
+       if (!pdev->slot) {
+               netdev_notice(vf_netdev, "no PCI slot information\n");
+               return NULL;
+       }
+
+       list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
+               if (!ndev_ctx->vf_alloc)
+                       continue;
+
+               if (ndev_ctx->vf_serial == pdev->slot->number)
+                       return hv_get_drvdata(ndev_ctx->device_ctx);
+       }
+
+       netdev_notice(vf_netdev,
+                     "no netdev found for slot %u\n", pdev->slot->number);
+       return NULL;
+}
+
 static int netvsc_register_vf(struct net_device *vf_netdev)
 {
-       struct net_device *ndev;
        struct net_device_context *net_device_ctx;
        struct netvsc_device *netvsc_dev;
+       struct net_device *ndev;
        int ret;
 
        if (vf_netdev->addr_len != ETH_ALEN)
                return NOTIFY_DONE;
 
-       /*
-        * We will use the MAC address to locate the synthetic interface to
-        * associate with the VF interface. If we don't find a matching
-        * synthetic interface, move on.
-        */
-       ndev = get_netvsc_bymac(vf_netdev->perm_addr);
+       ndev = get_netvsc_byslot(vf_netdev);
        if (!ndev)
                return NOTIFY_DONE;
 
@@ -2201,6 +2214,16 @@ static int netvsc_probe(struct hv_device *dev,
 
        memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
 
+       /* We must get rtnl lock before scheduling nvdev->subchan_work,
+        * otherwise netvsc_subchan_work() can get rtnl lock first and wait
+        * all subchannels to show up, but that may not happen because
+        * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
+        * -> ... -> device_add() -> ... -> __device_attach() can't get
+        * the device lock, so all the subchannels can't be processed --
+        * finally netvsc_subchan_work() hangs for ever.
+        */
+       rtnl_lock();
+
        if (nvdev->num_chn > 1)
                schedule_work(&nvdev->subchan_work);
 
@@ -2219,7 +2242,6 @@ static int netvsc_probe(struct hv_device *dev,
        else
                net->max_mtu = ETH_DATA_LEN;
 
-       rtnl_lock();
        ret = register_netdevice(net);
        if (ret != 0) {
                pr_err("Unable to register netdev.\n");
@@ -2258,17 +2280,15 @@ static int netvsc_remove(struct hv_device *dev)
 
        cancel_delayed_work_sync(&ndev_ctx->dwork);
 
-       rcu_read_lock();
-       nvdev = rcu_dereference(ndev_ctx->nvdev);
-
-       if  (nvdev)
+       rtnl_lock();
+       nvdev = rtnl_dereference(ndev_ctx->nvdev);
+       if (nvdev)
                cancel_work_sync(&nvdev->subchan_work);
 
        /*
         * Call to the vsc driver to let it know that the device is being
         * removed. Also blocks mtu and channel changes.
         */
-       rtnl_lock();
        vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
        if (vf_netdev)
                netvsc_unregister_vf(vf_netdev);
@@ -2280,7 +2300,6 @@ static int netvsc_remove(struct hv_device *dev)
        list_del(&ndev_ctx->list);
 
        rtnl_unlock();
-       rcu_read_unlock();
 
        hv_set_drvdata(dev, NULL);
 
index 4637d980310e1503fc314004d18906ee2c1ac50f..52fffb98fde9ac3fd05c7f6fd8e5dc123ecae341 100644 (file)
@@ -398,7 +398,6 @@ static umode_t sfp_hwmon_is_visible(const void *data,
        switch (type) {
        case hwmon_temp:
                switch (attr) {
-               case hwmon_temp_input:
                case hwmon_temp_min_alarm:
                case hwmon_temp_max_alarm:
                case hwmon_temp_lcrit_alarm:
@@ -407,13 +406,16 @@ static umode_t sfp_hwmon_is_visible(const void *data,
                case hwmon_temp_max:
                case hwmon_temp_lcrit:
                case hwmon_temp_crit:
+                       if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
+                               return 0;
+                       /* fall through */
+               case hwmon_temp_input:
                        return 0444;
                default:
                        return 0;
                }
        case hwmon_in:
                switch (attr) {
-               case hwmon_in_input:
                case hwmon_in_min_alarm:
                case hwmon_in_max_alarm:
                case hwmon_in_lcrit_alarm:
@@ -422,13 +424,16 @@ static umode_t sfp_hwmon_is_visible(const void *data,
                case hwmon_in_max:
                case hwmon_in_lcrit:
                case hwmon_in_crit:
+                       if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
+                               return 0;
+                       /* fall through */
+               case hwmon_in_input:
                        return 0444;
                default:
                        return 0;
                }
        case hwmon_curr:
                switch (attr) {
-               case hwmon_curr_input:
                case hwmon_curr_min_alarm:
                case hwmon_curr_max_alarm:
                case hwmon_curr_lcrit_alarm:
@@ -437,6 +442,10 @@ static umode_t sfp_hwmon_is_visible(const void *data,
                case hwmon_curr_max:
                case hwmon_curr_lcrit:
                case hwmon_curr_crit:
+                       if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
+                               return 0;
+                       /* fall through */
+               case hwmon_curr_input:
                        return 0444;
                default:
                        return 0;
@@ -452,7 +461,6 @@ static umode_t sfp_hwmon_is_visible(const void *data,
                    channel == 1)
                        return 0;
                switch (attr) {
-               case hwmon_power_input:
                case hwmon_power_min_alarm:
                case hwmon_power_max_alarm:
                case hwmon_power_lcrit_alarm:
@@ -461,6 +469,10 @@ static umode_t sfp_hwmon_is_visible(const void *data,
                case hwmon_power_max:
                case hwmon_power_lcrit:
                case hwmon_power_crit:
+                       if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
+                               return 0;
+                       /* fall through */
+               case hwmon_power_input:
                        return 0444;
                default:
                        return 0;
index ce61231e96ea5fe27f512fbd0d80d4609997e508..62dc564b251d5e0c2019035355aa17426471cc8e 100644 (file)
@@ -429,6 +429,9 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
        if (!skb)
                goto out;
 
+       if (skb_mac_header_len(skb) < ETH_HLEN)
+               goto drop;
+
        if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
                goto drop;
 
index cb0cc30c3d6a190e8d3132b6bab4c5d67e29979c..533b6fb8d923161ad34b539883713bd3e5a65af4 100644 (file)
@@ -967,6 +967,13 @@ static const struct usb_device_id products[] = {
                USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
                .driver_info = (unsigned long)&qmi_wwan_info,
        },
+       {       /* Quectel EP06/EG06/EM06 */
+               USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0306,
+                                             USB_CLASS_VENDOR_SPEC,
+                                             USB_SUBCLASS_VENDOR_SPEC,
+                                             0xff),
+               .driver_info        = (unsigned long)&qmi_wwan_info_quirk_dtr,
+       },
 
        /* 3. Combined interface devices matching on interface number */
        {QMI_FIXED_INTF(0x0408, 0xea42, 4)},    /* Yota / Megafon M100-1 */
@@ -1206,13 +1213,13 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1199, 0x9061, 8)},    /* Sierra Wireless Modem */
        {QMI_FIXED_INTF(0x1199, 0x9063, 8)},    /* Sierra Wireless EM7305 */
        {QMI_FIXED_INTF(0x1199, 0x9063, 10)},   /* Sierra Wireless EM7305 */
-       {QMI_FIXED_INTF(0x1199, 0x9071, 8)},    /* Sierra Wireless MC74xx */
-       {QMI_FIXED_INTF(0x1199, 0x9071, 10)},   /* Sierra Wireless MC74xx */
-       {QMI_FIXED_INTF(0x1199, 0x9079, 8)},    /* Sierra Wireless EM74xx */
-       {QMI_FIXED_INTF(0x1199, 0x9079, 10)},   /* Sierra Wireless EM74xx */
-       {QMI_FIXED_INTF(0x1199, 0x907b, 8)},    /* Sierra Wireless EM74xx */
-       {QMI_FIXED_INTF(0x1199, 0x907b, 10)},   /* Sierra Wireless EM74xx */
-       {QMI_FIXED_INTF(0x1199, 0x9091, 8)},    /* Sierra Wireless EM7565 */
+       {QMI_QUIRK_SET_DTR(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */
+       {QMI_QUIRK_SET_DTR(0x1199, 0x9071, 10)},/* Sierra Wireless MC74xx */
+       {QMI_QUIRK_SET_DTR(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
+       {QMI_QUIRK_SET_DTR(0x1199, 0x9079, 10)},/* Sierra Wireless EM74xx */
+       {QMI_QUIRK_SET_DTR(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
+       {QMI_QUIRK_SET_DTR(0x1199, 0x907b, 10)},/* Sierra Wireless EM74xx */
+       {QMI_QUIRK_SET_DTR(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */
        {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},    /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
        {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},    /* Alcatel L800MA */
        {QMI_FIXED_INTF(0x2357, 0x0201, 4)},    /* TP-LINK HSUPA Modem MA180 */
@@ -1255,7 +1262,6 @@ static const struct usb_device_id products[] = {
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
        {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},    /* Quectel BG96 */
-       {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */
 
        /* 4. Gobi 1000 devices */
        {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
@@ -1331,6 +1337,19 @@ static bool quectel_ec20_detected(struct usb_interface *intf)
        return false;
 }
 
+static bool quectel_ep06_diag_detected(struct usb_interface *intf)
+{
+       struct usb_device *dev = interface_to_usbdev(intf);
+       struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc;
+
+       if (le16_to_cpu(dev->descriptor.idVendor) == 0x2c7c &&
+           le16_to_cpu(dev->descriptor.idProduct) == 0x0306 &&
+           intf_desc.bNumEndpoints == 2)
+               return true;
+
+       return false;
+}
+
 static int qmi_wwan_probe(struct usb_interface *intf,
                          const struct usb_device_id *prod)
 {
@@ -1365,6 +1384,15 @@ static int qmi_wwan_probe(struct usb_interface *intf,
                return -ENODEV;
        }
 
+       /* Quectel EP06/EM06/EG06 supports dynamic interface configuration, so
+        * we need to match on class/subclass/protocol. These values are
+        * identical for the diagnostic- and QMI-interface, but bNumEndpoints is
+        * different. Ignore the current interface if the number of endpoints
+        * equals the number for the diag interface (two).
+        */
+       if (quectel_ep06_diag_detected(intf))
+               return -ENODEV;
+
        return usbnet_probe(intf, id);
 }
 
index 97742708460bc8a415713537ff90060c07b07d27..2cd71bdb6484c774659598fff1e99cd49181337b 100644 (file)
@@ -5217,8 +5217,8 @@ static int rtl8152_probe(struct usb_interface *intf,
                netdev->hw_features &= ~NETIF_F_RXCSUM;
        }
 
-       if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 &&
-           udev->serial && !strcmp(udev->serial, "000001000000")) {
+       if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
+           (!strcmp(udev->serial, "000001000000") || !strcmp(udev->serial, "000002000000"))) {
                dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation");
                set_bit(DELL_TB_RX_AGG_BUG, &tp->flags);
        }
index 8d679c8b7f25c753d77cfb8821d9d2528c9c9048..41a00cd76955bf39047fafc0a1641eb347c6ee21 100644 (file)
@@ -463,6 +463,8 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
        int mac_len, delta, off;
        struct xdp_buff xdp;
 
+       skb_orphan(skb);
+
        rcu_read_lock();
        xdp_prog = rcu_dereference(rq->xdp_prog);
        if (unlikely(!xdp_prog)) {
@@ -508,8 +510,6 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
                skb_copy_header(nskb, skb);
                head_off = skb_headroom(nskb) - skb_headroom(skb);
                skb_headers_offset_update(nskb, head_off);
-               if (skb->sk)
-                       skb_set_owner_w(nskb, skb->sk);
                consume_skb(skb);
                skb = nskb;
        }
index b4c3a957c102d2463fde9d4987dcdf3764e2a694..73969dbeb5c5ff390709dea24cbd61da794d9a3a 100644 (file)
@@ -985,15 +985,12 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
        const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ?
                             iwl_ext_nvm_channels : iwl_nvm_channels;
        struct ieee80211_regdomain *regd, *copy_rd;
-       int size_of_regd, regd_to_copy, wmms_to_copy;
-       int size_of_wmms = 0;
+       int size_of_regd, regd_to_copy;
        struct ieee80211_reg_rule *rule;
-       struct ieee80211_wmm_rule *wmm_rule, *d_wmm, *s_wmm;
        struct regdb_ptrs *regdb_ptrs;
        enum nl80211_band band;
        int center_freq, prev_center_freq = 0;
-       int valid_rules = 0, n_wmms = 0;
-       int i;
+       int valid_rules = 0;
        bool new_rule;
        int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ?
                         IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS;
@@ -1012,11 +1009,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
                sizeof(struct ieee80211_regdomain) +
                num_of_ch * sizeof(struct ieee80211_reg_rule);
 
-       if (geo_info & GEO_WMM_ETSI_5GHZ_INFO)
-               size_of_wmms =
-                       num_of_ch * sizeof(struct ieee80211_wmm_rule);
-
-       regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL);
+       regd = kzalloc(size_of_regd, GFP_KERNEL);
        if (!regd)
                return ERR_PTR(-ENOMEM);
 
@@ -1030,8 +1023,6 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
        regd->alpha2[0] = fw_mcc >> 8;
        regd->alpha2[1] = fw_mcc & 0xff;
 
-       wmm_rule = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
-
        for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
                ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
                band = (ch_idx < NUM_2GHZ_CHANNELS) ?
@@ -1085,26 +1076,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
                    band == NL80211_BAND_2GHZ)
                        continue;
 
-               if (!reg_query_regdb_wmm(regd->alpha2, center_freq,
-                                        &regdb_ptrs[n_wmms].token, wmm_rule)) {
-                       /* Add only new rules */
-                       for (i = 0; i < n_wmms; i++) {
-                               if (regdb_ptrs[i].token ==
-                                   regdb_ptrs[n_wmms].token) {
-                                       rule->wmm_rule = regdb_ptrs[i].rule;
-                                       break;
-                               }
-                       }
-                       if (i == n_wmms) {
-                               rule->wmm_rule = wmm_rule;
-                               regdb_ptrs[n_wmms++].rule = wmm_rule;
-                               wmm_rule++;
-                       }
-               }
+               reg_query_regdb_wmm(regd->alpha2, center_freq, rule);
        }
 
        regd->n_reg_rules = valid_rules;
-       regd->n_wmm_rules = n_wmms;
 
        /*
         * Narrow down regdom for unused regulatory rules to prevent hole
@@ -1113,28 +1088,13 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
        regd_to_copy = sizeof(struct ieee80211_regdomain) +
                valid_rules * sizeof(struct ieee80211_reg_rule);
 
-       wmms_to_copy = sizeof(struct ieee80211_wmm_rule) * n_wmms;
-
-       copy_rd = kzalloc(regd_to_copy + wmms_to_copy, GFP_KERNEL);
+       copy_rd = kzalloc(regd_to_copy, GFP_KERNEL);
        if (!copy_rd) {
                copy_rd = ERR_PTR(-ENOMEM);
                goto out;
        }
 
        memcpy(copy_rd, regd, regd_to_copy);
-       memcpy((u8 *)copy_rd + regd_to_copy, (u8 *)regd + size_of_regd,
-              wmms_to_copy);
-
-       d_wmm = (struct ieee80211_wmm_rule *)((u8 *)copy_rd + regd_to_copy);
-       s_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
-
-       for (i = 0; i < regd->n_reg_rules; i++) {
-               if (!regd->reg_rules[i].wmm_rule)
-                       continue;
-
-               copy_rd->reg_rules[i].wmm_rule = d_wmm +
-                       (regd->reg_rules[i].wmm_rule - s_wmm);
-       }
 
 out:
        kfree(regdb_ptrs);
index 998dfac0fcff359d3727fb5df313e257484378b6..1068757ec42e4784942e69c00161ae4e1ee16548 100644 (file)
@@ -34,6 +34,7 @@
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 #include <linux/rhashtable.h>
+#include <linux/nospec.h>
 #include "mac80211_hwsim.h"
 
 #define WARN_QUEUE 100
@@ -2820,9 +2821,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
                                IEEE80211_VHT_CAP_SHORT_GI_80 |
                                IEEE80211_VHT_CAP_SHORT_GI_160 |
                                IEEE80211_VHT_CAP_TXSTBC |
-                               IEEE80211_VHT_CAP_RXSTBC_1 |
-                               IEEE80211_VHT_CAP_RXSTBC_2 |
-                               IEEE80211_VHT_CAP_RXSTBC_3 |
                                IEEE80211_VHT_CAP_RXSTBC_4 |
                                IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
                        sband->vht_cap.vht_mcs.rx_mcs_map =
@@ -3317,6 +3315,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
        if (info->attrs[HWSIM_ATTR_CHANNELS])
                param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
 
+       if (param.channels < 1) {
+               GENL_SET_ERR_MSG(info, "must have at least one channel");
+               return -EINVAL;
+       }
+
        if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) {
                GENL_SET_ERR_MSG(info, "too many channels specified");
                return -EINVAL;
@@ -3350,6 +3353,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
                        kfree(hwname);
                        return -EINVAL;
                }
+
+               idx = array_index_nospec(idx,
+                                        ARRAY_SIZE(hwsim_world_regdom_custom));
                param.regd = hwsim_world_regdom_custom[idx];
        }
 
index 73f596a90c691a471a55951264b1a6fedf189e6e..f17f602e6171203acd39ee448c305cba719ff1de 100644 (file)
@@ -87,8 +87,7 @@ struct netfront_cb {
 /* IRQ name is queue name with "-tx" or "-rx" appended */
 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
 
-static DECLARE_WAIT_QUEUE_HEAD(module_load_q);
-static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
+static DECLARE_WAIT_QUEUE_HEAD(module_wq);
 
 struct netfront_stats {
        u64                     packets;
@@ -909,7 +908,11 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
                        BUG_ON(pull_to <= skb_headlen(skb));
                        __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
                }
-               BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
+               if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
+                       queue->rx.rsp_cons = ++cons;
+                       kfree_skb(nskb);
+                       return ~0U;
+               }
 
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                skb_frag_page(nfrag),
@@ -1046,6 +1049,8 @@ err:
                skb->len += rx->status;
 
                i = xennet_fill_frags(queue, skb, &tmpq);
+               if (unlikely(i == ~0U))
+                       goto err;
 
                if (rx->flags & XEN_NETRXF_csum_blank)
                        skb->ip_summed = CHECKSUM_PARTIAL;
@@ -1332,11 +1337,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
        netif_carrier_off(netdev);
 
        xenbus_switch_state(dev, XenbusStateInitialising);
-       wait_event(module_load_q,
-                          xenbus_read_driver_state(dev->otherend) !=
-                          XenbusStateClosed &&
-                          xenbus_read_driver_state(dev->otherend) !=
-                          XenbusStateUnknown);
+       wait_event(module_wq,
+                  xenbus_read_driver_state(dev->otherend) !=
+                  XenbusStateClosed &&
+                  xenbus_read_driver_state(dev->otherend) !=
+                  XenbusStateUnknown);
        return netdev;
 
  exit:
@@ -2010,15 +2015,14 @@ static void netback_changed(struct xenbus_device *dev,
 
        dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
 
+       wake_up_all(&module_wq);
+
        switch (backend_state) {
        case XenbusStateInitialising:
        case XenbusStateInitialised:
        case XenbusStateReconfiguring:
        case XenbusStateReconfigured:
-               break;
-
        case XenbusStateUnknown:
-               wake_up_all(&module_unload_q);
                break;
 
        case XenbusStateInitWait:
@@ -2034,12 +2038,10 @@ static void netback_changed(struct xenbus_device *dev,
                break;
 
        case XenbusStateClosed:
-               wake_up_all(&module_unload_q);
                if (dev->state == XenbusStateClosed)
                        break;
                /* Missed the backend's CLOSING state -- fallthrough */
        case XenbusStateClosing:
-               wake_up_all(&module_unload_q);
                xenbus_frontend_closed(dev);
                break;
        }
@@ -2147,14 +2149,14 @@ static int xennet_remove(struct xenbus_device *dev)
 
        if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
                xenbus_switch_state(dev, XenbusStateClosing);
-               wait_event(module_unload_q,
+               wait_event(module_wq,
                           xenbus_read_driver_state(dev->otherend) ==
                           XenbusStateClosing ||
                           xenbus_read_driver_state(dev->otherend) ==
                           XenbusStateUnknown);
 
                xenbus_switch_state(dev, XenbusStateClosed);
-               wait_event(module_unload_q,
+               wait_event(module_wq,
                           xenbus_read_driver_state(dev->otherend) ==
                           XenbusStateClosed ||
                           xenbus_read_driver_state(dev->otherend) ==
index 27902a8799b1e022049078efc2ce14269dbea672..8aae6dcc839fed90a76e31eaf35a5f47ac04957d 100644 (file)
@@ -812,9 +812,9 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
                 * overshoots the remainder by 4 bytes, assume it was
                 * including 'status'.
                 */
-               if (out_field[1] - 8 == remainder)
+               if (out_field[1] - 4 == remainder)
                        return remainder;
-               return out_field[1] - 4;
+               return out_field[1] - 8;
        } else if (cmd == ND_CMD_CALL) {
                struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *) in_field;
 
index 233907889f96a36d6a3b87fc79cf82cabf602def..6c8fb7590838668c537aa09cfb289ed82f7fa563 100644 (file)
@@ -34,6 +34,9 @@ static int nvdimm_probe(struct device *dev)
                return rc;
        }
 
+       /* reset locked, to be validated below... */
+       nvdimm_clear_locked(dev);
+
        ndd = kzalloc(sizeof(*ndd), GFP_KERNEL);
        if (!ndd)
                return -ENOMEM;
@@ -48,12 +51,30 @@ static int nvdimm_probe(struct device *dev)
        get_device(dev);
        kref_init(&ndd->kref);
 
+       /*
+        * EACCES failures reading the namespace label-area-properties
+        * are interpreted as the DIMM capacity being locked but the
+        * namespace labels themselves being accessible.
+        */
        rc = nvdimm_init_nsarea(ndd);
-       if (rc == -EACCES)
+       if (rc == -EACCES) {
+               /*
+                * See nvdimm_namespace_common_probe() where we fail to
+                * allow namespaces to probe while the DIMM is locked,
+                * but we do allow for namespace enumeration.
+                */
                nvdimm_set_locked(dev);
+               rc = 0;
+       }
        if (rc)
                goto err;
 
+       /*
+        * EACCES failures reading the namespace label-data are
+        * interpreted as the label area being locked in addition to the
+        * DIMM capacity. We fail the dimm probe to prevent regions from
+        * attempting to parse the label area.
+        */
        rc = nvdimm_init_config_data(ndd);
        if (rc == -EACCES)
                nvdimm_set_locked(dev);
@@ -72,7 +93,6 @@ static int nvdimm_probe(struct device *dev)
                if (rc == 0)
                        nvdimm_set_aliasing(dev);
        }
-       nvdimm_clear_locked(dev);
        nvdimm_bus_unlock(dev);
 
        if (rc)
index 8d348b22ba453a58938d9fa921dd890109a4c7de..863cabc352159c54e3beb6659519418aadc3c0bc 100644 (file)
@@ -536,6 +536,37 @@ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
        return info.available;
 }
 
+/**
+ * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
+ *                        contiguous unallocated dpa range.
+ * @nd_region: constrain available space check to this reference region
+ * @nd_mapping: container of dpa-resource-root + labels
+ */
+resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
+                                          struct nd_mapping *nd_mapping)
+{
+       struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+       struct nvdimm_bus *nvdimm_bus;
+       resource_size_t max = 0;
+       struct resource *res;
+
+       /* if a dimm is disabled the available capacity is zero */
+       if (!ndd)
+               return 0;
+
+       nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
+       if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
+               return 0;
+       for_each_dpa_resource(ndd, res) {
+               if (strcmp(res->name, "pmem-reserve") != 0)
+                       continue;
+               if (resource_size(res) > max)
+                       max = resource_size(res);
+       }
+       release_free_pmem(nvdimm_bus, nd_mapping);
+       return max;
+}
+
 /**
  * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
  * @nd_mapping: container of dpa-resource-root + labels
index 28afdd668905019472d802be06695bfc06897b48..4a4266250c28cfde56b08a2aa7ce04a27d6e6527 100644 (file)
@@ -799,7 +799,7 @@ static int merge_dpa(struct nd_region *nd_region,
        return 0;
 }
 
-static int __reserve_free_pmem(struct device *dev, void *data)
+int __reserve_free_pmem(struct device *dev, void *data)
 {
        struct nvdimm *nvdimm = data;
        struct nd_region *nd_region;
@@ -836,7 +836,7 @@ static int __reserve_free_pmem(struct device *dev, void *data)
        return 0;
 }
 
-static void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
+void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
                struct nd_mapping *nd_mapping)
 {
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
@@ -1032,7 +1032,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
 
                allocated += nvdimm_allocated_dpa(ndd, &label_id);
        }
-       available = nd_region_available_dpa(nd_region);
+       available = nd_region_allocatable_dpa(nd_region);
 
        if (val > available + allocated)
                return -ENOSPC;
@@ -1144,6 +1144,26 @@ resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
 }
 EXPORT_SYMBOL(nvdimm_namespace_capacity);
 
+bool nvdimm_namespace_locked(struct nd_namespace_common *ndns)
+{
+       int i;
+       bool locked = false;
+       struct device *dev = &ndns->dev;
+       struct nd_region *nd_region = to_nd_region(dev->parent);
+
+       for (i = 0; i < nd_region->ndr_mappings; i++) {
+               struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+               struct nvdimm *nvdimm = nd_mapping->nvdimm;
+
+               if (test_bit(NDD_LOCKED, &nvdimm->flags)) {
+                       dev_dbg(dev, "%s locked\n", nvdimm_name(nvdimm));
+                       locked = true;
+               }
+       }
+       return locked;
+}
+EXPORT_SYMBOL(nvdimm_namespace_locked);
+
 static ssize_t size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
@@ -1695,6 +1715,9 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
                }
        }
 
+       if (nvdimm_namespace_locked(ndns))
+               return ERR_PTR(-EACCES);
+
        size = nvdimm_namespace_capacity(ndns);
        if (size < ND_MIN_NAMESPACE_SIZE) {
                dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n",
index 79274ead54fb0cbe3f7034f095a7b1d3b925756a..ac68072fb8cd683c8917c9f061f127315cabb2ef 100644 (file)
@@ -100,6 +100,14 @@ struct nd_region;
 struct nvdimm_drvdata;
 struct nd_mapping;
 void nd_mapping_free_labels(struct nd_mapping *nd_mapping);
+
+int __reserve_free_pmem(struct device *dev, void *data);
+void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
+                      struct nd_mapping *nd_mapping);
+
+resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
+                                          struct nd_mapping *nd_mapping);
+resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region);
 resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, resource_size_t *overlap);
 resource_size_t nd_blk_available_dpa(struct nd_region *nd_region);
index 6ee7fd7e4bbdc6e37ccdfa4cd3616339e8791ba2..98317e7ce5b54dbd2e2172a40d114e2734650391 100644 (file)
@@ -357,6 +357,7 @@ struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id, resource_size_t start,
                resource_size_t n);
 resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
+bool nvdimm_namespace_locked(struct nd_namespace_common *ndns);
 struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev);
 int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
 int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt);
index dd17acd8fe6810e5d1a04c05bd8f23241ceec409..6071e2942053c903564d6f08f278d3735a619308 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/hdreg.h>
 #include <linux/init.h>
 #include <linux/platform_device.h>
+#include <linux/set_memory.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/badblocks.h>
@@ -51,6 +52,30 @@ static struct nd_region *to_region(struct pmem_device *pmem)
        return to_nd_region(to_dev(pmem)->parent);
 }
 
+static void hwpoison_clear(struct pmem_device *pmem,
+               phys_addr_t phys, unsigned int len)
+{
+       unsigned long pfn_start, pfn_end, pfn;
+
+       /* only pmem in the linear map supports HWPoison */
+       if (is_vmalloc_addr(pmem->virt_addr))
+               return;
+
+       pfn_start = PHYS_PFN(phys);
+       pfn_end = pfn_start + PHYS_PFN(len);
+       for (pfn = pfn_start; pfn < pfn_end; pfn++) {
+               struct page *page = pfn_to_page(pfn);
+
+               /*
+                * Note, no need to hold a get_dev_pagemap() reference
+                * here since we're in the driver I/O path and
+                * outstanding I/O requests pin the dev_pagemap.
+                */
+               if (test_and_clear_pmem_poison(page))
+                       clear_mce_nospec(pfn);
+       }
+}
+
 static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
                phys_addr_t offset, unsigned int len)
 {
@@ -65,6 +90,7 @@ static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
        if (cleared < len)
                rc = BLK_STS_IOERR;
        if (cleared > 0 && cleared / 512) {
+               hwpoison_clear(pmem, pmem->phys_addr + offset, cleared);
                cleared /= 512;
                dev_dbg(dev, "%#llx clear %ld sector%s\n",
                                (unsigned long long) sector, cleared,
@@ -226,8 +252,11 @@ __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
        if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
                                        PFN_PHYS(nr_pages))))
                return -EIO;
-       *kaddr = pmem->virt_addr + offset;
-       *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
+
+       if (kaddr)
+               *kaddr = pmem->virt_addr + offset;
+       if (pfn)
+               *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
 
        /*
         * If badblocks are present, limit known good range to the
index a64ebc78b5dffbac4072fc722d9961650e681f88..59cfe13ea8a85c3bcccf9211409ae237196626d5 100644 (file)
@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef __NVDIMM_PMEM_H__
 #define __NVDIMM_PMEM_H__
+#include <linux/page-flags.h>
 #include <linux/badblocks.h>
 #include <linux/types.h>
 #include <linux/pfn_t.h>
@@ -27,4 +28,16 @@ struct pmem_device {
 
 long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
                long nr_pages, void **kaddr, pfn_t *pfn);
+
+#ifdef CONFIG_MEMORY_FAILURE
+static inline bool test_and_clear_pmem_poison(struct page *page)
+{
+       return TestClearPageHWPoison(page);
+}
+#else
+static inline bool test_and_clear_pmem_poison(struct page *page)
+{
+       return false;
+}
+#endif
 #endif /* __NVDIMM_PMEM_H__ */
index ec3543b83330f25e040894599c44b3898d6bd66b..fa37afcd43ff8ceb8ac4e8b18a8a7f23218d3e90 100644 (file)
@@ -389,6 +389,30 @@ resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
        return available;
 }
 
+resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region)
+{
+       resource_size_t available = 0;
+       int i;
+
+       if (is_memory(&nd_region->dev))
+               available = PHYS_ADDR_MAX;
+
+       WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
+       for (i = 0; i < nd_region->ndr_mappings; i++) {
+               struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+
+               if (is_memory(&nd_region->dev))
+                       available = min(available,
+                                       nd_pmem_max_contiguous_dpa(nd_region,
+                                                                  nd_mapping));
+               else if (is_nd_blk(&nd_region->dev))
+                       available += nd_blk_available_dpa(nd_region);
+       }
+       if (is_memory(&nd_region->dev))
+               return available * nd_region->ndr_mappings;
+       return available;
+}
+
 static ssize_t available_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
@@ -410,6 +434,21 @@ static ssize_t available_size_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(available_size);
 
+static ssize_t max_available_extent_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct nd_region *nd_region = to_nd_region(dev);
+       unsigned long long available = 0;
+
+       nvdimm_bus_lock(dev);
+       wait_nvdimm_bus_probe_idle(dev);
+       available = nd_region_allocatable_dpa(nd_region);
+       nvdimm_bus_unlock(dev);
+
+       return sprintf(buf, "%llu\n", available);
+}
+static DEVICE_ATTR_RO(max_available_extent);
+
 static ssize_t init_namespaces_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
@@ -561,6 +600,7 @@ static struct attribute *nd_region_attributes[] = {
        &dev_attr_read_only.attr,
        &dev_attr_set_cookie.attr,
        &dev_attr_available_size.attr,
+       &dev_attr_max_available_extent.attr,
        &dev_attr_namespace_seed.attr,
        &dev_attr_init_namespaces.attr,
        &dev_attr_badblocks.attr,
index 466e3c8582f0fd62628b90872b2046971e064776..74eaedd5b860f1c1fd42faa187d5cda802d0ae8e 100644 (file)
@@ -54,6 +54,28 @@ DEFINE_MUTEX(of_mutex);
  */
 DEFINE_RAW_SPINLOCK(devtree_lock);
 
+bool of_node_name_eq(const struct device_node *np, const char *name)
+{
+       const char *node_name;
+       size_t len;
+
+       if (!np)
+               return false;
+
+       node_name = kbasename(np->full_name);
+       len = strchrnul(node_name, '@') - node_name;
+
+       return (strlen(name) == len) && (strncmp(node_name, name, len) == 0);
+}
+
+bool of_node_name_prefix(const struct device_node *np, const char *prefix)
+{
+       if (!np)
+               return false;
+
+       return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0;
+}
+
 int of_n_addr_cells(struct device_node *np)
 {
        u32 cells;
@@ -118,6 +140,9 @@ void of_populate_phandle_cache(void)
                if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
                        phandles++;
 
+       if (!phandles)
+               goto out;
+
        cache_entries = roundup_pow_of_two(phandles);
        phandle_cache_mask = cache_entries - 1;
 
@@ -719,6 +744,31 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
 }
 EXPORT_SYMBOL(of_get_next_available_child);
 
+/**
+ * of_get_compatible_child - Find compatible child node
+ * @parent:    parent node
+ * @compatible:        compatible string
+ *
+ * Lookup child node whose compatible property contains the given compatible
+ * string.
+ *
+ * Returns a node pointer with refcount incremented, use of_node_put() on it
+ * when done; or NULL if not found.
+ */
+struct device_node *of_get_compatible_child(const struct device_node *parent,
+                               const char *compatible)
+{
+       struct device_node *child;
+
+       for_each_child_of_node(parent, child) {
+               if (of_device_is_compatible(child, compatible))
+                       break;
+       }
+
+       return child;
+}
+EXPORT_SYMBOL(of_get_compatible_child);
+
 /**
  *     of_get_child_by_name - Find the child node by name for a given parent
  *     @node:  parent node
index 7ba90c290a428186322cc0128ae4a7afbae87748..6c59673933e90817dea1023f3e200c486f82fdde 100644 (file)
@@ -241,6 +241,10 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
        if (!dev)
                goto err_clear_flag;
 
+       /* AMBA devices only support a single DMA mask */
+       dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
+
        /* setup generic device info */
        dev->dev.of_node = of_node_get(node);
        dev->dev.fwnode = &node->fwnode;
index c00f82cc54aacac60c6b24ce424402cb3e4dfea1..ee80e79db21a24907ccc96c654b0e253bdeec399 100644 (file)
@@ -89,6 +89,9 @@ static enum pci_protocol_version_t pci_protocol_version;
 
 #define STATUS_REVISION_MISMATCH 0xC0000059
 
+/* space for 32bit serial number as string */
+#define SLOT_NAME_SIZE 11
+
 /*
  * Message Types
  */
@@ -494,6 +497,7 @@ struct hv_pci_dev {
        struct list_head list_entry;
        refcount_t refs;
        enum hv_pcichild_state state;
+       struct pci_slot *pci_slot;
        struct pci_function_description desc;
        bool reported_missing;
        struct hv_pcibus_device *hbus;
@@ -1457,6 +1461,34 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
        spin_unlock_irqrestore(&hbus->device_list_lock, flags);
 }
 
+/*
+ * Assign entries in sysfs pci slot directory.
+ *
+ * Note that this function does not need to lock the children list
+ * because it is called from pci_devices_present_work which
+ * is serialized with hv_eject_device_work because they are on the
+ * same ordered workqueue. Therefore hbus->children list will not change
+ * even when pci_create_slot sleeps.
+ */
+static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
+{
+       struct hv_pci_dev *hpdev;
+       char name[SLOT_NAME_SIZE];
+       int slot_nr;
+
+       list_for_each_entry(hpdev, &hbus->children, list_entry) {
+               if (hpdev->pci_slot)
+                       continue;
+
+               slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
+               snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
+               hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr,
+                                         name, NULL);
+               if (!hpdev->pci_slot)
+                       pr_warn("pci_create slot %s failed\n", name);
+       }
+}
+
 /**
  * create_root_hv_pci_bus() - Expose a new root PCI bus
  * @hbus:      Root PCI bus, as understood by this driver
@@ -1480,6 +1512,7 @@ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
        pci_lock_rescan_remove();
        pci_scan_child_bus(hbus->pci_bus);
        pci_bus_assign_resources(hbus->pci_bus);
+       hv_pci_assign_slots(hbus);
        pci_bus_add_devices(hbus->pci_bus);
        pci_unlock_rescan_remove();
        hbus->state = hv_pcibus_installed;
@@ -1742,6 +1775,7 @@ static void pci_devices_present_work(struct work_struct *work)
                 */
                pci_lock_rescan_remove();
                pci_scan_child_bus(hbus->pci_bus);
+               hv_pci_assign_slots(hbus);
                pci_unlock_rescan_remove();
                break;
 
@@ -1858,6 +1892,9 @@ static void hv_eject_device_work(struct work_struct *work)
        list_del(&hpdev->list_entry);
        spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
 
+       if (hpdev->pci_slot)
+               pci_destroy_slot(hpdev->pci_slot);
+
        memset(&ctxt, 0, sizeof(ctxt));
        ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
        ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
index 7136e34309250aa2a3cc0d2880f563a3b183e895..a938abdb41ceeb575dc62af740d78d278104280c 100644 (file)
@@ -496,7 +496,7 @@ int pciehp_power_on_slot(struct slot *slot)
        u16 slot_status;
        int retval;
 
-       /* Clear sticky power-fault bit from previous power failures */
+       /* Clear power-fault bit from previous power failures */
        pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
        if (slot_status & PCI_EXP_SLTSTA_PFD)
                pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
@@ -646,6 +646,14 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
                pciehp_handle_button_press(slot);
        }
 
+       /* Check Power Fault Detected */
+       if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
+               ctrl->power_fault_detected = 1;
+               ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot));
+               pciehp_set_attention_status(slot, 1);
+               pciehp_green_led_off(slot);
+       }
+
        /*
         * Disable requests have higher priority than Presence Detect Changed
         * or Data Link Layer State Changed events.
@@ -657,14 +665,6 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
                pciehp_handle_presence_or_link_change(slot, events);
        up_read(&ctrl->reset_lock);
 
-       /* Check Power Fault Detected */
-       if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
-               ctrl->power_fault_detected = 1;
-               ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot));
-               pciehp_set_attention_status(slot, 1);
-               pciehp_green_led_off(slot);
-       }
-
        pci_config_pm_runtime_put(pdev);
        wake_up(&ctrl->requester);
        return IRQ_HANDLED;
index 29ff9619b5fa8e3623778a696cd2b9d572801c43..1835f3a7aa8d2f5a502a0629bfb0c0cc96420dd4 100644 (file)
@@ -4547,6 +4547,7 @@ int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
 
        return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
 }
+EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
 
 static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
 {
@@ -5200,7 +5201,7 @@ static int __pci_reset_bus(struct pci_bus *bus)
  */
 int pci_reset_bus(struct pci_dev *pdev)
 {
-       return pci_probe_reset_slot(pdev->slot) ?
+       return (!pci_probe_reset_slot(pdev->slot)) ?
            __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
 }
 EXPORT_SYMBOL_GPL(pci_reset_bus);
index ec784009a36b8d7b694ebb8f0b26a3f3810636f5..201f9e5ff55c0a97e330d9dcf139f8d2cd96bad1 100644 (file)
@@ -2074,6 +2074,7 @@ static void pci_configure_eetlp_prefix(struct pci_dev *dev)
 {
 #ifdef CONFIG_PCI_PASID
        struct pci_dev *bridge;
+       int pcie_type;
        u32 cap;
 
        if (!pci_is_pcie(dev))
@@ -2083,7 +2084,9 @@ static void pci_configure_eetlp_prefix(struct pci_dev *dev)
        if (!(cap & PCI_EXP_DEVCAP2_EE_PREFIX))
                return;
 
-       if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+       pcie_type = pci_pcie_type(dev);
+       if (pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
+           pcie_type == PCI_EXP_TYPE_RC_END)
                dev->eetlp_prefix_path = 1;
        else {
                bridge = pci_upstream_bridge(dev);
index ef7143a274e00c9431a26c14702443dbd7853aa1..6bc27b7fd452ad591626625c69454d85041c1a02 100644 (file)
@@ -4355,11 +4355,6 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
  *
  * 0x9d10-0x9d1b PCI Express Root port #{1-12}
  *
- * The 300 series chipset suffers from the same bug so include those root
- * ports here as well.
- *
- * 0xa32c-0xa343 PCI Express Root port #{0-24}
- *
  * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
  * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
  * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html
@@ -4377,7 +4372,6 @@ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
        case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */
        case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */
        case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */
-       case 0xa32c ... 0xa343:                         /* 300 series */
                return true;
        }
 
index 9940cc70f38b0c6a92c1f1381c401ea395b36d1a..54a8b30dda38c446c8c4f839ba2c6cb8be504a8d 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/poll.h>
 #include <linux/wait.h>
 
+#include <linux/nospec.h>
+
 MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
 MODULE_VERSION("0.1");
 MODULE_LICENSE("GPL");
@@ -909,6 +911,8 @@ static int ioctl_port_to_pff(struct switchtec_dev *stdev,
        default:
                if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
                        return -EINVAL;
+               p.port = array_index_nospec(p.port,
+                                       ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1);
                p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
                break;
        }
index ece41fb2848f20edbd995176fbb6b7fd6ab3004e..c4f4d904e4a61e4c0d622a08fcb85e1f5e63b6a7 100644 (file)
@@ -1040,7 +1040,7 @@ static int madera_pin_probe(struct platform_device *pdev)
        }
 
        /* if the configuration is provided through pdata, apply it */
-       if (pdata) {
+       if (pdata && pdata->gpio_configs) {
                ret = pinctrl_register_mappings(pdata->gpio_configs,
                                                pdata->n_gpio_configs);
                if (ret) {
index 6a1b6058b9910269c60c448cbca3350bce399af6..628817c40e3bbc79cc1098368a23fc2d75e587e5 100644 (file)
@@ -793,7 +793,7 @@ static int ingenic_pinctrl_probe(struct platform_device *pdev)
 
                err = pinctrl_generic_add_group(jzpc->pctl, group->name,
                                group->pins, group->num_pins, group->data);
-               if (err) {
+               if (err < 0) {
                        dev_err(dev, "Failed to register group %s\n",
                                        group->name);
                        return err;
@@ -806,7 +806,7 @@ static int ingenic_pinctrl_probe(struct platform_device *pdev)
                err = pinmux_generic_add_function(jzpc->pctl, func->name,
                                func->group_names, func->num_group_names,
                                func->data);
-               if (err) {
+               if (err < 0) {
                        dev_err(dev, "Failed to register function %s\n",
                                        func->name);
                        return err;
index 2155a30c282b24e20ba440e1398e8d71b308647e..5d72ffad32c299eb2db1e24c5827a11a54a4084e 100644 (file)
@@ -634,6 +634,29 @@ static void msm_gpio_irq_mask(struct irq_data *d)
        raw_spin_lock_irqsave(&pctrl->lock, flags);
 
        val = readl(pctrl->regs + g->intr_cfg_reg);
+       /*
+        * There are two bits that control interrupt forwarding to the CPU. The
+        * RAW_STATUS_EN bit causes the level or edge sensed on the line to be
+        * latched into the interrupt status register when the hardware detects
+        * an irq that it's configured for (either edge for edge type or level
+        * for level type irq). The 'non-raw' status enable bit causes the
+        * hardware to assert the summary interrupt to the CPU if the latched
+        * status bit is set. There's a bug though, the edge detection logic
+        * seems to have a problem where toggling the RAW_STATUS_EN bit may
+        * cause the status bit to latch spuriously when there isn't any edge
+        * so we can't touch that bit for edge type irqs and we have to keep
+        * the bit set anyway so that edges are latched while the line is masked.
+        *
+        * To make matters more complicated, leaving the RAW_STATUS_EN bit
+        * enabled all the time causes level interrupts to re-latch into the
+        * status register because the level is still present on the line after
+        * we ack it. We clear the raw status enable bit during mask here and
+        * set the bit on unmask so the interrupt can't latch into the hardware
+        * while it's masked.
+        */
+       if (irqd_get_trigger_type(d) & IRQ_TYPE_LEVEL_MASK)
+               val &= ~BIT(g->intr_raw_status_bit);
+
        val &= ~BIT(g->intr_enable_bit);
        writel(val, pctrl->regs + g->intr_cfg_reg);
 
@@ -655,6 +678,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
        raw_spin_lock_irqsave(&pctrl->lock, flags);
 
        val = readl(pctrl->regs + g->intr_cfg_reg);
+       val |= BIT(g->intr_raw_status_bit);
        val |= BIT(g->intr_enable_bit);
        writel(val, pctrl->regs + g->intr_cfg_reg);
 
index d975462a4c576748eb6163f512a607d4d3b8d19e..f10af5c383c551d7632d0e66b751792ae9dc7abd 100644 (file)
@@ -536,6 +536,7 @@ static acpi_status alienware_wmax_command(struct wmax_basic_args *in_args,
                if (obj && obj->type == ACPI_TYPE_INTEGER)
                        *out_data = (u32) obj->integer.value;
        }
+       kfree(output.pointer);
        return status;
 
 }
index 88afe5651d24aec8e1d30b748cd67f295d1558d5..cf2229ece9ff6f11cce316f2e51c8088c30053ec 100644 (file)
@@ -78,6 +78,7 @@ static int run_smbios_call(struct wmi_device *wdev)
        dev_dbg(&wdev->dev, "result: [%08x,%08x,%08x,%08x]\n",
                priv->buf->std.output[0], priv->buf->std.output[1],
                priv->buf->std.output[2], priv->buf->std.output[3]);
+       kfree(output.pointer);
 
        return 0;
 }
index a70262cb7e569dc5c47874bd286bd3afd58a1f2e..13d28fdbdbb535cdf665b39c8b52ad5127d0b9b9 100644 (file)
@@ -73,6 +73,13 @@ config RESET_MESON
        help
          This enables the reset driver for Amlogic Meson SoCs.
 
+config RESET_MESON_AUDIO_ARB
+       tristate "Meson Audio Memory Arbiter Reset Driver"
+       depends on ARCH_MESON || COMPILE_TEST
+       help
+         This enables the reset driver for Audio Memory Arbiter of
+         Amlogic's A113 based SoCs
+
 config RESET_OXNAS
        bool
 
index 0676b6b1976f2e5c02c2e136ddf0d03c4c5aace5..4243c38228e284478048c42cfbf0f900b7d7aed4 100644 (file)
@@ -12,6 +12,7 @@ obj-$(CONFIG_RESET_IMX7) += reset-imx7.o
 obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o
 obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o
 obj-$(CONFIG_RESET_MESON) += reset-meson.o
+obj-$(CONFIG_RESET_MESON_AUDIO_ARB) += reset-meson-audio-arb.o
 obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o
 obj-$(CONFIG_RESET_PISTACHIO) += reset-pistachio.o
 obj-$(CONFIG_RESET_QCOM_AOSS) += reset-qcom-aoss.o
index 14bc78d287074330d0c7b531fcd8c1f1417e21f2..97d9f08271c5b3756386d398f009d2c90ec6a6d8 100644 (file)
@@ -81,7 +81,7 @@ static int imx7_reset_set(struct reset_controller_dev *rcdev,
 {
        struct imx7_src *imx7src = to_imx7_src(rcdev);
        const struct imx7_src_signal *signal = &imx7_src_signals[id];
-       unsigned int value = 0;
+       unsigned int value = assert ? signal->bit : 0;
 
        switch (id) {
        case IMX7_RESET_PCIEPHY:
diff --git a/drivers/reset/reset-meson-audio-arb.c b/drivers/reset/reset-meson-audio-arb.c
new file mode 100644 (file)
index 0000000..9175161
--- /dev/null
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+// Copyright (c) 2018 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
+
+#include <dt-bindings/reset/amlogic,meson-axg-audio-arb.h>
+
+struct meson_audio_arb_data {
+       struct reset_controller_dev rstc;
+       void __iomem *regs;
+       struct clk *clk;
+       const unsigned int *reset_bits;
+       spinlock_t lock;
+};
+
+#define ARB_GENERAL_BIT        31
+
+static const unsigned int axg_audio_arb_reset_bits[] = {
+       [AXG_ARB_TODDR_A]       = 0,
+       [AXG_ARB_TODDR_B]       = 1,
+       [AXG_ARB_TODDR_C]       = 2,
+       [AXG_ARB_FRDDR_A]       = 4,
+       [AXG_ARB_FRDDR_B]       = 5,
+       [AXG_ARB_FRDDR_C]       = 6,
+};
+
+static int meson_audio_arb_update(struct reset_controller_dev *rcdev,
+                                 unsigned long id, bool assert)
+{
+       u32 val;
+       struct meson_audio_arb_data *arb =
+               container_of(rcdev, struct meson_audio_arb_data, rstc);
+
+       spin_lock(&arb->lock);
+       val = readl(arb->regs);
+
+       if (assert)
+               val &= ~BIT(arb->reset_bits[id]);
+       else
+               val |= BIT(arb->reset_bits[id]);
+
+       writel(val, arb->regs);
+       spin_unlock(&arb->lock);
+
+       return 0;
+}
+
+static int meson_audio_arb_status(struct reset_controller_dev *rcdev,
+                                 unsigned long id)
+{
+       u32 val;
+       struct meson_audio_arb_data *arb =
+               container_of(rcdev, struct meson_audio_arb_data, rstc);
+
+       val = readl(arb->regs);
+
+       return !(val & BIT(arb->reset_bits[id]));
+}
+
+static int meson_audio_arb_assert(struct reset_controller_dev *rcdev,
+                                 unsigned long id)
+{
+       return meson_audio_arb_update(rcdev, id, true);
+}
+
+static int meson_audio_arb_deassert(struct reset_controller_dev *rcdev,
+                                   unsigned long id)
+{
+       return meson_audio_arb_update(rcdev, id, false);
+}
+
+static const struct reset_control_ops meson_audio_arb_rstc_ops = {
+       .assert = meson_audio_arb_assert,
+       .deassert = meson_audio_arb_deassert,
+       .status = meson_audio_arb_status,
+};
+
+static const struct of_device_id meson_audio_arb_of_match[] = {
+       { .compatible = "amlogic,meson-axg-audio-arb", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, meson_audio_arb_of_match);
+
+static int meson_audio_arb_remove(struct platform_device *pdev)
+{
+       struct meson_audio_arb_data *arb = platform_get_drvdata(pdev);
+
+       /* Disable all access */
+       spin_lock(&arb->lock);
+       writel(0, arb->regs);
+       spin_unlock(&arb->lock);
+
+       clk_disable_unprepare(arb->clk);
+
+       return 0;
+}
+
+static int meson_audio_arb_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct meson_audio_arb_data *arb;
+       struct resource *res;
+       int ret;
+
+       arb = devm_kzalloc(dev, sizeof(*arb), GFP_KERNEL);
+       if (!arb)
+               return -ENOMEM;
+       platform_set_drvdata(pdev, arb);
+
+       arb->clk = devm_clk_get(dev, NULL);
+       if (IS_ERR(arb->clk)) {
+               if (PTR_ERR(arb->clk) != -EPROBE_DEFER)
+                       dev_err(dev, "failed to get clock\n");
+               return PTR_ERR(arb->clk);
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       arb->regs = devm_ioremap_resource(dev, res);
+       if (IS_ERR(arb->regs))
+               return PTR_ERR(arb->regs);
+
+       spin_lock_init(&arb->lock);
+       arb->reset_bits = axg_audio_arb_reset_bits;
+       arb->rstc.nr_resets = ARRAY_SIZE(axg_audio_arb_reset_bits);
+       arb->rstc.ops = &meson_audio_arb_rstc_ops;
+       arb->rstc.of_node = dev->of_node;
+
+       /*
+        * Enable the general bit:
+        * in the initial state, all memory interfaces are disabled
+        * and the general bit is on.
+        */
+       ret = clk_prepare_enable(arb->clk);
+       if (ret) {
+               dev_err(dev, "failed to enable arb clock\n");
+               return ret;
+       }
+       writel(BIT(ARB_GENERAL_BIT), arb->regs);
+
+       /* Register reset controller */
+       ret = devm_reset_controller_register(dev, &arb->rstc);
+       if (ret) {
+               dev_err(dev, "failed to register arb reset controller\n");
+               meson_audio_arb_remove(pdev);
+       }
+
+       return ret;
+}
+
+static struct platform_driver meson_audio_arb_pdrv = {
+       .probe = meson_audio_arb_probe,
+       .remove = meson_audio_arb_remove,
+       .driver = {
+               .name = "meson-audio-arb-reset",
+               .of_match_table = meson_audio_arb_of_match,
+       },
+};
+module_platform_driver(meson_audio_arb_pdrv);
+
+MODULE_DESCRIPTION("Amlogic A113 Audio Memory Arbiter");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
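For context, a hedged sketch of how a consumer (for example an audio FIFO driver) might use this arbiter through the generic reset framework; the function name is illustrative and not taken from the actual Amlogic audio drivers.

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/reset.h>

    /* illustrative consumer: deassert the arbiter line before starting DMA */
    static int example_fifo_enable_arb(struct device *dev)
    {
            struct reset_control *arb;

            /* matches a DT property like: resets = <&arb AXG_ARB_TODDR_A>; */
            arb = devm_reset_control_get_exclusive(dev, NULL);
            if (IS_ERR(arb))
                    return PTR_ERR(arb);

            /* deassert sets the bit in the arbiter register, granting access */
            return reset_control_deassert(arb);
    }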
index ed607288e696f64c7d041e4b791ef74836017640..23e526cda5c10b7e61a059a63c3c83f8ed0dc0d8 100644 (file)
@@ -922,9 +922,11 @@ __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
        unsigned long dev_sz;
 
        dev_sz = dev_info->end - dev_info->start + 1;
-       *kaddr = (void *) dev_info->start + offset;
-       *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset),
-                       PFN_DEV|PFN_SPECIAL);
+       if (kaddr)
+               *kaddr = (void *) dev_info->start + offset;
+       if (pfn)
+               *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset),
+                               PFN_DEV|PFN_SPECIAL);
 
        return (dev_sz - offset) / PAGE_SIZE;
 }
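The NULL checks added above reflect that direct_access callers may pass NULL for outputs they do not need. A hedged sketch of such a size-only probe, assuming the 4.19 DAX helper signature:

    #include <linux/dax.h>

    static long example_probe_mappable(struct dax_device *dax_dev,
                                       pgoff_t pgoff, long nr_pages)
    {
            /* NULL kaddr/pfn: the caller only wants the mappable page count */
            return dax_direct_access(dax_dev, pgoff, nr_pages, NULL, NULL);
    }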
index ec891bc7d10a69b4aa2771ae3da4fc9f50028fa9..f039266b275dad7c31559e897f094a056a8c3c2c 100644 (file)
@@ -872,8 +872,6 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
        if (bits & 0x07)
                return -EINVAL;
 
-       memset(bitmap, 0, bits / 8);
-
        if (str[0] == '0' && str[1] == 'x')
                str++;
        if (*str == 'x')
@@ -895,25 +893,23 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
 }
 
 /*
- * str2clrsetmasks() - parse bitmask argument and set the clear and
- * the set bitmap mask. A concatenation (done with ',') of these terms
- * is recognized:
+ * modify_bitmap() - parse bitmask argument and modify an existing
+ * bit mask accordingly. A concatenation (done with ',') of these
+ * terms is recognized:
  *   +<bitnr>[-<bitnr>] or -<bitnr>[-<bitnr>]
  * <bitnr> may be any valid number (hex, decimal or octal) in the range
  * 0...bits-1; the leading + or - is required. Here are some examples:
  *   +0-15,+32,-128,-0xFF
  *   -0-255,+1-16,+0x128
  *   +1,+2,+3,+4,-5,-7-10
- * Returns a clear and a set bitmask. Every positive value in the string
- * results in a bit set in the set mask and every negative value in the
- * string results in a bit SET in the clear mask. As a bit may be touched
- * more than once, the last 'operation' wins: +0-255,-128 = all but bit
- * 128 set in the set mask, only bit 128 set in the clear mask.
+ * Returns the new bitmap after all changes have been applied. Every
+ * positive value in the string will set a bit and every negative value
+ * in the string will clear a bit. As a bit may be touched more than once,
+ * the last 'operation' wins:
+ * +0-255,-128 = first bits 0-255 will be set, then bit 128 will be
+ * cleared again. All other bits are unmodified.
  */
-static int str2clrsetmasks(const char *str,
-                          unsigned long *clrmap,
-                          unsigned long *setmap,
-                          int bits)
+static int modify_bitmap(const char *str, unsigned long *bitmap, int bits)
 {
        int a, i, z;
        char *np, sign;
@@ -922,9 +918,6 @@ static int str2clrsetmasks(const char *str,
        if (bits & 0x07)
                return -EINVAL;
 
-       memset(clrmap, 0, bits / 8);
-       memset(setmap, 0, bits / 8);
-
        while (*str) {
                sign = *str++;
                if (sign != '+' && sign != '-')
@@ -940,13 +933,10 @@ static int str2clrsetmasks(const char *str,
                        str = np;
                }
                for (i = a; i <= z; i++)
-                       if (sign == '+') {
-                               set_bit_inv(i, setmap);
-                               clear_bit_inv(i, clrmap);
-                       } else {
-                               clear_bit_inv(i, setmap);
-                               set_bit_inv(i, clrmap);
-                       }
+                       if (sign == '+')
+                               set_bit_inv(i, bitmap);
+                       else
+                               clear_bit_inv(i, bitmap);
                while (*str == ',' || *str == '\n')
                        str++;
        }
@@ -970,44 +960,34 @@ static int process_mask_arg(const char *str,
                            unsigned long *bitmap, int bits,
                            struct mutex *lock)
 {
-       int i;
+       unsigned long *newmap, size;
+       int rc;
 
        /* bits needs to be a multiple of 8 */
        if (bits & 0x07)
                return -EINVAL;
 
+       size = BITS_TO_LONGS(bits)*sizeof(unsigned long);
+       newmap = kmalloc(size, GFP_KERNEL);
+       if (!newmap)
+               return -ENOMEM;
+       if (mutex_lock_interruptible(lock)) {
+               kfree(newmap);
+               return -ERESTARTSYS;
+       }
+
        if (*str == '+' || *str == '-') {
-               DECLARE_BITMAP(clrm, bits);
-               DECLARE_BITMAP(setm, bits);
-
-               i = str2clrsetmasks(str, clrm, setm, bits);
-               if (i)
-                       return i;
-               if (mutex_lock_interruptible(lock))
-                       return -ERESTARTSYS;
-               for (i = 0; i < bits; i++) {
-                       if (test_bit_inv(i, clrm))
-                               clear_bit_inv(i, bitmap);
-                       if (test_bit_inv(i, setm))
-                               set_bit_inv(i, bitmap);
-               }
+               memcpy(newmap, bitmap, size);
+               rc = modify_bitmap(str, newmap, bits);
        } else {
-               DECLARE_BITMAP(setm, bits);
-
-               i = hex2bitmap(str, setm, bits);
-               if (i)
-                       return i;
-               if (mutex_lock_interruptible(lock))
-                       return -ERESTARTSYS;
-               for (i = 0; i < bits; i++)
-                       if (test_bit_inv(i, setm))
-                               set_bit_inv(i, bitmap);
-                       else
-                               clear_bit_inv(i, bitmap);
+               memset(newmap, 0, size);
+               rc = hex2bitmap(str, newmap, bits);
        }
+       if (rc == 0)
+               memcpy(bitmap, newmap, size);
        mutex_unlock(lock);
-
-       return 0;
+       kfree(newmap);
+       return rc;
 }
 
 /*
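A hedged illustration of the relative syntax now handled by modify_bitmap(); the function is static to this file, so the call below is conceptual rather than something external code can do.

    /* starting from an existing mask, "+0-15" sets bits 0..15 and "-7"
     * then clears bit 7 again; the last operation wins, all other bits
     * stay untouched */
    DECLARE_BITMAP(map, 64) = { 0 };
    int rc = modify_bitmap("+0-15,-7", map, 64);    /* rc == 0 on success */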
index 49f64eb3eab0f759d06a5e3edff6550a06d8af40..de8282420f966f0d0f984c72868d9ebddcf11207 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/netdevice.h>
 #include <linux/netdev_features.h>
 #include <linux/skbuff.h>
+#include <linux/vmalloc.h>
 
 #include <net/iucv/af_iucv.h>
 #include <net/dsfield.h>
@@ -4699,7 +4700,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
 
        priv.buffer_len = oat_data.buffer_len;
        priv.response_len = 0;
-       priv.buffer =  kzalloc(oat_data.buffer_len, GFP_KERNEL);
+       priv.buffer = vzalloc(oat_data.buffer_len);
        if (!priv.buffer) {
                rc = -ENOMEM;
                goto out;
@@ -4740,7 +4741,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
                        rc = -EFAULT;
 
 out_free:
-       kfree(priv.buffer);
+       vfree(priv.buffer);
 out:
        return rc;
 }
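The switch from kzalloc() to vzalloc() above matters because the OAT buffer length comes from user space and can exceed what the page allocator will hand out contiguously. A hedged sketch of the pairing:

    #include <linux/vmalloc.h>

    static int example_run_query(size_t len)
    {
            void *buf = vzalloc(len);   /* virtually contiguous is enough */

            if (!buf)
                    return -ENOMEM;

            /* ... fill the buffer and copy it to user space ... */

            vfree(buf);                 /* vzalloc pairs with vfree, not kfree */
            return 0;
    }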
@@ -5706,6 +5707,8 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
                dev->priv_flags &= ~IFF_TX_SKB_SHARING;
                dev->hw_features |= NETIF_F_SG;
                dev->vlan_features |= NETIF_F_SG;
+               if (IS_IQD(card))
+                       dev->features |= NETIF_F_SG;
        }
 
        return dev;
@@ -5768,8 +5771,10 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
        qeth_update_from_chp_desc(card);
 
        card->dev = qeth_alloc_netdev(card);
-       if (!card->dev)
+       if (!card->dev) {
+               rc = -ENOMEM;
                goto err_card;
+       }
 
        qeth_determine_capabilities(card);
        enforced_disc = qeth_enforce_discipline(card);
index 710fa74892ae55a65f3849147bb3986946c68bf8..b5e38531733f26e49a90158acf481744106dd42f 100644 (file)
@@ -423,7 +423,7 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
                default:
                        dev_kfree_skb_any(skb);
                        QETH_CARD_TEXT(card, 3, "inbunkno");
-                       QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
+                       QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
                        continue;
                }
                work_done++;
index 7175086677fb4be85cc41dd2748157e71986fd30..ada258c01a08e084b11d02d73e553404bfce82cb 100644 (file)
@@ -1390,7 +1390,7 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
                default:
                        dev_kfree_skb_any(skb);
                        QETH_CARD_TEXT(card, 3, "inbunkno");
-                       QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
+                       QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
                        continue;
                }
                work_done++;
index 8fc851a9e1162a5695c1a344445b3fad7685f4dc..7c097006c54db679c40ebd51ca5efcc8d8dc297d 100644 (file)
@@ -52,12 +52,12 @@ config SCSI_MQ_DEFAULT
        default y
        depends on SCSI
        ---help---
-         This option enables the new blk-mq based I/O path for SCSI
-         devices by default.  With the option the scsi_mod.use_blk_mq
-         module/boot option defaults to Y, without it to N, but it can
-         still be overridden either way.
+         This option enables the blk-mq based I/O path for SCSI devices by
+         default.  With this option the scsi_mod.use_blk_mq module/boot
+         option defaults to Y, without it to N, but it can still be
+         overridden either way.
 
-         If unsure say N.
+         If unsure say Y.
 
 config SCSI_PROC_FS
        bool "legacy /proc/scsi/ support"
index 29bf1e60f5428ab4542cf0bd838342bd8d405f8b..39eb415987fc9fd901ba9ee157dedfcf76910829 100644 (file)
@@ -1346,7 +1346,7 @@ struct fib {
 struct aac_hba_map_info {
        __le32  rmw_nexus;              /* nexus for native HBA devices */
        u8              devtype;        /* device type */
-       u8              reset_state;    /* 0 - no reset, 1..x - */
+       s8              reset_state;    /* 0 - no reset, 1..x - */
                                        /* after xth TM LUN reset */
        u16             qd_limit;
        u32             scan_counter;
index 23d07e9f87d0d1e0357547c4f168ab966a0a809e..e5192388647580903b45f885c7995a5b8bbe4bbb 100644 (file)
@@ -1601,6 +1601,46 @@ fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
        return caps32;
 }
 
+/**
+ *     fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
+ *     @caps32: a 32-bit Port Capabilities value
+ *
+ *     Returns the equivalent 16-bit Port Capabilities value.  Note that
+ *     not all 32-bit Port Capabilities can be represented in the 16-bit
+ *     Port Capabilities and some fields/values may not make it.
+ */
+fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
+{
+       fw_port_cap16_t caps16 = 0;
+
+       #define CAP32_TO_CAP16(__cap) \
+               do { \
+                       if (caps32 & FW_PORT_CAP32_##__cap) \
+                               caps16 |= FW_PORT_CAP_##__cap; \
+               } while (0)
+
+       CAP32_TO_CAP16(SPEED_100M);
+       CAP32_TO_CAP16(SPEED_1G);
+       CAP32_TO_CAP16(SPEED_10G);
+       CAP32_TO_CAP16(SPEED_25G);
+       CAP32_TO_CAP16(SPEED_40G);
+       CAP32_TO_CAP16(SPEED_100G);
+       CAP32_TO_CAP16(FC_RX);
+       CAP32_TO_CAP16(FC_TX);
+       CAP32_TO_CAP16(802_3_PAUSE);
+       CAP32_TO_CAP16(802_3_ASM_DIR);
+       CAP32_TO_CAP16(ANEG);
+       CAP32_TO_CAP16(FORCE_PAUSE);
+       CAP32_TO_CAP16(MDIAUTO);
+       CAP32_TO_CAP16(MDISTRAIGHT);
+       CAP32_TO_CAP16(FEC_RS);
+       CAP32_TO_CAP16(FEC_BASER_RS);
+
+       #undef CAP32_TO_CAP16
+
+       return caps16;
+}
+
 /**
  *      lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
  *      @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
@@ -1759,7 +1799,7 @@ csio_enable_ports(struct csio_hw *hw)
                        val = 1;
 
                        csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO,
-                                      hw->pfn, 0, 1, &param, &val, false,
+                                      hw->pfn, 0, 1, &param, &val, true,
                                       NULL);
 
                        if (csio_mb_issue(hw, mbp)) {
@@ -1769,16 +1809,9 @@ csio_enable_ports(struct csio_hw *hw)
                                return -EINVAL;
                        }
 
-                       csio_mb_process_read_params_rsp(hw, mbp, &retval, 1,
-                                                       &val);
-                       if (retval != FW_SUCCESS) {
-                               csio_err(hw, "FW_PARAMS_CMD(r) port:%d failed: 0x%x\n",
-                                        portid, retval);
-                               mempool_free(mbp, hw->mb_mempool);
-                               return -EINVAL;
-                       }
-
-                       fw_caps = val;
+                       csio_mb_process_read_params_rsp(hw, mbp, &retval,
+                                                       0, NULL);
+                       fw_caps = retval ? FW_CAPS16 : FW_CAPS32;
                }
 
                /* Read PORT information */
@@ -2364,8 +2397,8 @@ bye:
 }
 
 /*
- * Returns -EINVAL if attempts to flash the firmware failed
- * else returns 0,
+ * Returns -EINVAL if attempts to flash the firmware failed,
+ * -ENOMEM if memory allocation failed, else returns 0;
  * if flashing was not attempted because the card had the
  * latest firmware, ECANCELED is returned
  */
@@ -2393,6 +2426,13 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset)
                return -EINVAL;
        }
 
+       /* allocate memory to read the header of the firmware on the
+        * card
+        */
+       card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
+       if (!card_fw)
+               return -ENOMEM;
+
        if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))
                fw_bin_file = FW_FNAME_T5;
        else
@@ -2406,11 +2446,6 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset)
                fw_size = fw->size;
        }
 
-       /* allocate memory to read the header of the firmware on the
-        * card
-        */
-       card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
-
        /* upgrade FW logic */
        ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw,
                         hw->fw_state, reset);
index 9e73ef771eb739a8158bedc3d1d4d86b512e0749..e351af6e7c81c1d5fca398425dca662a49493981 100644 (file)
@@ -639,6 +639,7 @@ int csio_handle_intr_status(struct csio_hw *, unsigned int,
 
 fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps);
 fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16);
+fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32);
 fw_port_cap32_t lstatus_to_fwcap(u32 lstatus);
 
 int csio_hw_start(struct csio_hw *);
index c026417269c3c9f191ff948f1a0a3912d87052d8..6f13673d6aa054e4b2e71ec240215b3ad35dc6ee 100644 (file)
@@ -368,7 +368,7 @@ csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
                        FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
 
        if (fw_caps == FW_CAPS16)
-               cmdp->u.l1cfg.rcap = cpu_to_be32(fc);
+               cmdp->u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(fc));
        else
                cmdp->u.l1cfg32.rcap32 = cpu_to_be32(fc);
 }
@@ -395,8 +395,8 @@ csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp,
                        *pcaps = fwcaps16_to_caps32(ntohs(rsp->u.info.pcap));
                        *acaps = fwcaps16_to_caps32(ntohs(rsp->u.info.acap));
                } else {
-                       *pcaps = ntohs(rsp->u.info32.pcaps32);
-                       *acaps = ntohs(rsp->u.info32.acaps32);
+                       *pcaps = be32_to_cpu(rsp->u.info32.pcaps32);
+                       *acaps = be32_to_cpu(rsp->u.info32.acaps32);
                }
        }
 }
index f02dcc875a09a6c6ece63e89e8256c512e11f01e..ea4b0bb0c1cd4e6b12748256741afae339311591 100644 (file)
@@ -563,35 +563,13 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
 }
 EXPORT_SYMBOL(scsi_host_get);
 
-struct scsi_host_mq_in_flight {
-       int cnt;
-};
-
-static void scsi_host_check_in_flight(struct request *rq, void *data,
-               bool reserved)
-{
-       struct scsi_host_mq_in_flight *in_flight = data;
-
-       if (blk_mq_request_started(rq))
-               in_flight->cnt++;
-}
-
 /**
  * scsi_host_busy - Return the host busy counter
  * @shost:     Pointer to Scsi_Host to inc.
  **/
 int scsi_host_busy(struct Scsi_Host *shost)
 {
-       struct scsi_host_mq_in_flight in_flight = {
-               .cnt = 0,
-       };
-
-       if (!shost->use_blk_mq)
-               return atomic_read(&shost->host_busy);
-
-       blk_mq_tagset_busy_iter(&shost->tag_set, scsi_host_check_in_flight,
-                       &in_flight);
-       return in_flight.cnt;
+       return atomic_read(&shost->host_busy);
 }
 EXPORT_SYMBOL(scsi_host_busy);
 
index 58bb70b886d70d714ee6b448843aa113c2b313c0..c120929d4ffe52f3f24664226b7120b9057a2a1d 100644 (file)
@@ -976,7 +976,7 @@ static struct scsi_host_template hpsa_driver_template = {
 #endif
        .sdev_attrs = hpsa_sdev_attrs,
        .shost_attrs = hpsa_shost_attrs,
-       .max_sectors = 1024,
+       .max_sectors = 2048,
        .no_write_same = 1,
 };
 
index e0d0da5f43d6114e548974062d8a3118fdf384c3..43732e8d13473f84b88945072d7156d7a2982279 100644 (file)
@@ -672,7 +672,7 @@ struct lpfc_hba {
 #define LS_NPIV_FAB_SUPPORTED 0x2      /* Fabric supports NPIV */
 #define LS_IGNORE_ERATT       0x4      /* intr handler should ignore ERATT */
 #define LS_MDS_LINK_DOWN      0x8      /* MDS Diagnostics Link Down */
-#define LS_MDS_LOOPBACK      0x16      /* MDS Diagnostics Link Up (Loopback) */
+#define LS_MDS_LOOPBACK      0x10      /* MDS Diagnostics Link Up (Loopback) */
 
        uint32_t hba_flag;      /* hba generic flags */
 #define HBA_ERATT_HANDLED      0x1 /* This flag is set when eratt handled */
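The change above corrects a flag that had been defined as 0x16 instead of the intended 0x10; since these link-state flags are tested individually, each must occupy a single bit. A hedged note on a spelling that makes this slip harder (the names below are examples, not lpfc's):

    /* single-bit flags expressed with BIT() cannot silently overlap */
    #define EXAMPLE_LINK_DOWN   BIT(3)  /* 0x08 */
    #define EXAMPLE_LOOPBACK    BIT(4)  /* 0x10, not 0x16 = 0x02|0x04|0x10 */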
index 5a25553415f8bbb922883e9bbc8af18daebdda52..057a60abe664d269af64a2e85a4cea2faf740cfc 100644 (file)
@@ -5122,16 +5122,16 @@ LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality");
 
 /*
 # lpfc_fdmi_on: Controls FDMI support.
-#       0       No FDMI support (default)
-#       1       Traditional FDMI support
+#       0       No FDMI support
+#       1       Traditional FDMI support (default)
 # Traditional FDMI support means the driver will assume FDMI-2 support;
 # however, if that fails, it will fall back to FDMI-1.
 # If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on.
 # If lpfc_enable_SmartSAN is set 0, the driver uses the current value of
 # lpfc_fdmi_on.
-# Value range [0,1]. Default value is 0.
+# Value range [0,1]. Default value is 1.
 */
-LPFC_ATTR_R(fdmi_on, 0, 0, 1, "Enable FDMI support");
+LPFC_ATTR_R(fdmi_on, 1, 0, 1, "Enable FDMI support");
 
 /*
 # Specifies the maximum number of ELS cmds we can have outstanding (for
index 0e56f1eb05dc5caa711360dab68fcb7584d71b71..eaf36ccf58db55779033f73c936a5dec586b58c1 100644 (file)
@@ -423,19 +423,11 @@ static int osd_probe(struct device *dev)
        if (scsi_device->type != TYPE_OSD)
                return -ENODEV;
 
-       do {
-               if (!ida_pre_get(&osd_minor_ida, GFP_KERNEL))
-                       return -ENODEV;
-
-               error = ida_get_new(&osd_minor_ida, &minor);
-       } while (error == -EAGAIN);
-
-       if (error)
-               return error;
-       if (minor >= SCSI_OSD_MAX_MINOR) {
-               error = -EBUSY;
-               goto err_retract_minor;
-       }
+       minor = ida_alloc_max(&osd_minor_ida, SCSI_OSD_MAX_MINOR, GFP_KERNEL);
+       if (minor == -ENOSPC)
+               return -EBUSY;
+       if (minor < 0)
+               return -ENODEV;
 
        error = -ENOMEM;
        oud = kzalloc(sizeof(*oud), GFP_KERNEL);
@@ -499,7 +491,7 @@ static int osd_probe(struct device *dev)
 err_free_osd:
        put_device(&oud->class_dev);
 err_retract_minor:
-       ida_remove(&osd_minor_ida, minor);
+       ida_free(&osd_minor_ida, minor);
        return error;
 }
 
@@ -514,7 +506,7 @@ static int osd_remove(struct device *dev)
        }
 
        cdev_device_del(&oud->cdev, &oud->class_dev);
-       ida_remove(&osd_minor_ida, oud->minor);
+       ida_free(&osd_minor_ida, oud->minor);
        put_device(&oud->class_dev);
 
        return 0;
index fc3babc15fa3e9b423d7bdf23d64c960899322c0..a6f96b35e971ea52b0f3508308f7ff2b7c056be7 100644 (file)
@@ -77,6 +77,11 @@ enum qedi_nvm_tgts {
        QEDI_NVM_TGT_SEC,
 };
 
+struct qedi_nvm_iscsi_image {
+       struct nvm_iscsi_cfg iscsi_cfg;
+       u32 crc;
+};
+
 struct qedi_uio_ctrl {
        /* meta data */
        u32 uio_hsi_version;
@@ -294,7 +299,7 @@ struct qedi_ctx {
        void *bdq_pbl_list;
        dma_addr_t bdq_pbl_list_dma;
        u8 bdq_pbl_list_num_entries;
-       struct nvm_iscsi_cfg *iscsi_cfg;
+       struct qedi_nvm_iscsi_image *iscsi_image;
        dma_addr_t nvm_buf_dma;
        void __iomem *bdq_primary_prod;
        void __iomem *bdq_secondary_prod;
index aa96bccb5a9645d1b43f6fae77543a56dd89f58b..cc8e64dc65ad896d8d5ce3e7fa7495d88d95ccfb 100644 (file)
@@ -1346,23 +1346,26 @@ exit_setup_int:
 
 static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi)
 {
-       if (qedi->iscsi_cfg)
+       if (qedi->iscsi_image)
                dma_free_coherent(&qedi->pdev->dev,
-                                 sizeof(struct nvm_iscsi_cfg),
-                                 qedi->iscsi_cfg, qedi->nvm_buf_dma);
+                                 sizeof(struct qedi_nvm_iscsi_image),
+                                 qedi->iscsi_image, qedi->nvm_buf_dma);
 }
 
 static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
 {
-       qedi->iscsi_cfg = dma_zalloc_coherent(&qedi->pdev->dev,
-                                            sizeof(struct nvm_iscsi_cfg),
-                                            &qedi->nvm_buf_dma, GFP_KERNEL);
-       if (!qedi->iscsi_cfg) {
+       struct qedi_nvm_iscsi_image nvm_image;
+
+       qedi->iscsi_image = dma_zalloc_coherent(&qedi->pdev->dev,
+                                               sizeof(nvm_image),
+                                               &qedi->nvm_buf_dma,
+                                               GFP_KERNEL);
+       if (!qedi->iscsi_image) {
                QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
                return -ENOMEM;
        }
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
-                 "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_cfg,
+                 "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_image,
                  qedi->nvm_buf_dma);
 
        return 0;
@@ -1905,7 +1908,7 @@ qedi_get_nvram_block(struct qedi_ctx *qedi)
        struct nvm_iscsi_block *block;
 
        pf = qedi->dev_info.common.abs_pf_id;
-       block = &qedi->iscsi_cfg->block[0];
+       block = &qedi->iscsi_image->iscsi_cfg.block[0];
        for (i = 0; i < NUM_OF_ISCSI_PF_SUPPORTED; i++, block++) {
                flags = ((block->id) & NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK) >>
                        NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET;
@@ -2194,15 +2197,14 @@ static void qedi_boot_release(void *data)
 static int qedi_get_boot_info(struct qedi_ctx *qedi)
 {
        int ret = 1;
-       u16 len;
-
-       len = sizeof(struct nvm_iscsi_cfg);
+       struct qedi_nvm_iscsi_image nvm_image;
 
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
                  "Get NVM iSCSI CFG image\n");
        ret = qedi_ops->common->nvm_get_image(qedi->cdev,
                                              QED_NVM_IMAGE_ISCSI_CFG,
-                                             (char *)qedi->iscsi_cfg, len);
+                                             (char *)qedi->iscsi_image,
+                                             sizeof(nvm_image));
        if (ret)
                QEDI_ERR(&qedi->dbg_ctx,
                         "Could not get NVM image. ret = %d\n", ret);
index fecf96f0225c6e2b010cb5e6e2411d1c00540ab7..199d3ba1916d5ef5b302e762f4d4e29a33066413 100644 (file)
@@ -374,8 +374,8 @@ struct atio_from_isp {
 static inline int fcpcmd_is_corrupted(struct atio *atio)
 {
        if (atio->entry_type == ATIO_TYPE7 &&
-           (le16_to_cpu(atio->attr_n_length & FCP_CMD_LENGTH_MASK) <
-           FCP_CMD_LENGTH_MIN))
+           ((le16_to_cpu(atio->attr_n_length) & FCP_CMD_LENGTH_MASK) <
+            FCP_CMD_LENGTH_MIN))
                return 1;
        else
                return 0;
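The fix above moves the byte swap inside the parentheses: the FCP length mask has to be applied to the CPU-order value, not to the raw little-endian field. A hedged restatement of the corrected order of operations, reusing the mask and minimum from the same header:

    /* convert endianness first, then mask in CPU byte order */
    static bool example_len_too_short(__le16 raw_attr_n_length)
    {
            u16 len = le16_to_cpu(raw_attr_n_length) & FCP_CMD_LENGTH_MASK;

            return len < FCP_CMD_LENGTH_MIN;    /* corrupted FCP command */
    }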
index 0adfb3bce0fd677ddae9f9d806472ccf2077ce25..eb97d2dd36516d0a3a5c0a9db1f255aaf5a9c56f 100644 (file)
@@ -345,8 +345,7 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost)
        unsigned long flags;
 
        rcu_read_lock();
-       if (!shost->use_blk_mq)
-               atomic_dec(&shost->host_busy);
+       atomic_dec(&shost->host_busy);
        if (unlikely(scsi_host_in_recovery(shost))) {
                spin_lock_irqsave(shost->host_lock, flags);
                if (shost->host_failed || shost->host_eh_scheduled)
@@ -445,12 +444,7 @@ static inline bool scsi_target_is_busy(struct scsi_target *starget)
 
 static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
 {
-       /*
-        * blk-mq can handle host queue busy efficiently via host-wide driver
-        * tag allocation
-        */
-
-       if (!shost->use_blk_mq && shost->can_queue > 0 &&
+       if (shost->can_queue > 0 &&
            atomic_read(&shost->host_busy) >= shost->can_queue)
                return true;
        if (atomic_read(&shost->host_blocked) > 0)
@@ -1606,10 +1600,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
        if (scsi_host_in_recovery(shost))
                return 0;
 
-       if (!shost->use_blk_mq)
-               busy = atomic_inc_return(&shost->host_busy) - 1;
-       else
-               busy = 0;
+       busy = atomic_inc_return(&shost->host_busy) - 1;
        if (atomic_read(&shost->host_blocked) > 0) {
                if (busy)
                        goto starved;
@@ -1625,7 +1616,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
                                     "unblocking host at zero depth\n"));
        }
 
-       if (!shost->use_blk_mq && shost->can_queue > 0 && busy >= shost->can_queue)
+       if (shost->can_queue > 0 && busy >= shost->can_queue)
                goto starved;
        if (shost->host_self_blocked)
                goto starved;
@@ -1711,9 +1702,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
         * with the locks as normal issue path does.
         */
        atomic_inc(&sdev->device_busy);
-
-       if (!shost->use_blk_mq)
-               atomic_inc(&shost->host_busy);
+       atomic_inc(&shost->host_busy);
        if (starget->can_queue > 0)
                atomic_inc(&starget->target_busy);
 
index a58cee7a85f2aef8daee2da9ba3960e6d4090276..b79b366a94f7993d2dc83c2b0c757c70ed753e75 100644 (file)
@@ -123,7 +123,6 @@ static void scsi_disk_release(struct device *cdev);
 static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
 static void sd_print_result(const struct scsi_disk *, const char *, int);
 
-static DEFINE_SPINLOCK(sd_index_lock);
 static DEFINE_IDA(sd_index_ida);
 
 /* This semaphore is used to mediate the 0->1 reference get in the
@@ -3340,16 +3339,8 @@ static int sd_probe(struct device *dev)
        if (!gd)
                goto out_free;
 
-       do {
-               if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))
-                       goto out_put;
-
-               spin_lock(&sd_index_lock);
-               error = ida_get_new(&sd_index_ida, &index);
-               spin_unlock(&sd_index_lock);
-       } while (error == -EAGAIN);
-
-       if (error) {
+       index = ida_alloc(&sd_index_ida, GFP_KERNEL);
+       if (index < 0) {
                sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n");
                goto out_put;
        }
@@ -3393,9 +3384,7 @@ static int sd_probe(struct device *dev)
        return 0;
 
  out_free_index:
-       spin_lock(&sd_index_lock);
-       ida_remove(&sd_index_ida, index);
-       spin_unlock(&sd_index_lock);
+       ida_free(&sd_index_ida, index);
  out_put:
        put_disk(gd);
  out_free:
@@ -3460,9 +3449,7 @@ static void scsi_disk_release(struct device *dev)
        struct scsi_disk *sdkp = to_scsi_disk(dev);
        struct gendisk *disk = sdkp->disk;
        
-       spin_lock(&sd_index_lock);
-       ida_remove(&sd_index_ida, sdkp->index);
-       spin_unlock(&sd_index_lock);
+       ida_free(&sd_index_ida, sdkp->index);
 
        disk->private_data = NULL;
        put_disk(disk);
index 7cb3ab0a35a009f619ced0409f449e308fd51096..3082e72e4f6c62ed00651e389381ee2896336c52 100644 (file)
 
 #define DRIVER_NAME "fsl-dspi"
 
+#ifdef CONFIG_M5441x
+#define DSPI_FIFO_SIZE                 16
+#else
 #define DSPI_FIFO_SIZE                 4
+#endif
 #define DSPI_DMA_BUFSIZE               (DSPI_FIFO_SIZE * 1024)
 
 #define SPI_MCR                0x00
@@ -623,9 +627,11 @@ static void dspi_tcfq_read(struct fsl_dspi *dspi)
 static void dspi_eoq_write(struct fsl_dspi *dspi)
 {
        int fifo_size = DSPI_FIFO_SIZE;
+       u16 xfer_cmd = dspi->tx_cmd;
 
        /* Fill TX FIFO with as many transfers as possible */
        while (dspi->len && fifo_size--) {
+               dspi->tx_cmd = xfer_cmd;
                /* Request EOQF for last transfer in FIFO */
                if (dspi->len == dspi->bytes_per_word || fifo_size == 0)
                        dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ;
index ec395a6baf9cacbae1830cf062fb40ae7e778f62..9da0bc5a036cfff6af0395ace48cad20ae9852d6 100644 (file)
@@ -2143,8 +2143,17 @@ int spi_register_controller(struct spi_controller *ctlr)
         */
        if (ctlr->num_chipselect == 0)
                return -EINVAL;
-       /* allocate dynamic bus number using Linux idr */
-       if ((ctlr->bus_num < 0) && ctlr->dev.of_node) {
+       if (ctlr->bus_num >= 0) {
+               /* devices with a fixed bus num must check-in with the num */
+               mutex_lock(&board_lock);
+               id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
+                       ctlr->bus_num + 1, GFP_KERNEL);
+               mutex_unlock(&board_lock);
+               if (WARN(id < 0, "couldn't get idr"))
+                       return id == -ENOSPC ? -EBUSY : id;
+               ctlr->bus_num = id;
+       } else if (ctlr->dev.of_node) {
+               /* allocate dynamic bus number using Linux idr */
                id = of_alias_get_id(ctlr->dev.of_node, "spi");
                if (id >= 0) {
                        ctlr->bus_num = id;
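The new branch above reserves exactly the requested bus number: idr_alloc() over the range [bus_num, bus_num + 1) either returns that id or -ENOSPC when it is already taken. A hedged sketch of the same reservation in isolation:

    #include <linux/idr.h>

    /* reserve one specific id; -EBUSY if another controller already owns it */
    static int example_claim_bus_num(struct idr *idr, void *ctlr, int bus_num)
    {
            int id = idr_alloc(idr, ctlr, bus_num, bus_num + 1, GFP_KERNEL);

            return (id == -ENOSPC) ? -EBUSY : id;
    }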
index 96f614934df12f75aebd943cc229fb60a25e826c..663b755bf2fbf1d5b2afb6a8991d78a8b7bcf799 100644 (file)
@@ -2,7 +2,7 @@
 
 config EROFS_FS
        tristate "EROFS filesystem support"
-       depends on BROKEN
+       depends on BLOCK
        help
          EROFS(Enhanced Read-Only File System) is a lightweight
          read-only file system with modern designs (eg. page-sized
index 1aec509c805feddf35b9db78e2db78bdcd5b00fd..2df9768edac96d1dc199071698558c6fb1e5da5d 100644 (file)
@@ -340,7 +340,7 @@ static int erofs_read_super(struct super_block *sb,
                goto err_sbread;
 
        sb->s_magic = EROFS_SUPER_MAGIC;
-       sb->s_flags |= MS_RDONLY | MS_NOATIME;
+       sb->s_flags |= SB_RDONLY | SB_NOATIME;
        sb->s_maxbytes = MAX_LFS_FILESIZE;
        sb->s_time_gran = 1;
 
@@ -627,7 +627,7 @@ static int erofs_remount(struct super_block *sb, int *flags, char *data)
 {
        BUG_ON(!sb_rdonly(sb));
 
-       *flags |= MS_RDONLY;
+       *flags |= SB_RDONLY;
        return 0;
 }
 
index 7e64c7e438f04c527cd730b4853321968d32f94a..a9f4802bb6bec4a6d0570c8a6917962b849bafd4 100644 (file)
@@ -2,3 +2,7 @@
   GPIO descriptor API in <linux/gpio/consumer.h> and look up GPIO
   lines from device tree, ACPI or board files, board files should
   use <linux/gpio/machine.h>
+
+* convert all these over to drm_simple_display_pipe and submit for inclusion
+  into the DRM subsystem under drivers/gpu/drm - fbdev doesn't take any new
+  drivers anymore.
index 6ff8e01b04cc8f190c2edf08cd4e3ebd35b6aa07..5b1865f8af2d9f46014451e0da4ecc9b3b423c03 100644 (file)
@@ -1,9 +1,22 @@
 This is a list of things that need to be done to get this driver out of the
 staging directory.
+
+- Implement the gasket framework's functionality through UIO instead of
+  introducing a new user-space drivers framework that is quite similar.
+
+  UIO provides the necessary bits to implement user-space drivers. Meanwhile
+  the gasket APIs add some extra conveniences like PCI BAR mapping and
+  MSI interrupts. Add these features to the UIO subsystem, then re-implement
+  the Apex driver as a basic UIO driver instead (include/linux/uio_driver.h).
+
 - Document sysfs files with Documentation/ABI/ entries.
+
 - Use misc interface instead of major number for driver version description.
+
 - Add descriptions of module_param's
+
 - apex_get_status() should actually check status.
+
 - "drivers" should never be dealing with "raw" sysfs calls or mess around with
   kobjects at all. The driver core should handle all of this for you
   automatically. There should not be a need for raw attribute macros.
index da92c493f1577ab7d35da20c73dd70f052fffc77..69cc508af1bce1aa52e244f74b2628fcdba35eb9 100644 (file)
@@ -59,6 +59,11 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                ret = PTR_ERR(dev);
                goto err_drv_alloc;
        }
+
+       ret = pci_enable_device(pdev);
+       if (ret)
+               goto err_pci_enable;
+
        dev->pdev = pdev;
        pci_set_drvdata(pdev, dev);
 
@@ -75,6 +80,8 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  err_drv_dev_register:
        vbox_driver_unload(dev);
  err_vbox_driver_load:
+       pci_disable_device(pdev);
+ err_pci_enable:
        drm_dev_put(dev);
  err_drv_alloc:
        return ret;
index a83eac8668d02f009044ccd491ea5aa3091874a7..79836c8fb909d6851ed1edb014146aae07326ea0 100644 (file)
@@ -323,6 +323,11 @@ static int vbox_crtc_page_flip(struct drm_crtc *crtc,
        if (rc)
                return rc;
 
+       mutex_lock(&vbox->hw_mutex);
+       vbox_set_view(crtc);
+       vbox_do_modeset(crtc, &crtc->mode);
+       mutex_unlock(&vbox->hw_mutex);
+
        spin_lock_irqsave(&drm->event_lock, flags);
 
        if (event)
index f7b07c0b5ce2ece87b057d8257a2ae225f8854cb..ee7e26b886a563e8743d6fdf4a3948de9eaec453 100644 (file)
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_WILC1000) += wilc1000.o
 
 ccflags-y += -DFIRMWARE_1002=\"atmel/wilc1002_firmware.bin\" \
                -DFIRMWARE_1003=\"atmel/wilc1003_firmware.bin\"
@@ -11,9 +12,7 @@ wilc1000-objs := wilc_wfi_cfgoperations.o linux_wlan.o linux_mon.o \
                        wilc_wlan.o
 
 obj-$(CONFIG_WILC1000_SDIO) += wilc1000-sdio.o
-wilc1000-sdio-objs += $(wilc1000-objs)
 wilc1000-sdio-objs += wilc_sdio.o
 
 obj-$(CONFIG_WILC1000_SPI) += wilc1000-spi.o
-wilc1000-spi-objs += $(wilc1000-objs)
 wilc1000-spi-objs += wilc_spi.o
index 01cf4bd2e192da2069a27784d1769e36c7affdc8..3b8d237decbf179b7016242583909fc46146c99c 100644 (file)
@@ -1038,8 +1038,8 @@ void wilc_netdev_cleanup(struct wilc *wilc)
        }
 
        kfree(wilc);
-       wilc_debugfs_remove();
 }
+EXPORT_SYMBOL_GPL(wilc_netdev_cleanup);
 
 static const struct net_device_ops wilc_netdev_ops = {
        .ndo_init = mac_init_fn,
@@ -1062,7 +1062,6 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
        if (!wl)
                return -ENOMEM;
 
-       wilc_debugfs_init();
        *wilc = wl;
        wl->io_type = io_type;
        wl->hif_func = ops;
@@ -1124,3 +1123,6 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(wilc_netdev_init);
+
+MODULE_LICENSE("GPL");
index edc72876458de06448543de1f1d571a561e37642..8001df66b8c26fd06bee036a0997cb2da4f1d74c 100644 (file)
@@ -19,6 +19,7 @@ static struct dentry *wilc_dir;
 
 #define DBG_LEVEL_ALL  (DEBUG | INFO | WRN | ERR)
 static atomic_t WILC_DEBUG_LEVEL = ATOMIC_INIT(ERR);
+EXPORT_SYMBOL_GPL(WILC_DEBUG_LEVEL);
 
 static ssize_t wilc_debug_level_read(struct file *file, char __user *userbuf,
                                     size_t count, loff_t *ppos)
@@ -87,7 +88,7 @@ static struct wilc_debugfs_info_t debugfs_info[] = {
        },
 };
 
-int wilc_debugfs_init(void)
+static int __init wilc_debugfs_init(void)
 {
        int i;
        struct wilc_debugfs_info_t *info;
@@ -103,10 +104,12 @@ int wilc_debugfs_init(void)
        }
        return 0;
 }
+module_init(wilc_debugfs_init);
 
-void wilc_debugfs_remove(void)
+static void __exit wilc_debugfs_remove(void)
 {
        debugfs_remove_recursive(wilc_dir);
 }
+module_exit(wilc_debugfs_remove);
 
 #endif
index 6787b6e9f124379bc1aa02bdd4eea44c9263256b..8b184aa30d25d958dab26ddd1adc73583965debd 100644 (file)
@@ -417,6 +417,7 @@ void chip_allow_sleep(struct wilc *wilc)
        wilc->hif_func->hif_write_reg(wilc, 0xf0, reg & ~BIT(0));
        wilc->hif_func->hif_write_reg(wilc, 0xfa, 0);
 }
+EXPORT_SYMBOL_GPL(chip_allow_sleep);
 
 void chip_wakeup(struct wilc *wilc)
 {
@@ -471,6 +472,7 @@ void chip_wakeup(struct wilc *wilc)
        }
        chip_ps_state = CHIP_WAKEDUP;
 }
+EXPORT_SYMBOL_GPL(chip_wakeup);
 
 void wilc_chip_sleep_manually(struct wilc *wilc)
 {
@@ -484,6 +486,7 @@ void wilc_chip_sleep_manually(struct wilc *wilc)
        chip_ps_state = CHIP_SLEEPING_MANUAL;
        release_bus(wilc, RELEASE_ONLY);
 }
+EXPORT_SYMBOL_GPL(wilc_chip_sleep_manually);
 
 void host_wakeup_notify(struct wilc *wilc)
 {
@@ -491,6 +494,7 @@ void host_wakeup_notify(struct wilc *wilc)
        wilc->hif_func->hif_write_reg(wilc, 0x10b0, 1);
        release_bus(wilc, RELEASE_ONLY);
 }
+EXPORT_SYMBOL_GPL(host_wakeup_notify);
 
 void host_sleep_notify(struct wilc *wilc)
 {
@@ -498,6 +502,7 @@ void host_sleep_notify(struct wilc *wilc)
        wilc->hif_func->hif_write_reg(wilc, 0x10ac, 1);
        release_bus(wilc, RELEASE_ONLY);
 }
+EXPORT_SYMBOL_GPL(host_sleep_notify);
 
 int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
 {
@@ -871,6 +876,7 @@ void wilc_handle_isr(struct wilc *wilc)
 
        release_bus(wilc, RELEASE_ALLOW_SLEEP);
 }
+EXPORT_SYMBOL_GPL(wilc_handle_isr);
 
 int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
                                u32 buffer_size)
index 00d13b153f8045e047e321a42f37a46d5f9ed9ef..b81a73b9bd67e674a9a45ce785708e775edb5d01 100644 (file)
@@ -831,6 +831,4 @@ struct wilc;
 int wilc_wlan_init(struct net_device *dev);
 u32 wilc_get_chipid(struct wilc *wilc, bool update);
 
-int wilc_debugfs_init(void);
-void wilc_debugfs_remove(void);
 #endif
index 768cce0ccb807518f32b3d72d875915e9391fdfd..76a262674c8dc7c4563fba8f6b570b6155d2d182 100644 (file)
@@ -207,8 +207,8 @@ cxgbit_ddp_reserve(struct cxgbit_sock *csk, struct cxgbi_task_tag_info *ttinfo,
        ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
        sgl->offset = sg_offset;
        if (!ret) {
-               pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
-                       __func__, 0, xferlen, sgcnt);
+               pr_debug("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
+                        __func__, 0, xferlen, sgcnt);
                goto rel_ppods;
        }
 
@@ -250,8 +250,8 @@ cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
        ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length);
        if (ret < 0) {
-               pr_info("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n",
-                       csk, cmd, cmd->se_cmd.data_length, ttinfo->nents);
+               pr_debug("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n",
+                        csk, cmd, cmd->se_cmd.data_length, ttinfo->nents);
 
                ttinfo->sgl = NULL;
                ttinfo->nents = 0;
index 8e223799347a1dd78d5abcd8d8616717e93802ba..cc756a123fd8c33f8ccfcad75edd783efb15698d 100644 (file)
@@ -57,9 +57,8 @@ static DEFINE_SPINLOCK(tiqn_lock);
 static DEFINE_MUTEX(np_lock);
 
 static struct idr tiqn_idr;
-struct idr sess_idr;
+DEFINE_IDA(sess_ida);
 struct mutex auth_id_lock;
-spinlock_t sess_idr_lock;
 
 struct iscsit_global *iscsit_global;
 
@@ -700,9 +699,7 @@ static int __init iscsi_target_init_module(void)
 
        spin_lock_init(&iscsit_global->ts_bitmap_lock);
        mutex_init(&auth_id_lock);
-       spin_lock_init(&sess_idr_lock);
        idr_init(&tiqn_idr);
-       idr_init(&sess_idr);
 
        ret = target_register_template(&iscsi_ops);
        if (ret)
@@ -1419,7 +1416,8 @@ static void iscsit_do_crypto_hash_buf(struct ahash_request *hash,
 
        sg_init_table(sg, ARRAY_SIZE(sg));
        sg_set_buf(sg, buf, payload_length);
-       sg_set_buf(sg + 1, pad_bytes, padding);
+       if (padding)
+               sg_set_buf(sg + 1, pad_bytes, padding);
 
        ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding);
 
@@ -3913,10 +3911,14 @@ static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
 static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
 {
        int ret;
-       u8 buffer[ISCSI_HDR_LEN], opcode;
+       u8 *buffer, opcode;
        u32 checksum = 0, digest = 0;
        struct kvec iov;
 
+       buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL);
+       if (!buffer)
+               return;
+
        while (!kthread_should_stop()) {
                /*
                 * Ensure that both TX and RX per connection kthreads
@@ -3924,7 +3926,6 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
                 */
                iscsit_thread_check_cpumask(conn, current, 0);
 
-               memset(buffer, 0, ISCSI_HDR_LEN);
                memset(&iov, 0, sizeof(struct kvec));
 
                iov.iov_base    = buffer;
@@ -3933,7 +3934,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
                ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
                if (ret != ISCSI_HDR_LEN) {
                        iscsit_rx_thread_wait_for_tcp(conn);
-                       return;
+                       break;
                }
 
                if (conn->conn_ops->HeaderDigest) {
@@ -3943,7 +3944,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
                        ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
                        if (ret != ISCSI_CRC_LEN) {
                                iscsit_rx_thread_wait_for_tcp(conn);
-                               return;
+                               break;
                        }
 
                        iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer,
@@ -3967,7 +3968,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
                }
 
                if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
-                       return;
+                       break;
 
                opcode = buffer[0] & ISCSI_OPCODE_MASK;
 
@@ -3978,13 +3979,15 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
                        " while in Discovery Session, rejecting.\n", opcode);
                        iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
                                          buffer);
-                       return;
+                       break;
                }
 
                ret = iscsi_target_rx_opcode(conn, buffer);
                if (ret < 0)
-                       return;
+                       break;
        }
+
+       kfree(buffer);
 }
 
 int iscsi_target_rx_thread(void *arg)
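With the receive header buffer moved from the stack to the heap above, every early "return" in the loop becomes a "break" so the single kfree() at the end always runs. A hedged, stripped-down version of that shape; example_rx_one_pdu() is a stand-in for the real receive/verify steps:

    static void example_rx_loop(struct iscsi_conn *conn)
    {
            u8 *buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL);

            if (!buffer)
                    return;

            while (!kthread_should_stop()) {
                    /* any failure breaks out instead of returning directly */
                    if (example_rx_one_pdu(conn, buffer) < 0)
                            break;
            }

            kfree(buffer);      /* single cleanup point for all exit paths */
    }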
@@ -4211,22 +4214,15 @@ int iscsit_close_connection(
                crypto_free_ahash(tfm);
        }
 
-       free_cpumask_var(conn->conn_cpumask);
-
-       kfree(conn->conn_ops);
-       conn->conn_ops = NULL;
-
        if (conn->sock)
                sock_release(conn->sock);
 
        if (conn->conn_transport->iscsit_free_conn)
                conn->conn_transport->iscsit_free_conn(conn);
 
-       iscsit_put_transport(conn->conn_transport);
-
        pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
        conn->conn_state = TARG_CONN_STATE_FREE;
-       kfree(conn);
+       iscsit_free_conn(conn);
 
        spin_lock_bh(&sess->conn_lock);
        atomic_dec(&sess->nconn);
@@ -4375,10 +4371,7 @@ int iscsit_close_session(struct iscsi_session *sess)
        pr_debug("Decremented number of active iSCSI Sessions on"
                " iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions);
 
-       spin_lock(&sess_idr_lock);
-       idr_remove(&sess_idr, sess->session_index);
-       spin_unlock(&sess_idr_lock);
-
+       ida_free(&sess_ida, sess->session_index);
        kfree(sess->sess_ops);
        sess->sess_ops = NULL;
        spin_unlock_bh(&se_tpg->session_lock);
index 42de1843aa40d06f9967693ab5dbf20852e84629..48bac0acf8c76e055c022bc2b74423cca50b04bf 100644 (file)
@@ -55,9 +55,7 @@ extern struct kmem_cache *lio_ooo_cache;
 extern struct kmem_cache *lio_qr_cache;
 extern struct kmem_cache *lio_r2t_cache;
 
-extern struct idr sess_idr;
+extern struct ida sess_ida;
 extern struct mutex auth_id_lock;
-extern spinlock_t sess_idr_lock;
-
 
 #endif   /*** ISCSI_TARGET_H ***/
index 923b1a9fc3dcd95f71de3d2eb78e7e8064b81b01..bb90c80ff3889bdde58df0877a6f7c603bc3eb05 100644 (file)
@@ -67,45 +67,10 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
                goto out_req_buf;
        }
 
-       conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
-       if (!conn->conn_ops) {
-               pr_err("Unable to allocate memory for"
-                       " struct iscsi_conn_ops.\n");
-               goto out_rsp_buf;
-       }
-
-       init_waitqueue_head(&conn->queues_wq);
-       INIT_LIST_HEAD(&conn->conn_list);
-       INIT_LIST_HEAD(&conn->conn_cmd_list);
-       INIT_LIST_HEAD(&conn->immed_queue_list);
-       INIT_LIST_HEAD(&conn->response_queue_list);
-       init_completion(&conn->conn_post_wait_comp);
-       init_completion(&conn->conn_wait_comp);
-       init_completion(&conn->conn_wait_rcfr_comp);
-       init_completion(&conn->conn_waiting_on_uc_comp);
-       init_completion(&conn->conn_logout_comp);
-       init_completion(&conn->rx_half_close_comp);
-       init_completion(&conn->tx_half_close_comp);
-       init_completion(&conn->rx_login_comp);
-       spin_lock_init(&conn->cmd_lock);
-       spin_lock_init(&conn->conn_usage_lock);
-       spin_lock_init(&conn->immed_queue_lock);
-       spin_lock_init(&conn->nopin_timer_lock);
-       spin_lock_init(&conn->response_queue_lock);
-       spin_lock_init(&conn->state_lock);
-
-       if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
-               pr_err("Unable to allocate conn->conn_cpumask\n");
-               goto out_conn_ops;
-       }
        conn->conn_login = login;
 
        return login;
 
-out_conn_ops:
-       kfree(conn->conn_ops);
-out_rsp_buf:
-       kfree(login->rsp_buf);
 out_req_buf:
        kfree(login->req_buf);
 out_login:
@@ -310,11 +275,9 @@ static int iscsi_login_zero_tsih_s1(
                return -ENOMEM;
        }
 
-       ret = iscsi_login_set_conn_values(sess, conn, pdu->cid);
-       if (unlikely(ret)) {
-               kfree(sess);
-               return ret;
-       }
+       if (iscsi_login_set_conn_values(sess, conn, pdu->cid))
+               goto free_sess;
+
        sess->init_task_tag     = pdu->itt;
        memcpy(&sess->isid, pdu->isid, 6);
        sess->exp_cmd_sn        = be32_to_cpu(pdu->cmdsn);
@@ -336,22 +299,15 @@ static int iscsi_login_zero_tsih_s1(
        timer_setup(&sess->time2retain_timer,
                    iscsit_handle_time2retain_timeout, 0);
 
-       idr_preload(GFP_KERNEL);
-       spin_lock_bh(&sess_idr_lock);
-       ret = idr_alloc(&sess_idr, NULL, 0, 0, GFP_NOWAIT);
-       if (ret >= 0)
-               sess->session_index = ret;
-       spin_unlock_bh(&sess_idr_lock);
-       idr_preload_end();
-
+       ret = ida_alloc(&sess_ida, GFP_KERNEL);
        if (ret < 0) {
-               pr_err("idr_alloc() for sess_idr failed\n");
+               pr_err("Session ID allocation failed %d\n", ret);
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
-               kfree(sess);
-               return -ENOMEM;
+               goto free_sess;
        }
 
+       sess->session_index = ret;
        sess->creation_time = get_jiffies_64();
        /*
         * The FFP CmdSN window values will be allocated from the TPG's
@@ -365,20 +321,26 @@ static int iscsi_login_zero_tsih_s1(
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
                pr_err("Unable to allocate memory for"
                                " struct iscsi_sess_ops.\n");
-               kfree(sess);
-               return -ENOMEM;
+               goto free_id;
        }
 
        sess->se_sess = transport_alloc_session(TARGET_PROT_NORMAL);
        if (IS_ERR(sess->se_sess)) {
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
-               kfree(sess->sess_ops);
-               kfree(sess);
-               return -ENOMEM;
+               goto free_ops;
        }
 
        return 0;
+
+free_ops:
+       kfree(sess->sess_ops);
+free_id:
+       ida_free(&sess_ida, sess->session_index);
+free_sess:
+       kfree(sess);
+       conn->sess = NULL;
+       return -ENOMEM;
 }
 
 static int iscsi_login_zero_tsih_s2(
@@ -1150,6 +1112,75 @@ iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t)
        return 0;
 }
 
+static struct iscsi_conn *iscsit_alloc_conn(struct iscsi_np *np)
+{
+       struct iscsi_conn *conn;
+
+       conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
+       if (!conn) {
+               pr_err("Could not allocate memory for new connection\n");
+               return NULL;
+       }
+       pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
+       conn->conn_state = TARG_CONN_STATE_FREE;
+
+       init_waitqueue_head(&conn->queues_wq);
+       INIT_LIST_HEAD(&conn->conn_list);
+       INIT_LIST_HEAD(&conn->conn_cmd_list);
+       INIT_LIST_HEAD(&conn->immed_queue_list);
+       INIT_LIST_HEAD(&conn->response_queue_list);
+       init_completion(&conn->conn_post_wait_comp);
+       init_completion(&conn->conn_wait_comp);
+       init_completion(&conn->conn_wait_rcfr_comp);
+       init_completion(&conn->conn_waiting_on_uc_comp);
+       init_completion(&conn->conn_logout_comp);
+       init_completion(&conn->rx_half_close_comp);
+       init_completion(&conn->tx_half_close_comp);
+       init_completion(&conn->rx_login_comp);
+       spin_lock_init(&conn->cmd_lock);
+       spin_lock_init(&conn->conn_usage_lock);
+       spin_lock_init(&conn->immed_queue_lock);
+       spin_lock_init(&conn->nopin_timer_lock);
+       spin_lock_init(&conn->response_queue_lock);
+       spin_lock_init(&conn->state_lock);
+
+       timer_setup(&conn->nopin_response_timer,
+                   iscsit_handle_nopin_response_timeout, 0);
+       timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0);
+
+       if (iscsit_conn_set_transport(conn, np->np_transport) < 0)
+               goto free_conn;
+
+       conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
+       if (!conn->conn_ops) {
+               pr_err("Unable to allocate memory for struct iscsi_conn_ops.\n");
+               goto put_transport;
+       }
+
+       if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
+               pr_err("Unable to allocate conn->conn_cpumask\n");
+               goto free_conn_ops;
+       }
+
+       return conn;
+
+free_conn_ops:
+       kfree(conn->conn_ops);
+put_transport:
+       iscsit_put_transport(conn->conn_transport);
+free_conn:
+       kfree(conn);
+       return NULL;
+}
+
+void iscsit_free_conn(struct iscsi_conn *conn)
+{
+       free_cpumask_var(conn->conn_cpumask);
+       kfree(conn->conn_ops);
+       iscsit_put_transport(conn->conn_transport);
+       kfree(conn);
+}
+
 void iscsi_target_login_sess_out(struct iscsi_conn *conn,
                struct iscsi_np *np, bool zero_tsih, bool new_sess)
 {
@@ -1161,13 +1192,9 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn,
                                   ISCSI_LOGIN_STATUS_INIT_ERR);
        if (!zero_tsih || !conn->sess)
                goto old_sess_out;
-       if (conn->sess->se_sess)
-               transport_free_session(conn->sess->se_sess);
-       if (conn->sess->session_index != 0) {
-               spin_lock_bh(&sess_idr_lock);
-               idr_remove(&sess_idr, conn->sess->session_index);
-               spin_unlock_bh(&sess_idr_lock);
-       }
+
+       transport_free_session(conn->sess->se_sess);
+       ida_free(&sess_ida, conn->sess->session_index);
        kfree(conn->sess->sess_ops);
        kfree(conn->sess);
        conn->sess = NULL;
@@ -1203,10 +1230,6 @@ old_sess_out:
                crypto_free_ahash(tfm);
        }
 
-       free_cpumask_var(conn->conn_cpumask);
-
-       kfree(conn->conn_ops);
-
        if (conn->param_list) {
                iscsi_release_param_list(conn->param_list);
                conn->param_list = NULL;
@@ -1224,8 +1247,7 @@ old_sess_out:
        if (conn->conn_transport->iscsit_free_conn)
                conn->conn_transport->iscsit_free_conn(conn);
 
-       iscsit_put_transport(conn->conn_transport);
-       kfree(conn);
+       iscsit_free_conn(conn);
 }
 
 static int __iscsi_target_login_thread(struct iscsi_np *np)
@@ -1255,31 +1277,16 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
        }
        spin_unlock_bh(&np->np_thread_lock);
 
-       conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
+       conn = iscsit_alloc_conn(np);
        if (!conn) {
-               pr_err("Could not allocate memory for"
-                       " new connection\n");
                /* Get another socket */
                return 1;
        }
-       pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
-       conn->conn_state = TARG_CONN_STATE_FREE;
-
-       timer_setup(&conn->nopin_response_timer,
-                   iscsit_handle_nopin_response_timeout, 0);
-       timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0);
-
-       if (iscsit_conn_set_transport(conn, np->np_transport) < 0) {
-               kfree(conn);
-               return 1;
-       }
 
        rc = np->np_transport->iscsit_accept_np(np, conn);
        if (rc == -ENOSYS) {
                complete(&np->np_restart_comp);
-               iscsit_put_transport(conn->conn_transport);
-               kfree(conn);
-               conn = NULL;
+               iscsit_free_conn(conn);
                goto exit;
        } else if (rc < 0) {
                spin_lock_bh(&np->np_thread_lock);
@@ -1287,17 +1294,13 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
                        np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
                        spin_unlock_bh(&np->np_thread_lock);
                        complete(&np->np_restart_comp);
-                       iscsit_put_transport(conn->conn_transport);
-                       kfree(conn);
-                       conn = NULL;
+                       iscsit_free_conn(conn);
                        /* Get another socket */
                        return 1;
                }
                spin_unlock_bh(&np->np_thread_lock);
-               iscsit_put_transport(conn->conn_transport);
-               kfree(conn);
-               conn = NULL;
-               goto out;
+               iscsit_free_conn(conn);
+               return 1;
        }
        /*
         * Perform the remaining iSCSI connection initialization items..
@@ -1447,7 +1450,6 @@ old_sess_out:
                tpg_np = NULL;
        }
 
-out:
        return 1;
 
 exit:
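
[Editor's note] This file now funnels connection setup and teardown through iscsit_alloc_conn() and iscsit_free_conn(), with the allocation path unwinding through one label per acquired resource. A stripped-down sketch of that idiom, with hypothetical names and sizes (not the driver's actual fields):

#include <linux/slab.h>

struct demo_conn {
        void *ops;
        void *buf;
};

static struct demo_conn *demo_conn_alloc(void)
{
        struct demo_conn *c;

        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c)
                return NULL;

        c->ops = kzalloc(64, GFP_KERNEL);
        if (!c->ops)
                goto free_conn;

        c->buf = kzalloc(128, GFP_KERNEL);
        if (!c->buf)
                goto free_ops;

        return c;

free_ops:
        kfree(c->ops);          /* undo in reverse order of acquisition */
free_conn:
        kfree(c);
        return NULL;
}

static void demo_conn_free(struct demo_conn *c)
{
        kfree(c->buf);
        kfree(c->ops);
        kfree(c);
}
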
index 74ac3abc44a02564ba1231e96c1338b1ecd87bd7..3b8e3639ff5d01c6dbeb31ffb1201bb2453a822c 100644 (file)
@@ -19,7 +19,7 @@ extern int iscsi_target_setup_login_socket(struct iscsi_np *,
 extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
 extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
 extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
-extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
+extern void iscsit_free_conn(struct iscsi_conn *);
 extern int iscsit_start_kthreads(struct iscsi_conn *);
 extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
 extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
index 977a8307fbb1a4e0d66cb1c619834472bfe0d1ea..4f28165592056d5ec90309c5bff737f5ba66ea0b 100644 (file)
@@ -260,10 +260,13 @@ static int of_thermal_set_mode(struct thermal_zone_device *tz,
 
        mutex_lock(&tz->lock);
 
-       if (mode == THERMAL_DEVICE_ENABLED)
+       if (mode == THERMAL_DEVICE_ENABLED) {
                tz->polling_delay = data->polling_delay;
-       else
+               tz->passive_delay = data->passive_delay;
+       } else {
                tz->polling_delay = 0;
+               tz->passive_delay = 0;
+       }
 
        mutex_unlock(&tz->lock);
 
index c866cc1659606726584e47b6e74ff59fe2d6aa51..450ed66edf582b4c6c77e3a376a17173cfdf57b9 100644 (file)
@@ -1,16 +1,6 @@
-/*
- * Copyright 2016 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright 2016 Freescale Semiconductor, Inc.
 
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -197,7 +187,7 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
        int ret;
        struct qoriq_tmu_data *data;
        struct device_node *np = pdev->dev.of_node;
-       u32 site = 0;
+       u32 site;
 
        if (!np) {
                dev_err(&pdev->dev, "Device OF-Node is NULL");
@@ -233,8 +223,9 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
        if (ret < 0)
                goto err_tmu;
 
-       data->tz = thermal_zone_of_sensor_register(&pdev->dev, data->sensor_id,
-                               data, &tmu_tz_ops);
+       data->tz = devm_thermal_zone_of_sensor_register(&pdev->dev,
+                                                       data->sensor_id,
+                                                       data, &tmu_tz_ops);
        if (IS_ERR(data->tz)) {
                ret = PTR_ERR(data->tz);
                dev_err(&pdev->dev,
@@ -243,7 +234,7 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
        }
 
        /* Enable monitoring */
-       site |= 0x1 << (15 - data->sensor_id);
+       site = 0x1 << (15 - data->sensor_id);
        tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr);
 
        return 0;
@@ -261,8 +252,6 @@ static int qoriq_tmu_remove(struct platform_device *pdev)
 {
        struct qoriq_tmu_data *data = platform_get_drvdata(pdev);
 
-       thermal_zone_of_sensor_unregister(&pdev->dev, data->tz);
-
        /* Disable monitoring */
        tmu_write(data, TMR_DISABLE, &data->regs->tmr);
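
[Editor's note] For reference, a minimal probe sketch of the devm_ pattern the driver switches to: the registration is tied to the device lifetime, so remove() no longer needs an explicit unregister call. Everything apart from the thermal-of API calls is hypothetical.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>

static int demo_get_temp(void *data, int *temp)
{
        *temp = 42000;                  /* placeholder reading, millicelsius */
        return 0;
}

static const struct thermal_zone_of_device_ops demo_tz_ops = {
        .get_temp = demo_get_temp,
};

static int demo_probe(struct platform_device *pdev)
{
        struct thermal_zone_device *tz;

        /* Managed registration: unregistered automatically on driver detach */
        tz = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, NULL,
                                                  &demo_tz_ops);
        if (IS_ERR(tz))
                return PTR_ERR(tz);

        return 0;
}
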
 
index 766521eb70715a1ff6c9a6b93434ec47d8716c49..7aed5337bdd35b3b3214372e7d5f9a01cf26789c 100644 (file)
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  R-Car Gen3 THS thermal sensor driver
  *  Based on rcar_thermal.c and work from Hien Dang and Khiem Nguyen.
  *
  * Copyright (C) 2016 Renesas Electronics Corporation.
  * Copyright (C) 2016 Sang Engineering
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; version 2 of the License.
- *
- *  This program is distributed in the hope that it will be useful, but
- *  WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- *  General Public License for more details.
- *
  */
 #include <linux/delay.h>
 #include <linux/err.h>
index e77e63070e998e1d997537ca88da1e7dd4ea533c..78f932822d381c9dbc013aee618be56e533baaab 100644 (file)
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  R-Car THS/TSC thermal sensor driver
  *
  * Copyright (C) 2012 Renesas Solutions Corp.
  * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; version 2 of the License.
- *
- *  This program is distributed in the hope that it will be useful, but
- *  WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- *  General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
  */
 #include <linux/delay.h>
 #include <linux/err.h>
@@ -660,6 +648,6 @@ static struct platform_driver rcar_thermal_driver = {
 };
 module_platform_driver(rcar_thermal_driver);
 
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("R-Car THS/TSC thermal sensor driver");
 MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
index 5414c4a87bea3980eba7c2fce8e9896ff919cbef..27284a2dcd2b62395c5b140900ccca85a68ab683 100644 (file)
@@ -522,6 +522,8 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
                return -EIO;
 
        while (count > 0) {
+               int ret = 0;
+
                spin_lock_irqsave(&hp->lock, flags);
 
                rsize = hp->outbuf_size - hp->n_outbuf;
@@ -537,10 +539,13 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
                }
 
                if (hp->n_outbuf > 0)
-                       hvc_push(hp);
+                       ret = hvc_push(hp);
 
                spin_unlock_irqrestore(&hp->lock, flags);
 
+               if (!ret)
+                       break;
+
                if (count) {
                        if (hp->n_outbuf > 0)
                                hvc_flush(hp);
@@ -623,6 +628,15 @@ static int hvc_chars_in_buffer(struct tty_struct *tty)
 #define MAX_TIMEOUT            (2000)
 static u32 timeout = MIN_TIMEOUT;
 
+/*
+ * Maximum number of bytes to get from the console driver if hvc_poll is
+ * called from driver (and can't sleep). Any more than this and we break
+ * and start polling with khvcd. This value was derived from an OpenBMC
+ * console with the OPAL driver that results in about 0.25ms interrupts off
+ * latency.
+ */
+#define HVC_ATOMIC_READ_MAX    128
+
 #define HVC_POLL_READ  0x00000001
 #define HVC_POLL_WRITE 0x00000002
 
@@ -669,8 +683,8 @@ static int __hvc_poll(struct hvc_struct *hp, bool may_sleep)
        if (!hp->irq_requested)
                poll_mask |= HVC_POLL_READ;
 
+ read_again:
        /* Read data if any */
-
        count = tty_buffer_request_room(&hp->port, N_INBUF);
 
        /* If flip is full, just reschedule a later read */
@@ -717,9 +731,23 @@ static int __hvc_poll(struct hvc_struct *hp, bool may_sleep)
 #endif /* CONFIG_MAGIC_SYSRQ */
                tty_insert_flip_char(&hp->port, buf[i], 0);
        }
-       if (n == count)
-               poll_mask |= HVC_POLL_READ;
-       read_total = n;
+       read_total += n;
+
+       if (may_sleep) {
+               /* Keep going until the flip is full */
+               spin_unlock_irqrestore(&hp->lock, flags);
+               cond_resched();
+               spin_lock_irqsave(&hp->lock, flags);
+               goto read_again;
+       } else if (read_total < HVC_ATOMIC_READ_MAX) {
+               /* Break and defer if it's a large read in atomic */
+               goto read_again;
+       }
+
+       /*
+        * Latency break, schedule another poll immediately.
+        */
+       poll_mask |= HVC_POLL_READ;
 
  out:
        /* Wakeup write queue if necessary */
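
[Editor's note] A generic sketch of the batching idea the hunks above implement: keep draining in chunks, reschedule between chunks when sleeping is allowed, and cap total work when running in atomic context. The helper and names are hypothetical, not the hvc driver's actual functions.

#include <linux/sched.h>
#include <linux/types.h>

#define DEMO_ATOMIC_BUDGET      128     /* analogous to HVC_ATOMIC_READ_MAX */

/*
 * get_chunk() stands in for the console back-end read; it returns the
 * number of bytes consumed this pass, or 0 when nothing is pending.
 */
static int demo_drain(int (*get_chunk)(void), bool may_sleep)
{
        int total = 0;
        int n;

        for (;;) {
                n = get_chunk();
                if (n <= 0)
                        break;          /* nothing left to read */
                total += n;

                if (may_sleep)
                        cond_resched(); /* be fair between large batches */
                else if (total >= DEMO_ATOMIC_BUDGET)
                        break;          /* defer the rest to the poller */
        }

        return total;
}
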
index 27346d69f3938c0222fc59b467bbeb6e9ce778e7..f9b40a9dc4d33eb458d6a3469a0d41acdeab404b 100644 (file)
@@ -780,20 +780,9 @@ static int acm_tty_write(struct tty_struct *tty,
        }
 
        if (acm->susp_count) {
-               if (acm->putbuffer) {
-                       /* now to preserve order */
-                       usb_anchor_urb(acm->putbuffer->urb, &acm->delayed);
-                       acm->putbuffer = NULL;
-               }
                usb_anchor_urb(wb->urb, &acm->delayed);
                spin_unlock_irqrestore(&acm->write_lock, flags);
                return count;
-       } else {
-               if (acm->putbuffer) {
-                       /* at this point there is no good way to handle errors */
-                       acm_start_wb(acm, acm->putbuffer);
-                       acm->putbuffer = NULL;
-               }
        }
 
        stat = acm_start_wb(acm, wb);
@@ -804,66 +793,6 @@ static int acm_tty_write(struct tty_struct *tty,
        return count;
 }
 
-static void acm_tty_flush_chars(struct tty_struct *tty)
-{
-       struct acm *acm = tty->driver_data;
-       struct acm_wb *cur;
-       int err;
-       unsigned long flags;
-
-       spin_lock_irqsave(&acm->write_lock, flags);
-
-       cur = acm->putbuffer;
-       if (!cur) /* nothing to do */
-               goto out;
-
-       acm->putbuffer = NULL;
-       err = usb_autopm_get_interface_async(acm->control);
-       if (err < 0) {
-               cur->use = 0;
-               acm->putbuffer = cur;
-               goto out;
-       }
-
-       if (acm->susp_count)
-               usb_anchor_urb(cur->urb, &acm->delayed);
-       else
-               acm_start_wb(acm, cur);
-out:
-       spin_unlock_irqrestore(&acm->write_lock, flags);
-       return;
-}
-
-static int acm_tty_put_char(struct tty_struct *tty, unsigned char ch)
-{
-       struct acm *acm = tty->driver_data;
-       struct acm_wb *cur;
-       int wbn;
-       unsigned long flags;
-
-overflow:
-       cur = acm->putbuffer;
-       if (!cur) {
-               spin_lock_irqsave(&acm->write_lock, flags);
-               wbn = acm_wb_alloc(acm);
-               if (wbn >= 0) {
-                       cur = &acm->wb[wbn];
-                       acm->putbuffer = cur;
-               }
-               spin_unlock_irqrestore(&acm->write_lock, flags);
-               if (!cur)
-                       return 0;
-       }
-
-       if (cur->len == acm->writesize) {
-               acm_tty_flush_chars(tty);
-               goto overflow;
-       }
-
-       cur->buf[cur->len++] = ch;
-       return 1;
-}
-
 static int acm_tty_write_room(struct tty_struct *tty)
 {
        struct acm *acm = tty->driver_data;
@@ -1987,8 +1916,6 @@ static const struct tty_operations acm_ops = {
        .cleanup =              acm_tty_cleanup,
        .hangup =               acm_tty_hangup,
        .write =                acm_tty_write,
-       .put_char =             acm_tty_put_char,
-       .flush_chars =          acm_tty_flush_chars,
        .write_room =           acm_tty_write_room,
        .ioctl =                acm_tty_ioctl,
        .throttle =             acm_tty_throttle,
index eacc116e83da2ccf38b2640aa5b5b02b5b3621c8..ca06b20d7af9cc9567da1f243ad99935f8bc99e7 100644 (file)
@@ -96,7 +96,6 @@ struct acm {
        unsigned long read_urbs_free;
        struct urb *read_urbs[ACM_NR];
        struct acm_rb read_buffers[ACM_NR];
-       struct acm_wb *putbuffer;                       /* for acm_tty_put_char() */
        int rx_buflimit;
        spinlock_t read_lock;
        u8 *notification_buffer;                        /* to reassemble fragmented notifications */
index bec581fb7c6361891a81a3a0aa88d3863b686f58..656d247819c9d92c257581add9edbc7f166f899a 100644 (file)
@@ -460,7 +460,7 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
 
        set_bit(WDM_RESPONDING, &desc->flags);
        spin_unlock_irq(&desc->iuspin);
-       rv = usb_submit_urb(desc->response, GFP_KERNEL);
+       rv = usb_submit_urb(desc->response, GFP_ATOMIC);
        spin_lock_irq(&desc->iuspin);
        if (rv) {
                dev_err(&desc->intf->dev,
index 50a2362ed3ea118fb7b796bf0c546d01ca1c1d7e..48277bbc15e4d155fc9c1c7315fcf57d6347b51e 100644 (file)
@@ -246,6 +246,31 @@ int of_usb_update_otg_caps(struct device_node *np,
 }
 EXPORT_SYMBOL_GPL(of_usb_update_otg_caps);
 
+/**
+ * usb_of_get_companion_dev - Find the companion device
+ * @dev: the device pointer to find a companion
+ *
+ * Find the companion device from platform bus.
+ *
+ * Takes a reference to the returned struct device which needs to be dropped
+ * after use.
+ *
+ * Return: On success, a pointer to the companion device, %NULL on failure.
+ */
+struct device *usb_of_get_companion_dev(struct device *dev)
+{
+       struct device_node *node;
+       struct platform_device *pdev = NULL;
+
+       node = of_parse_phandle(dev->of_node, "companion", 0);
+       if (node)
+               pdev = of_find_device_by_node(node);
+
+       of_node_put(node);
+
+       return pdev ? &pdev->dev : NULL;
+}
+EXPORT_SYMBOL_GPL(usb_of_get_companion_dev);
 #endif
 
 MODULE_LICENSE("GPL");
index 66fe1b78d952969ebdedbecef8cc59f2805c2672..03432467b05fb12810d7cac77d954a11ed0a002a 100644 (file)
@@ -515,8 +515,6 @@ static int resume_common(struct device *dev, int event)
                                event == PM_EVENT_RESTORE);
                if (retval) {
                        dev_err(dev, "PCI post-resume error %d!\n", retval);
-                       if (hcd->shared_hcd)
-                               usb_hc_died(hcd->shared_hcd);
                        usb_hc_died(hcd);
                }
        }
index 228672f2c4a15eb841809a1f3d42caa925eb5fee..bfa5eda0cc2606a79373651fd079f87526238f36 100644 (file)
@@ -1341,6 +1341,11 @@ void usb_enable_interface(struct usb_device *dev,
  * is submitted that needs that bandwidth.  Some other operating systems
  * allocate bandwidth early, when a configuration is chosen.
  *
+ * xHCI reserves bandwidth and configures the alternate setting in
+ * usb_hcd_alloc_bandwidth(). If it fails the original interface altsetting
+ * may be disabled. Drivers cannot rely on any particular alternate
+ * setting being in effect after a failure.
+ *
  * This call is synchronous, and may not be used in an interrupt context.
  * Also, drivers must not change altsettings while urbs are scheduled for
  * endpoints in that interface; all such urbs must first be completed
@@ -1376,6 +1381,12 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
                         alternate);
                return -EINVAL;
        }
+       /*
+        * usb3 hosts configure the interface in usb_hcd_alloc_bandwidth,
+        * including freeing dropped endpoint ring buffers.
+        * Make sure the interface endpoints are flushed before that
+        */
+       usb_disable_interface(dev, iface, false);
 
        /* Make sure we have enough bandwidth for this alternate interface.
         * Remove the current alt setting and add the new alt setting.
index fd77442c2d12e1c9eba50b606d990fbb48c84e8b..651708d8c9083b99df235dddc3eddde12b9e5188 100644 (file)
@@ -105,29 +105,3 @@ usb_of_get_interface_node(struct usb_device *udev, u8 config, u8 ifnum)
        return NULL;
 }
 EXPORT_SYMBOL_GPL(usb_of_get_interface_node);
-
-/**
- * usb_of_get_companion_dev - Find the companion device
- * @dev: the device pointer to find a companion
- *
- * Find the companion device from platform bus.
- *
- * Takes a reference to the returned struct device which needs to be dropped
- * after use.
- *
- * Return: On success, a pointer to the companion device, %NULL on failure.
- */
-struct device *usb_of_get_companion_dev(struct device *dev)
-{
-       struct device_node *node;
-       struct platform_device *pdev = NULL;
-
-       node = of_parse_phandle(dev->of_node, "companion", 0);
-       if (node)
-               pdev = of_find_device_by_node(node);
-
-       of_node_put(node);
-
-       return pdev ? &pdev->dev : NULL;
-}
-EXPORT_SYMBOL_GPL(usb_of_get_companion_dev);
index 097057d2eacf7bcb18316473c6f0def8a5744b62..e77dfe5ed5ec7ec84462ba264865aa6ae7234ebc 100644 (file)
@@ -178,6 +178,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* CBM - Flash disk */
        { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* WORLDE Controller KS49 or Prodipe MIDI 49C USB controller */
+       { USB_DEVICE(0x0218, 0x0201), .driver_info =
+                       USB_QUIRK_CONFIG_INTF_STRINGS },
+
        /* WORLDE easy key (easykey.25) MIDI controller  */
        { USB_DEVICE(0x0218, 0x0401), .driver_info =
                        USB_QUIRK_CONFIG_INTF_STRINGS },
@@ -406,6 +410,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x2040, 0x7200), .driver_info =
                        USB_QUIRK_CONFIG_INTF_STRINGS },
 
+       /* DJI CineSSD */
+       { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
+
        /* INTEL VALUE SSD */
        { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
 
index 9a53a58e676e0ae775550fb872ed168c65598f84..577642895b57d118eb37115cdb005089ee5c6e08 100644 (file)
@@ -412,8 +412,6 @@ static int dwc2_driver_probe(struct platform_device *dev)
        dev_dbg(&dev->dev, "mapped PA %08lx to VA %p\n",
                (unsigned long)res->start, hsotg->regs);
 
-       hsotg->needs_byte_swap = dwc2_check_core_endianness(hsotg);
-
        retval = dwc2_lowlevel_hw_init(hsotg);
        if (retval)
                return retval;
@@ -438,6 +436,8 @@ static int dwc2_driver_probe(struct platform_device *dev)
        if (retval)
                return retval;
 
+       hsotg->needs_byte_swap = dwc2_check_core_endianness(hsotg);
+
        retval = dwc2_get_dr_mode(hsotg);
        if (retval)
                goto error;
index 40bf9e0bbc59754e756c0eb2b0c752a5c8baef61..4c2771c5e7276528d70941d73024ae2b4aedd977 100644 (file)
@@ -180,8 +180,7 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int dwc3_of_simple_runtime_suspend(struct device *dev)
+static int __maybe_unused dwc3_of_simple_runtime_suspend(struct device *dev)
 {
        struct dwc3_of_simple   *simple = dev_get_drvdata(dev);
        int                     i;
@@ -192,7 +191,7 @@ static int dwc3_of_simple_runtime_suspend(struct device *dev)
        return 0;
 }
 
-static int dwc3_of_simple_runtime_resume(struct device *dev)
+static int __maybe_unused dwc3_of_simple_runtime_resume(struct device *dev)
 {
        struct dwc3_of_simple   *simple = dev_get_drvdata(dev);
        int                     ret;
@@ -210,7 +209,7 @@ static int dwc3_of_simple_runtime_resume(struct device *dev)
        return 0;
 }
 
-static int dwc3_of_simple_suspend(struct device *dev)
+static int __maybe_unused dwc3_of_simple_suspend(struct device *dev)
 {
        struct dwc3_of_simple *simple = dev_get_drvdata(dev);
 
@@ -220,7 +219,7 @@ static int dwc3_of_simple_suspend(struct device *dev)
        return 0;
 }
 
-static int dwc3_of_simple_resume(struct device *dev)
+static int __maybe_unused dwc3_of_simple_resume(struct device *dev)
 {
        struct dwc3_of_simple *simple = dev_get_drvdata(dev);
 
@@ -229,7 +228,6 @@ static int dwc3_of_simple_resume(struct device *dev)
 
        return 0;
 }
-#endif
 
 static const struct dev_pm_ops dwc3_of_simple_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(dwc3_of_simple_suspend, dwc3_of_simple_resume)
index 5edd7947036832f3042f503dbe8cdd9d0d5b0cba..1286076a8890308a66d6ef494b85169ae92d7798 100644 (file)
@@ -85,8 +85,8 @@ static int dwc3_byt_enable_ulpi_refclock(struct pci_dev *pci)
        u32             value;
 
        reg = pcim_iomap(pci, GP_RWBAR, 0);
-       if (IS_ERR(reg))
-               return PTR_ERR(reg);
+       if (!reg)
+               return -ENOMEM;
 
        value = readl(reg + GP_RWREG1);
        if (!(value & GP_RWREG1_ULPI_REFCLK_DISABLE))
index 032ea7d709ba534907c6baf02bb3a634a4fa44f5..2b53194081bafd3041f0fa7278711e984dd75237 100644 (file)
@@ -473,7 +473,6 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep)
 
 /**
  * dwc3_gadget_start_config - configure ep resources
- * @dwc: pointer to our controller context structure
  * @dep: endpoint that is being enabled
  *
  * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's
index 53a48f561458619f56fb71d079052441dc848ff6..587c5037ff079e0d74984703c91cc5dbca8428cd 100644 (file)
@@ -1063,12 +1063,15 @@ static const struct usb_gadget_ops fotg210_gadget_ops = {
 static int fotg210_udc_remove(struct platform_device *pdev)
 {
        struct fotg210_udc *fotg210 = platform_get_drvdata(pdev);
+       int i;
 
        usb_del_gadget_udc(&fotg210->gadget);
        iounmap(fotg210->reg);
        free_irq(platform_get_irq(pdev, 0), fotg210);
 
        fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
+       for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
+               kfree(fotg210->ep[i]);
        kfree(fotg210);
 
        return 0;
@@ -1099,7 +1102,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
        /* initialize udc */
        fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL);
        if (fotg210 == NULL)
-               goto err_alloc;
+               goto err;
 
        for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
                _ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL);
@@ -1111,7 +1114,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
        fotg210->reg = ioremap(res->start, resource_size(res));
        if (fotg210->reg == NULL) {
                pr_err("ioremap error.\n");
-               goto err_map;
+               goto err_alloc;
        }
 
        spin_lock_init(&fotg210->lock);
@@ -1159,7 +1162,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
        fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep,
                                GFP_KERNEL);
        if (fotg210->ep0_req == NULL)
-               goto err_req;
+               goto err_map;
 
        fotg210_init(fotg210);
 
@@ -1187,12 +1190,14 @@ err_req:
        fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
 
 err_map:
-       if (fotg210->reg)
-               iounmap(fotg210->reg);
+       iounmap(fotg210->reg);
 
 err_alloc:
+       for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
+               kfree(fotg210->ep[i]);
        kfree(fotg210);
 
+err:
        return ret;
 }
 
index 318246d8b2e2b574178acc9745e7d48602742485..b02ab2a8d927f6f76fb3173113b83dbd6555363a 100644 (file)
@@ -1545,11 +1545,14 @@ static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
                writel(tmp | BIT(USB_DETECT_ENABLE), &dev->usb->usbctl);
        } else {
                writel(tmp & ~BIT(USB_DETECT_ENABLE), &dev->usb->usbctl);
-               stop_activity(dev, dev->driver);
+               stop_activity(dev, NULL);
        }
 
        spin_unlock_irqrestore(&dev->lock, flags);
 
+       if (!is_on && dev->driver)
+               dev->driver->disconnect(&dev->gadget);
+
        return 0;
 }
 
@@ -2466,8 +2469,11 @@ static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
                nuke(&dev->ep[i]);
 
        /* report disconnect; the driver is already quiesced */
-       if (driver)
+       if (driver) {
+               spin_unlock(&dev->lock);
                driver->disconnect(&dev->gadget);
+               spin_lock(&dev->lock);
+       }
 
        usb_reinit(dev);
 }
@@ -3341,6 +3347,8 @@ next_endpoints:
                BIT(PCI_RETRY_ABORT_INTERRUPT))
 
 static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
+__releases(dev->lock)
+__acquires(dev->lock)
 {
        struct net2280_ep       *ep;
        u32                     tmp, num, mask, scratch;
@@ -3381,12 +3389,14 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
                        if (disconnect || reset) {
                                stop_activity(dev, dev->driver);
                                ep0_start(dev);
+                               spin_unlock(&dev->lock);
                                if (reset)
                                        usb_gadget_udc_reset
                                                (&dev->gadget, dev->driver);
                                else
                                        (dev->driver->disconnect)
                                                (&dev->gadget);
+                               spin_lock(&dev->lock);
                                return;
                        }
                }
@@ -3405,6 +3415,7 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
        tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT);
        if (stat & tmp) {
                writel(tmp, &dev->regs->irqstat1);
+               spin_unlock(&dev->lock);
                if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) {
                        if (dev->driver->suspend)
                                dev->driver->suspend(&dev->gadget);
@@ -3415,6 +3426,7 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
                                dev->driver->resume(&dev->gadget);
                        /* at high speed, note erratum 0133 */
                }
+               spin_lock(&dev->lock);
                stat &= ~tmp;
        }
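
[Editor's note] A reduced sketch of the lock-drop pattern these net2280 hunks add around gadget-driver callbacks; the structure is hypothetical, only the unlock/callback/relock shape mirrors the change:

#include <linux/spinlock.h>
#include <linux/usb/gadget.h>

struct demo_dev {
        spinlock_t lock;
        struct usb_gadget gadget;
        struct usb_gadget_driver *driver;
};

static void demo_notify_disconnect(struct demo_dev *dev)
        __releases(dev->lock)
        __acquires(dev->lock)
{
        /* drop the controller lock so the callback can sleep or re-enter
         * the UDC without deadlocking, then re-acquire before returning */
        spin_unlock(&dev->lock);
        dev->driver->disconnect(&dev->gadget);
        spin_lock(&dev->lock);
}
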
 
index 1f879b3f2c96ad1f7dc417da185a545d0e52a484..e1656f361e081d398d8ab55820a00cf9497f3562 100644 (file)
@@ -812,12 +812,15 @@ static void usb3_irq_epc_int_1_speed(struct renesas_usb3 *usb3)
        switch (speed) {
        case USB_STA_SPEED_SS:
                usb3->gadget.speed = USB_SPEED_SUPER;
+               usb3->gadget.ep0->maxpacket = USB3_EP0_SS_MAX_PACKET_SIZE;
                break;
        case USB_STA_SPEED_HS:
                usb3->gadget.speed = USB_SPEED_HIGH;
+               usb3->gadget.ep0->maxpacket = USB3_EP0_HSFS_MAX_PACKET_SIZE;
                break;
        case USB_STA_SPEED_FS:
                usb3->gadget.speed = USB_SPEED_FULL;
+               usb3->gadget.ep0->maxpacket = USB3_EP0_HSFS_MAX_PACKET_SIZE;
                break;
        default:
                usb3->gadget.speed = USB_SPEED_UNKNOWN;
@@ -2513,7 +2516,7 @@ static int renesas_usb3_init_ep(struct renesas_usb3 *usb3, struct device *dev,
                        /* for control pipe */
                        usb3->gadget.ep0 = &usb3_ep->ep;
                        usb_ep_set_maxpacket_limit(&usb3_ep->ep,
-                                               USB3_EP0_HSFS_MAX_PACKET_SIZE);
+                                               USB3_EP0_SS_MAX_PACKET_SIZE);
                        usb3_ep->ep.caps.type_control = true;
                        usb3_ep->ep.caps.dir_in = true;
                        usb3_ep->ep.caps.dir_out = true;
index 072bd5d5738e730f896260b7ab7db95f5cbd63e9..5b8a3d9530c4a89bacb3850852c72d3a19c979dd 100644 (file)
@@ -2555,7 +2555,7 @@ static int u132_get_frame(struct usb_hcd *hcd)
        } else {
                int frame = 0;
                dev_err(&u132->platform_dev->dev, "TODO: u132_get_frame\n");
-               msleep(100);
+               mdelay(100);
                return frame;
        }
 }
index ef350c33dc4a8615a0188af0cea13ae29a876532..b1f27aa38b1008c0edc8f425e0caf4b4a2c06959 100644 (file)
@@ -1613,6 +1613,10 @@ void xhci_endpoint_copy(struct xhci_hcd *xhci,
        in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
        in_ep_ctx->deq = out_ep_ctx->deq;
        in_ep_ctx->tx_info = out_ep_ctx->tx_info;
+       if (xhci->quirks & XHCI_MTK_HOST) {
+               in_ep_ctx->reserved[0] = out_ep_ctx->reserved[0];
+               in_ep_ctx->reserved[1] = out_ep_ctx->reserved[1];
+       }
 }
 
 /* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
index 8dc77e34a85989d78246ad0489635ac393cad636..94e939249b2b6dc0f65fb197a0b084889a7fb153 100644 (file)
@@ -153,7 +153,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
 {
        const struct xhci_plat_priv *priv_match;
        const struct hc_driver  *driver;
-       struct device           *sysdev;
+       struct device           *sysdev, *tmpdev;
        struct xhci_hcd         *xhci;
        struct resource         *res;
        struct usb_hcd          *hcd;
@@ -273,19 +273,24 @@ static int xhci_plat_probe(struct platform_device *pdev)
                goto disable_clk;
        }
 
-       if (device_property_read_bool(sysdev, "usb2-lpm-disable"))
-               xhci->quirks |= XHCI_HW_LPM_DISABLE;
+       /* imod_interval is the interrupt moderation value in nanoseconds. */
+       xhci->imod_interval = 40000;
 
-       if (device_property_read_bool(sysdev, "usb3-lpm-capable"))
-               xhci->quirks |= XHCI_LPM_SUPPORT;
+       /* Iterate over all parent nodes for finding quirks */
+       for (tmpdev = &pdev->dev; tmpdev; tmpdev = tmpdev->parent) {
 
-       if (device_property_read_bool(&pdev->dev, "quirk-broken-port-ped"))
-               xhci->quirks |= XHCI_BROKEN_PORT_PED;
+               if (device_property_read_bool(tmpdev, "usb2-lpm-disable"))
+                       xhci->quirks |= XHCI_HW_LPM_DISABLE;
 
-       /* imod_interval is the interrupt moderation value in nanoseconds. */
-       xhci->imod_interval = 40000;
-       device_property_read_u32(sysdev, "imod-interval-ns",
-                                &xhci->imod_interval);
+               if (device_property_read_bool(tmpdev, "usb3-lpm-capable"))
+                       xhci->quirks |= XHCI_LPM_SUPPORT;
+
+               if (device_property_read_bool(tmpdev, "quirk-broken-port-ped"))
+                       xhci->quirks |= XHCI_BROKEN_PORT_PED;
+
+               device_property_read_u32(tmpdev, "imod-interval-ns",
+                                        &xhci->imod_interval);
+       }
 
        hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
        if (IS_ERR(hcd->usb_phy)) {
index 61f48b17e57b6688599e9269c6933cfeb51fdf92..0420eefa647a15cb5321dfa5fd95556a5a5432e5 100644 (file)
@@ -37,6 +37,21 @@ static unsigned long long quirks;
 module_param(quirks, ullong, S_IRUGO);
 MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
 
+static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
+{
+       struct xhci_segment *seg = ring->first_seg;
+
+       if (!td || !td->start_seg)
+               return false;
+       do {
+               if (seg == td->start_seg)
+                       return true;
+               seg = seg->next;
+       } while (seg && seg != ring->first_seg);
+
+       return false;
+}
+
 /* TODO: copied from ehci-hcd.c - can this be refactored? */
 /*
  * xhci_handshake - spin reading hc until handshake completes or fails
@@ -1571,6 +1586,21 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
                goto done;
        }
 
+       /*
+        * check ring is not re-allocated since URB was enqueued. If it is, then
+        * make sure none of the ring related pointers in this URB private data
+        * are touched, such as td_list, otherwise we overwrite freed data
+        */
+       if (!td_on_ring(&urb_priv->td[0], ep_ring)) {
+               xhci_err(xhci, "Canceled URB td not found on endpoint ring");
+               for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) {
+                       td = &urb_priv->td[i];
+                       if (!list_empty(&td->cancelled_td_list))
+                               list_del_init(&td->cancelled_td_list);
+               }
+               goto err_giveback;
+       }
+
        if (xhci->xhc_state & XHCI_STATE_HALTED) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                "HC halted, freeing TD manually.");
index 82f220631bd7b9ffa77599008c42e6611426887f..b5d6616442635b8bf4c814bfd629a9ec18e927a3 100644 (file)
@@ -369,7 +369,7 @@ static unsigned char parport_uss720_frob_control(struct parport *pp, unsigned ch
        mask &= 0x0f;
        val &= 0x0f;
        d = (priv->reg[1] & (~mask)) ^ val;
-       if (set_1284_register(pp, 2, d, GFP_KERNEL))
+       if (set_1284_register(pp, 2, d, GFP_ATOMIC))
                return 0;
        priv->reg[1] = d;
        return d & 0xf;
@@ -379,7 +379,7 @@ static unsigned char parport_uss720_read_status(struct parport *pp)
 {
        unsigned char ret;
 
-       if (get_1284_register(pp, 1, &ret, GFP_KERNEL))
+       if (get_1284_register(pp, 1, &ret, GFP_ATOMIC))
                return 0;
        return ret & 0xf8;
 }
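
[Editor's note] Both this hunk and the cdc-wdm change earlier swap GFP_KERNEL for GFP_ATOMIC on paths that can run in atomic context. A generic illustration of the constraint, not the actual uss720 or cdc-wdm call chains:

#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/*
 * Inside a spinlocked (or otherwise non-sleeping) section, allocations and
 * URB submissions must use GFP_ATOMIC; GFP_KERNEL may sleep for reclaim,
 * which is not allowed here.
 */
static void *demo_alloc_in_atomic(size_t len)
{
        unsigned long flags;
        void *p;

        spin_lock_irqsave(&demo_lock, flags);
        p = kmalloc(len, GFP_ATOMIC);   /* must not sleep with the lock held */
        spin_unlock_irqrestore(&demo_lock, flags);

        return p;
}
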
index 3be40eaa1ac9b2caf493a8fd21e8980982b5d9a1..6d9fd5f649036e8fb47c39eaeffa26f856724e99 100644 (file)
@@ -413,6 +413,9 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
        spin_unlock_irqrestore(&dev->lock, flags);
        mutex_unlock(&dev->io_mutex);
 
+       if (WARN_ON_ONCE(len >= sizeof(in_buffer)))
+               return -EIO;
+
        return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
 }
 
@@ -421,13 +424,13 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
 {
        struct usb_yurex *dev;
        int i, set = 0, retval = 0;
-       char buffer[16];
+       char buffer[16 + 1];
        char *data = buffer;
        unsigned long long c, c2 = 0;
        signed long timeout = 0;
        DEFINE_WAIT(wait);
 
-       count = min(sizeof(buffer), count);
+       count = min(sizeof(buffer) - 1, count);
        dev = file->private_data;
 
        /* verify that we actually have some data to write */
@@ -446,6 +449,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
                retval = -EFAULT;
                goto error;
        }
+       buffer[count] = 0;
        memset(dev->cntl_buffer, CMD_PADDING, YUREX_BUF_SIZE);
 
        switch (buffer[0]) {
index eecfd067136287a65c89bb58409bf054f9ea6e1c..d045d8458f81c27930a090ab06ee1539cfeb420a 100644 (file)
@@ -107,8 +107,12 @@ static int mtu3_device_enable(struct mtu3 *mtu)
                (SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN |
                SSUSB_U2_PORT_HOST_SEL));
 
-       if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG)
+       if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) {
                mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
+               if (mtu->is_u3_ip)
+                       mtu3_setbits(ibase, SSUSB_U3_CTRL(0),
+                                    SSUSB_U3_PORT_DUAL_MODE);
+       }
 
        return ssusb_check_clocks(mtu->ssusb, check_clk);
 }
index 6ee371478d89e320e53318161007771608b64713..a45bb253939f2c8e1a85c1e7ee7b84ad3f0182a5 100644 (file)
 
 /* U3D_SSUSB_U3_CTRL_0P */
 #define SSUSB_U3_PORT_SSP_SPEED        BIT(9)
+#define SSUSB_U3_PORT_DUAL_MODE        BIT(7)
 #define SSUSB_U3_PORT_HOST_SEL         BIT(2)
 #define SSUSB_U3_PORT_PDN              BIT(1)
 #define SSUSB_U3_PORT_DIS              BIT(0)
index e53c682610176bf694e3130031181d669e3b83fa..9bbcee37524e56ccf7e9473b23c271924bedaf7d 100644 (file)
@@ -173,7 +173,7 @@ struct ump_interrupt {
 }  __attribute__((packed));
 
 
-#define TIUMP_GET_PORT_FROM_CODE(c)    (((c) >> 4) - 3)
+#define TIUMP_GET_PORT_FROM_CODE(c)    (((c) >> 6) & 0x01)
 #define TIUMP_GET_FUNC_FROM_CODE(c)    ((c) & 0x0f)
 #define TIUMP_INTERRUPT_CODE_LSR       0x03
 #define TIUMP_INTERRUPT_CODE_MSR       0x04
index 3010878f7f8ed9004bb78a15f63e1eb83261742e..e3c5832337e0bdbb83df78285a697ac5f5a8af3f 100644 (file)
@@ -1119,7 +1119,7 @@ static void ti_break(struct tty_struct *tty, int break_state)
 
 static int ti_get_port_from_code(unsigned char code)
 {
-       return (code >> 4) - 3;
+       return (code >> 6) & 0x01;
 }
 
 static int ti_get_func_from_code(unsigned char code)
index c267f2812a046db0920670e3834a8dbb472f534d..e227bb5b794fe3905965647a442bbd43254d5099 100644 (file)
@@ -376,6 +376,15 @@ static int queuecommand_lck(struct scsi_cmnd *srb,
                return 0;
        }
 
+       if ((us->fflags & US_FL_NO_ATA_1X) &&
+                       (srb->cmnd[0] == ATA_12 || srb->cmnd[0] == ATA_16)) {
+               memcpy(srb->sense_buffer, usb_stor_sense_invalidCDB,
+                      sizeof(usb_stor_sense_invalidCDB));
+               srb->result = SAM_STAT_CHECK_CONDITION;
+               done(srb);
+               return 0;
+       }
+
        /* enqueue the command and wake up the control thread */
        srb->scsi_done = done;
        us->srb = srb;
index 9e9de5452860d39c2a6da87546459a38086b21b9..1f7b401c4d041d44390f3fa519dd95b46aab1b42 100644 (file)
@@ -842,6 +842,27 @@ static int uas_slave_configure(struct scsi_device *sdev)
                sdev->skip_ms_page_8 = 1;
                sdev->wce_default_on = 1;
        }
+
+       /*
+        * Some disks return the total number of blocks in response
+        * to READ CAPACITY rather than the highest block number.
+        * If this device makes that mistake, tell the sd driver.
+        */
+       if (devinfo->flags & US_FL_FIX_CAPACITY)
+               sdev->fix_capacity = 1;
+
+       /*
+        * Some devices don't like MODE SENSE with page=0x3f,
+        * which is the command used for checking if a device
+        * is write-protected.  Now that we tell the sd driver
+        * to do a 192-byte transfer with this command the
+        * majority of devices work fine, but a few still can't
+        * handle it.  The sd driver will simply assume those
+        * devices are write-enabled.
+        */
+       if (devinfo->flags & US_FL_NO_WP_DETECT)
+               sdev->skip_ms_page_3f = 1;
+
        scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
        return 0;
 }
index 22fcfccf453afeec176b5ab35390d6688e8953f4..f7f83b21dc746ab922d7f7e7c61add76a55fc560 100644 (file)
@@ -2288,6 +2288,13 @@ UNUSUAL_DEV(  0x2735, 0x100b, 0x0000, 0x9999,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_GO_SLOW ),
 
+/* Reported-by: Tim Anderson <tsa@biglakesoftware.com> */
+UNUSUAL_DEV(  0x2ca3, 0x0031, 0x0000, 0x9999,
+               "DJI",
+               "CineSSD",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_ATA_1X),
+
 /*
  * Reported by Frederic Marchal <frederic.marchal@wowcompany.com>
  * Mio Moov 330
index 95a2b10127db564607dfbb9f7a529a6719fd0e31..76299b6ff06dae4eb29e49960d7403b66c39e3d8 100644 (file)
@@ -255,12 +255,13 @@ EXPORT_SYMBOL_GPL(typec_altmode_unregister_driver);
 /* API for the port drivers */
 
 /**
- * typec_match_altmode - Match SVID to an array of alternate modes
+ * typec_match_altmode - Match SVID and mode to an array of alternate modes
  * @altmodes: Array of alternate modes
- * @n: Number of elements in the array, or -1 for NULL termiated arrays
+ * @n: Number of elements in the array, or -1 for NULL terminated arrays
  * @svid: Standard or Vendor ID to match with
+ * @mode: Mode to match with
  *
- * Return pointer to an alternate mode with SVID mathing @svid, or NULL when no
+ * Return pointer to an alternate mode with SVID matching @svid, or NULL when no
  * match is found.
  */
 struct typec_altmode *typec_match_altmode(struct typec_altmode **altmodes,
index c202975f8097eb37f05cfe29d63dedf26d8a1d11..e61dffb27a0c64bab899c36f674f922278d92f6e 100644 (file)
@@ -1484,7 +1484,6 @@ EXPORT_SYMBOL_GPL(typec_set_mode);
  * typec_port_register_altmode - Register USB Type-C Port Alternate Mode
  * @port: USB Type-C Port that supports the alternate mode
  * @desc: Description of the alternate mode
- * @drvdata: Private pointer to driver specific info
  *
  * This routine is used to register an alternate mode that @port is capable of
  * supporting.
index 96c1d8400822a3d852553e5baddaaed9907d7f71..b13c6b4b2c665a332a40aeada1eaa1e421d9a4aa 100644 (file)
@@ -952,7 +952,7 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d,
        list_for_each_entry_safe(node, n, &d->pending_list, node) {
                struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
                if (msg->iova <= vq_msg->iova &&
-                   msg->iova + msg->size - 1 > vq_msg->iova &&
+                   msg->iova + msg->size - 1 >= vq_msg->iova &&
                    vq_msg->type == VHOST_IOTLB_MISS) {
                        vhost_poll_queue(&node->vq->poll);
                        list_del(&node->node);
index b459edfacff35e0526fdc912cd0179ef8126ea74..90d387b50ab747f505597e87b439f1dcfe5f489f 100644 (file)
@@ -79,15 +79,19 @@ config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
          This value is used to allocate enough space in internal
          tables needed for physical memory administration.
 
-config XEN_SCRUB_PAGES
-       bool "Scrub pages before returning them to system"
+config XEN_SCRUB_PAGES_DEFAULT
+       bool "Scrub pages before returning them to system by default"
        depends on XEN_BALLOON
        default y
        help
          Scrub pages before returning them to the system for reuse by
          other domains.  This makes sure that any confidential data
          is not accidentally visible to other domains.  It is more
-         secure, but slightly less efficient.
+         secure, but slightly less efficient. This can be controlled with
+         xen_scrub_pages=0 parameter and
+         /sys/devices/system/xen_memory/xen_memory0/scrub_pages.
+         This option only sets the default value.
+
          If in doubt, say yes.
 
 config XEN_DEV_EVTCHN
index d4265c8ebb22a24b1ab8794ab79b592793d14908..b1357aa4bc552eb3a5989dab5eeacf9295d2a2d8 100644 (file)
@@ -19,15 +19,16 @@ static void enable_hotplug_cpu(int cpu)
 
 static void disable_hotplug_cpu(int cpu)
 {
-       if (cpu_online(cpu)) {
-               lock_device_hotplug();
+       if (!cpu_is_hotpluggable(cpu))
+               return;
+       lock_device_hotplug();
+       if (cpu_online(cpu))
                device_offline(get_cpu_device(cpu));
-               unlock_device_hotplug();
-       }
-       if (cpu_present(cpu))
+       if (!cpu_online(cpu) && cpu_present(cpu)) {
                xen_arch_unregister_cpu(cpu);
-
-       set_cpu_present(cpu, false);
+               set_cpu_present(cpu, false);
+       }
+       unlock_device_hotplug();
 }
 
 static int vcpu_online(unsigned int cpu)
index 08e4af04d6f2c32850a049a83721933a82883b8c..e6c1934734b7d9bdde87e8a9e006e1584c88f15d 100644 (file)
@@ -138,7 +138,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
                clear_evtchn_to_irq_row(row);
        }
 
-       evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
+       evtchn_to_irq[row][col] = irq;
        return 0;
 }
 
index 57390c7666e5dd8d44bfe9bdf1e503afb13de189..b0b02a5011672b6670e136728b2c2a8a8f2ee68e 100644 (file)
@@ -492,12 +492,19 @@ static bool in_range(struct gntdev_grant_map *map,
        return true;
 }
 
-static void unmap_if_in_range(struct gntdev_grant_map *map,
-                             unsigned long start, unsigned long end)
+static int unmap_if_in_range(struct gntdev_grant_map *map,
+                             unsigned long start, unsigned long end,
+                             bool blockable)
 {
        unsigned long mstart, mend;
        int err;
 
+       if (!in_range(map, start, end))
+               return 0;
+
+       if (!blockable)
+               return -EAGAIN;
+
        mstart = max(start, map->vma->vm_start);
        mend   = min(end,   map->vma->vm_end);
        pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
@@ -508,6 +515,8 @@ static void unmap_if_in_range(struct gntdev_grant_map *map,
                                (mstart - map->vma->vm_start) >> PAGE_SHIFT,
                                (mend - mstart) >> PAGE_SHIFT);
        WARN_ON(err);
+
+       return 0;
 }
 
 static int mn_invl_range_start(struct mmu_notifier *mn,
@@ -519,25 +528,20 @@ static int mn_invl_range_start(struct mmu_notifier *mn,
        struct gntdev_grant_map *map;
        int ret = 0;
 
-       /* TODO do we really need a mutex here? */
        if (blockable)
                mutex_lock(&priv->lock);
        else if (!mutex_trylock(&priv->lock))
                return -EAGAIN;
 
        list_for_each_entry(map, &priv->maps, next) {
-               if (in_range(map, start, end)) {
-                       ret = -EAGAIN;
+               ret = unmap_if_in_range(map, start, end, blockable);
+               if (ret)
                        goto out_unlock;
-               }
-               unmap_if_in_range(map, start, end);
        }
        list_for_each_entry(map, &priv->freeable_maps, next) {
-               if (in_range(map, start, end)) {
-                       ret = -EAGAIN;
+               ret = unmap_if_in_range(map, start, end, blockable);
+               if (ret)
                        goto out_unlock;
-               }
-               unmap_if_in_range(map, start, end);
        }
 
 out_unlock:
index c93d8ef8df3483bbc393b2101c189120f844b634..5bb01a62f214da835ca2e941c3c928b50cf733a0 100644 (file)
@@ -280,9 +280,11 @@ static void sysrq_handler(struct xenbus_watch *watch, const char *path,
                /*
                 * The Xenstore watch fires directly after registering it and
                 * after a suspend/resume cycle. So ENOENT is no error but
-                * might happen in those cases.
+                * might happen in those cases. ERANGE is observed when we get
+                * an empty value (''), this happens when we acknowledge the
+                * request by writing '\0' below.
                 */
-               if (err != -ENOENT)
+               if (err != -ENOENT && err != -ERANGE)
                        pr_err("Error %d reading sysrq code in control/sysrq\n",
                               err);
                xenbus_transaction_end(xbt, 1);
index 084799c6180e6e6cb281eb882637b42999a1e683..3782cf070338e3fa5f830184a784dd9e2d0c666a 100644 (file)
 
 #include <xen/interface/memory.h>
 #include <xen/mem-reservation.h>
+#include <linux/moduleparam.h>
+
+bool __read_mostly xen_scrub_pages = IS_ENABLED(CONFIG_XEN_SCRUB_PAGES_DEFAULT);
+core_param(xen_scrub_pages, xen_scrub_pages, bool, 0);
 
 /*
  * Use one extent per PAGE_SIZE to avoid to break down the page into
index 294f35ce9e46bc063a7e01061462cf3e298b4364..63c1494a8d73bf2f53f0f0a1887290f338ae454a 100644 (file)
@@ -44,6 +44,7 @@
 #include <xen/xenbus.h>
 #include <xen/features.h>
 #include <xen/page.h>
+#include <xen/mem-reservation.h>
 
 #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
 
@@ -137,6 +138,7 @@ static DEVICE_ULONG_ATTR(schedule_delay, 0444, balloon_stats.schedule_delay);
 static DEVICE_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay);
 static DEVICE_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count);
 static DEVICE_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count);
+static DEVICE_BOOL_ATTR(scrub_pages, 0644, xen_scrub_pages);
 
 static ssize_t show_target_kb(struct device *dev, struct device_attribute *attr,
                              char *buf)
@@ -203,6 +205,7 @@ static struct attribute *balloon_attrs[] = {
        &dev_attr_max_schedule_delay.attr.attr,
        &dev_attr_retry_count.attr.attr,
        &dev_attr_max_retry_count.attr.attr,
+       &dev_attr_scrub_pages.attr.attr,
        NULL
 };
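
[Editor's note] The scrub toggle is exposed two ways: a core_param() boot parameter (xen_scrub_pages=) added in the mem-reservation hunk above, and the scrub_pages sysfs attribute added here. A minimal sketch of that pairing with hypothetical names:

#include <linux/device.h>
#include <linux/moduleparam.h>

/* Boot-time toggle: settable as demo_feature=0/1 on the kernel command line */
static bool demo_feature = true;
core_param(demo_feature, demo_feature, bool, 0);

/* Runtime toggle: exposes the same variable as a bool sysfs attribute */
static DEVICE_BOOL_ATTR(demo_feature, 0644, demo_feature);

static struct attribute *demo_attrs[] = {
        &dev_attr_demo_feature.attr.attr,
        NULL
};
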
 
index f2088838f690b218a4f16ee0a56bfa23a08acb73..5b471889d7237c926682392b123db99e7ab6656f 100644 (file)
@@ -402,10 +402,19 @@ static ssize_t modalias_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(modalias);
 
+static ssize_t state_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%s\n",
+                       xenbus_strstate(to_xenbus_device(dev)->state));
+}
+static DEVICE_ATTR_RO(state);
+
 static struct attribute *xenbus_dev_attrs[] = {
        &dev_attr_nodename.attr,
        &dev_attr_devtype.attr,
        &dev_attr_modalias.attr,
+       &dev_attr_state.attr,
        NULL,
 };
 
index 0c3285c8db95b4ec6457fdfe759e1997c9c69a7f..476dcbb79713d20d12023dfb265a0ef62e48e6fc 100644 (file)
@@ -98,13 +98,13 @@ static int afs_proc_cells_write(struct file *file, char *buf, size_t size)
                goto inval;
 
        args = strchr(name, ' ');
-       if (!args)
-               goto inval;
-       do {
-               *args++ = 0;
-       } while(*args == ' ');
-       if (!*args)
-               goto inval;
+       if (args) {
+               do {
+                       *args++ = 0;
+               } while(*args == ' ');
+               if (!*args)
+                       goto inval;
+       }
 
        /* determine command to perform */
        _debug("cmd=%s name=%s args=%s", buf, name, args);
@@ -120,7 +120,6 @@ static int afs_proc_cells_write(struct file *file, char *buf, size_t size)
 
                if (test_and_set_bit(AFS_CELL_FL_NO_GC, &cell->flags))
                        afs_put_cell(net, cell);
-               printk("kAFS: Added new cell '%s'\n", name);
        } else {
                goto inval;
        }
index 53af9f5253f4fc5556e568b9b78397bba61e00cc..2cddfe7806a412e4ecf26a61887fe4474ab380de 100644 (file)
@@ -1280,6 +1280,7 @@ struct btrfs_root {
        int send_in_progress;
        struct btrfs_subvolume_writers *subv_writers;
        atomic_t will_be_snapshotted;
+       atomic_t snapshot_force_cow;
 
        /* For qgroup metadata reserved space */
        spinlock_t qgroup_meta_rsv_lock;
@@ -3390,9 +3391,9 @@ do {                                                                      \
 #define btrfs_debug(fs_info, fmt, args...) \
        btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
 #define btrfs_debug_in_rcu(fs_info, fmt, args...) \
-       btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
+       btrfs_no_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args)
 #define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \
-       btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
+       btrfs_no_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args)
 #define btrfs_debug_rl(fs_info, fmt, args...) \
        btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
 #endif
@@ -3404,6 +3405,13 @@ do {                                                     \
        rcu_read_unlock();                              \
 } while (0)
 
+#define btrfs_no_printk_in_rcu(fs_info, fmt, args...)  \
+do {                                                   \
+       rcu_read_lock();                                \
+       btrfs_no_printk(fs_info, fmt, ##args);          \
+       rcu_read_unlock();                              \
+} while (0)
+
 #define btrfs_printk_ratelimited(fs_info, fmt, args...)                \
 do {                                                           \
        static DEFINE_RATELIMIT_STATE(_rs,                      \
index 5124c15705ce777ef88950529343310c389686df..05dc3c17cb62aa38dc7a18adc886475ac22fd80b 100644 (file)
@@ -1187,6 +1187,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
        atomic_set(&root->log_batch, 0);
        refcount_set(&root->refs, 1);
        atomic_set(&root->will_be_snapshotted, 0);
+       atomic_set(&root->snapshot_force_cow, 0);
        root->log_transid = 0;
        root->log_transid_committed = -1;
        root->last_log_commit = 0;
index de6f75f5547bdb159cf0764e5d4bfaf19a2c1085..2d9074295d7f0bb327010feeccffb1630c455477 100644 (file)
@@ -5800,7 +5800,7 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
  * root: the root of the parent directory
  * rsv: block reservation
  * items: the number of items that we need do reservation
- * qgroup_reserved: used to return the reserved size in qgroup
+ * use_global_rsv: allow fallback to the global block reservation
  *
  * This function is used to reserve the space for snapshot/subvolume
  * creation and deletion. Those operations are different with the
@@ -5810,10 +5810,10 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
  * the space reservation mechanism in start_transaction().
  */
 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
-                                    struct btrfs_block_rsv *rsv,
-                                    int items,
+                                    struct btrfs_block_rsv *rsv, int items,
                                     bool use_global_rsv)
 {
+       u64 qgroup_num_bytes = 0;
        u64 num_bytes;
        int ret;
        struct btrfs_fs_info *fs_info = root->fs_info;
@@ -5821,12 +5821,11 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
 
        if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
                /* One for parent inode, two for dir entries */
-               num_bytes = 3 * fs_info->nodesize;
-               ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
+               qgroup_num_bytes = 3 * fs_info->nodesize;
+               ret = btrfs_qgroup_reserve_meta_prealloc(root,
+                               qgroup_num_bytes, true);
                if (ret)
                        return ret;
-       } else {
-               num_bytes = 0;
        }
 
        num_bytes = btrfs_calc_trans_metadata_size(fs_info, items);
@@ -5838,8 +5837,8 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
        if (ret == -ENOSPC && use_global_rsv)
                ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1);
 
-       if (ret && num_bytes)
-               btrfs_qgroup_free_meta_prealloc(root, num_bytes);
+       if (ret && qgroup_num_bytes)
+               btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes);
 
        return ret;
 }
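
The change above fixes an error path that could free the wrong amount: num_bytes was first used for the qgroup prealloc reservation and then overwritten with the transaction metadata size, so the cleanup has to remember the qgroup amount separately. A tiny self-contained sketch of that "remember exactly what you reserved" pattern (the reserve/free helpers are stand-ins, not btrfs functions):

#include <stdio.h>

static long quota_reserved;

static int quota_reserve(long bytes) { quota_reserved += bytes; return 0; }
static void quota_free(long bytes)   { quota_reserved -= bytes; }

/* Reserve quota first, then attempt a second step that may fail. */
static int reserve_metadata(long nodesize, long items, int fail_second_step)
{
        long quota_bytes = 3 * nodesize;   /* tracked in its own variable on purpose */
        long num_bytes;
        int ret;

        ret = quota_reserve(quota_bytes);
        if (ret)
                return ret;

        num_bytes = 16384 * items;         /* unrelated size; must not clobber quota_bytes */
        ret = fail_second_step ? -1 : 0;

        if (ret && quota_bytes)
                quota_free(quota_bytes);   /* release exactly what was reserved */
        return ret;
}

int main(void)
{
        reserve_metadata(16384, 2, 1);
        printf("leaked quota after failure: %ld\n", quota_reserved); /* prints 0 */
        return 0;
}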
index 9357a19d2bff2c76de1b29d2e221333b6f76da7e..3ea5339603cff14f8dc103f6561091e83dc0b81a 100644 (file)
@@ -1271,7 +1271,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
        u64 disk_num_bytes;
        u64 ram_bytes;
        int extent_type;
-       int ret, err;
+       int ret;
        int type;
        int nocow;
        int check_prev = 1;
@@ -1403,11 +1403,8 @@ next_slot:
                         * if there are pending snapshots for this root,
                         * we fall into common COW way.
                         */
-                       if (!nolock) {
-                               err = btrfs_start_write_no_snapshotting(root);
-                               if (!err)
-                                       goto out_check;
-                       }
+                       if (!nolock && atomic_read(&root->snapshot_force_cow))
+                               goto out_check;
                        /*
                         * force cow if csum exists in the range.
                         * this ensure that csum for a given extent are
@@ -1416,9 +1413,6 @@ next_slot:
                        ret = csum_exist_in_range(fs_info, disk_bytenr,
                                                  num_bytes);
                        if (ret) {
-                               if (!nolock)
-                                       btrfs_end_write_no_snapshotting(root);
-
                                /*
                                 * ret could be -EIO if the above fails to read
                                 * metadata.
@@ -1431,11 +1425,8 @@ next_slot:
                                WARN_ON_ONCE(nolock);
                                goto out_check;
                        }
-                       if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr)) {
-                               if (!nolock)
-                                       btrfs_end_write_no_snapshotting(root);
+                       if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr))
                                goto out_check;
-                       }
                        nocow = 1;
                } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                        extent_end = found_key.offset +
@@ -1448,8 +1439,6 @@ next_slot:
 out_check:
                if (extent_end <= start) {
                        path->slots[0]++;
-                       if (!nolock && nocow)
-                               btrfs_end_write_no_snapshotting(root);
                        if (nocow)
                                btrfs_dec_nocow_writers(fs_info, disk_bytenr);
                        goto next_slot;
@@ -1471,8 +1460,6 @@ out_check:
                                             end, page_started, nr_written, 1,
                                             NULL);
                        if (ret) {
-                               if (!nolock && nocow)
-                                       btrfs_end_write_no_snapshotting(root);
                                if (nocow)
                                        btrfs_dec_nocow_writers(fs_info,
                                                                disk_bytenr);
@@ -1492,8 +1479,6 @@ out_check:
                                          ram_bytes, BTRFS_COMPRESS_NONE,
                                          BTRFS_ORDERED_PREALLOC);
                        if (IS_ERR(em)) {
-                               if (!nolock && nocow)
-                                       btrfs_end_write_no_snapshotting(root);
                                if (nocow)
                                        btrfs_dec_nocow_writers(fs_info,
                                                                disk_bytenr);
@@ -1532,8 +1517,6 @@ out_check:
                                             EXTENT_CLEAR_DATA_RESV,
                                             PAGE_UNLOCK | PAGE_SET_PRIVATE2);
 
-               if (!nolock && nocow)
-                       btrfs_end_write_no_snapshotting(root);
                cur_offset = extent_end;
 
                /*
@@ -6639,6 +6622,8 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
                drop_inode = 1;
        } else {
                struct dentry *parent = dentry->d_parent;
+               int ret;
+
                err = btrfs_update_inode(trans, root, inode);
                if (err)
                        goto fail;
@@ -6652,7 +6637,12 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
                                goto fail;
                }
                d_instantiate(dentry, inode);
-               btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent);
+               ret = btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent,
+                                        true, NULL);
+               if (ret == BTRFS_NEED_TRANS_COMMIT) {
+                       err = btrfs_commit_transaction(trans);
+                       trans = NULL;
+               }
        }
 
 fail:
@@ -9388,14 +9378,21 @@ static int btrfs_rename_exchange(struct inode *old_dir,
        u64 new_idx = 0;
        u64 root_objectid;
        int ret;
-       int ret2;
        bool root_log_pinned = false;
        bool dest_log_pinned = false;
+       struct btrfs_log_ctx ctx_root;
+       struct btrfs_log_ctx ctx_dest;
+       bool sync_log_root = false;
+       bool sync_log_dest = false;
+       bool commit_transaction = false;
 
        /* we only allow rename subvolume link between subvolumes */
        if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
                return -EXDEV;
 
+       btrfs_init_log_ctx(&ctx_root, old_inode);
+       btrfs_init_log_ctx(&ctx_dest, new_inode);
+
        /* close the race window with snapshot create/destroy ioctl */
        if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
                down_read(&fs_info->subvol_sem);
@@ -9542,15 +9539,29 @@ static int btrfs_rename_exchange(struct inode *old_dir,
 
        if (root_log_pinned) {
                parent = new_dentry->d_parent;
-               btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
-                               parent);
+               ret = btrfs_log_new_name(trans, BTRFS_I(old_inode),
+                                        BTRFS_I(old_dir), parent,
+                                        false, &ctx_root);
+               if (ret == BTRFS_NEED_LOG_SYNC)
+                       sync_log_root = true;
+               else if (ret == BTRFS_NEED_TRANS_COMMIT)
+                       commit_transaction = true;
+               ret = 0;
                btrfs_end_log_trans(root);
                root_log_pinned = false;
        }
        if (dest_log_pinned) {
-               parent = old_dentry->d_parent;
-               btrfs_log_new_name(trans, BTRFS_I(new_inode), BTRFS_I(new_dir),
-                               parent);
+               if (!commit_transaction) {
+                       parent = old_dentry->d_parent;
+                       ret = btrfs_log_new_name(trans, BTRFS_I(new_inode),
+                                                BTRFS_I(new_dir), parent,
+                                                false, &ctx_dest);
+                       if (ret == BTRFS_NEED_LOG_SYNC)
+                               sync_log_dest = true;
+                       else if (ret == BTRFS_NEED_TRANS_COMMIT)
+                               commit_transaction = true;
+                       ret = 0;
+               }
                btrfs_end_log_trans(dest);
                dest_log_pinned = false;
        }
@@ -9583,8 +9594,26 @@ out_fail:
                        dest_log_pinned = false;
                }
        }
-       ret2 = btrfs_end_transaction(trans);
-       ret = ret ? ret : ret2;
+       if (!ret && sync_log_root && !commit_transaction) {
+               ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root,
+                                    &ctx_root);
+               if (ret)
+                       commit_transaction = true;
+       }
+       if (!ret && sync_log_dest && !commit_transaction) {
+               ret = btrfs_sync_log(trans, BTRFS_I(new_inode)->root,
+                                    &ctx_dest);
+               if (ret)
+                       commit_transaction = true;
+       }
+       if (commit_transaction) {
+               ret = btrfs_commit_transaction(trans);
+       } else {
+               int ret2;
+
+               ret2 = btrfs_end_transaction(trans);
+               ret = ret ? ret : ret2;
+       }
 out_notrans:
        if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
                up_read(&fs_info->subvol_sem);
@@ -9661,6 +9690,9 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        int ret;
        u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
        bool log_pinned = false;
+       struct btrfs_log_ctx ctx;
+       bool sync_log = false;
+       bool commit_transaction = false;
 
        if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
                return -EPERM;
@@ -9818,8 +9850,15 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        if (log_pinned) {
                struct dentry *parent = new_dentry->d_parent;
 
-               btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
-                               parent);
+               btrfs_init_log_ctx(&ctx, old_inode);
+               ret = btrfs_log_new_name(trans, BTRFS_I(old_inode),
+                                        BTRFS_I(old_dir), parent,
+                                        false, &ctx);
+               if (ret == BTRFS_NEED_LOG_SYNC)
+                       sync_log = true;
+               else if (ret == BTRFS_NEED_TRANS_COMMIT)
+                       commit_transaction = true;
+               ret = 0;
                btrfs_end_log_trans(root);
                log_pinned = false;
        }
@@ -9856,7 +9895,19 @@ out_fail:
                btrfs_end_log_trans(root);
                log_pinned = false;
        }
-       btrfs_end_transaction(trans);
+       if (!ret && sync_log) {
+               ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root, &ctx);
+               if (ret)
+                       commit_transaction = true;
+       }
+       if (commit_transaction) {
+               ret = btrfs_commit_transaction(trans);
+       } else {
+               int ret2;
+
+               ret2 = btrfs_end_transaction(trans);
+               ret = ret ? ret : ret2;
+       }
 out_notrans:
        if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
                up_read(&fs_info->subvol_sem);
index 63600dc2ac4cb104d0feb1df1d5b8ccc650d1091..d60b6caf09e857ef7c39e8ce337171d04cca1d72 100644 (file)
@@ -747,6 +747,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
        struct btrfs_pending_snapshot *pending_snapshot;
        struct btrfs_trans_handle *trans;
        int ret;
+       bool snapshot_force_cow = false;
 
        if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
                return -EINVAL;
@@ -763,6 +764,11 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
                goto free_pending;
        }
 
+       /*
+        * Force new buffered writes to reserve space even when NOCOW is
+        * possible. This is to avoid later writeback (running delalloc)
+        * falling back to COW mode and unexpectedly failing with ENOSPC.
+        */
        atomic_inc(&root->will_be_snapshotted);
        smp_mb__after_atomic();
        /* wait for no snapshot writes */
@@ -773,6 +779,14 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
        if (ret)
                goto dec_and_free;
 
+       /*
+        * All previous writes have started writeback in NOCOW mode, so now
+        * we force future writes to fall back to COW mode during snapshot
+        * creation.
+        */
+       atomic_inc(&root->snapshot_force_cow);
+       snapshot_force_cow = true;
+
        btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
 
        btrfs_init_block_rsv(&pending_snapshot->block_rsv,
@@ -837,6 +851,8 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
 fail:
        btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv);
 dec_and_free:
+       if (snapshot_force_cow)
+               atomic_dec(&root->snapshot_force_cow);
        if (atomic_dec_and_test(&root->will_be_snapshotted))
                wake_up_var(&root->will_be_snapshotted);
 free_pending:
@@ -3453,6 +3469,25 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen,
 
                same_lock_start = min_t(u64, loff, dst_loff);
                same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
+       } else {
+               /*
+                * If the source and destination inodes are different, the
+                * source's range end offset matches the source's i_size, that
+                * i_size is not a multiple of the sector size, and the
+                * destination range does not go past the destination's i_size,
+                * we must round down the length to the nearest sector size
+                * multiple. If we don't do this adjustment we end up replacing
+                * with zeroes the bytes in the range that starts at the
+                * deduplication range's end offset and ends at the next sector
+                * size multiple.
+                */
+               if (loff + olen == i_size_read(src) &&
+                   dst_loff + len < i_size_read(dst)) {
+                       const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize;
+
+                       len = round_down(i_size_read(src), sz) - loff;
+                       olen = len;
+               }
        }
 
 again:
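
The rounding above only matters when the source range ends exactly at a non-sector-aligned i_size and the destination still has data beyond the target range; the length is then trimmed to a sector multiple so the trailing partial sector is not deduped over live destination bytes. A standalone sketch of the arithmetic (sector size and offsets are made-up values, and round_down() is a simple local macro):

#include <stdio.h>
#include <stdint.h>

#define round_down(x, y) ((x) - ((x) % (y)))

int main(void)
{
        const uint64_t sectorsize = 4096;
        uint64_t src_isize = 12000;                       /* not a multiple of 4096 */
        uint64_t loff = 4096, olen = src_isize - loff;    /* source range ends at i_size */
        uint64_t dst_isize = 65536, dst_loff = 0, len = olen;

        if (loff + olen == src_isize && dst_loff + len < dst_isize) {
                len = round_down(src_isize, sectorsize) - loff;
                olen = len;                               /* trimmed from 7904 to 4096 */
        }
        printf("dedup length trimmed to %llu bytes\n", (unsigned long long)len);
        return 0;
}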
index 4353bb69bb8672267286562c1bdd629fecca4e9b..d4917c0cddf57f11b6fbb600d73e629fcbd14093 100644 (file)
@@ -1019,10 +1019,9 @@ out_add_root:
        spin_unlock(&fs_info->qgroup_lock);
 
        ret = btrfs_commit_transaction(trans);
-       if (ret) {
-               trans = NULL;
+       trans = NULL;
+       if (ret)
                goto out_free_path;
-       }
 
        ret = qgroup_rescan_init(fs_info, 0, 1);
        if (!ret) {
index 1650dc44a5e37e483efebcf1a9e84117b0a1ed6a..3c2ae0e4f25a8de78040f6ca0ca94d070f158dd2 100644 (file)
@@ -6025,14 +6025,25 @@ void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
  * Call this after adding a new name for a file and it will properly
  * update the log to reflect the new name.
  *
- * It will return zero if all goes well, and it will return 1 if a
- * full transaction commit is required.
+ * @ctx must not be NULL when @sync_log is false, and should be NULL when it's
+ * true (because it's not used).
+ *
+ * Return value depends on whether @sync_log is true or false.
+ * When true: returns BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
+ *            committed by the caller, and BTRFS_DONT_NEED_TRANS_COMMIT
+ *            otherwise.
+ * When false: returns BTRFS_DONT_NEED_LOG_SYNC if the caller does not need to
+ *             sync the log, BTRFS_NEED_LOG_SYNC if it needs to sync the log,
+ *             or BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
+ *             committed (without attempting to sync the log).
  */
 int btrfs_log_new_name(struct btrfs_trans_handle *trans,
                        struct btrfs_inode *inode, struct btrfs_inode *old_dir,
-                       struct dentry *parent)
+                       struct dentry *parent,
+                       bool sync_log, struct btrfs_log_ctx *ctx)
 {
        struct btrfs_fs_info *fs_info = trans->fs_info;
+       int ret;
 
        /*
         * this will force the logging code to walk the dentry chain
@@ -6047,9 +6058,34 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans,
         */
        if (inode->logged_trans <= fs_info->last_trans_committed &&
            (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed))
-               return 0;
+               return sync_log ? BTRFS_DONT_NEED_TRANS_COMMIT :
+                       BTRFS_DONT_NEED_LOG_SYNC;
+
+       if (sync_log) {
+               struct btrfs_log_ctx ctx2;
+
+               btrfs_init_log_ctx(&ctx2, &inode->vfs_inode);
+               ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
+                                            LOG_INODE_EXISTS, &ctx2);
+               if (ret == BTRFS_NO_LOG_SYNC)
+                       return BTRFS_DONT_NEED_TRANS_COMMIT;
+               else if (ret)
+                       return BTRFS_NEED_TRANS_COMMIT;
+
+               ret = btrfs_sync_log(trans, inode->root, &ctx2);
+               if (ret)
+                       return BTRFS_NEED_TRANS_COMMIT;
+               return BTRFS_DONT_NEED_TRANS_COMMIT;
+       }
+
+       ASSERT(ctx);
+       ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
+                                    LOG_INODE_EXISTS, ctx);
+       if (ret == BTRFS_NO_LOG_SYNC)
+               return BTRFS_DONT_NEED_LOG_SYNC;
+       else if (ret)
+               return BTRFS_NEED_TRANS_COMMIT;
 
-       return btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
-                                     LOG_INODE_EXISTS, NULL);
+       return BTRFS_NEED_LOG_SYNC;
 }
 
index 122e68b89a5ade4d64dcaf7ce887980c17fc9d5e..7ab9bb88a63935664a3d0dc556987f8f08f1561e 100644 (file)
@@ -71,8 +71,16 @@ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
                             int for_rename);
 void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
                                   struct btrfs_inode *dir);
+/* Return values for btrfs_log_new_name() */
+enum {
+       BTRFS_DONT_NEED_TRANS_COMMIT,
+       BTRFS_NEED_TRANS_COMMIT,
+       BTRFS_DONT_NEED_LOG_SYNC,
+       BTRFS_NEED_LOG_SYNC,
+};
 int btrfs_log_new_name(struct btrfs_trans_handle *trans,
                        struct btrfs_inode *inode, struct btrfs_inode *old_dir,
-                       struct dentry *parent);
+                       struct dentry *parent,
+                       bool sync_log, struct btrfs_log_ctx *ctx);
 
 #endif
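
Callers of btrfs_log_new_name() now branch on these values instead of a 0/1 result: the rename paths above record whether to sync the log or commit the whole transaction and act on that after ending the log transaction. A compressed userspace sketch of that decision flow (enum names are shortened from the header's BTRFS_* values; everything else is illustrative):

#include <stdio.h>

enum {
        DONT_NEED_TRANS_COMMIT,
        NEED_TRANS_COMMIT,
        DONT_NEED_LOG_SYNC,
        NEED_LOG_SYNC,
};

static int log_new_name(void) { return NEED_LOG_SYNC; }   /* stand-in result */
static int sync_log(void)     { puts("sync log");     return 0; }
static int commit_trans(void) { puts("commit trans"); return 0; }
static int end_trans(void)    { puts("end trans");    return 0; }

int main(void)
{
        int sync = 0, commit = 0, ret;

        ret = log_new_name();
        if (ret == NEED_LOG_SYNC)
                sync = 1;
        else if (ret == NEED_TRANS_COMMIT)
                commit = 1;

        if (sync && !commit && sync_log())   /* fall back to a commit if the sync fails */
                commit = 1;

        ret = commit ? commit_trans() : end_trans();
        return ret;
}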
index da86706123ffa4d85ce19148be2157edb9d7d3a4..f4405e430da6e003306e1a4d471731e1a13c9230 100644 (file)
@@ -4491,7 +4491,12 @@ again:
 
        /* Now btrfs_update_device() will change the on-disk size. */
        ret = btrfs_update_device(trans, device);
-       btrfs_end_transaction(trans);
+       if (ret < 0) {
+               btrfs_abort_transaction(trans, ret);
+               btrfs_end_transaction(trans);
+       } else {
+               ret = btrfs_commit_transaction(trans);
+       }
 done:
        btrfs_free_path(path);
        if (ret) {
index 4cc679d5bf58c7bc0d0a7eb09cbc07ab0575197d..6f1ae3ac97896c6fff85e947b9fe7a6553368457 100644 (file)
@@ -39,7 +39,6 @@
 #include <linux/buffer_head.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/bio.h>
-#include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/bitops.h>
 #include <linux/mpage.h>
index 43ca3b763875d43c83ae43e9bd6b4b475493ac46..eab1359d05532afb6960c7acfb5c4fee2891eac9 100644 (file)
@@ -602,6 +602,8 @@ static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
 
 /*
  * create a new fs client
+ *
+ * Success or not, this function consumes @fsopt and @opt.
  */
 static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
                                        struct ceph_options *opt)
@@ -609,17 +611,20 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
        struct ceph_fs_client *fsc;
        int page_count;
        size_t size;
-       int err = -ENOMEM;
+       int err;
 
        fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
-       if (!fsc)
-               return ERR_PTR(-ENOMEM);
+       if (!fsc) {
+               err = -ENOMEM;
+               goto fail;
+       }
 
        fsc->client = ceph_create_client(opt, fsc);
        if (IS_ERR(fsc->client)) {
                err = PTR_ERR(fsc->client);
                goto fail;
        }
+       opt = NULL; /* fsc->client now owns this */
 
        fsc->client->extra_mon_dispatch = extra_mon_dispatch;
        fsc->client->osdc.abort_on_full = true;
@@ -677,6 +682,9 @@ fail_client:
        ceph_destroy_client(fsc->client);
 fail:
        kfree(fsc);
+       if (opt)
+               ceph_destroy_options(opt);
+       destroy_mount_options(fsopt);
        return ERR_PTR(err);
 }
 
@@ -1042,8 +1050,6 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type,
        fsc = create_fs_client(fsopt, opt);
        if (IS_ERR(fsc)) {
                res = ERR_CAST(fsc);
-               destroy_mount_options(fsopt);
-               ceph_destroy_options(opt);
                goto out_final;
        }
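
With the change above, create_fs_client() owns both option structures whether it succeeds or fails, and clears its local opt pointer once ceph_create_client() has taken it over, so the single fail: label frees only what is still unowned and ceph_mount() no longer double-frees. A small sketch of that "consume the arguments on every path" convention (the create/destroy helpers here are stand-ins, not ceph APIs):

#include <stdlib.h>
#include <stdio.h>

struct opts   { int placeholder; };
struct client { struct opts *opt; };

static void destroy_opts(struct opts *o) { free(o); }

/* Consumes @o whether it succeeds or fails; the caller never frees it. */
static struct client *create_client(struct opts *o, int fail)
{
        struct client *c = NULL;

        if (fail)
                goto out;                  /* simulated lower-level failure */
        c = malloc(sizeof(*c));
        if (c) {
                c->opt = o;
                o = NULL;                  /* ownership handed to the client */
        }
out:
        if (o)
                destroy_opts(o);           /* free only what is still ours */
        return c;
}

int main(void)
{
        struct client *c = create_client(malloc(sizeof(struct opts)), 1);

        if (!c)
                puts("create failed, options already released");
        return 0;
}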
 
index 35c83fe7dba0115007a344821d0a4d5dfcbbd23d..abcd78e332feb05ad8fa7f540d14917d2835f68b 100644 (file)
@@ -6,6 +6,7 @@ config CIFS
        select CRYPTO_MD4
        select CRYPTO_MD5
        select CRYPTO_SHA256
+       select CRYPTO_SHA512
        select CRYPTO_CMAC
        select CRYPTO_HMAC
        select CRYPTO_ARC4
index b380e0871372df09635079008c970f6c8b6933c0..a2b2355e7f019c3bac02da2a6ffc3c9786c39f57 100644 (file)
@@ -105,9 +105,6 @@ convert_sfm_char(const __u16 src_char, char *target)
        case SFM_LESSTHAN:
                *target = '<';
                break;
-       case SFM_SLASH:
-               *target = '\\';
-               break;
        case SFM_SPACE:
                *target = ' ';
                break;
index dc2f4cf08fe94a22def3309b4d35b4f0358a0ec3..5657b79dbc99f1e7fd87061a8270504d97743dfc 100644 (file)
@@ -601,10 +601,15 @@ CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses)
        }
 
        count = 0;
+       /*
+        * We know that all the name entries in the protocols array
+        * are short (< 16 bytes anyway) and are NUL terminated.
+        */
        for (i = 0; i < CIFS_NUM_PROT; i++) {
-               strncpy(pSMB->DialectsArray+count, protocols[i].name, 16);
-               count += strlen(protocols[i].name) + 1;
-               /* null at end of source and target buffers anyway */
+               size_t len = strlen(protocols[i].name) + 1;
+
+               memcpy(pSMB->DialectsArray+count, protocols[i].name, len);
+               count += len;
        }
        inc_rfc1001_len(pSMB, count);
        pSMB->ByteCount = cpu_to_le16(count);
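
Replacing strncpy() with a memcpy() of strlen()+1 bytes copies each NUL-terminated dialect name exactly once and keeps the running byte count equal to what was actually written. A standalone sketch of packing short NUL-terminated names into one buffer (the example strings are illustrative, not the real dialect table):

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *protocols[] = { "NT LM 0.12", "SMB 2.002", "SMB 2.???" };
        char buf[64];
        size_t count = 0;

        for (size_t i = 0; i < sizeof(protocols) / sizeof(protocols[0]); i++) {
                size_t len = strlen(protocols[i]) + 1;   /* include the terminating NUL */

                memcpy(buf + count, protocols[i], len);
                count += len;
        }
        printf("packed %zu bytes of dialect names\n", count);
        return 0;
}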
index c832a8a1970aabf11001237998df6f4c58208796..7aa08dba4719cde8c88c18bcb5ee4a4ddae733f1 100644 (file)
@@ -2547,7 +2547,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb_vol *volume_info)
        if (tcon == NULL)
                return -ENOMEM;
 
-       snprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->serverName);
+       snprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->server->hostname);
 
        /* cannot fail */
        nls_codepage = load_nls_default();
index d32eaa4b243767a9c65f0fc19906fdc00e0e4ece..6e8765f445086d2208a567825b5fb6022e564900 100644 (file)
@@ -467,6 +467,8 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
        oparms.cifs_sb = cifs_sb;
        oparms.desired_access = GENERIC_READ;
        oparms.create_options = CREATE_NOT_DIR;
+       if (backup_cred(cifs_sb))
+               oparms.create_options |= CREATE_OPEN_BACKUP_INTENT;
        oparms.disposition = FILE_OPEN;
        oparms.path = path;
        oparms.fid = &fid;
index dacb2c05674c5d7f11f7c4c9144646a95039d106..6926685e513cf4a3691df7efba36eb3dcb578b83 100644 (file)
@@ -402,9 +402,17 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
                        (struct smb_com_transaction_change_notify_rsp *)buf;
                struct file_notify_information *pnotify;
                __u32 data_offset = 0;
+               size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);
+
                if (get_bcc(buf) > sizeof(struct file_notify_information)) {
                        data_offset = le32_to_cpu(pSMBr->DataOffset);
 
+                       if (data_offset >
+                           len - sizeof(struct file_notify_information)) {
+                               cifs_dbg(FYI, "invalid data_offset %u\n",
+                                        data_offset);
+                               return true;
+                       }
                        pnotify = (struct file_notify_information *)
                                ((char *)&pSMBr->hdr.Protocol + data_offset);
                        cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
index eeab81c9452f86e61948198b0d938ea8b6a9a5dd..e169e1a5fd358c5ac9649f1f24e7a4fb876d3635 100644 (file)
@@ -376,8 +376,15 @@ static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level)
 
                new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) +
                                pfData->FileNameLength;
-       } else
-               new_entry = old_entry + le32_to_cpu(pDirInfo->NextEntryOffset);
+       } else {
+               u32 next_offset = le32_to_cpu(pDirInfo->NextEntryOffset);
+
+               if (old_entry + next_offset < old_entry) {
+                       cifs_dbg(VFS, "invalid offset %u\n", next_offset);
+                       return NULL;
+               }
+               new_entry = old_entry + next_offset;
+       }
        cifs_dbg(FYI, "new entry %p old entry %p\n", new_entry, old_entry);
        /* validate that new_entry is not past end of SMB */
        if (new_entry >= end_of_smb) {
index db0453660ff6c97d9d7ab66991d9b192fadcaed7..6a9c47541c53d0983a068703106ddacdbc834e04 100644 (file)
@@ -248,16 +248,20 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
                 * MacOS server pads after SMB2.1 write response with 3 bytes
                 * of junk. Other servers match RFC1001 len to actual
                 * SMB2/SMB3 frame length (header + smb2 response specific data)
-                * Some windows servers do too when compounding is used.
-                * Log the server error (once), but allow it and continue
+                * Some Windows servers also pad up to 8 bytes when compounding.
+                * If the pad is longer than eight bytes, log the server behavior
+                * (once), since it may indicate a problem, but allow it and continue
                 * since the frame is parseable.
                 */
                if (clc_len < len) {
-                       printk_once(KERN_WARNING
-                               "SMB2 server sent bad RFC1001 len %d not %d\n",
-                               len, clc_len);
+                       pr_warn_once(
+                            "srv rsp padded more than expected. Length %d not %d for cmd:%d mid:%llu\n",
+                            len, clc_len, command, mid);
                        return 0;
                }
+               pr_warn_once(
+                       "srv rsp too short, len %d not %d. cmd:%d mid:%llu\n",
+                       len, clc_len, command, mid);
 
                return 1;
        }
index 247a98e6c856eb79bd0f8110741efe2ce699099b..d954ce36b4734c06ca63e2fdb0343f6109d2ec57 100644 (file)
@@ -630,7 +630,10 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
        oparms.tcon = tcon;
        oparms.desired_access = FILE_READ_ATTRIBUTES;
        oparms.disposition = FILE_OPEN;
-       oparms.create_options = 0;
+       if (backup_cred(cifs_sb))
+               oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+       else
+               oparms.create_options = 0;
        oparms.fid = &fid;
        oparms.reconnect = false;
 
@@ -779,7 +782,10 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
        oparms.tcon = tcon;
        oparms.desired_access = FILE_READ_EA;
        oparms.disposition = FILE_OPEN;
-       oparms.create_options = 0;
+       if (backup_cred(cifs_sb))
+               oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+       else
+               oparms.create_options = 0;
        oparms.fid = &fid;
        oparms.reconnect = false;
 
@@ -858,7 +864,10 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
        oparms.tcon = tcon;
        oparms.desired_access = FILE_WRITE_EA;
        oparms.disposition = FILE_OPEN;
-       oparms.create_options = 0;
+       if (backup_cred(cifs_sb))
+               oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+       else
+               oparms.create_options = 0;
        oparms.fid = &fid;
        oparms.reconnect = false;
 
@@ -1453,7 +1462,10 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
        oparms.tcon = tcon;
        oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
        oparms.disposition = FILE_OPEN;
-       oparms.create_options = 0;
+       if (backup_cred(cifs_sb))
+               oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+       else
+               oparms.create_options = 0;
        oparms.fid = fid;
        oparms.reconnect = false;
 
@@ -1857,7 +1869,10 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
        oparms.tcon = tcon;
        oparms.desired_access = FILE_READ_ATTRIBUTES;
        oparms.disposition = FILE_OPEN;
-       oparms.create_options = 0;
+       if (backup_cred(cifs_sb))
+               oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+       else
+               oparms.create_options = 0;
        oparms.fid = &fid;
        oparms.reconnect = false;
 
@@ -3639,7 +3654,7 @@ struct smb_version_values smb21_values = {
 struct smb_version_values smb3any_values = {
        .version_string = SMB3ANY_VERSION_STRING,
        .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
-       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
        .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
        .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3660,7 +3675,7 @@ struct smb_version_values smb3any_values = {
 struct smb_version_values smbdefault_values = {
        .version_string = SMBDEFAULT_VERSION_STRING,
        .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
-       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
        .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
        .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3681,7 +3696,7 @@ struct smb_version_values smbdefault_values = {
 struct smb_version_values smb30_values = {
        .version_string = SMB30_VERSION_STRING,
        .protocol_id = SMB30_PROT_ID,
-       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
        .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
        .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3702,7 +3717,7 @@ struct smb_version_values smb30_values = {
 struct smb_version_values smb302_values = {
        .version_string = SMB302_VERSION_STRING,
        .protocol_id = SMB302_PROT_ID,
-       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
        .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
        .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3723,7 +3738,7 @@ struct smb_version_values smb302_values = {
 struct smb_version_values smb311_values = {
        .version_string = SMB311_VERSION_STRING,
        .protocol_id = SMB311_PROT_ID,
-       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
        .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
        .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
index 5740aa809be663547de90bf9e71ca863b74555b5..f54d07bda067c224c3bac6f511458330dbcc502e 100644 (file)
@@ -2178,6 +2178,9 @@ SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
        if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
            *oplock == SMB2_OPLOCK_LEVEL_NONE)
                req->RequestedOplockLevel = *oplock;
+       else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&
+                 (oparms->create_options & CREATE_NOT_FILE))
+               req->RequestedOplockLevel = *oplock; /* no srv lease support */
        else {
                rc = add_lease_context(server, iov, &n_iov,
                                       oparms->fid->lease_key, oplock);
@@ -2456,14 +2459,14 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        /* We check for obvious errors in the output buffer length and offset */
        if (*plen == 0)
                goto ioctl_exit; /* server returned no data */
-       else if (*plen > 0xFF00) {
+       else if (*plen > rsp_iov.iov_len || *plen > 0xFF00) {
                cifs_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
                *plen = 0;
                rc = -EIO;
                goto ioctl_exit;
        }
 
-       if (rsp_iov.iov_len < le32_to_cpu(rsp->OutputOffset) + *plen) {
+       if (rsp_iov.iov_len - *plen < le32_to_cpu(rsp->OutputOffset)) {
                cifs_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen,
                        le32_to_cpu(rsp->OutputOffset));
                *plen = 0;
@@ -3574,33 +3577,38 @@ num_entries(char *bufstart, char *end_of_buf, char **lastentry, size_t size)
        int len;
        unsigned int entrycount = 0;
        unsigned int next_offset = 0;
-       FILE_DIRECTORY_INFO *entryptr;
+       char *entryptr;
+       FILE_DIRECTORY_INFO *dir_info;
 
        if (bufstart == NULL)
                return 0;
 
-       entryptr = (FILE_DIRECTORY_INFO *)bufstart;
+       entryptr = bufstart;
 
        while (1) {
-               entryptr = (FILE_DIRECTORY_INFO *)
-                                       ((char *)entryptr + next_offset);
-
-               if ((char *)entryptr + size > end_of_buf) {
+               if (entryptr + next_offset < entryptr ||
+                   entryptr + next_offset > end_of_buf ||
+                   entryptr + next_offset + size > end_of_buf) {
                        cifs_dbg(VFS, "malformed search entry would overflow\n");
                        break;
                }
 
-               len = le32_to_cpu(entryptr->FileNameLength);
-               if ((char *)entryptr + len + size > end_of_buf) {
+               entryptr = entryptr + next_offset;
+               dir_info = (FILE_DIRECTORY_INFO *)entryptr;
+
+               len = le32_to_cpu(dir_info->FileNameLength);
+               if (entryptr + len < entryptr ||
+                   entryptr + len > end_of_buf ||
+                   entryptr + len + size > end_of_buf) {
                        cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n",
                                 end_of_buf);
                        break;
                }
 
-               *lastentry = (char *)entryptr;
+               *lastentry = entryptr;
                entrycount++;
 
-               next_offset = le32_to_cpu(entryptr->NextEntryOffset);
+               next_offset = le32_to_cpu(dir_info->NextEntryOffset);
                if (!next_offset)
                        break;
        }
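
num_entries() now walks the response with a plain char pointer and rejects any NextEntryOffset or name length that would wrap or run past the end of the buffer before dereferencing the entry. A self-contained sketch of the same defensive walk over variable-length records (the record layout is invented for the example):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct rec {                 /* toy fixed header followed by a variable-length name */
        uint32_t next_offset;
        uint32_t name_len;
};

static unsigned int count_records(const char *buf, const char *end)
{
        const char *p = buf;
        unsigned int n = 0;
        uint32_t next = 0;

        for (;;) {
                /* reject offsets that wrap or point past the buffer */
                if (p + next < p || p + next > end ||
                    p + next + sizeof(struct rec) > end)
                        break;
                p += next;

                const struct rec *r = (const struct rec *)p;
                if (p + sizeof(*r) + r->name_len > end)
                        break;          /* name would overflow the frame */
                n++;

                next = r->next_offset;
                if (!next)
                        break;          /* last entry */
        }
        return n;
}

int main(void)
{
        _Alignas(uint32_t) char buf[64] = {0};
        struct rec r1 = { .next_offset = 16, .name_len = 4 };
        struct rec r2 = { .next_offset = 0,  .name_len = 1u << 30 };  /* bogus length */

        memcpy(buf, &r1, sizeof(r1));
        memcpy(buf + 16, &r2, sizeof(r2));
        printf("valid records: %u\n", count_records(buf, buf + sizeof(buf))); /* 1 */
        return 0;
}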
index 897b51e41d8f0f741c7514bc3ddd8c5bb4277b4a..f32d7125ad0f237d61173cd72383683ac380c4e4 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -226,8 +226,8 @@ static inline void *unlock_slot(struct address_space *mapping, void **slot)
  *
  * Must be called with the i_pages lock held.
  */
-static void *get_unlocked_mapping_entry(struct address_space *mapping,
-                                       pgoff_t index, void ***slotp)
+static void *__get_unlocked_mapping_entry(struct address_space *mapping,
+               pgoff_t index, void ***slotp, bool (*wait_fn)(void))
 {
        void *entry, **slot;
        struct wait_exceptional_entry_queue ewait;
@@ -237,6 +237,8 @@ static void *get_unlocked_mapping_entry(struct address_space *mapping,
        ewait.wait.func = wake_exceptional_entry_func;
 
        for (;;) {
+               bool revalidate;
+
                entry = __radix_tree_lookup(&mapping->i_pages, index, NULL,
                                          &slot);
                if (!entry ||
@@ -251,14 +253,31 @@ static void *get_unlocked_mapping_entry(struct address_space *mapping,
                prepare_to_wait_exclusive(wq, &ewait.wait,
                                          TASK_UNINTERRUPTIBLE);
                xa_unlock_irq(&mapping->i_pages);
-               schedule();
+               revalidate = wait_fn();
                finish_wait(wq, &ewait.wait);
                xa_lock_irq(&mapping->i_pages);
+               if (revalidate)
+                       return ERR_PTR(-EAGAIN);
        }
 }
 
-static void dax_unlock_mapping_entry(struct address_space *mapping,
-                                    pgoff_t index)
+static bool entry_wait(void)
+{
+       schedule();
+       /*
+        * Never return an ERR_PTR() from
+        * __get_unlocked_mapping_entry(); just keep looping.
+        */
+       return false;
+}
+
+static void *get_unlocked_mapping_entry(struct address_space *mapping,
+               pgoff_t index, void ***slotp)
+{
+       return __get_unlocked_mapping_entry(mapping, index, slotp, entry_wait);
+}
+
+static void unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
 {
        void *entry, **slot;
 
@@ -277,7 +296,7 @@ static void dax_unlock_mapping_entry(struct address_space *mapping,
 static void put_locked_mapping_entry(struct address_space *mapping,
                pgoff_t index)
 {
-       dax_unlock_mapping_entry(mapping, index);
+       unlock_mapping_entry(mapping, index);
 }
 
 /*
@@ -319,18 +338,27 @@ static unsigned long dax_radix_end_pfn(void *entry)
        for (pfn = dax_radix_pfn(entry); \
                        pfn < dax_radix_end_pfn(entry); pfn++)
 
-static void dax_associate_entry(void *entry, struct address_space *mapping)
+/*
+ * TODO: for reflink+dax we need a way to associate a single page with
+ * multiple address_space instances at different linear_page_index()
+ * offsets.
+ */
+static void dax_associate_entry(void *entry, struct address_space *mapping,
+               struct vm_area_struct *vma, unsigned long address)
 {
-       unsigned long pfn;
+       unsigned long size = dax_entry_size(entry), pfn, index;
+       int i = 0;
 
        if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
                return;
 
+       index = linear_page_index(vma, address & ~(size - 1));
        for_each_mapped_pfn(entry, pfn) {
                struct page *page = pfn_to_page(pfn);
 
                WARN_ON_ONCE(page->mapping);
                page->mapping = mapping;
+               page->index = index + i++;
        }
 }
 
@@ -348,6 +376,7 @@ static void dax_disassociate_entry(void *entry, struct address_space *mapping,
                WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
                WARN_ON_ONCE(page->mapping && page->mapping != mapping);
                page->mapping = NULL;
+               page->index = 0;
        }
 }
 
@@ -364,6 +393,84 @@ static struct page *dax_busy_page(void *entry)
        return NULL;
 }
 
+static bool entry_wait_revalidate(void)
+{
+       rcu_read_unlock();
+       schedule();
+       rcu_read_lock();
+
+       /*
+        * Tell __get_unlocked_mapping_entry() to take a break; we need
+        * to revalidate page->mapping after dropping locks.
+        */
+       return true;
+}
+
+bool dax_lock_mapping_entry(struct page *page)
+{
+       pgoff_t index;
+       struct inode *inode;
+       bool did_lock = false;
+       void *entry = NULL, **slot;
+       struct address_space *mapping;
+
+       rcu_read_lock();
+       for (;;) {
+               mapping = READ_ONCE(page->mapping);
+
+               if (!dax_mapping(mapping))
+                       break;
+
+               /*
+                * In the device-dax case there's no need to lock, a
+                * struct dev_pagemap pin is sufficient to keep the
+                * inode alive, and we assume we have dev_pagemap pin
+                * otherwise we would not have a valid pfn_to_page()
+                * translation.
+                */
+               inode = mapping->host;
+               if (S_ISCHR(inode->i_mode)) {
+                       did_lock = true;
+                       break;
+               }
+
+               xa_lock_irq(&mapping->i_pages);
+               if (mapping != page->mapping) {
+                       xa_unlock_irq(&mapping->i_pages);
+                       continue;
+               }
+               index = page->index;
+
+               entry = __get_unlocked_mapping_entry(mapping, index, &slot,
+                               entry_wait_revalidate);
+               if (!entry) {
+                       xa_unlock_irq(&mapping->i_pages);
+                       break;
+               } else if (IS_ERR(entry)) {
+                       WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
+                       continue;
+               }
+               lock_slot(mapping, slot);
+               did_lock = true;
+               xa_unlock_irq(&mapping->i_pages);
+               break;
+       }
+       rcu_read_unlock();
+
+       return did_lock;
+}
+
+void dax_unlock_mapping_entry(struct page *page)
+{
+       struct address_space *mapping = page->mapping;
+       struct inode *inode = mapping->host;
+
+       if (S_ISCHR(inode->i_mode))
+               return;
+
+       unlock_mapping_entry(mapping, page->index);
+}
+
 /*
  * Find radix tree entry at given index. If it points to an exceptional entry,
  * return it with the radix tree entry locked. If the radix tree doesn't
@@ -655,7 +762,6 @@ static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
 {
        void *vto, *kaddr;
        pgoff_t pgoff;
-       pfn_t pfn;
        long rc;
        int id;
 
@@ -664,7 +770,7 @@ static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
                return rc;
 
        id = dax_read_lock();
-       rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
+       rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL);
        if (rc < 0) {
                dax_read_unlock(id);
                return rc;
@@ -709,7 +815,7 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
        new_entry = dax_radix_locked_entry(pfn, flags);
        if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
                dax_disassociate_entry(entry, mapping, false);
-               dax_associate_entry(new_entry, mapping);
+               dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
        }
 
        if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
@@ -975,7 +1081,6 @@ static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
 {
        const sector_t sector = dax_iomap_sector(iomap, pos);
        pgoff_t pgoff;
-       void *kaddr;
        int id, rc;
        long length;
 
@@ -984,7 +1089,7 @@ static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
                return rc;
        id = dax_read_lock();
        length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
-                                  &kaddr, pfnp);
+                                  NULL, pfnp);
        if (length < 0) {
                rc = length;
                goto out;
@@ -1060,15 +1165,13 @@ int __dax_zero_page_range(struct block_device *bdev,
                pgoff_t pgoff;
                long rc, id;
                void *kaddr;
-               pfn_t pfn;
 
                rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
                if (rc)
                        return rc;
 
                id = dax_read_lock();
-               rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
-                               &pfn);
+               rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
                if (rc < 0) {
                        dax_read_unlock(id);
                        return rc;
@@ -1124,7 +1227,6 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                ssize_t map_len;
                pgoff_t pgoff;
                void *kaddr;
-               pfn_t pfn;
 
                if (fatal_signal_pending(current)) {
                        ret = -EINTR;
@@ -1136,7 +1238,7 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                        break;
 
                map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
-                               &kaddr, &pfn);
+                               &kaddr, NULL);
                if (map_len < 0) {
                        ret = map_len;
                        break;
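
The dax refactor above threads a wait callback through __get_unlocked_mapping_entry(): the normal path simply sleeps and keeps looping, while dax_lock_mapping_entry() supplies a callback that drops RCU, sleeps, and asks the caller to revalidate page->mapping. A minimal sketch of a wait loop parameterized by such a callback (the locking and entry lookup are simulated):

#include <stdbool.h>
#include <stdio.h>

static int tries;

/* Simulated lookup: "locked" for the first two polls, then available. */
static bool entry_is_locked(void) { return ++tries < 3; }

static bool plain_wait(void)      { return false; }  /* sleep and retry forever */
static bool revalidate_wait(void) { return true;  }  /* ask caller to re-check state */

/* Returns 0 once the entry is free, -1 if the caller must revalidate. */
static int get_unlocked_entry(bool (*wait_fn)(void))
{
        for (;;) {
                if (!entry_is_locked())
                        return 0;
                if (wait_fn())
                        return -1;     /* caller revalidates and calls again */
        }
}

int main(void)
{
        tries = 0;
        printf("plain wait      -> %d\n", get_unlocked_entry(plain_wait));      /* 0 */
        tries = 0;
        printf("revalidate wait -> %d\n", get_unlocked_entry(revalidate_wait)); /* -1 */
        return 0;
}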
index e072e955ce3334e44bdc529c2650518a6ca74a6c..c53814539070d5ac5d2196e2f4b7383055e11dd1 100644 (file)
@@ -46,7 +46,7 @@ static int pty_limit = NR_UNIX98_PTY_DEFAULT;
 static int pty_reserve = NR_UNIX98_PTY_RESERVE;
 static int pty_limit_min;
 static int pty_limit_max = INT_MAX;
-static int pty_count;
+static atomic_t pty_count = ATOMIC_INIT(0);
 
 static struct ctl_table pty_table[] = {
        {
@@ -93,8 +93,6 @@ static struct ctl_table pty_root_table[] = {
        {}
 };
 
-static DEFINE_MUTEX(allocated_ptys_lock);
-
 struct pts_mount_opts {
        int setuid;
        int setgid;
@@ -533,44 +531,25 @@ static struct file_system_type devpts_fs_type = {
 
 int devpts_new_index(struct pts_fs_info *fsi)
 {
-       int index;
-       int ida_ret;
-
-retry:
-       if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL))
-               return -ENOMEM;
-
-       mutex_lock(&allocated_ptys_lock);
-       if (pty_count >= (pty_limit -
-                         (fsi->mount_opts.reserve ? 0 : pty_reserve))) {
-               mutex_unlock(&allocated_ptys_lock);
-               return -ENOSPC;
-       }
+       int index = -ENOSPC;
 
-       ida_ret = ida_get_new(&fsi->allocated_ptys, &index);
-       if (ida_ret < 0) {
-               mutex_unlock(&allocated_ptys_lock);
-               if (ida_ret == -EAGAIN)
-                       goto retry;
-               return -EIO;
-       }
+       if (atomic_inc_return(&pty_count) >= (pty_limit -
+                         (fsi->mount_opts.reserve ? 0 : pty_reserve)))
+               goto out;
 
-       if (index >= fsi->mount_opts.max) {
-               ida_remove(&fsi->allocated_ptys, index);
-               mutex_unlock(&allocated_ptys_lock);
-               return -ENOSPC;
-       }
-       pty_count++;
-       mutex_unlock(&allocated_ptys_lock);
+       index = ida_alloc_max(&fsi->allocated_ptys, fsi->mount_opts.max - 1,
+                       GFP_KERNEL);
+
+out:
+       if (index < 0)
+               atomic_dec(&pty_count);
        return index;
 }
 
 void devpts_kill_index(struct pts_fs_info *fsi, int idx)
 {
-       mutex_lock(&allocated_ptys_lock);
-       ida_remove(&fsi->allocated_ptys, idx);
-       pty_count--;
-       mutex_unlock(&allocated_ptys_lock);
+       ida_free(&fsi->allocated_ptys, idx);
+       atomic_dec(&pty_count);
 }
 
 /**
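
devpts_new_index() now enforces the global pty limit with a plain atomic counter: increment first, and decrement again if either the limit check or the ID allocation fails, so no mutex is needed around the count. A userspace sketch of that increment-then-back-out pattern using C11 atomics (the ID allocator is faked and indices are never returned, unlike devpts_kill_index()):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int pty_count;
static int pty_limit = 2;

static int fake_ida_alloc(void) { static int next; return next++; }  /* stand-in allocator */

static int new_index(void)
{
        int index = -1;

        if (atomic_fetch_add(&pty_count, 1) + 1 >= pty_limit)
                goto out;                       /* over the limit, undo the count */
        index = fake_ida_alloc();
out:
        if (index < 0)
                atomic_fetch_sub(&pty_count, 1);
        return index;
}

int main(void)
{
        for (int i = 0; i < 4; i++)
                printf("new_index() = %d\n", new_index());
        printf("count = %d\n", atomic_load(&pty_count));
        return 0;
}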
index e2902d394f1badb78bd4c96cbb45d3814fb74265..f93f9881ec184c351e69af715a849d1a01562b95 100644 (file)
@@ -76,7 +76,7 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
        else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
                error_msg = "rec_len is too small for name_len";
        else if (unlikely(((char *) de - buf) + rlen > size))
-               error_msg = "directory entry across range";
+               error_msg = "directory entry overrun";
        else if (unlikely(le32_to_cpu(de->inode) >
                        le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
                error_msg = "inode out of bounds";
@@ -85,18 +85,16 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
 
        if (filp)
                ext4_error_file(filp, function, line, bh->b_blocknr,
-                               "bad entry in directory: %s - offset=%u(%u), "
-                               "inode=%u, rec_len=%d, name_len=%d",
-                               error_msg, (unsigned) (offset % size),
-                               offset, le32_to_cpu(de->inode),
-                               rlen, de->name_len);
+                               "bad entry in directory: %s - offset=%u, "
+                               "inode=%u, rec_len=%d, name_len=%d, size=%d",
+                               error_msg, offset, le32_to_cpu(de->inode),
+                               rlen, de->name_len, size);
        else
                ext4_error_inode(dir, function, line, bh->b_blocknr,
-                               "bad entry in directory: %s - offset=%u(%u), "
-                               "inode=%u, rec_len=%d, name_len=%d",
-                               error_msg, (unsigned) (offset % size),
-                               offset, le32_to_cpu(de->inode),
-                               rlen, de->name_len);
+                               "bad entry in directory: %s - offset=%u, "
+                               "inode=%u, rec_len=%d, name_len=%d, size=%d",
+                                error_msg, offset, le32_to_cpu(de->inode),
+                                rlen, de->name_len, size);
 
        return 1;
 }
index 0f0edd1cd0cd259f81eb0349e25ae9efb34641cc..caff935fbeb8f100049bf0aab09f7e14c53fa40e 100644 (file)
 #define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_EXT4_FS_ENCRYPTION)
 #include <linux/fscrypt.h>
 
+#include <linux/compiler.h>
+
+/* Until this gets included into linux/compiler-gcc.h */
+#ifndef __nonstring
+#if defined(GCC_VERSION) && (GCC_VERSION >= 80000)
+#define __nonstring __attribute__((nonstring))
+#else
+#define __nonstring
+#endif
+#endif
+
 /*
  * The fourth extended filesystem constants/structures
  */
@@ -675,6 +686,9 @@ enum {
 /* Max physical block we can address w/o extents */
 #define EXT4_MAX_BLOCK_FILE_PHYS       0xFFFFFFFF
 
+/* Max logical block we can support */
+#define EXT4_MAX_LOGICAL_BLOCK         0xFFFFFFFF
+
 /*
  * Structure of an inode on the disk
  */
@@ -1226,7 +1240,7 @@ struct ext4_super_block {
        __le32  s_feature_ro_compat;    /* readonly-compatible feature set */
 /*68*/ __u8    s_uuid[16];             /* 128-bit uuid for volume */
 /*78*/ char    s_volume_name[16];      /* volume name */
-/*88*/ char    s_last_mounted[64];     /* directory where last mounted */
+/*88*/ char    s_last_mounted[64] __nonstring; /* directory where last mounted */
 /*C8*/ __le32  s_algorithm_usage_bitmap; /* For compression */
        /*
         * Performance hints.  Directory preallocation should only
@@ -1277,13 +1291,13 @@ struct ext4_super_block {
        __le32  s_first_error_time;     /* first time an error happened */
        __le32  s_first_error_ino;      /* inode involved in first error */
        __le64  s_first_error_block;    /* block involved of first error */
-       __u8    s_first_error_func[32]; /* function where the error happened */
+       __u8    s_first_error_func[32] __nonstring;     /* function where the error happened */
        __le32  s_first_error_line;     /* line number where error happened */
        __le32  s_last_error_time;      /* most recent time of an error */
        __le32  s_last_error_ino;       /* inode involved in last error */
        __le32  s_last_error_line;      /* line number where error happened */
        __le64  s_last_error_block;     /* block involved of last error */
-       __u8    s_last_error_func[32];  /* function where the error happened */
+       __u8    s_last_error_func[32] __nonstring;      /* function where the error happened */
 #define EXT4_S_ERR_END offsetof(struct ext4_super_block, s_mount_opts)
        __u8    s_mount_opts[64];
        __le32  s_usr_quota_inum;       /* inode for tracking user quota */
index 3543fe80a3c442364d752fcbb74a7edd4df97dc9..7b47360227612926f1da1f3f95f94d3e4f4f6ab0 100644 (file)
@@ -1753,6 +1753,7 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data)
 {
        int err, inline_size;
        struct ext4_iloc iloc;
+       size_t inline_len;
        void *inline_pos;
        unsigned int offset;
        struct ext4_dir_entry_2 *de;
@@ -1780,8 +1781,9 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data)
                goto out;
        }
 
+       inline_len = ext4_get_inline_size(dir);
        offset = EXT4_INLINE_DOTDOT_SIZE;
-       while (offset < dir->i_size) {
+       while (offset < inline_len) {
                de = ext4_get_inline_entry(dir, &iloc, offset,
                                           &inline_pos, &inline_size);
                if (ext4_check_dir_entry(dir, NULL, de,
index d0dd585add6a005684c569d6edaecc481f926f2d..d767e993591d93f335ae85b876a5c12ceae35b7a 100644 (file)
@@ -3413,12 +3413,16 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
 {
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        unsigned int blkbits = inode->i_blkbits;
-       unsigned long first_block = offset >> blkbits;
-       unsigned long last_block = (offset + length - 1) >> blkbits;
+       unsigned long first_block, last_block;
        struct ext4_map_blocks map;
        bool delalloc = false;
        int ret;
 
+       if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
+               return -EINVAL;
+       first_block = offset >> blkbits;
+       last_block = min_t(loff_t, (offset + length - 1) >> blkbits,
+                          EXT4_MAX_LOGICAL_BLOCK);
 
        if (flags & IOMAP_REPORT) {
                if (ext4_has_inline_data(inode)) {
@@ -3948,6 +3952,7 @@ static const struct address_space_operations ext4_dax_aops = {
        .writepages             = ext4_dax_writepages,
        .direct_IO              = noop_direct_IO,
        .set_page_dirty         = noop_set_page_dirty,
+       .bmap                   = ext4_bmap,
        .invalidatepage         = noop_invalidatepage,
 };
 
@@ -4192,9 +4197,8 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
        return 0;
 }
 
-static void ext4_wait_dax_page(struct ext4_inode_info *ei, bool *did_unlock)
+static void ext4_wait_dax_page(struct ext4_inode_info *ei)
 {
-       *did_unlock = true;
        up_write(&ei->i_mmap_sem);
        schedule();
        down_write(&ei->i_mmap_sem);
@@ -4204,14 +4208,12 @@ int ext4_break_layouts(struct inode *inode)
 {
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct page *page;
-       bool retry;
        int error;
 
        if (WARN_ON_ONCE(!rwsem_is_locked(&ei->i_mmap_sem)))
                return -EINVAL;
 
        do {
-               retry = false;
                page = dax_layout_busy_page(inode->i_mapping);
                if (!page)
                        return 0;
@@ -4219,8 +4221,8 @@ int ext4_break_layouts(struct inode *inode)
                error = ___wait_var_event(&page->_refcount,
                                atomic_read(&page->_refcount) == 1,
                                TASK_INTERRUPTIBLE, 0, 0,
-                               ext4_wait_dax_page(ei, &retry));
-       } while (error == 0 && retry);
+                               ext4_wait_dax_page(ei));
+       } while (error == 0);
 
        return error;
 }
@@ -4895,6 +4897,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                 * not initialized on a new filesystem. */
        }
        ei->i_flags = le32_to_cpu(raw_inode->i_flags);
+       ext4_set_inode_flags(inode);
        inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
        ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
        if (ext4_has_feature_64bit(sb))
@@ -5041,7 +5044,6 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                goto bad_inode;
        }
        brelse(iloc.bh);
-       ext4_set_inode_flags(inode);
 
        unlock_new_inode(inode);
        return inode;
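
The ext4_iomap_begin() hunk above bounds the request: an offset whose logical block would exceed EXT4_MAX_LOGICAL_BLOCK is rejected with -EINVAL, and the last block of an otherwise valid request is clamped to that limit with min_t(). A self-contained userspace sketch of that arithmetic; the helper name and types are illustrative, not ext4's:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define EXT4_MAX_LOGICAL_BLOCK 0xFFFFFFFFULL

/* Translate a byte range into an inclusive logical-block range. */
static int map_range(uint64_t offset, uint64_t length, unsigned int blkbits,
		     uint64_t *first_block, uint64_t *last_block)
{
	if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
		return -EINVAL;			/* starts past the last block */
	*first_block = offset >> blkbits;
	*last_block = (offset + length - 1) >> blkbits;
	if (*last_block > EXT4_MAX_LOGICAL_BLOCK)	/* min_t() in the patch */
		*last_block = EXT4_MAX_LOGICAL_BLOCK;
	return 0;
}

int main(void)
{
	uint64_t first, last;

	/* A 2 MiB request starting at the last valid 4 KiB block: it begins
	 * in range, so it is clamped rather than rejected. */
	if (!map_range(EXT4_MAX_LOGICAL_BLOCK << 12, 2 << 20, 12,
		       &first, &last))
		printf("first=%llu last=%llu\n",
		       (unsigned long long)first, (unsigned long long)last);
	return 0;
}
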
index 39b07c2d3384013298a7533a4353dce66e0ab842..2305b4374fd3e020eea80de3293040fd59d5de84 100644 (file)
@@ -49,7 +49,6 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
         */
        sb_start_write(sb);
        ext4_mmp_csum_set(sb, mmp);
-       mark_buffer_dirty(bh);
        lock_buffer(bh);
        bh->b_end_io = end_buffer_write_sync;
        get_bh(bh);
index 116ff68c5bd4337b7cb751330d091fcab24eecba..377d516c475f06400452f6e83642baa18661486a 100644 (file)
@@ -3478,6 +3478,12 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
        int credits;
        u8 old_file_type;
 
+       if (new.inode && new.inode->i_nlink == 0) {
+               EXT4_ERROR_INODE(new.inode,
+                                "target of rename is already freed");
+               return -EFSCORRUPTED;
+       }
+
        if ((ext4_test_inode_flag(new_dir, EXT4_INODE_PROJINHERIT)) &&
            (!projid_eq(EXT4_I(new_dir)->i_projid,
                        EXT4_I(old_dentry->d_inode)->i_projid)))
index e5fb38451a733cc947a5c9f77c478545b5699013..ebbc663d07985038ef17520fb41c5fae0e5d3637 100644 (file)
@@ -19,6 +19,7 @@
 
 int ext4_resize_begin(struct super_block *sb)
 {
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
        int ret = 0;
 
        if (!capable(CAP_SYS_RESOURCE))
@@ -29,7 +30,7 @@ int ext4_resize_begin(struct super_block *sb)
          * because the user tools have no way of handling this.  Probably a
          * bad time to do it anyways.
          */
-       if (EXT4_SB(sb)->s_sbh->b_blocknr !=
+       if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
            le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
                ext4_warning(sb, "won't resize using backup superblock at %llu",
                        (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
@@ -1986,6 +1987,26 @@ retry:
                }
        }
 
+       /*
+        * Make sure the last group has enough space so that it's
+        * guaranteed to have enough space for all metadata blocks
+        * that it might need to hold.  (We might not need to store
+        * the inode table blocks in the last block group, but there
+        * will be cases where this might be needed.)
+        */
+       if ((ext4_group_first_block_no(sb, n_group) +
+            ext4_group_overhead_blocks(sb, n_group) + 2 +
+            sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
+               n_blocks_count = ext4_group_first_block_no(sb, n_group);
+               n_group--;
+               n_blocks_count_retry = 0;
+               if (resize_inode) {
+                       iput(resize_inode);
+                       resize_inode = NULL;
+               }
+               goto retry;
+       }
+
        /* extend the last group */
        if (n_group == o_group)
                add = n_blocks_count - o_blocks_count;
index 5863fd22e90bb20c102274f3e320637858bdef87..1145109968efd356a812744537d94d929b62f7ce 100644 (file)
@@ -2145,6 +2145,8 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
                SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
        if (test_opt(sb, DATA_ERR_ABORT))
                SEQ_OPTS_PUTS("data_err=abort");
+       if (DUMMY_ENCRYPTION_ENABLED(sbi))
+               SEQ_OPTS_PUTS("test_dummy_encryption");
 
        ext4_show_quota_options(seq, sb);
        return 0;
@@ -4378,11 +4380,13 @@ no_journal:
        block = ext4_count_free_clusters(sb);
        ext4_free_blocks_count_set(sbi->s_es, 
                                   EXT4_C2B(sbi, block));
+       ext4_superblock_csum_set(sb);
        err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
                                  GFP_KERNEL);
        if (!err) {
                unsigned long freei = ext4_count_free_inodes(sb);
                sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
+               ext4_superblock_csum_set(sb);
                err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
                                          GFP_KERNEL);
        }
index ec3fba7d492f483e14accbfd98d5b63550f2591c..488a9e7f8f66020f4424b322db62eacdcbbdd057 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/mpage.h>
 #include <linux/user_namespace.h>
 #include <linux/seq_file.h>
+#include <linux/blkdev.h>
 
 #include "isofs.h"
 #include "zisofs.h"
@@ -653,6 +654,12 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
        /*
         * What if bugger tells us to go beyond page size?
         */
+       if (bdev_logical_block_size(s->s_bdev) > 2048) {
+               printk(KERN_WARNING
+                      "ISOFS: unsupported/invalid hardware sector size %d\n",
+                       bdev_logical_block_size(s->s_bdev));
+               goto out_freesbi;
+       }
        opt.blocksize = sb_min_blocksize(s, opt.blocksize);
 
        sbi->s_high_sierra = 0; /* default is iso9660 */
index 725d6935fab9f8fb7d80d225b9af52331c702b34..99186556f8d34ecfc89f27fb329b35dc2e42d114 100644 (file)
@@ -61,9 +61,6 @@ __setup("mphash_entries=", set_mphash_entries);
 static u64 event;
 static DEFINE_IDA(mnt_id_ida);
 static DEFINE_IDA(mnt_group_ida);
-static DEFINE_SPINLOCK(mnt_id_lock);
-static int mnt_id_start = 0;
-static int mnt_group_start = 1;
 
 static struct hlist_head *mount_hashtable __read_mostly;
 static struct hlist_head *mountpoint_hashtable __read_mostly;
@@ -101,50 +98,30 @@ static inline struct hlist_head *mp_hash(struct dentry *dentry)
 
 static int mnt_alloc_id(struct mount *mnt)
 {
-       int res;
+       int res = ida_alloc(&mnt_id_ida, GFP_KERNEL);
 
-retry:
-       ida_pre_get(&mnt_id_ida, GFP_KERNEL);
-       spin_lock(&mnt_id_lock);
-       res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
-       if (!res)
-               mnt_id_start = mnt->mnt_id + 1;
-       spin_unlock(&mnt_id_lock);
-       if (res == -EAGAIN)
-               goto retry;
-
-       return res;
+       if (res < 0)
+               return res;
+       mnt->mnt_id = res;
+       return 0;
 }
 
 static void mnt_free_id(struct mount *mnt)
 {
-       int id = mnt->mnt_id;
-       spin_lock(&mnt_id_lock);
-       ida_remove(&mnt_id_ida, id);
-       if (mnt_id_start > id)
-               mnt_id_start = id;
-       spin_unlock(&mnt_id_lock);
+       ida_free(&mnt_id_ida, mnt->mnt_id);
 }
 
 /*
  * Allocate a new peer group ID
- *
- * mnt_group_ida is protected by namespace_sem
  */
 static int mnt_alloc_group_id(struct mount *mnt)
 {
-       int res;
+       int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL);
 
-       if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
-               return -ENOMEM;
-
-       res = ida_get_new_above(&mnt_group_ida,
-                               mnt_group_start,
-                               &mnt->mnt_group_id);
-       if (!res)
-               mnt_group_start = mnt->mnt_group_id + 1;
-
-       return res;
+       if (res < 0)
+               return res;
+       mnt->mnt_group_id = res;
+       return 0;
 }
 
 /*
@@ -152,10 +129,7 @@ static int mnt_alloc_group_id(struct mount *mnt)
  */
 void mnt_release_group_id(struct mount *mnt)
 {
-       int id = mnt->mnt_group_id;
-       ida_remove(&mnt_group_ida, id);
-       if (mnt_group_start > id)
-               mnt_group_start = id;
+       ida_free(&mnt_group_ida, mnt->mnt_group_id);
        mnt->mnt_group_id = 0;
 }
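
The fs/namespace.c conversion above drops the ida_pre_get()/ida_get_new_above() retry loops, the mnt_id_lock spinlock, and the *_start allocation hints in favour of the newer ida_alloc()/ida_alloc_min()/ida_free() API, which does its own locking and memory allocation. A kernel-context sketch (not standalone userspace code) of the resulting pattern; the IDA, struct, and function names are illustrative:

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDA(example_ida);

struct example_obj {
	int id;
};

static int example_alloc_id(struct example_obj *obj)
{
	/* Returns the lowest free ID >= 0, or a negative errno. */
	int id = ida_alloc(&example_ida, GFP_KERNEL);

	if (id < 0)
		return id;
	obj->id = id;
	return 0;
}

static void example_free_id(struct example_obj *obj)
{
	ida_free(&example_ida, obj->id);
}
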
 
index 34830f6457ea252a2b56b9ef379b6ca3551f4f46..8220a168282e054164cb5b3bb8534f76be89faa7 100644 (file)
@@ -1637,6 +1637,14 @@ static void nfs_state_set_delegation(struct nfs4_state *state,
        write_sequnlock(&state->seqlock);
 }
 
+static void nfs_state_clear_delegation(struct nfs4_state *state)
+{
+       write_seqlock(&state->seqlock);
+       nfs4_stateid_copy(&state->stateid, &state->open_stateid);
+       clear_bit(NFS_DELEGATED_STATE, &state->flags);
+       write_sequnlock(&state->seqlock);
+}
+
 static int update_open_stateid(struct nfs4_state *state,
                const nfs4_stateid *open_stateid,
                const nfs4_stateid *delegation,
@@ -2145,10 +2153,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
        if (IS_ERR(opendata))
                return PTR_ERR(opendata);
        nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
-       write_seqlock(&state->seqlock);
-       nfs4_stateid_copy(&state->stateid, &state->open_stateid);
-       write_sequnlock(&state->seqlock);
-       clear_bit(NFS_DELEGATED_STATE, &state->flags);
+       nfs_state_clear_delegation(state);
        switch (type & (FMODE_READ|FMODE_WRITE)) {
        case FMODE_READ|FMODE_WRITE:
        case FMODE_WRITE:
@@ -2601,10 +2606,7 @@ static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state,
                const nfs4_stateid *stateid)
 {
        nfs_remove_bad_delegation(state->inode, stateid);
-       write_seqlock(&state->seqlock);
-       nfs4_stateid_copy(&state->stateid, &state->open_stateid);
-       write_sequnlock(&state->seqlock);
-       clear_bit(NFS_DELEGATED_STATE, &state->flags);
+       nfs_state_clear_delegation(state);
 }
 
 static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
@@ -2672,15 +2674,20 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state)
        delegation = rcu_dereference(NFS_I(state->inode)->delegation);
        if (delegation == NULL) {
                rcu_read_unlock();
+               nfs_state_clear_delegation(state);
                return;
        }
 
        nfs4_stateid_copy(&stateid, &delegation->stateid);
-       if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) ||
-               !test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
-                       &delegation->flags)) {
+       if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
+               rcu_read_unlock();
+               nfs_state_clear_delegation(state);
+               return;
+       }
+
+       if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
+                               &delegation->flags)) {
                rcu_read_unlock();
-               nfs_finish_clear_delegation_stateid(state, &stateid);
                return;
        }
 
index 3df0eb52da1c97d3802c34702068a36f31f02d69..40a08cd483f051072508b53eee362167ce26904f 100644 (file)
@@ -1390,6 +1390,8 @@ int nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_
 
        if (!nfs4_state_mark_reclaim_nograce(clp, state))
                return -EBADF;
+       nfs_inode_find_delegation_state_and_recover(state->inode,
+                       &state->stateid);
        dprintk("%s: scheduling stateid recovery for server %s\n", __func__,
                        clp->cl_hostname);
        nfs4_schedule_state_manager(clp);
index a275fba93170c2ba190987e8b546f194868e4394..b1483b303e0bf35a268bccad1823fd6b89015e97 100644 (file)
@@ -1137,7 +1137,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
                TP_fast_assign(
                        __entry->error = error;
                        __entry->fhandle = nfs_fhandle_hash(fhandle);
-                       if (inode != NULL) {
+                       if (!IS_ERR_OR_NULL(inode)) {
                                __entry->fileid = NFS_FILEID(inode);
                                __entry->dev = inode->i_sb->s_dev;
                        } else {
@@ -1194,7 +1194,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
                TP_fast_assign(
                        __entry->error = error;
                        __entry->fhandle = nfs_fhandle_hash(fhandle);
-                       if (inode != NULL) {
+                       if (!IS_ERR_OR_NULL(inode)) {
                                __entry->fileid = NFS_FILEID(inode);
                                __entry->dev = inode->i_sb->s_dev;
                        } else {
index e8f232de484f4d591666c6789b9932404e6fc5cb..7d9a51e6b847c65df159d6632a98ac891370f80f 100644 (file)
@@ -1740,16 +1740,16 @@ static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
        return ret;
 }
 
-static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
+static int pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
 {
        /*
         * send layoutcommit as it can hold up layoutreturn due to lseg
         * reference
         */
        pnfs_layoutcommit_inode(lo->plh_inode, false);
-       return !wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
+       return wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
                                   nfs_wait_bit_killable,
-                                  TASK_UNINTERRUPTIBLE);
+                                  TASK_KILLABLE);
 }
 
 static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo)
@@ -1830,7 +1830,9 @@ pnfs_update_layout(struct inode *ino,
        }
 
 lookup_again:
-       nfs4_client_recover_expired_lease(clp);
+       lseg = ERR_PTR(nfs4_client_recover_expired_lease(clp));
+       if (IS_ERR(lseg))
+               goto out;
        first = false;
        spin_lock(&ino->i_lock);
        lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
@@ -1863,9 +1865,9 @@ lookup_again:
        if (list_empty(&lo->plh_segs) &&
            atomic_read(&lo->plh_outstanding) != 0) {
                spin_unlock(&ino->i_lock);
-               if (wait_var_event_killable(&lo->plh_outstanding,
-                                       atomic_read(&lo->plh_outstanding) == 0
-                                       || !list_empty(&lo->plh_segs)))
+               lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding,
+                                       !atomic_read(&lo->plh_outstanding)));
+               if (IS_ERR(lseg) || !list_empty(&lo->plh_segs))
                        goto out_put_layout_hdr;
                pnfs_put_layout_hdr(lo);
                goto lookup_again;
@@ -1898,8 +1900,11 @@ lookup_again:
                if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
                                     &lo->plh_flags)) {
                        spin_unlock(&ino->i_lock);
-                       wait_on_bit(&lo->plh_flags, NFS_LAYOUT_FIRST_LAYOUTGET,
-                                   TASK_UNINTERRUPTIBLE);
+                       lseg = ERR_PTR(wait_on_bit(&lo->plh_flags,
+                                               NFS_LAYOUT_FIRST_LAYOUTGET,
+                                               TASK_KILLABLE));
+                       if (IS_ERR(lseg))
+                               goto out_put_layout_hdr;
                        pnfs_put_layout_hdr(lo);
                        dprintk("%s retrying\n", __func__);
                        goto lookup_again;
@@ -1925,7 +1930,8 @@ lookup_again:
        if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
                spin_unlock(&ino->i_lock);
                dprintk("%s wait for layoutreturn\n", __func__);
-               if (pnfs_prepare_to_retry_layoutget(lo)) {
+               lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo));
+               if (!IS_ERR(lseg)) {
                        if (first)
                                pnfs_clear_first_layoutget(lo);
                        pnfs_put_layout_hdr(lo);
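
The pnfs_update_layout() changes above make the waits killable and thread any failure back through the lseg pointer using the ERR_PTR()/IS_ERR() idiom, which encodes a small negative errno in the pointer value itself. A self-contained userspace sketch of that pattern, mirroring the shape of the kernel's err.h helpers; the wait_step()/get_segment() names are made up for illustration:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* Errors live in the top 4095 values of the address space. */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for a killable wait that may be interrupted by a signal. */
static int wait_step(int interrupted)
{
	return interrupted ? -EINTR : 0;
}

static void *get_segment(int interrupted)
{
	void *seg = ERR_PTR(wait_step(interrupted));

	if (IS_ERR(seg))
		return seg;		/* propagate the errno as a pointer */
	return "a real segment";	/* stand-in for the allocated object */
}

int main(void)
{
	void *seg = get_segment(1);

	if (IS_ERR(seg))
		printf("wait failed: %ld\n", PTR_ERR(seg));
	return 0;
}
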
index 03b8ba933eb2a3dcc781eaf9e57594da5a40f48b..235b959fc2b3a706866fd9e96447764641d94536 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * alloc.c - NILFS dat/inode allocator
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Originally written by Koji Sato.
  * Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji.
  */
index 05149e606a78a8dfd8f4a701863eccddb0e4979e..0303c3968cee06d37c25fa18c9a958aa69dea27a 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * alloc.h - persistent object (dat entry/disk inode) allocator/deallocator
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Originally written by Koji Sato.
  * Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji.
  */
index 01fb1831ca250b43172c7b046a2447b590feda0e..fb5a9a8a13cf7d734c0c13f38df097786c5ba5a1 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * bmap.c - NILFS block mapping.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index 2b6ffbe5997a2f8bc77e80621999d4f1d651e02f..2c63858e81c9ce089820e9803a7a98f6121c524b 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * bmap.h - NILFS block mapping.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index dec98cab729dd90fdb491e5ac770c8bf99867aec..ebb24a314f43129fce712b87ee5d44e9e38e2979 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * btnode.c - NILFS B-tree node cache
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Originally written by Seiji Kihara.
  * Fully revised by Ryusuke Konishi for stabilization and simplification.
  *
index 4e8aaa1aeb65db70bc1f7fcc9faae7a81f96d09c..0f88dbc9bcb3ef4536ff13a28321d432070a8413 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * btnode.h - NILFS B-tree node cache
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Seiji Kihara.
  * Revised by Ryusuke Konishi.
  */
index 16a7a67a11c9e2098dde76c6eecee10fb1838b47..23e043eca237bd2e760882f5783b28a51558ae64 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * btree.c - NILFS B-tree.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index 2184e47fa4bf6ff297feea6caa7fce3fc761d31c..d1421b646ce46bbc4afd851e7b01175f5b8d53be 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * btree.h - NILFS B-tree.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index a15a1601e931dab6139a9f8dcee0428f06e1f485..8d41311b5db4b45b5a1536a10c90015e9e60e01e 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * cpfile.c - NILFS checkpoint file.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index 6eca972f9673cc676e9469b848567727f1fb61b2..6336222df24a8fee683afca437db55a9c29b3e4c 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * cpfile.h - NILFS checkpoint file.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index dffedb2f88179a321b13ea3901c9fe7efd03ea22..6f4066636be9a3eba91d78852246edabdffcff0d 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * dat.c - NILFS disk address translation.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index 57dc6cf466d02d3d082fb85125b37338ebfad94c..b17ee34580ae69dc4898785552cca5c83fe56535 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * dat.h - NILFS disk address translation.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index 582831ab3eb95dee645907bf4be2402f62087748..81394e22d0a09a1ee88ac45850e5d63e98301001 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * dir.c - NILFS directory entry operations
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Modified for NILFS by Amagai Yoshiji.
  */
 /*
index 96e3ed0d9652b67de00717f0e9a9042b3106620d..533e24ea3a88d208e8e39bf1a14a53852accb907 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * direct.c - NILFS direct block pointer.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index cfe85e848bba1e9e07aea1ea93dad3b3d520b342..ec9a23c77994e76954ef0921c7607c550dc91215 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * direct.h - NILFS direct block pointer.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index 7da0fac71dc26a73d33877d9498c8007b17d11e4..64bc81363c6cc0437dd4c757615af395c85b0475 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * file.c - NILFS regular file handling primitives including fsync().
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Amagai Yoshiji and Ryusuke Konishi.
  */
 
index 853a831dcde0890481d45fdfadf89ff8a35b7671..aa3c328ee189c4409c1c4fe497677b2278c12553 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * gcinode.c - dummy inodes to buffer blocks for garbage collection
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Seiji Kihara, Amagai Yoshiji, and Ryusuke Konishi.
  * Revised by Ryusuke Konishi.
  *
index b8fa45c20c63fdbded97c3474ac68438529cfca6..4140d232cadc04dbf65aa6a76b4660f9d30c3537 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * ifile.c - NILFS inode file
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Amagai Yoshiji.
  * Revised by Ryusuke Konishi.
  *
index 188b94fe0ec5fe3f313160b0c8568e269b582253..a1e1e5711a054fd2fc9472234c34dbd88163adb0 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * ifile.h - NILFS inode file
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Amagai Yoshiji.
  * Revised by Ryusuke Konishi.
  *
index 6a612d832e7de41f1273805fd8db2ad0f0433221..671085512e0fde9e8be3274629db21e8c0561b96 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * inode.c - NILFS inode operations.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  *
  */
index 1d2c3d7711feb94dea6ce3a0a1540a41a1bef1d2..9b96d79eea6c81247380142fb9db0ef1ad0c51cb 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * ioctl.c - NILFS ioctl operations.
  *
  * Copyright (C) 2007, 2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index c6bc1033e7d2ccf3795bde38b18a5787e2644d1d..700870a92bc4a1a2499e2ffef381613f8bad6e1a 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * mdt.c - meta data file for NILFS
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  */
 
index 3f67f3932097b408fd6d7161a90e62170992a71a..e77aea4bb921c37c212558b11137022e796a5bae 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * mdt.h - NILFS meta data file prototype and definitions
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  */
 
index dd52d3f82e8d673bd43dd99dfdfff6658b959357..9fe6d4ab74f01ef3d7055d4bb1ace9a73e441ad7 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * namei.c - NILFS pathname lookup operations.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Modified for NILFS by Amagai Yoshiji and Ryusuke Konishi.
  */
 /*
index 33f8c8fc96e8e772bbe163da521e7ca2e7fa65a0..a2f247b6a209ec250cb7da2c1cbede10c14cf03f 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * nilfs.h - NILFS local header file.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato and Ryusuke Konishi.
  */
 
index 4cb850a6f1c2c64794449e922c6e40f69aa585b6..329a056b73b178958e71308c35b945a0c39de834 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * page.c - buffer/page management specific to NILFS
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi and Seiji Kihara.
  */
 
index f3687c958fa848c20a3d17cab71b5a0da77de5d1..62b9bb469e92f3e58bd2378a8da979acb8364466 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * page.h - buffer/page management specific to NILFS
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi and Seiji Kihara.
  */
 
index 5139efed1888294a425499ceba313f3747567b56..140b663e91c7fbf826702e9a27fce936ac80333f 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * recovery.c - NILFS recovery logic
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  */
 
index 68cb9e4740b4e818836a56dcdc24a05d6e3e5f51..20c479b5e41b8c7a9bbd09d309cf33aa51e3a90d 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * segbuf.c - NILFS segment buffer
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  *
  */
index 10e16935fff655c914c34055d526eccc172b409b..9bea1bd59041e0e0b69a379398f6877face96b3b 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * segbuf.h - NILFS Segment buffer prototypes and definitions
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  *
  */
index 0953635e7d48e7915b35cd63c44ff1b6bfcc8e68..445eef41bfaf00a948b5c29f3041703bba369dfa 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * segment.c - NILFS segment constructor.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  *
  */
index 04634e3e3d583af75d1dd77329582c9f8bd309fc..f5cf5308f3fcad1be6f0863c560ad30e04059c59 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * segment.h - NILFS Segment constructor prototypes and definitions
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  *
  */
index c7fa139d50e8287e481586fe177df2c03ef9e6de..bf3f8f05c89b3dbb10849111e7a0815b2a3786e5 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * sufile.c - NILFS segment usage file.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  * Revised by Ryusuke Konishi.
  */
index 673a891350f49599e30a5666afd85973a5419522..c4e2c7a7add1d5d1d5f89948507810c468c0b403 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * sufile.h - NILFS segment usage file.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index 1b9067cf451125941c2140c94aba622b875c0dde..26290aa1023f31950017cdba5c31fb589934e701 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * super.c - NILFS module and super block management.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  */
 /*
index 4b25837e77245902dee4a3674206adf1262c83b9..e60be7bb55b0b870e99d7844f6e3495d7d17e365 100644 (file)
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * sysfs.c - sysfs support implementation.
  *
  * Copyright (C) 2005-2014 Nippon Telegraph and Telephone Corporation.
  * Copyright (C) 2014 HGST, Inc., a Western Digital Company.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Vyacheslav Dubeyko <Vyacheslav.Dubeyko@hgst.com>
  */
 
index 648cedf9c06ec75c3e783d0d5e6f5b91ae1891d9..d001eb862daece4420aacc6fa450a8a35d64d7f9 100644 (file)
@@ -1,19 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * sysfs.h - sysfs support declarations.
  *
  * Copyright (C) 2005-2014 Nippon Telegraph and Telephone Corporation.
  * Copyright (C) 2014 HGST, Inc., a Western Digital Company.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Vyacheslav Dubeyko <Vyacheslav.Dubeyko@hgst.com>
  */
 
index 1a85317e83f0f751332118daf559fc9ba79fb560..484785cdf96e220525a57222190ac3fe0bca7f71 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * the_nilfs.c - the_nilfs shared structure.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  *
  */
index 36da1779f9766f1e32bc8f8c2de6a7d93413403f..380a543c5b19bd424782e8393ecc285af648396e 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * the_nilfs.h - the_nilfs shared structure.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  *
  */
index f174397b63a046f32ca24a7be30080c4ff5fe009..ababdbfab537ef259b20079cd74d7f036e293fc7 100644 (file)
@@ -351,16 +351,9 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
 
        iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu);
 
-       if ((mask & FS_MODIFY) ||
-           (test_mask & to_tell->i_fsnotify_mask)) {
-               iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] =
-                       fsnotify_first_mark(&to_tell->i_fsnotify_marks);
-       }
-
-       if (mnt && ((mask & FS_MODIFY) ||
-                   (test_mask & mnt->mnt_fsnotify_mask))) {
-               iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] =
-                       fsnotify_first_mark(&to_tell->i_fsnotify_marks);
+       iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] =
+               fsnotify_first_mark(&to_tell->i_fsnotify_marks);
+       if (mnt) {
                iter_info.marks[FSNOTIFY_OBJ_TYPE_VFSMOUNT] =
                        fsnotify_first_mark(&mnt->mnt_fsnotify_marks);
        }
index 05506d60131c69d546f574e9633f7d01039c70c6..59cdb27826defe2ddac7023438132c1b80a49c29 100644 (file)
@@ -132,13 +132,13 @@ static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
        struct fsnotify_mark *mark;
 
        assert_spin_locked(&conn->lock);
+       /* We can get detached connector here when inode is getting unlinked. */
+       if (!fsnotify_valid_obj_type(conn->type))
+               return;
        hlist_for_each_entry(mark, &conn->list, obj_list) {
                if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)
                        new_mask |= mark->mask;
        }
-       if (WARN_ON(!fsnotify_valid_obj_type(conn->type)))
-               return;
-
        *fsnotify_conn_mask_p(conn) = new_mask;
 }
 
index d9ebe11c89909b8587fa3d7faa491c75426bed87..1d098c3c00e023540d6f0665720390647945af58 100644 (file)
@@ -342,6 +342,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
                                 * for this bh as it's not marked locally
                                 * uptodate. */
                                status = -EIO;
+                               clear_buffer_needs_validate(bh);
                                put_bh(bh);
                                bhs[i] = NULL;
                                continue;
index 32e9282893c93467df0f967be3d9ee848b373244..aeaefd2a551b015d63b47cbe9ab25a204a9d3cb3 100644 (file)
@@ -131,9 +131,6 @@ static int ovl_open(struct inode *inode, struct file *file)
        if (IS_ERR(realfile))
                return PTR_ERR(realfile);
 
-       /* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */
-       file->f_mapping = realfile->f_mapping;
-
        file->private_data = realfile;
 
        return 0;
@@ -334,6 +331,25 @@ static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len
        return ret;
 }
 
+static int ovl_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
+{
+       struct fd real;
+       const struct cred *old_cred;
+       int ret;
+
+       ret = ovl_real_fdget(file, &real);
+       if (ret)
+               return ret;
+
+       old_cred = ovl_override_creds(file_inode(file)->i_sb);
+       ret = vfs_fadvise(real.file, offset, len, advice);
+       revert_creds(old_cred);
+
+       fdput(real);
+
+       return ret;
+}
+
 static long ovl_real_ioctl(struct file *file, unsigned int cmd,
                           unsigned long arg)
 {
@@ -502,6 +518,7 @@ const struct file_operations ovl_file_operations = {
        .fsync          = ovl_fsync,
        .mmap           = ovl_mmap,
        .fallocate      = ovl_fallocate,
+       .fadvise        = ovl_fadvise,
        .unlocked_ioctl = ovl_ioctl,
        .compat_ioctl   = ovl_compat_ioctl,
 
index e0bb217c01e2a6c8389bf9ec2dfb2f56f0fab7c8..b6ac545b5a32188297352dd0793565423fee2fcb 100644 (file)
@@ -467,6 +467,10 @@ static int ovl_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                return -EOPNOTSUPP;
 
        old_cred = ovl_override_creds(inode->i_sb);
+
+       if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC)
+               filemap_write_and_wait(realinode->i_mapping);
+
        err = realinode->i_op->fiemap(realinode, fieinfo, start, len);
        revert_creds(old_cred);
 
@@ -500,6 +504,11 @@ static const struct inode_operations ovl_special_inode_operations = {
        .update_time    = ovl_update_time,
 };
 
+const struct address_space_operations ovl_aops = {
+       /* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */
+       .direct_IO              = noop_direct_IO,
+};
+
 /*
  * It is possible to stack overlayfs instance on top of another
  * overlayfs instance as lower layer. We need to annotate the
@@ -571,6 +580,7 @@ static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev,
        case S_IFREG:
                inode->i_op = &ovl_file_inode_operations;
                inode->i_fop = &ovl_file_operations;
+               inode->i_mapping->a_ops = &ovl_aops;
                break;
 
        case S_IFDIR:
index 2e0fc93c2c06646ccf5c67c43836f9d4989da501..30adc9d408a0df84455b86811ee468faf439c0c1 100644 (file)
@@ -982,16 +982,6 @@ static int ovl_get_upper(struct ovl_fs *ofs, struct path *upperpath)
        if (err)
                goto out;
 
-       err = -EBUSY;
-       if (ovl_inuse_trylock(upperpath->dentry)) {
-               ofs->upperdir_locked = true;
-       } else if (ofs->config.index) {
-               pr_err("overlayfs: upperdir is in-use by another mount, mount with '-o index=off' to override exclusive upperdir protection.\n");
-               goto out;
-       } else {
-               pr_warn("overlayfs: upperdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
-       }
-
        upper_mnt = clone_private_mount(upperpath);
        err = PTR_ERR(upper_mnt);
        if (IS_ERR(upper_mnt)) {
@@ -1002,6 +992,17 @@ static int ovl_get_upper(struct ovl_fs *ofs, struct path *upperpath)
        /* Don't inherit atime flags */
        upper_mnt->mnt_flags &= ~(MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME);
        ofs->upper_mnt = upper_mnt;
+
+       err = -EBUSY;
+       if (ovl_inuse_trylock(ofs->upper_mnt->mnt_root)) {
+               ofs->upperdir_locked = true;
+       } else if (ofs->config.index) {
+               pr_err("overlayfs: upperdir is in-use by another mount, mount with '-o index=off' to override exclusive upperdir protection.\n");
+               goto out;
+       } else {
+               pr_warn("overlayfs: upperdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
+       }
+
        err = 0;
 out:
        return err;
@@ -1101,8 +1102,10 @@ static int ovl_get_workdir(struct ovl_fs *ofs, struct path *upperpath)
                goto out;
        }
 
+       ofs->workbasedir = dget(workpath.dentry);
+
        err = -EBUSY;
-       if (ovl_inuse_trylock(workpath.dentry)) {
+       if (ovl_inuse_trylock(ofs->workbasedir)) {
                ofs->workdir_locked = true;
        } else if (ofs->config.index) {
                pr_err("overlayfs: workdir is in-use by another mount, mount with '-o index=off' to override exclusive workdir protection.\n");
@@ -1111,7 +1114,6 @@ static int ovl_get_workdir(struct ovl_fs *ofs, struct path *upperpath)
                pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
        }
 
-       ofs->workbasedir = dget(workpath.dentry);
        err = ovl_make_workdir(ofs, &workpath);
        if (err)
                goto out;
index 80464432dfe6459eeec7d41498a65abb4f9d0dbe..d297fe4472a960b29fd62018535400c5b96a975f 100644 (file)
@@ -359,8 +359,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
                        phdr->p_type = PT_LOAD;
                        phdr->p_flags = PF_R | PF_W | PF_X;
                        phdr->p_offset = kc_vaddr_to_offset(m->addr) + data_offset;
-                       phdr->p_vaddr = (size_t)m->addr;
-                       if (m->type == KCORE_RAM)
+                       if (m->type == KCORE_REMAP)
+                               phdr->p_vaddr = (size_t)m->vaddr;
+                       else
+                               phdr->p_vaddr = (size_t)m->addr;
+                       if (m->type == KCORE_RAM || m->type == KCORE_REMAP)
                                phdr->p_paddr = __pa(m->addr);
                        else if (m->type == KCORE_TEXT)
                                phdr->p_paddr = __pa_symbol(m->addr);
@@ -461,6 +464,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
                                ret = -EFAULT;
                                goto out;
                        }
+                       m = NULL;       /* skip the list anchor */
                } else if (m->type == KCORE_VMALLOC) {
                        vread(buf, (char *)start, tsz);
                        /* we have to zero-fill user buffer even if no read */
index 951a14edcf518bb95a5ec01c9e98ae678cb1e67f..0792595ebcfb65869b5e82c5eec87e2bc89c1efc 100644 (file)
@@ -429,7 +429,12 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size,
        vaddr = vmap(pages, page_count, VM_MAP, prot);
        kfree(pages);
 
-       return vaddr;
+       /*
+        * Since vmap() uses page granularity, we must add the offset
+        * into the page here, to get the byte granularity address
+        * into the mapping to represent the actual "start" location.
+        */
+       return vaddr + offset_in_page(start);
 }
 
 static void *persistent_ram_iomap(phys_addr_t start, size_t size,
@@ -448,6 +453,11 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size,
        else
                va = ioremap_wc(start, size);
 
+       /*
+        * Since request_mem_region() and ioremap() are byte-granularity,
+        * there is no need to handle anything special here, unlike the
+        * vmap() case in persistent_ram_vmap() above.
+        */
        return va;
 }
 
@@ -468,7 +478,7 @@ static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
                return -ENOMEM;
        }
 
-       prz->buffer = prz->vaddr + offset_in_page(start);
+       prz->buffer = prz->vaddr;
        prz->buffer_size = size - sizeof(struct persistent_ram_buffer);
 
        return 0;
@@ -515,7 +525,8 @@ void persistent_ram_free(struct persistent_ram_zone *prz)
 
        if (prz->vaddr) {
                if (pfn_valid(prz->paddr >> PAGE_SHIFT)) {
-                       vunmap(prz->vaddr);
+                       /* We must vunmap() at page-granularity. */
+                       vunmap(prz->vaddr - offset_in_page(prz->paddr));
                } else {
                        iounmap(prz->vaddr);
                        release_mem_region(prz->paddr, prz->size);
index 860bfbe7a07aa5b9a4491365b48655122fa5eb60..f0cbf58ad4dade2129c19d5dba42c8bb4eed276d 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/quotaops.h>
 #include <linux/types.h>
 #include <linux/writeback.h>
+#include <linux/nospec.h>
 
 static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
                                     qid_t id)
@@ -120,8 +121,6 @@ static int quota_getinfo(struct super_block *sb, int type, void __user *addr)
        struct if_dqinfo uinfo;
        int ret;
 
-       /* This checks whether qc_state has enough entries... */
-       BUILD_BUG_ON(MAXQUOTAS > XQM_MAXQUOTAS);
        if (!sb->s_qcop->get_state)
                return -ENOSYS;
        ret = sb->s_qcop->get_state(sb, &state);
@@ -354,10 +353,10 @@ static int quota_getstate(struct super_block *sb, struct fs_quota_stat *fqs)
         * GETXSTATE quotactl has space for just one set of time limits so
         * report them for the first enabled quota type
         */
-       for (type = 0; type < XQM_MAXQUOTAS; type++)
+       for (type = 0; type < MAXQUOTAS; type++)
                if (state.s_state[type].flags & QCI_ACCT_ENABLED)
                        break;
-       BUG_ON(type == XQM_MAXQUOTAS);
+       BUG_ON(type == MAXQUOTAS);
        fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
        fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
        fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
@@ -427,10 +426,10 @@ static int quota_getstatev(struct super_block *sb, struct fs_quota_statv *fqs)
         * GETXSTATV quotactl has space for just one set of time limits so
         * report them for the first enabled quota type
         */
-       for (type = 0; type < XQM_MAXQUOTAS; type++)
+       for (type = 0; type < MAXQUOTAS; type++)
                if (state.s_state[type].flags & QCI_ACCT_ENABLED)
                        break;
-       BUG_ON(type == XQM_MAXQUOTAS);
+       BUG_ON(type == MAXQUOTAS);
        fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
        fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
        fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
@@ -701,8 +700,9 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
 {
        int ret;
 
-       if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS))
+       if (type >= MAXQUOTAS)
                return -EINVAL;
+       type = array_index_nospec(type, MAXQUOTAS);
        /*
         * Quota not supported on this fs? Check this before s_quota_types
         * since they needn't be set if quota is not supported at all.
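The do_quotactl() hunk above pairs the bounds check with array_index_nospec() so a mispredicted branch cannot speculatively index past MAXQUOTAS. A minimal sketch of that pattern, with hypothetical names:

#include <linux/errno.h>
#include <linux/nospec.h>

static int example_lookup(unsigned int type, const int *table, unsigned int nr)
{
        if (type >= nr)
                return -EINVAL;
        /* Clamp the index under speculation after the architectural check. */
        type = array_index_nospec(type, nr);
        return table[type];
}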
index 7429588d6b4922574f05d43cbfbc6b5ad01ab648..f3a8c008e16430be1ba08d9f575c1df9faeb0f0a 100644 (file)
@@ -981,58 +981,42 @@ void emergency_thaw_all(void)
        }
 }
 
-/*
- * Unnamed block devices are dummy devices used by virtual
- * filesystems which don't use real block-devices.  -- jrs
- */
-
 static DEFINE_IDA(unnamed_dev_ida);
-static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
-/* Many userspace utilities consider an FSID of 0 invalid.
- * Always return at least 1 from get_anon_bdev.
- */
-static int unnamed_dev_start = 1;
 
+/**
+ * get_anon_bdev - Allocate a block device for filesystems which don't have one.
+ * @p: Pointer to a dev_t.
+ *
+ * Filesystems which don't use real block devices can call this function
+ * to allocate a virtual block device.
+ *
+ * Context: Any context.  Frequently called while holding sb_lock.
+ * Return: 0 on success, -EMFILE if there are no anonymous bdevs left
+ * or -ENOMEM if memory allocation failed.
+ */
 int get_anon_bdev(dev_t *p)
 {
        int dev;
-       int error;
 
- retry:
-       if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
-               return -ENOMEM;
-       spin_lock(&unnamed_dev_lock);
-       error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
-       if (!error)
-               unnamed_dev_start = dev + 1;
-       spin_unlock(&unnamed_dev_lock);
-       if (error == -EAGAIN)
-               /* We raced and lost with another CPU. */
-               goto retry;
-       else if (error)
-               return -EAGAIN;
-
-       if (dev >= (1 << MINORBITS)) {
-               spin_lock(&unnamed_dev_lock);
-               ida_remove(&unnamed_dev_ida, dev);
-               if (unnamed_dev_start > dev)
-                       unnamed_dev_start = dev;
-               spin_unlock(&unnamed_dev_lock);
-               return -EMFILE;
-       }
-       *p = MKDEV(0, dev & MINORMASK);
+       /*
+        * Many userspace utilities consider an FSID of 0 invalid.
+        * Always return at least 1 from get_anon_bdev.
+        */
+       dev = ida_alloc_range(&unnamed_dev_ida, 1, (1 << MINORBITS) - 1,
+                       GFP_ATOMIC);
+       if (dev == -ENOSPC)
+               dev = -EMFILE;
+       if (dev < 0)
+               return dev;
+
+       *p = MKDEV(0, dev);
        return 0;
 }
 EXPORT_SYMBOL(get_anon_bdev);
 
 void free_anon_bdev(dev_t dev)
 {
-       int slot = MINOR(dev);
-       spin_lock(&unnamed_dev_lock);
-       ida_remove(&unnamed_dev_ida, slot);
-       if (slot < unnamed_dev_start)
-               unnamed_dev_start = slot;
-       spin_unlock(&unnamed_dev_lock);
+       ida_free(&unnamed_dev_ida, MINOR(dev));
 }
 EXPORT_SYMBOL(free_anon_bdev);
 
@@ -1040,7 +1024,6 @@ int set_anon_super(struct super_block *s, void *data)
 {
        return get_anon_bdev(&s->s_dev);
 }
-
 EXPORT_SYMBOL(set_anon_super);
 
 void kill_anon_super(struct super_block *sb)
@@ -1049,7 +1032,6 @@ void kill_anon_super(struct super_block *sb)
        generic_shutdown_super(sb);
        free_anon_bdev(dev);
 }
-
 EXPORT_SYMBOL(kill_anon_super);
 
 void kill_litter_super(struct super_block *sb)
@@ -1058,7 +1040,6 @@ void kill_litter_super(struct super_block *sb)
                d_genocide(sb->s_root);
        kill_anon_super(sb);
 }
-
 EXPORT_SYMBOL(kill_litter_super);
 
 static int ns_test_super(struct super_block *sb, void *data)
index 3040dc2a32f6a17a4822343497577e871b03161c..6f515651a2c2fe3817c763e3d77d351509a862d9 100644 (file)
@@ -764,9 +764,7 @@ static int udf_find_fileset(struct super_block *sb,
                            struct kernel_lb_addr *root)
 {
        struct buffer_head *bh = NULL;
-       long lastblock;
        uint16_t ident;
-       struct udf_sb_info *sbi;
 
        if (fileset->logicalBlockNum != 0xFFFFFFFF ||
            fileset->partitionReferenceNum != 0xFFFF) {
@@ -779,69 +777,11 @@ static int udf_find_fileset(struct super_block *sb,
                        return 1;
                }
 
-       }
-
-       sbi = UDF_SB(sb);
-       if (!bh) {
-               /* Search backwards through the partitions */
-               struct kernel_lb_addr newfileset;
-
-/* --> cvg: FIXME - is it reasonable? */
-               return 1;
-
-               for (newfileset.partitionReferenceNum = sbi->s_partitions - 1;
-                    (newfileset.partitionReferenceNum != 0xFFFF &&
-                     fileset->logicalBlockNum == 0xFFFFFFFF &&
-                     fileset->partitionReferenceNum == 0xFFFF);
-                    newfileset.partitionReferenceNum--) {
-                       lastblock = sbi->s_partmaps
-                                       [newfileset.partitionReferenceNum]
-                                               .s_partition_len;
-                       newfileset.logicalBlockNum = 0;
-
-                       do {
-                               bh = udf_read_ptagged(sb, &newfileset, 0,
-                                                     &ident);
-                               if (!bh) {
-                                       newfileset.logicalBlockNum++;
-                                       continue;
-                               }
-
-                               switch (ident) {
-                               case TAG_IDENT_SBD:
-                               {
-                                       struct spaceBitmapDesc *sp;
-                                       sp = (struct spaceBitmapDesc *)
-                                                               bh->b_data;
-                                       newfileset.logicalBlockNum += 1 +
-                                               ((le32_to_cpu(sp->numOfBytes) +
-                                                 sizeof(struct spaceBitmapDesc)
-                                                 - 1) >> sb->s_blocksize_bits);
-                                       brelse(bh);
-                                       break;
-                               }
-                               case TAG_IDENT_FSD:
-                                       *fileset = newfileset;
-                                       break;
-                               default:
-                                       newfileset.logicalBlockNum++;
-                                       brelse(bh);
-                                       bh = NULL;
-                                       break;
-                               }
-                       } while (newfileset.logicalBlockNum < lastblock &&
-                                fileset->logicalBlockNum == 0xFFFFFFFF &&
-                                fileset->partitionReferenceNum == 0xFFFF);
-               }
-       }
-
-       if ((fileset->logicalBlockNum != 0xFFFFFFFF ||
-            fileset->partitionReferenceNum != 0xFFFF) && bh) {
                udf_debug("Fileset at block=%u, partition=%u\n",
                          fileset->logicalBlockNum,
                          fileset->partitionReferenceNum);
 
-               sbi->s_partition = fileset->partitionReferenceNum;
+               UDF_SB(sb)->s_partition = fileset->partitionReferenceNum;
                udf_load_fileset(sb, bh, root);
                brelse(bh);
                return 0;
@@ -1570,10 +1510,16 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
  */
 #define PART_DESC_ALLOC_STEP 32
 
+struct part_desc_seq_scan_data {
+       struct udf_vds_record rec;
+       u32 partnum;
+};
+
 struct desc_seq_scan_data {
        struct udf_vds_record vds[VDS_POS_LENGTH];
        unsigned int size_part_descs;
-       struct udf_vds_record *part_descs_loc;
+       unsigned int num_part_descs;
+       struct part_desc_seq_scan_data *part_descs_loc;
 };
 
 static struct udf_vds_record *handle_partition_descriptor(
@@ -1582,10 +1528,14 @@ static struct udf_vds_record *handle_partition_descriptor(
 {
        struct partitionDesc *desc = (struct partitionDesc *)bh->b_data;
        int partnum;
+       int i;
 
        partnum = le16_to_cpu(desc->partitionNumber);
-       if (partnum >= data->size_part_descs) {
-               struct udf_vds_record *new_loc;
+       for (i = 0; i < data->num_part_descs; i++)
+               if (partnum == data->part_descs_loc[i].partnum)
+                       return &(data->part_descs_loc[i].rec);
+       if (data->num_part_descs >= data->size_part_descs) {
+               struct part_desc_seq_scan_data *new_loc;
                unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP);
 
                new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL);
@@ -1597,7 +1547,7 @@ static struct udf_vds_record *handle_partition_descriptor(
                data->part_descs_loc = new_loc;
                data->size_part_descs = new_size;
        }
-       return &(data->part_descs_loc[partnum]);
+       return &(data->part_descs_loc[data->num_part_descs++].rec);
 }
 
 
@@ -1647,6 +1597,7 @@ static noinline int udf_process_sequence(
 
        memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
        data.size_part_descs = PART_DESC_ALLOC_STEP;
+       data.num_part_descs = 0;
        data.part_descs_loc = kcalloc(data.size_part_descs,
                                      sizeof(*data.part_descs_loc),
                                      GFP_KERNEL);
@@ -1658,7 +1609,6 @@ static noinline int udf_process_sequence(
         * are in it.
         */
        for (; (!done && block <= lastblock); block++) {
-
                bh = udf_read_tagged(sb, block, block, &ident);
                if (!bh)
                        break;
@@ -1730,13 +1680,10 @@ static noinline int udf_process_sequence(
        }
 
        /* Now handle prevailing Partition Descriptors */
-       for (i = 0; i < data.size_part_descs; i++) {
-               if (data.part_descs_loc[i].block) {
-                       ret = udf_load_partdesc(sb,
-                                               data.part_descs_loc[i].block);
-                       if (ret < 0)
-                               return ret;
-               }
+       for (i = 0; i < data.num_part_descs; i++) {
+               ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block);
+               if (ret < 0)
+                       return ret;
        }
 
        return 0;
index 66d1d45fa2e1c11dcf17231f0a32f883bb2c1a12..d356f802945ae3348540c0cd099372a3a97f1e9e 100644 (file)
@@ -1026,7 +1026,8 @@ static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
 #define ioport_map ioport_map
 static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
 {
-       return PCI_IOBASE + (port & MMIO_UPPER_LIMIT);
+       port &= IO_SPACE_LIMIT;
+       return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
 }
 #endif
 
index f173b5f30dbe9a68192cd5892cdcab37d8cb81ed..7b75ff6e2fceeb407e828c4e171176afc5ae7281 100644 (file)
@@ -54,8 +54,6 @@
 #define LOAD_OFFSET 0
 #endif
 
-#include <linux/export.h>
-
 /* Align . to a 8 byte boundary equals to maximum function alignment. */
 #define ALIGN_FUNCTION()  . = ALIGN(8)
 
index 46a8009784df1227f784774bdab50d39e03dceb5..152b3055e9e1feb449b682c8102df23d18a64df5 100644 (file)
@@ -675,7 +675,7 @@ static inline bool drm_core_check_feature(struct drm_device *dev, int feature)
 static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
 {
        return drm_core_check_feature(dev, DRIVER_ATOMIC) ||
-               dev->mode_config.funcs->atomic_commit != NULL;
+               (dev->mode_config.funcs && dev->mode_config.funcs->atomic_commit != NULL);
 }
 
 
diff --git a/include/dt-bindings/reset/amlogic,meson-axg-audio-arb.h b/include/dt-bindings/reset/amlogic,meson-axg-audio-arb.h
new file mode 100644 (file)
index 0000000..05c3636
--- /dev/null
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
+ *
+ * Copyright (c) 2018 Baylibre SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_MESON_AXG_AUDIO_ARB_H
+#define _DT_BINDINGS_AMLOGIC_MESON_AXG_AUDIO_ARB_H
+
+#define AXG_ARB_TODDR_A        0
+#define AXG_ARB_TODDR_B        1
+#define AXG_ARB_TODDR_C        2
+#define AXG_ARB_FRDDR_A        3
+#define AXG_ARB_FRDDR_B        4
+#define AXG_ARB_FRDDR_C        5
+
+#endif /* _DT_BINDINGS_AMLOGIC_MESON_AXG_AUDIO_ARB_H */
index ca1d2cc2cdfa09a8a760693b6b3c10e9c3f6a704..18863d56273cc7ea144bfaf366a2c74755ee72c9 100644 (file)
@@ -199,47 +199,57 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
 
 #define __declare_arg_0(a0, res)                                       \
        struct arm_smccc_res   *___res = res;                           \
-       register u32           r0 asm("r0") = a0;                       \
+       register unsigned long r0 asm("r0") = (u32)a0;                  \
        register unsigned long r1 asm("r1");                            \
        register unsigned long r2 asm("r2");                            \
        register unsigned long r3 asm("r3")
 
 #define __declare_arg_1(a0, a1, res)                                   \
+       typeof(a1) __a1 = a1;                                           \
        struct arm_smccc_res   *___res = res;                           \
-       register u32           r0 asm("r0") = a0;                       \
-       register typeof(a1)    r1 asm("r1") = a1;                       \
+       register unsigned long r0 asm("r0") = (u32)a0;                  \
+       register unsigned long r1 asm("r1") = __a1;                     \
        register unsigned long r2 asm("r2");                            \
        register unsigned long r3 asm("r3")
 
 #define __declare_arg_2(a0, a1, a2, res)                               \
+       typeof(a1) __a1 = a1;                                           \
+       typeof(a2) __a2 = a2;                                           \
        struct arm_smccc_res   *___res = res;                           \
-       register u32           r0 asm("r0") = a0;                       \
-       register typeof(a1)    r1 asm("r1") = a1;                       \
-       register typeof(a2)    r2 asm("r2") = a2;                       \
+       register unsigned long r0 asm("r0") = (u32)a0;                  \
+       register unsigned long r1 asm("r1") = __a1;                     \
+       register unsigned long r2 asm("r2") = __a2;                     \
        register unsigned long r3 asm("r3")
 
 #define __declare_arg_3(a0, a1, a2, a3, res)                           \
+       typeof(a1) __a1 = a1;                                           \
+       typeof(a2) __a2 = a2;                                           \
+       typeof(a3) __a3 = a3;                                           \
        struct arm_smccc_res   *___res = res;                           \
-       register u32           r0 asm("r0") = a0;                       \
-       register typeof(a1)    r1 asm("r1") = a1;                       \
-       register typeof(a2)    r2 asm("r2") = a2;                       \
-       register typeof(a3)    r3 asm("r3") = a3
+       register unsigned long r0 asm("r0") = (u32)a0;                  \
+       register unsigned long r1 asm("r1") = __a1;                     \
+       register unsigned long r2 asm("r2") = __a2;                     \
+       register unsigned long r3 asm("r3") = __a3
 
 #define __declare_arg_4(a0, a1, a2, a3, a4, res)                       \
+       typeof(a4) __a4 = a4;                                           \
        __declare_arg_3(a0, a1, a2, a3, res);                           \
-       register typeof(a4) r4 asm("r4") = a4
+       register unsigned long r4 asm("r4") = __a4
 
 #define __declare_arg_5(a0, a1, a2, a3, a4, a5, res)                   \
+       typeof(a5) __a5 = a5;                                           \
        __declare_arg_4(a0, a1, a2, a3, a4, res);                       \
-       register typeof(a5) r5 asm("r5") = a5
+       register unsigned long r5 asm("r5") = __a5
 
 #define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res)               \
+       typeof(a6) __a6 = a6;                                           \
        __declare_arg_5(a0, a1, a2, a3, a4, a5, res);                   \
-       register typeof(a6) r6 asm("r6") = a6
+       register unsigned long r6 asm("r6") = __a6
 
 #define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res)           \
+       typeof(a7) __a7 = a7;                                           \
        __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res);               \
-       register typeof(a7) r7 asm("r7") = a7
+       register unsigned long r7 asm("r7") = __a7
 
 #define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
 #define __declare_args(count, ...)  ___declare_args(count, __VA_ARGS__)
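The __declare_arg_*() macros above are what arm_smccc_1_1_smc()/hvc() expand their arguments through; widening the register variables to unsigned long (with an explicit u32 cast for the function ID) avoids undersized register variables when a caller passes a narrow type. A minimal call-site sketch, assuming only the standard SMCCC version query:

#include <linux/arm-smccc.h>

static unsigned long example_smccc_version(void)
{
        struct arm_smccc_res res;

        /* Arguments land in r0-r7 via the __declare_arg_*() helpers. */
        arm_smccc_1_1_smc(ARM_SMCCC_VERSION_FUNC_ID, &res);
        return res.a0;
}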
index 763bbad1e258048d9ea3e32764f4a79b8c5b59fb..4d36b27214fda629b7808ce662216d61d47e0265 100644 (file)
 #define __noretpoline __attribute__((indirect_branch("keep")))
 #endif
 
-/*
- * it doesn't make sense on ARM (currently the only user of __naked)
- * to trace naked functions because then mcount is called without
- * stack and frame pointer being set up and there is no chance to
- * restore the lr register to the value before mcount was called.
- *
- * The asm() bodies of naked functions often depend on standard calling
- * conventions, therefore they must be noinline and noclone.
- *
- * GCC 4.[56] currently fail to enforce this, so we must do so ourselves.
- * See GCC PR44290.
- */
-#define __naked                __attribute__((naked)) noinline __noclone notrace
-
 #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
 
 #define __optimize(level)      __attribute__((__optimize__(level)))
index 3525c179698c238c70145574c03e71620054777c..db192becfec4998df459bc46eb739676a066852e 100644 (file)
@@ -226,6 +226,14 @@ struct ftrace_likely_data {
 #define notrace                        __attribute__((no_instrument_function))
 #endif
 
+/*
+ * it doesn't make sense on ARM (currently the only user of __naked)
+ * to trace naked functions because then mcount is called without
+ * stack and frame pointer being set up and there is no chance to
+ * restore the lr register to the value before mcount was called.
+ */
+#define __naked                        __attribute__((naked)) notrace
+
 #define __compiler_offsetof(a, b)      __builtin_offsetof(a, b)
 
 /*
index deb0f663252fc55e39546c7d3107e96dfb3f03ae..450b28db95331ffbe19963c804391d7249cfb2c3 100644 (file)
@@ -88,6 +88,8 @@ int dax_writeback_mapping_range(struct address_space *mapping,
                struct block_device *bdev, struct writeback_control *wbc);
 
 struct page *dax_layout_busy_page(struct address_space *mapping);
+bool dax_lock_mapping_entry(struct page *page);
+void dax_unlock_mapping_entry(struct page *page);
 #else
 static inline bool bdev_dax_supported(struct block_device *bdev,
                int blocksize)
@@ -119,6 +121,17 @@ static inline int dax_writeback_mapping_range(struct address_space *mapping,
 {
        return -EOPNOTSUPP;
 }
+
+static inline bool dax_lock_mapping_entry(struct page *page)
+{
+       if (IS_DAX(page->mapping->host))
+               return true;
+       return false;
+}
+
+static inline void dax_unlock_mapping_entry(struct page *page)
+{
+}
 #endif
 
 int dax_read_lock(void);
index ae072bc5aacf8365245bdbc50f6c19b9e3d5812c..ce764a5d2ee45023c99da81485f77bd958480de7 100644 (file)
  * hackers place grumpy comments in header files.
  */
 
-#define __VMLINUX_SYMBOL(x) x
-#define __VMLINUX_SYMBOL_STR(x) #x
-
-/* Indirect, so macros are expanded before pasting. */
-#define VMLINUX_SYMBOL(x) __VMLINUX_SYMBOL(x)
-#define VMLINUX_SYMBOL_STR(x) __VMLINUX_SYMBOL_STR(x)
-
 #ifndef __ASSEMBLY__
 #ifdef MODULE
 extern struct module __this_module;
index 33322702c910b9cb879d05a03c070c51487c0d26..6c0b4a1c22ff5bd84c5c4e29ce07b0366b7bc5b1 100644 (file)
@@ -1763,6 +1763,7 @@ struct file_operations {
                        u64);
        int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t,
                        u64);
+       int (*fadvise)(struct file *, loff_t, loff_t, int);
 } __randomize_layout;
 
 struct inode_operations {
@@ -3459,4 +3460,8 @@ static inline bool dir_relax_shared(struct inode *inode)
 extern bool path_noexec(const struct path *path);
 extern void inode_nohighmem(struct inode *inode);
 
+/* mm/fadvise.c */
+extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len,
+                      int advice);
+
 #endif /* _LINUX_FS_H */
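vfs_fadvise() is the new entry point that dispatches to a filesystem's ->fadvise method (overlayfs wires one up earlier in this section) before falling back to the in-kernel default. A hedged caller sketch, with a hypothetical helper name:

#include <linux/fs.h>
#include <linux/fadvise.h>

static int example_start_readahead(struct file *file, loff_t pos, loff_t len)
{
        /* POSIX_FADV_WILLNEED asks the VFS to begin readahead on the range. */
        return vfs_fadvise(file, pos, len, POSIX_FADV_WILLNEED);
}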
index 834e6461a69059b059556a93e29092cd68a976fc..d44a783629425aef596d5152b23a18cdf3841c62 100644 (file)
@@ -526,6 +526,7 @@ struct hid_input {
        const char *name;
        bool registered;
        struct list_head reports;       /* the list of reports */
+       unsigned int application;       /* application usage for this input */
 };
 
 enum hid_type {
index 27e3e32135a84de8d9c9834ae195f653c6ed2b78..99c19b06d9a46d2cebf20ad5f21a1612a94b5855 100644 (file)
@@ -3,6 +3,7 @@
 #define _LINUX_HUGE_MM_H
 
 #include <linux/sched/coredump.h>
+#include <linux/mm_types.h>
 
 #include <linux/fs.h> /* only for vma_is_dax() */
 
@@ -46,9 +47,9 @@ extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        unsigned long addr, pgprot_t newprot,
                        int prot_numa);
-int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
+vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
                        pmd_t *pmd, pfn_t pfn, bool write);
-int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
+vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
                        pud_t *pud, pfn_t pfn, bool write);
 enum transparent_hugepage_flag {
        TRANSPARENT_HUGEPAGE_FLAG,
index b79387fd57da9c2673aee5db81fb4fa2e74ffea8..65b4eaed1d965ca193407f9209a62310a51aafb1 100644 (file)
@@ -855,7 +855,7 @@ static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg)
 }
 
 u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold);
-void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf);
+void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred);
 
 int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr);
 /**
index 3e8215b2c371c27406280fa5c4eccc098cebde7f..3ec8628ce17f044ad1d3ce626e95f368a55844d9 100644 (file)
@@ -236,34 +236,74 @@ struct ida {
 }
 #define DEFINE_IDA(name)       struct ida name = IDA_INIT(name)
 
-int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
-int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
-void ida_remove(struct ida *ida, int id);
+int ida_alloc_range(struct ida *, unsigned int min, unsigned int max, gfp_t);
+void ida_free(struct ida *, unsigned int id);
 void ida_destroy(struct ida *ida);
 
-int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
-                  gfp_t gfp_mask);
-void ida_simple_remove(struct ida *ida, unsigned int id);
+/**
+ * ida_alloc() - Allocate an unused ID.
+ * @ida: IDA handle.
+ * @gfp: Memory allocation flags.
+ *
+ * Allocate an ID between 0 and %INT_MAX, inclusive.
+ *
+ * Context: Any context.
+ * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
+ * or %-ENOSPC if there are no free IDs.
+ */
+static inline int ida_alloc(struct ida *ida, gfp_t gfp)
+{
+       return ida_alloc_range(ida, 0, ~0, gfp);
+}
 
-static inline void ida_init(struct ida *ida)
+/**
+ * ida_alloc_min() - Allocate an unused ID.
+ * @ida: IDA handle.
+ * @min: Lowest ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Allocate an ID between @min and %INT_MAX, inclusive.
+ *
+ * Context: Any context.
+ * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
+ * or %-ENOSPC if there are no free IDs.
+ */
+static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
 {
-       INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT);
+       return ida_alloc_range(ida, min, ~0, gfp);
 }
 
 /**
- * ida_get_new - allocate new ID
- * @ida:       idr handle
- * @p_id:      pointer to the allocated handle
+ * ida_alloc_max() - Allocate an unused ID.
+ * @ida: IDA handle.
+ * @max: Highest ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Allocate an ID between 0 and @max, inclusive.
  *
- * Simple wrapper around ida_get_new_above() w/ @starting_id of zero.
+ * Context: Any context.
+ * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
+ * or %-ENOSPC if there are no free IDs.
  */
-static inline int ida_get_new(struct ida *ida, int *p_id)
+static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
 {
-       return ida_get_new_above(ida, 0, p_id);
+       return ida_alloc_range(ida, 0, max, gfp);
 }
 
+static inline void ida_init(struct ida *ida)
+{
+       INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT);
+}
+
+#define ida_simple_get(ida, start, end, gfp)   \
+                       ida_alloc_range(ida, start, (end) - 1, gfp)
+#define ida_simple_remove(ida, id)     ida_free(ida, id)
+
 static inline bool ida_is_empty(const struct ida *ida)
 {
        return radix_tree_empty(&ida->ida_rt);
 }
+
+/* in lib/radix-tree.c */
+int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
 #endif /* __IDR_H__ */
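The reworked IDA interface above replaces the ida_pre_get()/ida_get_new_above() retry loop and external lock with a single allocation call. A minimal usage sketch with hypothetical names, mirroring how get_anon_bdev() is converted in fs/super.c earlier in this section:

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDA(example_ida);

static int example_get_id(void)
{
        /* Start at 1 so an ID of 0 is never handed out; negative errno on failure. */
        return ida_alloc_range(&example_ida, 1, (1 << 20) - 1, GFP_KERNEL);
}

static void example_put_id(int id)
{
        ida_free(&example_ida, id);
}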
index c20f296438fbb84997474cb681a51e57eef558ef..8c3f8c14eeaafd5b1035f4ef2fec81384f94f77c 100644 (file)
@@ -12,11 +12,13 @@ enum kcore_type {
        KCORE_VMEMMAP,
        KCORE_USER,
        KCORE_OTHER,
+       KCORE_REMAP,
 };
 
 struct kcore_list {
        struct list_head list;
        unsigned long addr;
+       unsigned long vaddr;
        size_t size;
        int type;
 };
@@ -36,11 +38,22 @@ struct vmcoredd_node {
 
 #ifdef CONFIG_PROC_KCORE
 void __init kclist_add(struct kcore_list *, void *, size_t, int type);
+static inline
+void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
+{
+       m->vaddr = (unsigned long)vaddr;
+       kclist_add(m, addr, sz, KCORE_REMAP);
+}
 #else
 static inline
 void kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
 {
 }
+
+static inline
+void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
+{
+}
 #endif
 
 #endif /* _LINUX_KCORE_H */
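kclist_add_remap() lets an architecture register a region whose readable alias differs from the address used for the physical mapping; read_kcore() then reports the alias in p_vaddr for KCORE_REMAP entries. A hedged registration sketch with hypothetical names:

#include <linux/kcore.h>

static struct kcore_list example_kcore_entry;

static void __init example_register_remapped(void *addr, void *alias, size_t sz)
{
        /* 'addr' feeds __pa() for p_paddr; 'alias' becomes p_vaddr. */
        kclist_add_remap(&example_kcore_entry, addr, alias, sz);
}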
index 7a452716de4be0a7902f26ccc97ce124c6b168ff..66d94b4557cf789906455683e4e69e40750f3b2c 100644 (file)
@@ -362,8 +362,8 @@ struct mlx5_frag_buf {
 struct mlx5_frag_buf_ctrl {
        struct mlx5_frag_buf    frag_buf;
        u32                     sz_m1;
-       u32                     frag_sz_m1;
-       u32                     strides_offset;
+       u16                     frag_sz_m1;
+       u16                     strides_offset;
        u8                      log_sz;
        u8                      log_stride;
        u8                      log_frag_strides;
@@ -995,7 +995,7 @@ static inline u32 mlx5_base_mkey(const u32 key)
 }
 
 static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
-                                       u32 strides_offset,
+                                       u16 strides_offset,
                                        struct mlx5_frag_buf_ctrl *fbc)
 {
        fbc->log_stride = log_stride;
@@ -1052,7 +1052,7 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
 void mlx5_health_cleanup(struct mlx5_core_dev *dev);
 int mlx5_health_init(struct mlx5_core_dev *dev);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
-void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
 void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
 void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
 void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
index 8fcc36660de672c84fce606ba0a1fff22cd5a537..a61ebe8ad4ca92e72e23855c17f8e7c9ad059a54 100644 (file)
@@ -2731,6 +2731,7 @@ enum mf_action_page_type {
        MF_MSG_TRUNCATED_LRU,
        MF_MSG_BUDDY,
        MF_MSG_BUDDY_2ND,
+       MF_MSG_DAX,
        MF_MSG_UNKNOWN,
 };
 
index cd2bc939efd0db82d16cc3417b6d027771f8f1f4..5ed8f6292a533c2efbc4390d1a6a165bf4ad2252 100644 (file)
@@ -341,7 +341,7 @@ struct mm_struct {
        struct {
                struct vm_area_struct *mmap;            /* list of VMAs */
                struct rb_root mm_rb;
-               u32 vmacache_seqnum;                   /* per-thread vmacache */
+               u64 vmacache_seqnum;                   /* per-thread vmacache */
 #ifdef CONFIG_MMU
                unsigned long (*get_unmapped_area) (struct file *filp,
                                unsigned long addr, unsigned long len,
index 5fe87687664c7d78046060499cf96b28790190cb..d7016dcb245eeaff7ba8820a3fd73e267c0e46cb 100644 (file)
@@ -32,7 +32,7 @@
 #define VMACACHE_MASK (VMACACHE_SIZE - 1)
 
 struct vmacache {
-       u32 seqnum;
+       u64 seqnum;
        struct vm_area_struct *vmas[VMACACHE_SIZE];
 };
 
index 1298a7daa57d8aaf0148894523cba76180d5d46b..01797cb4587ede275db60176820571a3dea92c18 100644 (file)
@@ -754,6 +754,7 @@ struct tb_service_id {
  * struct typec_device_id - USB Type-C alternate mode identifiers
  * @svid: Standard or Vendor ID
  * @mode: Mode index
+ * @driver_data: Driver specific data
  */
 struct typec_device_id {
        __u16 svid;
index 4d25e4f952d9bd4c02b1f3c33fea0c4254907e14..99b0ebf496329f347cf723ad5da07ad5c23bcf9d 100644 (file)
@@ -256,6 +256,9 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size)
 #define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
 #define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
 
+extern bool of_node_name_eq(const struct device_node *np, const char *name);
+extern bool of_node_name_prefix(const struct device_node *np, const char *prefix);
+
 static inline const char *of_node_full_name(const struct device_node *np)
 {
        return np ? np->full_name : "<no-node>";
@@ -290,6 +293,8 @@ extern struct device_node *of_get_next_child(const struct device_node *node,
 extern struct device_node *of_get_next_available_child(
        const struct device_node *node, struct device_node *prev);
 
+extern struct device_node *of_get_compatible_child(const struct device_node *parent,
+                                       const char *compatible);
 extern struct device_node *of_get_child_by_name(const struct device_node *node,
                                        const char *name);
 
@@ -561,6 +566,16 @@ static inline struct device_node *to_of_node(const struct fwnode_handle *fwnode)
        return NULL;
 }
 
+static inline bool of_node_name_eq(const struct device_node *np, const char *name)
+{
+       return false;
+}
+
+static inline bool of_node_name_prefix(const struct device_node *np, const char *prefix)
+{
+       return false;
+}
+
 static inline const char* of_node_full_name(const struct device_node *np)
 {
        return "<no-node>";
@@ -632,6 +647,12 @@ static inline bool of_have_populated_dt(void)
        return false;
 }
 
+static inline struct device_node *of_get_compatible_child(const struct device_node *parent,
+                                       const char *compatible)
+{
+       return NULL;
+}
+
 static inline struct device_node *of_get_child_by_name(
                                        const struct device_node *node,
                                        const char *name)
@@ -967,6 +988,18 @@ static inline struct device_node *of_find_matching_node(
        return of_find_matching_node_and_match(from, matches, NULL);
 }
 
+static inline const char *of_node_get_device_type(const struct device_node *np)
+{
+       return of_get_property(np, "type", NULL);
+}
+
+static inline bool of_node_is_type(const struct device_node *np, const char *type)
+{
+       const char *match = of_node_get_device_type(np);
+
+       return np && match && type && !strcmp(match, type);
+}
+
 /**
  * of_property_count_u8_elems - Count the number of u8 elements in a property
  *
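of_get_compatible_child() gives callers a direct lookup of a child node by compatible string instead of open-coding for_each_child_of_node(). A small sketch, assuming a made-up compatible value; the returned node is refcounted and must be released with of_node_put():

#include <linux/of.h>

static bool example_has_codec(struct device_node *parent)
{
        struct device_node *child;

        child = of_get_compatible_child(parent, "vendor,example-codec");
        if (!child)
                return false;

        of_node_put(child);     /* balance the reference taken by the lookup */
        return true;
}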
index e72ca8dd6241b88cae56cc27de7e05e10182f267..6925828f9f250fae21e19ef8338d46694621e2a2 100644 (file)
@@ -1235,6 +1235,9 @@ void pci_bus_remove_resources(struct pci_bus *bus);
 int devm_request_pci_bus_resources(struct device *dev,
                                   struct list_head *resources);
 
+/* Temporary until new and working PCI SBR API in place */
+int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
+
 #define pci_bus_for_each_resource(bus, res, i)                         \
        for (i = 0;                                                     \
            (res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \
index 99d366cb0e9f5327081cea07b34c798df6aec35f..d157983b84cf9258fa43b639accc8ee3904a080c 100644 (file)
 
 #define PCI_VENDOR_ID_OCZ              0x1b85
 
+#define PCI_VENDOR_ID_NCUBE            0x10ff
+
 #endif /* _LINUX_PCI_IDS_H */
index 9abc0ca7259b78d5400166dbafd81825ca05b915..9f0aa1b48c7849ae4fd7545bfbf5d8b5a3db102e 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Driver for Texas Instruments INA219, INA226 power monitor chips
  *
- * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
+ * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
index ca9772c8e48b0652cf1cfa0929d037cff1e846f3..f32dd270b8e3f26a91cb6b10cdd7ac3dbb2cf6e8 100644 (file)
@@ -408,13 +408,7 @@ struct qc_type_state {
 
 struct qc_state {
        unsigned int s_incoredqs;       /* Number of dquots in core */
-       /*
-        * Per quota type information. The array should really have
-        * max(MAXQUOTAS, XQM_MAXQUOTAS) entries. BUILD_BUG_ON in
-        * quota_getinfo() makes sure XQM_MAXQUOTAS is large enough.  Once VFS
-        * supports project quotas, this can be changed to MAXQUOTAS
-        */
-       struct qc_type_state s_state[XQM_MAXQUOTAS];
+       struct qc_type_state s_state[MAXQUOTAS];  /* Per quota type information */
 };
 
 /* Structure for communicating via ->set_info */
index da5178216da54705e905f2c81388b8006b4a37d4..2a986d282a975b5bb5b244efad5419334f74cfaa 100644 (file)
@@ -17,6 +17,20 @@ static inline int set_memory_x(unsigned long addr,  int numpages) { return 0; }
 static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
 #endif
 
+#ifndef set_mce_nospec
+static inline int set_mce_nospec(unsigned long pfn)
+{
+       return 0;
+}
+#endif
+
+#ifndef clear_mce_nospec
+static inline int clear_mce_nospec(unsigned long pfn)
+{
+       return 0;
+}
+#endif
+
 #ifndef CONFIG_ARCH_HAS_MEM_ENCRYPT
 static inline int set_memory_encrypted(unsigned long addr, int numpages)
 {
index 0b14f936100a24ead41455de43dda08c8448cca2..d1ae43c13e25940e2febed51919c171e3018ed3f 100644 (file)
@@ -207,4 +207,19 @@ static inline s64 timeval_to_ns(const struct timeval *tv)
 extern struct timeval ns_to_timeval(const s64 nsec);
 extern struct __kernel_old_timeval ns_to_kernel_old_timeval(s64 nsec);
 
+/*
+ * New aliases for compat time functions. These will be used to replace
+ * the compat code so it can be shared between 32-bit and 64-bit builds,
+ * both of which provide compatibility with old 32-bit tasks.
+ */
+#define old_time32_t           compat_time_t
+#define old_timeval32          compat_timeval
+#define old_timespec32         compat_timespec
+#define old_itimerspec32       compat_itimerspec
+#define ns_to_old_timeval32    ns_to_compat_timeval
+#define get_old_itimerspec32   get_compat_itimerspec64
+#define put_old_itimerspec32   put_compat_itimerspec64
+#define get_old_timespec32     compat_get_timespec64
+#define put_old_timespec32     compat_put_timespec64
+
 #endif
index 5d738804e3d6241dae4961c93324efbdd4f5aaea..a5a3cfc3c2fa754fd8f21fc721094268a4aa256b 100644 (file)
@@ -258,8 +258,8 @@ extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
 extern int persistent_clock_is_local;
 
 extern void read_persistent_clock64(struct timespec64 *ts);
-void read_persistent_clock_and_boot_offset(struct timespec64 *wall_clock,
-                                          struct timespec64 *boot_offset);
+void read_persistent_wall_and_boot_offset(struct timespec64 *wall_clock,
+                                         struct timespec64 *boot_offset);
 extern int update_persistent_clock64(struct timespec64 now);
 
 /*
index 7f2e16e76ac476b4fb07223af23d15490c14b4d4..041f7e56a2894f3f800fc470d8a5bccaffc58b88 100644 (file)
@@ -158,8 +158,10 @@ extern void syscall_unregfunc(void);
                 * For rcuidle callers, use srcu since sched-rcu        \
                 * doesn't work from the idle path.                     \
                 */                                                     \
-               if (rcuidle)                                            \
+               if (rcuidle) {                                          \
                        idx = srcu_read_lock_notrace(&tracepoint_srcu); \
+                       rcu_irq_enter_irqson();                         \
+               }                                                       \
                                                                        \
                it_func_ptr = rcu_dereference_raw((tp)->funcs);         \
                                                                        \
@@ -171,8 +173,10 @@ extern void syscall_unregfunc(void);
                        } while ((++it_func_ptr)->func);                \
                }                                                       \
                                                                        \
-               if (rcuidle)                                            \
+               if (rcuidle) {                                          \
+                       rcu_irq_exit_irqson();                          \
                        srcu_read_unlock_notrace(&tracepoint_srcu, idx);\
+               }                                                       \
                                                                        \
                preempt_enable_notrace();                               \
        } while (0)
index a34539b7f7508e1a7ae37580823b44858d66ea84..7e6ac0114d556bda8580fba760e223fb57e7fd14 100644 (file)
@@ -133,15 +133,18 @@ struct vga_switcheroo_handler {
  * @can_switch: check if the device is in a position to switch now.
  *     Mandatory. The client should return false if a user space process
  *     has one of its device files open
+ * @gpu_bound: notify the audio client of the client id when the GPU is bound.
  *
  * Client callbacks. A client can be either a GPU or an audio device on a GPU.
  * The @set_gpu_state and @can_switch methods are mandatory, @reprobe may be
  * set to NULL. For audio clients, the @reprobe member is bogus.
+ * OTOH, @gpu_bound is only for audio clients, and not used for GPU clients.
  */
 struct vga_switcheroo_client_ops {
        void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state);
        void (*reprobe)(struct pci_dev *dev);
        bool (*can_switch)(struct pci_dev *dev);
+       void (*gpu_bound)(struct pci_dev *dev, enum vga_switcheroo_client_id);
 };
 
 #if defined(CONFIG_VGA_SWITCHEROO)
index 5c7f010676a74206e06282337ca77b578058f2f1..47a3441cf4c4a4d59f588aac2c181ba1d8098c4f 100644 (file)
@@ -105,7 +105,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 #ifdef CONFIG_DEBUG_VM_VMACACHE
                VMACACHE_FIND_CALLS,
                VMACACHE_FIND_HITS,
-               VMACACHE_FULL_FLUSHES,
 #endif
 #ifdef CONFIG_SWAP
                SWAP_RA,
index 3e9a963edd6a89f2d8d34616690dba2888aed7f7..6fce268a4588e5a064214b84309f4520f8759d65 100644 (file)
@@ -10,7 +10,6 @@ static inline void vmacache_flush(struct task_struct *tsk)
        memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
 }
 
-extern void vmacache_flush_all(struct mm_struct *mm);
 extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
 extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
                                                    unsigned long addr);
@@ -24,10 +23,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
 static inline void vmacache_invalidate(struct mm_struct *mm)
 {
        mm->vmacache_seqnum++;
-
-       /* deal with overflows */
-       if (unlikely(mm->vmacache_seqnum == 0))
-               vmacache_flush_all(mm);
 }
 
 #endif /* __LINUX_VMACACHE_H */
index 1ad5b19e83a95d0a4ba17181f1f55134022791f1..970303448c9029e2aaab37e3735bc83e0490de04 100644 (file)
@@ -23,13 +23,11 @@ struct tc_action {
        const struct tc_action_ops      *ops;
        __u32                           type; /* for backward compat(TCA_OLD_COMPAT) */
        __u32                           order;
-       struct list_head                list;
        struct tcf_idrinfo              *idrinfo;
 
        u32                             tcfa_index;
        refcount_t                      tcfa_refcnt;
        atomic_t                        tcfa_bindcnt;
-       u32                             tcfa_capab;
        int                             tcfa_action;
        struct tcf_t                    tcfa_tm;
        struct gnet_stats_basic_packed  tcfa_bstats;
@@ -44,7 +42,6 @@ struct tc_action {
 #define tcf_index      common.tcfa_index
 #define tcf_refcnt     common.tcfa_refcnt
 #define tcf_bindcnt    common.tcfa_bindcnt
-#define tcf_capab      common.tcfa_capab
 #define tcf_action     common.tcfa_action
 #define tcf_tm         common.tcfa_tm
 #define tcf_bstats     common.tcfa_bstats
@@ -102,7 +99,6 @@ struct tc_action_ops {
        size_t  (*get_fill_size)(const struct tc_action *act);
        struct net_device *(*get_dev)(const struct tc_action *a);
        void    (*put_dev)(struct net_device *dev);
-       int     (*delete)(struct net *net, u32 index);
 };
 
 struct tc_action_net {
@@ -148,8 +144,6 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
                       const struct tc_action_ops *ops,
                       struct netlink_ext_ack *extack);
 int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index);
-bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a,
-                   int bind);
 int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
                   struct tc_action **a, const struct tc_action_ops *ops,
                   int bind, bool cpustats);
@@ -158,7 +152,6 @@ void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a);
 void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
 int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
                        struct tc_action **a, int bind);
-int tcf_idr_delete_index(struct tc_action_net *tn, u32 index);
 int __tcf_idr_release(struct tc_action *a, bool bind, bool strict);
 
 static inline int tcf_idr_release(struct tc_action *a, bool bind)
index 9a850973e09a739aaa72d5be951436640bc9c131..8ebabc9873d1593b46161697b53c8a8d14242000 100644 (file)
@@ -4865,8 +4865,8 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
  *
  * Return: 0 on success. -ENODATA.
  */
-int reg_query_regdb_wmm(char *alpha2, int freq, u32 *ptr,
-                       struct ieee80211_wmm_rule *rule);
+int reg_query_regdb_wmm(char *alpha2, int freq,
+                       struct ieee80211_reg_rule *rule);
 
 /*
  * callbacks for asynchronous cfg80211 methods, notification
index d5f62cc6c2ae44473b6233182d7825ee8e5d1ef5..3394d75e1c80380c9a27fcb31fa67e831afb19c7 100644 (file)
@@ -30,7 +30,7 @@ struct nf_conn_timeout {
 };
 
 static inline unsigned int *
-nf_ct_timeout_data(struct nf_conn_timeout *t)
+nf_ct_timeout_data(const struct nf_conn_timeout *t)
 {
        struct nf_ct_timeout *timeout;
 
index ef727f71336e7e5f89d92000194bcfab4bacc5a8..75a3f3fdb3591720d266fd33d98c608fc673b6ca 100644 (file)
@@ -298,19 +298,13 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts)
 #endif
 }
 
-static inline void tcf_exts_to_list(const struct tcf_exts *exts,
-                                   struct list_head *actions)
-{
 #ifdef CONFIG_NET_CLS_ACT
-       int i;
-
-       for (i = 0; i < exts->nr_actions; i++) {
-               struct tc_action *a = exts->actions[i];
-
-               list_add_tail(&a->list, actions);
-       }
+#define tcf_exts_for_each_action(i, a, exts) \
+       for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
+#else
+#define tcf_exts_for_each_action(i, a, exts) \
+       for (; 0; (void)(i), (void)(a), (void)(exts))
 #endif
-}
 
 static inline void
 tcf_exts_stats_update(const struct tcf_exts *exts,
@@ -361,6 +355,15 @@ static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
 #endif
 }
 
+static inline struct tc_action *tcf_exts_first_action(struct tcf_exts *exts)
+{
+#ifdef CONFIG_NET_CLS_ACT
+       return exts->actions[0];
+#else
+       return NULL;
+#endif
+}
+
 /**
  * tcf_exts_exec - execute tc filter extensions
  * @skb: socket buffer
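tcf_exts_for_each_action() replaces tcf_exts_to_list(), so offload paths can walk the action array in place instead of splicing actions onto a temporary list (which required the now-removed tc_action.list member). A minimal iteration sketch with hypothetical names:

#include <net/pkt_cls.h>
#include <net/act_api.h>

static int example_count_actions(struct tcf_exts *exts)
{
        struct tc_action *a;
        int i, n = 0;

        /* Visits each populated slot up to TCA_ACT_MAX_PRIO. */
        tcf_exts_for_each_action(i, a, exts)
                n++;

        return n;
}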
index 60f8cc86a44701c151c1f74ed6d7e5fb061c0fc2..3469750df0f44542bce0a97aa23360cbfe7079fc 100644 (file)
@@ -217,15 +217,15 @@ struct ieee80211_wmm_rule {
 struct ieee80211_reg_rule {
        struct ieee80211_freq_range freq_range;
        struct ieee80211_power_rule power_rule;
-       struct ieee80211_wmm_rule *wmm_rule;
+       struct ieee80211_wmm_rule wmm_rule;
        u32 flags;
        u32 dfs_cac_ms;
+       bool has_wmm;
 };
 
 struct ieee80211_regdomain {
        struct rcu_head rcu_head;
        u32 n_reg_rules;
-       u32 n_wmm_rules;
        char alpha2[3];
        enum nl80211_dfs_regions dfs_region;
        struct ieee80211_reg_rule reg_rules[];
index d5c683e8bb22adf9654e07d791d2ad782d2dec02..0a769cf2f5f3e4fac6afb909d0af41acc166fbfb 100644 (file)
@@ -171,15 +171,14 @@ struct cipher_context {
        char *rec_seq;
 };
 
+union tls_crypto_context {
+       struct tls_crypto_info info;
+       struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
+};
+
 struct tls_context {
-       union {
-               struct tls_crypto_info crypto_send;
-               struct tls12_crypto_info_aes_gcm_128 crypto_send_aes_gcm_128;
-       };
-       union {
-               struct tls_crypto_info crypto_recv;
-               struct tls12_crypto_info_aes_gcm_128 crypto_recv_aes_gcm_128;
-       };
+       union tls_crypto_context crypto_send;
+       union tls_crypto_context crypto_recv;
 
        struct list_head list;
        struct net_device *netdev;
@@ -367,8 +366,8 @@ static inline void tls_fill_prepend(struct tls_context *ctx,
         * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE
         */
        buf[0] = record_type;
-       buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.version);
-       buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.version);
+       buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.info.version);
+       buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.info.version);
        /* we can use IV for nonce explicit according to spec */
        buf[3] = pkt_len >> 8;
        buf[4] = pkt_len & 0xFF;
index 6f1e1f3b30633927283a0bbf09df6fa4418c2f28..cd1773d0e08f07247a42788a035a0355da113493 100644 (file)
@@ -412,6 +412,7 @@ void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus);
 void snd_hdac_bus_stop_cmd_io(struct hdac_bus *bus);
 void snd_hdac_bus_enter_link_reset(struct hdac_bus *bus);
 void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus);
+int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset);
 
 void snd_hdac_bus_update_rirb(struct hdac_bus *bus);
 int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
index af9ef16cc34d36f6fd314cc2522997a4b2dbc12f..fdaaafdc7a0039a0c4a6e93b6d2b7ed88ea5b74a 100644 (file)
@@ -407,6 +407,7 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
 int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card);
 void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card);
 int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
+                        struct snd_soc_pcm_runtime *rtd,
                         const struct snd_soc_pcm_stream *params,
                         unsigned int num_params,
                         struct snd_soc_dapm_widget *source,
index 7b8c9e19bad1c2bf72c21dcf6b358559d5e4b4ea..910cc4334b21557f98298082b16e2359a6414e01 100644 (file)
@@ -65,7 +65,7 @@
 
 /* keyctl structures */
 struct keyctl_dh_params {
-       __s32 private;
+       __s32 dh_private;
        __s32 prime;
        __s32 base;
 };
index eeb787b1c53c72771c8d684154b7a87dc029a45b..f35eb72739c09e3ad0bd22e279fa4a33119c15f6 100644 (file)
@@ -144,7 +144,7 @@ enum perf_event_sample_format {
 
        PERF_SAMPLE_MAX = 1U << 20,             /* non-ABI */
 
-       __PERF_SAMPLE_CALLCHAIN_EARLY           = 1ULL << 63,
+       __PERF_SAMPLE_CALLCHAIN_EARLY           = 1ULL << 63, /* non-ABI; internal use */
 };
 
 /*
index dc520e1a4123f7a60d4996b5d0df7d237199cdba..8b73cb603c5f32c78ff4ac78df4e78453a449cd9 100644 (file)
@@ -37,6 +37,7 @@
 
 #include <linux/types.h>
 #include <linux/socket.h>              /* For __kernel_sockaddr_storage. */
+#include <linux/in6.h>                 /* For struct in6_addr. */
 
 #define RDS_IB_ABI_VERSION             0x301
 
index b1e22c40c4b68a0b39d0a8a567baea1f3b5403ed..84c3de89696a15c1a23e7226dea456a6232e2043 100644 (file)
@@ -176,7 +176,7 @@ struct vhost_memory {
 #define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
 
 #define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
-#define VHOST_GET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x26, __u64)
+#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
 
 /* VHOST_NET specific defines */
 
index f58cafa42f183213113442b6bd55a85aa5d10301..f39352cef38223e4b1b5ab70693ca3deb5eee451 100644 (file)
@@ -10,6 +10,8 @@
 #ifndef __HDA_TPLG_INTERFACE_H__
 #define __HDA_TPLG_INTERFACE_H__
 
+#include <linux/types.h>
+
 /*
  * Default types range from 0~12. type can range from 0 to 0xff
  * SST types start at higher to avoid any overlapping in future
@@ -143,10 +145,10 @@ enum skl_module_param_type {
 };
 
 struct skl_dfw_algo_data {
-       u32 set_params:2;
-       u32 rsvd:30;
-       u32 param_id;
-       u32 max;
+       __u32 set_params:2;
+       __u32 rsvd:30;
+       __u32 param_id;
+       __u32 max;
        char params[0];
 } __packed;
 
@@ -163,68 +165,68 @@ enum skl_tuple_type {
 /* v4 configuration data */
 
 struct skl_dfw_v4_module_pin {
-       u16 module_id;
-       u16 instance_id;
+       __u16 module_id;
+       __u16 instance_id;
 } __packed;
 
 struct skl_dfw_v4_module_fmt {
-       u32 channels;
-       u32 freq;
-       u32 bit_depth;
-       u32 valid_bit_depth;
-       u32 ch_cfg;
-       u32 interleaving_style;
-       u32 sample_type;
-       u32 ch_map;
+       __u32 channels;
+       __u32 freq;
+       __u32 bit_depth;
+       __u32 valid_bit_depth;
+       __u32 ch_cfg;
+       __u32 interleaving_style;
+       __u32 sample_type;
+       __u32 ch_map;
 } __packed;
 
 struct skl_dfw_v4_module_caps {
-       u32 set_params:2;
-       u32 rsvd:30;
-       u32 param_id;
-       u32 caps_size;
-       u32 caps[HDA_SST_CFG_MAX];
+       __u32 set_params:2;
+       __u32 rsvd:30;
+       __u32 param_id;
+       __u32 caps_size;
+       __u32 caps[HDA_SST_CFG_MAX];
 } __packed;
 
 struct skl_dfw_v4_pipe {
-       u8 pipe_id;
-       u8 pipe_priority;
-       u16 conn_type:4;
-       u16 rsvd:4;
-       u16 memory_pages:8;
+       __u8 pipe_id;
+       __u8 pipe_priority;
+       __u16 conn_type:4;
+       __u16 rsvd:4;
+       __u16 memory_pages:8;
 } __packed;
 
 struct skl_dfw_v4_module {
        char uuid[SKL_UUID_STR_SZ];
 
-       u16 module_id;
-       u16 instance_id;
-       u32 max_mcps;
-       u32 mem_pages;
-       u32 obs;
-       u32 ibs;
-       u32 vbus_id;
-
-       u32 max_in_queue:8;
-       u32 max_out_queue:8;
-       u32 time_slot:8;
-       u32 core_id:4;
-       u32 rsvd1:4;
-
-       u32 module_type:8;
-       u32 conn_type:4;
-       u32 dev_type:4;
-       u32 hw_conn_type:4;
-       u32 rsvd2:12;
-
-       u32 params_fixup:8;
-       u32 converter:8;
-       u32 input_pin_type:1;
-       u32 output_pin_type:1;
-       u32 is_dynamic_in_pin:1;
-       u32 is_dynamic_out_pin:1;
-       u32 is_loadable:1;
-       u32 rsvd3:11;
+       __u16 module_id;
+       __u16 instance_id;
+       __u32 max_mcps;
+       __u32 mem_pages;
+       __u32 obs;
+       __u32 ibs;
+       __u32 vbus_id;
+
+       __u32 max_in_queue:8;
+       __u32 max_out_queue:8;
+       __u32 time_slot:8;
+       __u32 core_id:4;
+       __u32 rsvd1:4;
+
+       __u32 module_type:8;
+       __u32 conn_type:4;
+       __u32 dev_type:4;
+       __u32 hw_conn_type:4;
+       __u32 rsvd2:12;
+
+       __u32 params_fixup:8;
+       __u32 converter:8;
+       __u32 input_pin_type:1;
+       __u32 output_pin_type:1;
+       __u32 is_dynamic_in_pin:1;
+       __u32 is_dynamic_out_pin:1;
+       __u32 is_loadable:1;
+       __u32 rsvd3:11;
 
        struct skl_dfw_v4_pipe pipe;
        struct skl_dfw_v4_module_fmt in_fmt[MAX_IN_QUEUE];
index 80b52b4945e965d96ce068fe3d729e9364cc9868..a2ab516fcd2caf33167f3e64a76c2e77b726d6df 100644 (file)
 
 #include <xen/page.h>
 
+extern bool xen_scrub_pages;
+
 static inline void xenmem_reservation_scrub_page(struct page *page)
 {
-#ifdef CONFIG_XEN_SCRUB_PAGES
-       clear_highpage(page);
-#endif
+       if (xen_scrub_pages)
+               clear_highpage(page);
 }
 
 #ifdef CONFIG_XEN_HAVE_PVMMU
index 641dd7dd7c8ab71ae7f7d385ce03412b18477892..1e234e2f1cba7a0f1ca9e2ae18868d50e1fe4aaf 100644 (file)
@@ -1097,6 +1097,8 @@ config LD_DEAD_CODE_DATA_ELIMINATION
        bool "Dead code and data elimination (EXPERIMENTAL)"
        depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION
        depends on EXPERT
+       depends on $(cc-option,-ffunction-sections -fdata-sections)
+       depends on $(ld-option,--gc-sections)
        help
          Enable this if you want to do dead code and data elimination with
          the linker by compiling with -ffunction-sections -fdata-sections,
index b0eb3757ab895d07970bec60b55f9bd895f7bd8a..4cd402e4cfeb603e2417a3796c3d9b22f3022f89 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -199,6 +199,7 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
        }
 
        ipc_unlock_object(ipcp);
+       ipcp = ERR_PTR(-EIDRM);
 err:
        rcu_read_unlock();
        /*
index 2590700237c13cd7a1a7394c8797a0a2aafd063a..138f0302692ec4bfc1138075f69a858077221eb5 100644 (file)
@@ -1844,7 +1844,7 @@ static int btf_check_all_metas(struct btf_verifier_env *env)
 
        hdr = &btf->hdr;
        cur = btf->nohdr_data + hdr->type_off;
-       end = btf->nohdr_data + hdr->type_len;
+       end = cur + hdr->type_len;
 
        env->log_type_id = 1;
        while (cur < end) {
index 04b8eda94e7d5f3f6c5b89655d808653c7479bbd..03cc59ee9c9536b885d605027af093c3dd0634ee 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/jhash.h>
 #include <linux/filter.h>
 #include <linux/rculist_nulls.h>
+#include <linux/random.h>
 #include <uapi/linux/btf.h>
 #include "percpu_freelist.h"
 #include "bpf_lru_list.h"
@@ -41,6 +42,7 @@ struct bpf_htab {
        atomic_t count; /* number of elements in this hashtable */
        u32 n_buckets;  /* number of hash buckets */
        u32 elem_size;  /* size of each element in bytes */
+       u32 hashrnd;
 };
 
 /* each htab element is struct htab_elem + key + value */
@@ -371,6 +373,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
        if (!htab->buckets)
                goto free_htab;
 
+       htab->hashrnd = get_random_int();
        for (i = 0; i < htab->n_buckets; i++) {
                INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
                raw_spin_lock_init(&htab->buckets[i].lock);
@@ -402,9 +405,9 @@ free_htab:
        return ERR_PTR(err);
 }
 
-static inline u32 htab_map_hash(const void *key, u32 key_len)
+static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd)
 {
-       return jhash(key, key_len, 0);
+       return jhash(key, key_len, hashrnd);
 }
 
 static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
@@ -470,7 +473,7 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
 
        key_size = map->key_size;
 
-       hash = htab_map_hash(key, key_size);
+       hash = htab_map_hash(key, key_size, htab->hashrnd);
 
        head = select_bucket(htab, hash);
 
@@ -597,7 +600,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
        if (!key)
                goto find_first_elem;
 
-       hash = htab_map_hash(key, key_size);
+       hash = htab_map_hash(key, key_size, htab->hashrnd);
 
        head = select_bucket(htab, hash);
 
@@ -824,7 +827,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 
        key_size = map->key_size;
 
-       hash = htab_map_hash(key, key_size);
+       hash = htab_map_hash(key, key_size, htab->hashrnd);
 
        b = __select_bucket(htab, hash);
        head = &b->head;
@@ -880,7 +883,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
 
        key_size = map->key_size;
 
-       hash = htab_map_hash(key, key_size);
+       hash = htab_map_hash(key, key_size, htab->hashrnd);
 
        b = __select_bucket(htab, hash);
        head = &b->head;
@@ -945,7 +948,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 
        key_size = map->key_size;
 
-       hash = htab_map_hash(key, key_size);
+       hash = htab_map_hash(key, key_size, htab->hashrnd);
 
        b = __select_bucket(htab, hash);
        head = &b->head;
@@ -998,7 +1001,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 
        key_size = map->key_size;
 
-       hash = htab_map_hash(key, key_size);
+       hash = htab_map_hash(key, key_size, htab->hashrnd);
 
        b = __select_bucket(htab, hash);
        head = &b->head;
@@ -1071,7 +1074,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
 
        key_size = map->key_size;
 
-       hash = htab_map_hash(key, key_size);
+       hash = htab_map_hash(key, key_size, htab->hashrnd);
        b = __select_bucket(htab, hash);
        head = &b->head;
 
@@ -1103,7 +1106,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
 
        key_size = map->key_size;
 
-       hash = htab_map_hash(key, key_size);
+       hash = htab_map_hash(key, key_size, htab->hashrnd);
        b = __select_bucket(htab, hash);
        head = &b->head;
 
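
The kernel/bpf/hashtab.c change above seeds the bucket hash with a per-map random value (hashrnd) chosen at map creation instead of a constant zero, so bucket placement can no longer be predicted across maps. A rough stand-alone sketch of the idea, with a seeded FNV-1a hash standing in for jhash() and rand() standing in for get_random_int():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

/* Seeded FNV-1a, standing in for jhash(key, len, seed). */
static uint32_t hash_seeded(const void *key, size_t len, uint32_t seed)
{
        const unsigned char *p = key;
        uint32_t h = 2166136261u ^ seed;

        while (len--) {
                h ^= *p++;
                h *= 16777619u;
        }
        return h;
}

struct htab {
        uint32_t n_buckets;     /* must be a power of two for the mask below */
        uint32_t hashrnd;       /* per-table random seed, as in the hunk above */
};

static void htab_init(struct htab *t, uint32_t n_buckets)
{
        t->n_buckets = n_buckets;
        t->hashrnd = (uint32_t)rand();  /* get_random_int() stand-in */
}

static uint32_t htab_bucket(const struct htab *t, const char *key)
{
        return hash_seeded(key, strlen(key), t->hashrnd) & (t->n_buckets - 1);
}

int main(void)
{
        struct htab a, b;

        srand((unsigned)time(NULL));
        htab_init(&a, 16);
        htab_init(&b, 16);

        /* The same key usually lands in different buckets in different maps. */
        printf("bucket in a: %u, bucket in b: %u\n",
               htab_bucket(&a, "key"), htab_bucket(&b, "key"));
        return 0;
}
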
index 98e621a29e8e6953ec9dec5b4cb6f8559dd750d3..488ef9663c01f3b4d2cc44b9ca88863ced2860e7 100644 (file)
@@ -236,7 +236,7 @@ static int bpf_tcp_init(struct sock *sk)
 }
 
 static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
-static int free_start_sg(struct sock *sk, struct sk_msg_buff *md);
+static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge);
 
 static void bpf_tcp_release(struct sock *sk)
 {
@@ -248,7 +248,7 @@ static void bpf_tcp_release(struct sock *sk)
                goto out;
 
        if (psock->cork) {
-               free_start_sg(psock->sock, psock->cork);
+               free_start_sg(psock->sock, psock->cork, true);
                kfree(psock->cork);
                psock->cork = NULL;
        }
@@ -330,14 +330,14 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
        close_fun = psock->save_close;
 
        if (psock->cork) {
-               free_start_sg(psock->sock, psock->cork);
+               free_start_sg(psock->sock, psock->cork, true);
                kfree(psock->cork);
                psock->cork = NULL;
        }
 
        list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
                list_del(&md->list);
-               free_start_sg(psock->sock, md);
+               free_start_sg(psock->sock, md, true);
                kfree(md);
        }
 
@@ -369,7 +369,7 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
                        /* If another thread deleted this object skip deletion.
                         * The refcnt on psock may or may not be zero.
                         */
-                       if (l) {
+                       if (l && l == link) {
                                hlist_del_rcu(&link->hash_node);
                                smap_release_sock(psock, link->sk);
                                free_htab_elem(htab, link);
@@ -570,14 +570,16 @@ static void free_bytes_sg(struct sock *sk, int bytes,
        md->sg_start = i;
 }
 
-static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
+static int free_sg(struct sock *sk, int start,
+                  struct sk_msg_buff *md, bool charge)
 {
        struct scatterlist *sg = md->sg_data;
        int i = start, free = 0;
 
        while (sg[i].length) {
                free += sg[i].length;
-               sk_mem_uncharge(sk, sg[i].length);
+               if (charge)
+                       sk_mem_uncharge(sk, sg[i].length);
                if (!md->skb)
                        put_page(sg_page(&sg[i]));
                sg[i].length = 0;
@@ -594,9 +596,9 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
        return free;
 }
 
-static int free_start_sg(struct sock *sk, struct sk_msg_buff *md)
+static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge)
 {
-       int free = free_sg(sk, md->sg_start, md);
+       int free = free_sg(sk, md->sg_start, md, charge);
 
        md->sg_start = md->sg_end;
        return free;
@@ -604,7 +606,7 @@ static int free_start_sg(struct sock *sk, struct sk_msg_buff *md)
 
 static int free_curr_sg(struct sock *sk, struct sk_msg_buff *md)
 {
-       return free_sg(sk, md->sg_curr, md);
+       return free_sg(sk, md->sg_curr, md, true);
 }
 
 static int bpf_map_msg_verdict(int _rc, struct sk_msg_buff *md)
@@ -718,7 +720,7 @@ static int bpf_tcp_ingress(struct sock *sk, int apply_bytes,
                list_add_tail(&r->list, &psock->ingress);
                sk->sk_data_ready(sk);
        } else {
-               free_start_sg(sk, r);
+               free_start_sg(sk, r, true);
                kfree(r);
        }
 
@@ -752,14 +754,10 @@ static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
                release_sock(sk);
        }
        smap_release_sock(psock, sk);
-       if (unlikely(err))
-               goto out;
-       return 0;
+       return err;
 out_rcu:
        rcu_read_unlock();
-out:
-       free_bytes_sg(NULL, send, md, false);
-       return err;
+       return 0;
 }
 
 static inline void bpf_md_init(struct smap_psock *psock)
@@ -822,7 +820,7 @@ more_data:
        case __SK_PASS:
                err = bpf_tcp_push(sk, send, m, flags, true);
                if (unlikely(err)) {
-                       *copied -= free_start_sg(sk, m);
+                       *copied -= free_start_sg(sk, m, true);
                        break;
                }
 
@@ -845,16 +843,17 @@ more_data:
                lock_sock(sk);
 
                if (unlikely(err < 0)) {
-                       free_start_sg(sk, m);
+                       int free = free_start_sg(sk, m, false);
+
                        psock->sg_size = 0;
                        if (!cork)
-                               *copied -= send;
+                               *copied -= free;
                } else {
                        psock->sg_size -= send;
                }
 
                if (cork) {
-                       free_start_sg(sk, m);
+                       free_start_sg(sk, m, true);
                        psock->sg_size = 0;
                        kfree(m);
                        m = NULL;
@@ -912,6 +911,8 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 
        if (unlikely(flags & MSG_ERRQUEUE))
                return inet_recv_error(sk, msg, len, addr_len);
+       if (!skb_queue_empty(&sk->sk_receive_queue))
+               return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
 
        rcu_read_lock();
        psock = smap_psock_sk(sk);
@@ -922,9 +923,6 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                goto out;
        rcu_read_unlock();
 
-       if (!skb_queue_empty(&sk->sk_receive_queue))
-               return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
-
        lock_sock(sk);
 bytes_ready:
        while (copied != len) {
@@ -1122,7 +1120,7 @@ wait_for_memory:
                err = sk_stream_wait_memory(sk, &timeo);
                if (err) {
                        if (m && m != psock->cork)
-                               free_start_sg(sk, m);
+                               free_start_sg(sk, m, true);
                        goto out_err;
                }
        }
@@ -1427,12 +1425,15 @@ out:
 static void smap_write_space(struct sock *sk)
 {
        struct smap_psock *psock;
+       void (*write_space)(struct sock *sk);
 
        rcu_read_lock();
        psock = smap_psock_sk(sk);
        if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
                schedule_work(&psock->tx_work);
+       write_space = psock->save_write_space;
        rcu_read_unlock();
+       write_space(sk);
 }
 
 static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
@@ -1461,10 +1462,16 @@ static void smap_destroy_psock(struct rcu_head *rcu)
        schedule_work(&psock->gc_work);
 }
 
+static bool psock_is_smap_sk(struct sock *sk)
+{
+       return inet_csk(sk)->icsk_ulp_ops == &bpf_tcp_ulp_ops;
+}
+
 static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
 {
        if (refcount_dec_and_test(&psock->refcnt)) {
-               tcp_cleanup_ulp(sock);
+               if (psock_is_smap_sk(sock))
+                       tcp_cleanup_ulp(sock);
                write_lock_bh(&sock->sk_callback_lock);
                smap_stop_sock(psock, sock);
                write_unlock_bh(&sock->sk_callback_lock);
@@ -1578,13 +1585,13 @@ static void smap_gc_work(struct work_struct *w)
                bpf_prog_put(psock->bpf_tx_msg);
 
        if (psock->cork) {
-               free_start_sg(psock->sock, psock->cork);
+               free_start_sg(psock->sock, psock->cork, true);
                kfree(psock->cork);
        }
 
        list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
                list_del(&md->list);
-               free_start_sg(psock->sock, md);
+               free_start_sg(psock->sock, md, true);
                kfree(md);
        }
 
@@ -1891,6 +1898,10 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
         * doesn't update user data.
         */
        if (psock) {
+               if (!psock_is_smap_sk(sock)) {
+                       err = -EBUSY;
+                       goto out_progs;
+               }
                if (READ_ONCE(psock->bpf_parse) && parse) {
                        err = -EBUSY;
                        goto out_progs;
@@ -2140,7 +2151,9 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
                return ERR_PTR(-EPERM);
 
        /* check sanity of attributes */
-       if (attr->max_entries == 0 || attr->value_size != 4 ||
+       if (attr->max_entries == 0 ||
+           attr->key_size == 0 ||
+           attr->value_size != 4 ||
            attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
                return ERR_PTR(-EINVAL);
 
@@ -2267,8 +2280,10 @@ static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab,
        }
        l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
                             htab->map.numa_node);
-       if (!l_new)
+       if (!l_new) {
+               atomic_dec(&htab->count);
                return ERR_PTR(-ENOMEM);
+       }
 
        memcpy(l_new->key, key, key_size);
        l_new->sk = sk;
index 92246117d2b0395e5eec8329455e7ee7c8320d58..bb07e74b34a225f1b21cc61ca0a43c848d3d532f 100644 (file)
@@ -3163,7 +3163,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
                                 * an arbitrary scalar. Disallow all math except
                                 * pointer subtraction
                                 */
-                               if (opcode == BPF_SUB){
+                               if (opcode == BPF_SUB && env->allow_ptr_leaks) {
                                        mark_reg_unknown(env, regs, insn->dst_reg);
                                        return 0;
                                }
index ed44d7d34c2d9bfc08093af4854dc78bf7232b21..0097acec1c717dee6948e50b57ab4c4bb0c15ac7 100644 (file)
@@ -102,8 +102,6 @@ static inline void cpuhp_lock_release(bool bringup) { }
  * @name:      Name of the step
  * @startup:   Startup function of the step
  * @teardown:  Teardown function of the step
- * @skip_onerr:        Do not invoke the functions on error rollback
- *             Will go away once the notifiers are gone
  * @cant_stop: Bringup/teardown can't be stopped at this step
  */
 struct cpuhp_step {
@@ -119,7 +117,6 @@ struct cpuhp_step {
                                         struct hlist_node *node);
        } teardown;
        struct hlist_head       list;
-       bool                    skip_onerr;
        bool                    cant_stop;
        bool                    multi_instance;
 };
@@ -550,12 +547,8 @@ static int bringup_cpu(unsigned int cpu)
 
 static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
 {
-       for (st->state--; st->state > st->target; st->state--) {
-               struct cpuhp_step *step = cpuhp_get_step(st->state);
-
-               if (!step->skip_onerr)
-                       cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
-       }
+       for (st->state--; st->state > st->target; st->state--)
+               cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
 }
 
 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
@@ -614,15 +607,15 @@ static void cpuhp_thread_fun(unsigned int cpu)
        bool bringup = st->bringup;
        enum cpuhp_state state;
 
+       if (WARN_ON_ONCE(!st->should_run))
+               return;
+
        /*
         * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
         * that if we see ->should_run we also see the rest of the state.
         */
        smp_mb();
 
-       if (WARN_ON_ONCE(!st->should_run))
-               return;
-
        cpuhp_lock_acquire(bringup);
 
        if (st->single) {
@@ -644,12 +637,6 @@ static void cpuhp_thread_fun(unsigned int cpu)
 
        WARN_ON_ONCE(!cpuhp_is_ap_state(state));
 
-       if (st->rollback) {
-               struct cpuhp_step *step = cpuhp_get_step(state);
-               if (step->skip_onerr)
-                       goto next;
-       }
-
        if (cpuhp_is_atomic_state(state)) {
                local_irq_disable();
                st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
@@ -673,7 +660,6 @@ static void cpuhp_thread_fun(unsigned int cpu)
                st->should_run = false;
        }
 
-next:
        cpuhp_lock_release(bringup);
 
        if (!st->should_run)
@@ -916,12 +902,8 @@ void cpuhp_report_idle_dead(void)
 
 static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
 {
-       for (st->state++; st->state < st->target; st->state++) {
-               struct cpuhp_step *step = cpuhp_get_step(st->state);
-
-               if (!step->skip_onerr)
-                       cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
-       }
+       for (st->state++; st->state < st->target; st->state++)
+               cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
 }
 
 static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
@@ -934,7 +916,8 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
                if (ret) {
                        st->target = prev_state;
-                       undo_cpu_down(cpu, st);
+                       if (st->state < prev_state)
+                               undo_cpu_down(cpu, st);
                        break;
                }
        }
@@ -987,7 +970,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
         * to do the further cleanups.
         */
        ret = cpuhp_down_callbacks(cpu, st, target);
-       if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
+       if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
                cpuhp_reset_state(st, prev_state);
                __cpuhp_kick_ap(st);
        }
index 1c35b7b945d034d320fb942109bb218c9c1eff8c..de87b0282e7420feaf4282e7ba835074b61512a7 100644 (file)
@@ -168,7 +168,7 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 int dma_direct_supported(struct device *dev, u64 mask)
 {
 #ifdef CONFIG_ZONE_DMA
-       if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
+       if (mask < phys_to_dma(dev, DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)))
                return 0;
 #else
        /*
@@ -177,7 +177,7 @@ int dma_direct_supported(struct device *dev, u64 mask)
         * memory, or by providing a ZONE_DMA32.  If neither is the case, the
         * architecture needs to use an IOMMU instead of the direct mapping.
         */
-       if (mask < DMA_BIT_MASK(32))
+       if (mask < phys_to_dma(dev, DMA_BIT_MASK(32)))
                return 0;
 #endif
        /*
index 2a62b96600ad91dede076a2dc8ad4e7fc16afe09..c80549bf82c6628fea20d230edc8b824e9f36f4d 100644 (file)
@@ -2867,16 +2867,11 @@ static int perf_event_modify_breakpoint(struct perf_event *bp,
        _perf_event_disable(bp);
 
        err = modify_user_hw_breakpoint_check(bp, attr, true);
-       if (err) {
-               if (!bp->attr.disabled)
-                       _perf_event_enable(bp);
 
-               return err;
-       }
-
-       if (!attr->disabled)
+       if (!bp->attr.disabled)
                _perf_event_enable(bp);
-       return 0;
+
+       return err;
 }
 
 static int perf_event_modify_attr(struct perf_event *event,
@@ -5948,6 +5943,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
                unsigned long sp;
                unsigned int rem;
                u64 dyn_size;
+               mm_segment_t fs;
 
                /*
                 * We dump:
@@ -5965,7 +5961,10 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
 
                /* Data. */
                sp = perf_user_stack_pointer(regs);
+               fs = get_fs();
+               set_fs(USER_DS);
                rem = __output_copy_user(handle, (void *) sp, dump_size);
+               set_fs(fs);
                dyn_size = dump_size - rem;
 
                perf_output_skip(handle, rem);
index b3814fce5ecb6bf7729ea858ec17ddb9018105f1..d6b56180827c73f29fa21eef238c73c1a01540ad 100644 (file)
@@ -509,6 +509,8 @@ modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *a
  */
 int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
 {
+       int err;
+
        /*
         * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
         * will not be possible to raise IPIs that invoke __perf_event_disable.
@@ -520,15 +522,12 @@ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *att
        else
                perf_event_disable(bp);
 
-       if (!attr->disabled) {
-               int err = modify_user_hw_breakpoint_check(bp, attr, false);
+       err = modify_user_hw_breakpoint_check(bp, attr, false);
 
-               if (err)
-                       return err;
+       if (!bp->attr.disabled)
                perf_event_enable(bp);
-               bp->attr.disabled = 0;
-       }
-       return 0;
+
+       return err;
 }
 EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
 
index d896e9ca38b0cccc5de00a0564ecd22179fa2c06..f0b58479534f07dfd55d57a117b4055d80c829b5 100644 (file)
@@ -550,8 +550,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
                        goto out;
        }
        /* a new mm has just been created */
-       arch_dup_mmap(oldmm, mm);
-       retval = 0;
+       retval = arch_dup_mmap(oldmm, mm);
 out:
        up_write(&mm->mmap_sem);
        flush_tlb_mm(oldmm);
index 1f450e092c7416e3d43aba84da7b73b9a2a47b3d..11fc3bb456d65d666d1f80d062a11d3e00d41a3f 100644 (file)
@@ -3523,10 +3523,12 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
        switch (cmd) {
        case FUTEX_WAIT:
                val3 = FUTEX_BITSET_MATCH_ANY;
+               /* fall through */
        case FUTEX_WAIT_BITSET:
                return futex_wait(uaddr, flags, val, timeout, val3);
        case FUTEX_WAKE:
                val3 = FUTEX_BITSET_MATCH_ANY;
+               /* fall through */
        case FUTEX_WAKE_BITSET:
                return futex_wake(uaddr, flags, val, val3);
        case FUTEX_REQUEUE:
index 01ebdf1f9f40eb0b56810244bd115a62d07e88f2..2e62503bea0d7d4a598ff755bad1431bb166d790 100644 (file)
@@ -678,7 +678,7 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val,
        case MODULE_STATE_COMING:
                ret = jump_label_add_module(mod);
                if (ret) {
-                       WARN(1, "Failed to allocatote memory: jump_label may not work properly.\n");
+                       WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
                        jump_label_del_module(mod);
                }
                break;
index a23e21ada81b9dc6cbc8cd21c04805072bb98438..02a0b01380d8ef678bf544054cacf586292fb0e1 100644 (file)
@@ -432,6 +432,7 @@ int sprint_backtrace(char *buffer, unsigned long address)
 /* To avoid using get_symbol_offset for every symbol, we carry prefix along. */
 struct kallsym_iter {
        loff_t pos;
+       loff_t pos_arch_end;
        loff_t pos_mod_end;
        loff_t pos_ftrace_mod_end;
        unsigned long value;
@@ -443,9 +444,29 @@ struct kallsym_iter {
        int show_value;
 };
 
+int __weak arch_get_kallsym(unsigned int symnum, unsigned long *value,
+                           char *type, char *name)
+{
+       return -EINVAL;
+}
+
+static int get_ksymbol_arch(struct kallsym_iter *iter)
+{
+       int ret = arch_get_kallsym(iter->pos - kallsyms_num_syms,
+                                  &iter->value, &iter->type,
+                                  iter->name);
+
+       if (ret < 0) {
+               iter->pos_arch_end = iter->pos;
+               return 0;
+       }
+
+       return 1;
+}
+
 static int get_ksymbol_mod(struct kallsym_iter *iter)
 {
-       int ret = module_get_kallsym(iter->pos - kallsyms_num_syms,
+       int ret = module_get_kallsym(iter->pos - iter->pos_arch_end,
                                     &iter->value, &iter->type,
                                     iter->name, iter->module_name,
                                     &iter->exported);
@@ -501,32 +522,34 @@ static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
        iter->nameoff = get_symbol_offset(new_pos);
        iter->pos = new_pos;
        if (new_pos == 0) {
+               iter->pos_arch_end = 0;
                iter->pos_mod_end = 0;
                iter->pos_ftrace_mod_end = 0;
        }
 }
 
+/*
+ * The end position (last + 1) of each additional kallsyms section is recorded
+ * in iter->pos_..._end as each section is added, and so can be used to
+ * determine which get_ksymbol_...() function to call next.
+ */
 static int update_iter_mod(struct kallsym_iter *iter, loff_t pos)
 {
        iter->pos = pos;
 
-       if (iter->pos_ftrace_mod_end > 0 &&
-           iter->pos_ftrace_mod_end < iter->pos)
-               return get_ksymbol_bpf(iter);
+       if ((!iter->pos_arch_end || iter->pos_arch_end > pos) &&
+           get_ksymbol_arch(iter))
+               return 1;
 
-       if (iter->pos_mod_end > 0 &&
-           iter->pos_mod_end < iter->pos) {
-               if (!get_ksymbol_ftrace_mod(iter))
-                       return get_ksymbol_bpf(iter);
+       if ((!iter->pos_mod_end || iter->pos_mod_end > pos) &&
+           get_ksymbol_mod(iter))
                return 1;
-       }
 
-       if (!get_ksymbol_mod(iter)) {
-               if (!get_ksymbol_ftrace_mod(iter))
-                       return get_ksymbol_bpf(iter);
-       }
+       if ((!iter->pos_ftrace_mod_end || iter->pos_ftrace_mod_end > pos) &&
+           get_ksymbol_ftrace_mod(iter))
+               return 1;
 
-       return 1;
+       return get_ksymbol_bpf(iter);
 }
 
 /* Returns false if pos at or past end of file. */
index e406c5fdb41e9423aca5fd8db32ec8e692983d3b..dd13f865ad40e985553217b5e557aaaac5eaa229 100644 (file)
@@ -55,7 +55,6 @@
 
 #include "lockdep_internals.h"
 
-#include <trace/events/preemptirq.h>
 #define CREATE_TRACE_POINTS
 #include <trace/events/lock.h>
 
index 1a81a1257b3f3aca67e00bbe9a3a0b4e465b0e4f..3f8a35104285ab29fe2e9992430e75b6aefbff89 100644 (file)
@@ -389,7 +389,7 @@ static bool __ww_mutex_wound(struct mutex *lock,
                /*
                 * wake_up_process() paired with set_current_state()
                 * inserts sufficient barriers to make sure @owner either sees
-                * it's wounded in __ww_mutex_lock_check_stamp() or has a
+                * it's wounded in __ww_mutex_check_kill() or has a
                 * wakeup pending to re-read the wounded state.
                 */
                if (owner != current)
@@ -946,7 +946,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
        }
 
        debug_mutex_lock_common(lock, &waiter);
-       debug_mutex_add_waiter(lock, &waiter, current);
 
        lock_contended(&lock->dep_map, ip);
 
index 5b915b370d5a833fcf087bec26f06b8d07a59cda..0be047dbd8971dcd4ee1282936d3b3056f6ebfea 100644 (file)
@@ -324,7 +324,7 @@ static int __test_cycle(unsigned int nthreads)
                if (!cycle->result)
                        continue;
 
-               pr_err("cylic deadlock not resolved, ret[%d/%d] = %d\n",
+               pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
                       n, nthreads, cycle->result);
                ret = -EINVAL;
                break;
index d57d58f77409214cf93ece9454354242ffd8dd85..5b8600d39931964adcef966b9660aceac5918124 100644 (file)
@@ -365,7 +365,6 @@ void __put_devmap_managed_page(struct page *page)
                __ClearPageActive(page);
                __ClearPageWaiters(page);
 
-               page->mapping = NULL;
                mem_cgroup_uncharge(page);
 
                page->pgmap->page_free(page, page->pgmap->data);
index de1cfc4f75a2eb8df16e3d1018122226a1a30f14..cdf63e53a01425182d66f1c8883a249cedbaaa88 100644 (file)
@@ -195,7 +195,7 @@ struct pid *alloc_pid(struct pid_namespace *ns)
                idr_preload_end();
 
                if (nr < 0) {
-                       retval = nr;
+                       retval = (nr == -ENOSPC) ? -EAGAIN : nr;
                        goto out_free;
                }
 
index 924e37fb1620b5440e3265b4cc3465dad50475c1..9bf5404397e0b224873456617437fc172617a637 100644 (file)
@@ -38,7 +38,6 @@
 #include <linux/kmsg_dump.h>
 #include <linux/syslog.h>
 #include <linux/cpu.h>
-#include <linux/notifier.h>
 #include <linux/rculist.h>
 #include <linux/poll.h>
 #include <linux/irq_work.h>
@@ -352,7 +351,6 @@ static int console_msg_format = MSG_FORMAT_DEFAULT;
  */
 
 enum log_flags {
-       LOG_NOCONS      = 1,    /* suppress print, do not print to console */
        LOG_NEWLINE     = 2,    /* text ended with a newline */
        LOG_PREFIX      = 4,    /* text started with a prefix */
        LOG_CONT        = 8,    /* text is a fragment of a continuation line */
@@ -1882,9 +1880,6 @@ int vprintk_store(int facility, int level,
        if (dict)
                lflags |= LOG_PREFIX|LOG_NEWLINE;
 
-       if (suppress_message_printing(level))
-               lflags |= LOG_NOCONS;
-
        return log_output(facility, level, lflags,
                          dict, dictlen, text, text_len);
 }
@@ -2033,6 +2028,7 @@ static void call_console_drivers(const char *ext_text, size_t ext_len,
                                 const char *text, size_t len) {}
 static size_t msg_print_text(const struct printk_log *msg,
                             bool syslog, char *buf, size_t size) { return 0; }
+static bool suppress_message_printing(int level) { return false; }
 
 #endif /* CONFIG_PRINTK */
 
@@ -2369,10 +2365,11 @@ skip:
                        break;
 
                msg = log_from_idx(console_idx);
-               if (msg->flags & LOG_NOCONS) {
+               if (suppress_message_printing(msg->level)) {
                        /*
-                        * Skip record if !ignore_loglevel, and
-                        * record has level above the console loglevel.
+                        * Skip record we have buffered and already printed
+                        * directly to the console when we received it, and
+                        * record that has level above the console loglevel.
                         */
                        console_idx = log_next(console_idx);
                        console_seq++;
index a0a74c533e4b755ea73b876b51e3839c272c19cf..0913b4d385de307049eead641cf0bba8ea9e7695 100644 (file)
@@ -306,12 +306,12 @@ static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
        return printk_safe_log_store(s, fmt, args);
 }
 
-void printk_nmi_enter(void)
+void notrace printk_nmi_enter(void)
 {
        this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
 }
 
-void printk_nmi_exit(void)
+void notrace printk_nmi_exit(void)
 {
        this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
 }
index 60caf1fb94e05778067a06200ae1a66a255f75bb..6383aa6a60ca02b255077ac98e33589e0fddb3fe 100644 (file)
@@ -89,12 +89,12 @@ struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
 
 static void sched_feat_disable(int i)
 {
-       static_key_disable(&sched_feat_keys[i]);
+       static_key_disable_cpuslocked(&sched_feat_keys[i]);
 }
 
 static void sched_feat_enable(int i)
 {
-       static_key_enable(&sched_feat_keys[i]);
+       static_key_enable_cpuslocked(&sched_feat_keys[i]);
 }
 #else
 static void sched_feat_disable(int i) { };
@@ -146,9 +146,11 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
 
        /* Ensure the static_key remains in a consistent state */
        inode = file_inode(filp);
+       cpus_read_lock();
        inode_lock(inode);
        ret = sched_feat_set(cmp);
        inode_unlock(inode);
+       cpus_read_unlock();
        if (ret < 0)
                return ret;
 
index b39fb596f6c1e8e1db95702080df00908d16df86..f808ddf2a868e7dbfbac8d7bcf5809b876dc51c9 100644 (file)
@@ -3362,6 +3362,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
  * attach_entity_load_avg - attach this entity to its cfs_rq load avg
  * @cfs_rq: cfs_rq to attach to
  * @se: sched_entity to attach
+ * @flags: migration hints
  *
  * Must call update_cfs_rq_load_avg() before this, since we rely on
  * cfs_rq->avg.last_update_time being current.
@@ -7263,6 +7264,7 @@ static void update_blocked_averages(int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
        struct cfs_rq *cfs_rq, *pos;
+       const struct sched_class *curr_class;
        struct rq_flags rf;
        bool done = true;
 
@@ -7299,8 +7301,10 @@ static void update_blocked_averages(int cpu)
                if (cfs_rq_has_blocked(cfs_rq))
                        done = false;
        }
-       update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
-       update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+
+       curr_class = rq->curr->sched_class;
+       update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class);
+       update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class);
        update_irq_load_avg(rq, 0);
        /* Don't need periodic decay once load/util_avg are null */
        if (others_have_blocked(rq))
@@ -7365,13 +7369,16 @@ static inline void update_blocked_averages(int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
        struct cfs_rq *cfs_rq = &rq->cfs;
+       const struct sched_class *curr_class;
        struct rq_flags rf;
 
        rq_lock_irqsave(rq, &rf);
        update_rq_clock(rq);
        update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
-       update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
-       update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+
+       curr_class = rq->curr->sched_class;
+       update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class);
+       update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class);
        update_irq_load_avg(rq, 0);
 #ifdef CONFIG_NO_HZ_COMMON
        rq->last_blocked_load_update_tick = jiffies;
@@ -7482,10 +7489,10 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
        return load_idx;
 }
 
-static unsigned long scale_rt_capacity(int cpu)
+static unsigned long scale_rt_capacity(struct sched_domain *sd, int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
-       unsigned long max = arch_scale_cpu_capacity(NULL, cpu);
+       unsigned long max = arch_scale_cpu_capacity(sd, cpu);
        unsigned long used, free;
        unsigned long irq;
 
@@ -7507,7 +7514,7 @@ static unsigned long scale_rt_capacity(int cpu)
 
 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 {
-       unsigned long capacity = scale_rt_capacity(cpu);
+       unsigned long capacity = scale_rt_capacity(sd, cpu);
        struct sched_group *sdg = sd->groups;
 
        cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(sd, cpu);
@@ -8269,7 +8276,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 force_balance:
        /* Looks like there is an imbalance. Compute it */
        calculate_imbalance(env, &sds);
-       return sds.busiest;
+       return env->imbalance ? sds.busiest : NULL;
 
 out_balanced:
        env->imbalance = 0;
@@ -9638,7 +9645,8 @@ static inline bool vruntime_normalized(struct task_struct *p)
         * - A task which has been woken up by try_to_wake_up() and
         *   waiting for actually being woken up by sched_ttwu_pending().
         */
-       if (!se->sum_exec_runtime || p->state == TASK_WAKING)
+       if (!se->sum_exec_runtime ||
+           (p->state == TASK_WAKING && p->sched_remote_wakeup))
                return true;
 
        return false;
index 56a0fed30c0a8786a47c600e90c4016140b1fb58..505a41c42b96107247e8b4a1f7259d4d883e4ec2 100644 (file)
@@ -1295,7 +1295,7 @@ static void init_numa_topology_type(void)
 
        n = sched_max_numa_distance;
 
-       if (sched_domains_numa_levels <= 1) {
+       if (sched_domains_numa_levels <= 2) {
                sched_numa_topology_type = NUMA_DIRECT;
                return;
        }
@@ -1380,9 +1380,6 @@ void sched_init_numa(void)
                        break;
        }
 
-       if (!level)
-               return;
-
        /*
         * 'level' contains the number of unique distances
         *
index cf5c67533ff1e1c02e296ed34d19501dd5f794dd..123bd73046ec6b19324452f0b9563617d7e40630 100644 (file)
@@ -71,9 +71,6 @@
 #include <asm/io.h>
 #include <asm/unistd.h>
 
-/* Hardening for Spectre-v1 */
-#include <linux/nospec.h>
-
 #include "uid16.h"
 
 #ifndef SET_UNALIGN_CTL
index f74fb00d806444739f9d8ee1611a98c694325f95..0e6e97a01942d956e6135025003371537ca28ab0 100644 (file)
@@ -133,19 +133,40 @@ static void inline clocksource_watchdog_unlock(unsigned long *flags)
        spin_unlock_irqrestore(&watchdog_lock, *flags);
 }
 
+static int clocksource_watchdog_kthread(void *data);
+static void __clocksource_change_rating(struct clocksource *cs, int rating);
+
 /*
  * Interval: 0.5sec Threshold: 0.0625s
  */
 #define WATCHDOG_INTERVAL (HZ >> 1)
 #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
 
+static void clocksource_watchdog_work(struct work_struct *work)
+{
+       /*
+        * We cannot directly run clocksource_watchdog_kthread() here, because
+        * clocksource_select() calls timekeeping_notify() which uses
+        * stop_machine(). One cannot use stop_machine() from a workqueue() due
+        * lock inversions wrt CPU hotplug.
+        *
+        * Also, we only ever run this work once or twice during the lifetime
+        * of the kernel, so there is no point in creating a more permanent
+        * kthread for this.
+        *
+        * If kthread_run fails the next watchdog scan over the
+        * watchdog_list will find the unstable clock again.
+        */
+       kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
+}
+
 static void __clocksource_unstable(struct clocksource *cs)
 {
        cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
        cs->flags |= CLOCK_SOURCE_UNSTABLE;
 
        /*
-        * If the clocksource is registered clocksource_watchdog_work() will
+        * If the clocksource is registered clocksource_watchdog_kthread() will
         * re-rate and re-select.
         */
        if (list_empty(&cs->list)) {
@@ -156,7 +177,7 @@ static void __clocksource_unstable(struct clocksource *cs)
        if (cs->mark_unstable)
                cs->mark_unstable(cs);
 
-       /* kick clocksource_watchdog_work() */
+       /* kick clocksource_watchdog_kthread() */
        if (finished_booting)
                schedule_work(&watchdog_work);
 }
@@ -166,7 +187,7 @@ static void __clocksource_unstable(struct clocksource *cs)
  * @cs:                clocksource to be marked unstable
  *
  * This function is called by the x86 TSC code to mark clocksources as unstable;
- * it defers demotion and re-selection to a work.
+ * it defers demotion and re-selection to a kthread.
  */
 void clocksource_mark_unstable(struct clocksource *cs)
 {
@@ -391,9 +412,7 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
        }
 }
 
-static void __clocksource_change_rating(struct clocksource *cs, int rating);
-
-static int __clocksource_watchdog_work(void)
+static int __clocksource_watchdog_kthread(void)
 {
        struct clocksource *cs, *tmp;
        unsigned long flags;
@@ -418,12 +437,13 @@ static int __clocksource_watchdog_work(void)
        return select;
 }
 
-static void clocksource_watchdog_work(struct work_struct *work)
+static int clocksource_watchdog_kthread(void *data)
 {
        mutex_lock(&clocksource_mutex);
-       if (__clocksource_watchdog_work())
+       if (__clocksource_watchdog_kthread())
                clocksource_select();
        mutex_unlock(&clocksource_mutex);
+       return 0;
 }
 
 static bool clocksource_is_watchdog(struct clocksource *cs)
@@ -442,7 +462,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
 static void clocksource_select_watchdog(bool fallback) { }
 static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
 static inline void clocksource_resume_watchdog(void) { }
-static inline int __clocksource_watchdog_work(void) { return 0; }
+static inline int __clocksource_watchdog_kthread(void) { return 0; }
 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
 void clocksource_mark_unstable(struct clocksource *cs) { }
 
@@ -810,7 +830,7 @@ static int __init clocksource_done_booting(void)
        /*
         * Run the watchdog first to eliminate unstable clock sources
         */
-       __clocksource_watchdog_work();
+       __clocksource_watchdog_kthread();
        clocksource_select();
        mutex_unlock(&clocksource_mutex);
        return 0;
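
The clocksource change above turns the watchdog work item into a launcher for a short-lived kthread, because re-rating ends in stop_machine(), which cannot be used from a workqueue due to lock inversions against CPU hotplug. A loose user-space analogue of that hand-off pattern (pthreads, not kernel code):

#include <pthread.h>
#include <stdio.h>

/* The heavy operation runs in its own short-lived thread. */
static void *rescan_thread(void *arg)
{
        (void)arg;
        puts("re-rating and re-selecting in a dedicated thread");
        return NULL;
}

/* Imagine this runs in a context where the heavy operation is forbidden. */
static void watchdog_callback(void)
{
        pthread_t t;

        if (pthread_create(&t, NULL, rescan_thread, NULL) == 0)
                pthread_detach(t);
        /* If thread creation fails, the next watchdog pass tries again,
         * mirroring the comment in the hunk above. */
}

int main(void)
{
        watchdog_callback();
        pthread_exit(NULL);     /* let the detached thread finish */
}
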
index 1d92d4a982fd468385b4b81b145d6e6353a766d4..65bd4616220db72b5e817cd0d1fdf1d4958a9501 100644 (file)
@@ -1546,6 +1546,8 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
        tmp_iter_page = first_page;
 
        do {
+               cond_resched();
+
                to_remove_page = tmp_iter_page;
                rb_inc_page(cpu_buffer, &tmp_iter_page);
 
index 5470dce212c0dbc9861d5d2108b926a473f89de4..977918d5d3501b1447781c91aedbe8fadad05ada 100644 (file)
@@ -261,7 +261,7 @@ static void __touch_watchdog(void)
  * entering idle state.  This should only be used for scheduler events.
  * Use touch_softlockup_watchdog() for everything else.
  */
-void touch_softlockup_watchdog_sched(void)
+notrace void touch_softlockup_watchdog_sched(void)
 {
        /*
         * Preemption can be enabled.  It doesn't matter which CPU's timestamp
@@ -270,7 +270,7 @@ void touch_softlockup_watchdog_sched(void)
        raw_cpu_write(watchdog_touch_ts, 0);
 }
 
-void touch_softlockup_watchdog(void)
+notrace void touch_softlockup_watchdog(void)
 {
        touch_softlockup_watchdog_sched();
        wq_watchdog_touch(raw_smp_processor_id());
index 1f7020d65d0aa2963708d3e44057e08ce2305ead..71381168dedef4e88382a1849412f554a4cb4a56 100644 (file)
@@ -29,7 +29,7 @@ static struct cpumask dead_events_mask;
 static unsigned long hardlockup_allcpu_dumped;
 static atomic_t watchdog_cpus = ATOMIC_INIT(0);
 
-void arch_touch_nmi_watchdog(void)
+notrace void arch_touch_nmi_watchdog(void)
 {
        /*
         * Using __raw here because some code paths have
index 60e80198c3df2af0b12df2e5edda9d6b2db80b88..0280deac392e25c833cda56a0d193507998b6de4 100644 (file)
@@ -5574,7 +5574,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
        mod_timer(&wq_watchdog_timer, jiffies + thresh);
 }
 
-void wq_watchdog_touch(int cpu)
+notrace void wq_watchdog_touch(int cpu)
 {
        if (cpu >= 0)
                per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
index 3589765141a875aa96d4b785296071e2f29aff11..4966c4fbe7f735a1eae8d7e5de7e96022cb02862 100644 (file)
@@ -1277,13 +1277,13 @@ config WARN_ALL_UNSEEDED_RANDOM
          time.  This is really bad from a security perspective, and
          so architecture maintainers really need to do what they can
          to get the CRNG seeded sooner after the system is booted.
-         However, since users can not do anything actionble to
+         However, since users cannot do anything actionable to
          address this, by default the kernel will issue only a single
          warning for the first use of unseeded randomness.
 
          Say Y here if you want to receive warnings for all uses of
          unseeded randomness.  This will be of use primarily for
-         those developers interersted in improving the security of
+         those developers interested in improving the security of
          Linux kernels running on their architecture (or
          subarchitecture).
 
@@ -1833,6 +1833,9 @@ config TEST_HASH
          This is intended to help people writing architecture-specific
          optimized versions.  If unsure, say N.
 
+config TEST_IDA
+       tristate "Perform selftest on IDA functions"
+
 config TEST_PARMAN
        tristate "Perform selftest on priority array manager"
        depends on PARMAN
index 9baefb6cb1a11a54073e9e9fec7f06eccc876b58..ca3f7ebb900d8eba9397fcc4c112dff9a6bdb05d 100644 (file)
@@ -50,6 +50,7 @@ obj-$(CONFIG_TEST_BPF) += test_bpf.o
 obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
 obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
 obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
+obj-$(CONFIG_TEST_IDA) += test_ida.o
 obj-$(CONFIG_TEST_KASAN) += test_kasan.o
 CFLAGS_test_kasan.o += -fno-builtin
 obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o
index ed9c169c12bdc966448d96b79de30f899e185a34..fab2fd5bc326bef8bdc9277485d9fe0d4ec50733 100644 (file)
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -317,18 +317,12 @@ EXPORT_SYMBOL(idr_replace);
  * bit per ID, and so is more space efficient than an IDR.  To use an IDA,
  * define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
  * then initialise it using ida_init()).  To allocate a new ID, call
- * ida_simple_get().  To free an ID, call ida_simple_remove().
+ * ida_alloc(), ida_alloc_min(), ida_alloc_max() or ida_alloc_range().
+ * To free an ID, call ida_free().
  *
- * If you have more complex locking requirements, use a loop around
- * ida_pre_get() and ida_get_new() to allocate a new ID.  Then use
- * ida_remove() to free an ID.  You must make sure that ida_get_new() and
- * ida_remove() cannot be called at the same time as each other for the
- * same IDA.
- *
- * You can also use ida_get_new_above() if you need an ID to be allocated
- * above a particular number.  ida_destroy() can be used to dispose of an
- * IDA without needing to free the individual IDs in it.  You can use
- * ida_is_empty() to find out whether the IDA has any IDs currently allocated.
+ * ida_destroy() can be used to dispose of an IDA without needing to
+ * free the individual IDs in it.  You can use ida_is_empty() to find
+ * out whether the IDA has any IDs currently allocated.
  *
  * IDs are currently limited to the range [0-INT_MAX].  If this is an awkward
  * limitation, it should be quite straightforward to raise the maximum.
@@ -369,25 +363,7 @@ EXPORT_SYMBOL(idr_replace);
 
 #define IDA_MAX (0x80000000U / IDA_BITMAP_BITS - 1)
 
-/**
- * ida_get_new_above - allocate new ID above or equal to a start id
- * @ida: ida handle
- * @start: id to start search at
- * @id: pointer to the allocated handle
- *
- * Allocate new ID above or equal to @start.  It should be called
- * with any required locks to ensure that concurrent calls to
- * ida_get_new_above() / ida_get_new() / ida_remove() are not allowed.
- * Consider using ida_simple_get() if you do not have complex locking
- * requirements.
- *
- * If memory is required, it will return %-EAGAIN, you should unlock
- * and go back to the ida_pre_get() call.  If the ida is full, it will
- * return %-ENOSPC.  On success, it will return 0.
- *
- * @id returns a value in the range @start ... %0x7fffffff.
- */
-int ida_get_new_above(struct ida *ida, int start, int *id)
+static int ida_get_new_above(struct ida *ida, int start)
 {
        struct radix_tree_root *root = &ida->ida_rt;
        void __rcu **slot;
@@ -426,8 +402,8 @@ int ida_get_new_above(struct ida *ida, int start, int *id)
                        if (ebit < BITS_PER_LONG) {
                                tmp |= 1UL << ebit;
                                rcu_assign_pointer(*slot, (void *)tmp);
-                               *id = new + ebit - RADIX_TREE_EXCEPTIONAL_SHIFT;
-                               return 0;
+                               return new + ebit -
+                                       RADIX_TREE_EXCEPTIONAL_SHIFT;
                        }
                        bitmap = this_cpu_xchg(ida_bitmap, NULL);
                        if (!bitmap)
@@ -458,8 +434,7 @@ int ida_get_new_above(struct ida *ida, int start, int *id)
                                                RADIX_TREE_EXCEPTIONAL_ENTRY);
                                radix_tree_iter_replace(root, &iter, slot,
                                                bitmap);
-                               *id = new;
-                               return 0;
+                               return new;
                        }
                        bitmap = this_cpu_xchg(ida_bitmap, NULL);
                        if (!bitmap)
@@ -468,20 +443,11 @@ int ida_get_new_above(struct ida *ida, int start, int *id)
                        radix_tree_iter_replace(root, &iter, slot, bitmap);
                }
 
-               *id = new;
-               return 0;
+               return new;
        }
 }
-EXPORT_SYMBOL(ida_get_new_above);
 
-/**
- * ida_remove - Free the given ID
- * @ida: ida handle
- * @id: ID to free
- *
- * This function should not be called at the same time as ida_get_new_above().
- */
-void ida_remove(struct ida *ida, int id)
+static void ida_remove(struct ida *ida, int id)
 {
        unsigned long index = id / IDA_BITMAP_BITS;
        unsigned offset = id % IDA_BITMAP_BITS;
@@ -518,99 +484,90 @@ void ida_remove(struct ida *ida, int id)
        }
        return;
  err:
-       WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
+       WARN(1, "ida_free called for id=%d which is not allocated.\n", id);
 }
-EXPORT_SYMBOL(ida_remove);
 
 /**
- * ida_destroy - Free the contents of an ida
- * @ida: ida handle
+ * ida_destroy() - Free all IDs.
+ * @ida: IDA handle.
+ *
+ * Calling this function frees all IDs and releases all resources used
+ * by an IDA.  When this call returns, the IDA is empty and can be reused
+ * or freed.  If the IDA is already empty, there is no need to call this
+ * function.
  *
- * Calling this function releases all resources associated with an IDA.  When
- * this call returns, the IDA is empty and can be reused or freed.  The caller
- * should not allow ida_remove() or ida_get_new_above() to be called at the
- * same time.
+ * Context: Any context.
  */
 void ida_destroy(struct ida *ida)
 {
+       unsigned long flags;
        struct radix_tree_iter iter;
        void __rcu **slot;
 
+       xa_lock_irqsave(&ida->ida_rt, flags);
        radix_tree_for_each_slot(slot, &ida->ida_rt, &iter, 0) {
                struct ida_bitmap *bitmap = rcu_dereference_raw(*slot);
                if (!radix_tree_exception(bitmap))
                        kfree(bitmap);
                radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
        }
+       xa_unlock_irqrestore(&ida->ida_rt, flags);
 }
 EXPORT_SYMBOL(ida_destroy);
 
 /**
- * ida_simple_get - get a new id.
- * @ida: the (initialized) ida.
- * @start: the minimum id (inclusive, < 0x8000000)
- * @end: the maximum id (exclusive, < 0x8000000 or 0)
- * @gfp_mask: memory allocation flags
- *
- * Allocates an id in the range start <= id < end, or returns -ENOSPC.
- * On memory allocation failure, returns -ENOMEM.
+ * ida_alloc_range() - Allocate an unused ID.
+ * @ida: IDA handle.
+ * @min: Lowest ID to allocate.
+ * @max: Highest ID to allocate.
+ * @gfp: Memory allocation flags.
  *
- * Compared to ida_get_new_above() this function does its own locking, and
- * should be used unless there are special requirements.
+ * Allocate an ID between @min and @max, inclusive.  The allocated ID will
+ * not exceed %INT_MAX, even if @max is larger.
  *
- * Use ida_simple_remove() to get rid of an id.
+ * Context: Any context.
+ * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
+ * or %-ENOSPC if there are no free IDs.
  */
-int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
-                  gfp_t gfp_mask)
+int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,
+                       gfp_t gfp)
 {
-       int ret, id;
-       unsigned int max;
+       int id = 0;
        unsigned long flags;
 
-       BUG_ON((int)start < 0);
-       BUG_ON((int)end < 0);
+       if ((int)min < 0)
+               return -ENOSPC;
 
-       if (end == 0)
-               max = 0x80000000;
-       else {
-               BUG_ON(end < start);
-               max = end - 1;
-       }
+       if ((int)max < 0)
+               max = INT_MAX;
 
 again:
-       if (!ida_pre_get(ida, gfp_mask))
-               return -ENOMEM;
-
        xa_lock_irqsave(&ida->ida_rt, flags);
-       ret = ida_get_new_above(ida, start, &id);
-       if (!ret) {
-               if (id > max) {
-                       ida_remove(ida, id);
-                       ret = -ENOSPC;
-               } else {
-                       ret = id;
-               }
+       id = ida_get_new_above(ida, min);
+       if (id > (int)max) {
+               ida_remove(ida, id);
+               id = -ENOSPC;
        }
        xa_unlock_irqrestore(&ida->ida_rt, flags);
 
-       if (unlikely(ret == -EAGAIN))
+       if (unlikely(id == -EAGAIN)) {
+               if (!ida_pre_get(ida, gfp))
+                       return -ENOMEM;
                goto again;
+       }
 
-       return ret;
+       return id;
 }
-EXPORT_SYMBOL(ida_simple_get);
+EXPORT_SYMBOL(ida_alloc_range);
 
 /**
- * ida_simple_remove - remove an allocated id.
- * @ida: the (initialized) ida.
- * @id: the id returned by ida_simple_get.
- *
- * Use to release an id allocated with ida_simple_get().
+ * ida_free() - Release an allocated ID.
+ * @ida: IDA handle.
+ * @id: Previously allocated ID.
  *
- * Compared to ida_remove() this function does its own locking, and should be
- * used unless there are special requirements.
+ * Context: Any context.
  */
-void ida_simple_remove(struct ida *ida, unsigned int id)
+void ida_free(struct ida *ida, unsigned int id)
 {
        unsigned long flags;
 
@@ -619,4 +576,4 @@ void ida_simple_remove(struct ida *ida, unsigned int id)
        ida_remove(ida, id);
        xa_unlock_irqrestore(&ida->ida_rt, flags);
 }
-EXPORT_SYMBOL(ida_simple_remove);
+EXPORT_SYMBOL(ida_free);
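For context, the hunks above retire ida_get_new_above()/ida_remove()/ida_pre_get() as exported interfaces in favour of ida_alloc_range()/ida_free(), which handle the -EAGAIN retry and take the xa_lock internally. A minimal usage sketch of the new interface follows; the identifiers example_ida, example_get_id and example_put_id are invented for illustration and assume only the API shown in this patch.

	#include <linux/idr.h>

	static DEFINE_IDA(example_ida);

	/* Hand out a small identifier in [0, 63]; returns the ID or a
	 * negative errno (-ENOMEM or -ENOSPC). */
	static int example_get_id(void)
	{
		return ida_alloc_range(&example_ida, 0, 63, GFP_KERNEL);
	}

	static void example_put_id(int id)
	{
		ida_free(&example_ida, id);
	}
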
index c72577e472f2f2929f825b0c105a237f54c42971..a66595ba5543e2a345da99b37313112e0dc1a4e6 100644 (file)
@@ -4,7 +4,6 @@
  */
 
 #include <linux/percpu_counter.h>
-#include <linux/notifier.h>
 #include <linux/mutex.h>
 #include <linux/init.h>
 #include <linux/cpu.h>
index a9e41aed6de4bfb063af9d16d41942a041a23fdd..bc03ecc4dfd2f69c8638cbff3270bc55793b7f6b 100644 (file)
@@ -120,7 +120,7 @@ bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
 static inline unsigned long
 get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot)
 {
-       return slot - parent->slots;
+       return parent ? slot - parent->slots : 0;
 }
 
 static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
@@ -2106,14 +2106,6 @@ void idr_preload(gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(idr_preload);
 
-/**
- * ida_pre_get - reserve resources for ida allocation
- * @ida: ida handle
- * @gfp: memory allocation flags
- *
- * This function should be called before calling ida_get_new_above().  If it
- * is unable to allocate memory, it will return %0.  On success, it returns %1.
- */
 int ida_pre_get(struct ida *ida, gfp_t gfp)
 {
        /*
@@ -2134,7 +2126,6 @@ int ida_pre_get(struct ida *ida, gfp_t gfp)
 
        return 1;
 }
-EXPORT_SYMBOL(ida_pre_get);
 
 void __rcu **idr_get_free(struct radix_tree_root *root,
                              struct radix_tree_iter *iter, gfp_t gfp,
index 310e29b5150737eac76503b91baf551da3bc47da..30526afa8343124f06f0649592ee246dc4d88fbe 100644 (file)
@@ -28,7 +28,6 @@
 #include <linux/rhashtable.h>
 #include <linux/err.h>
 #include <linux/export.h>
-#include <linux/rhashtable.h>
 
 #define HASH_DEFAULT_SIZE      64UL
 #define HASH_MIN_SIZE          4U
diff --git a/lib/test_ida.c b/lib/test_ida.c
new file mode 100644 (file)
index 0000000..2d1637d
--- /dev/null
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * test_ida.c: Test the IDA API
+ * Copyright (c) 2016-2018 Microsoft Corporation
+ * Copyright (c) 2018 Oracle Corporation
+ * Author: Matthew Wilcox <willy@infradead.org>
+ */
+
+#include <linux/idr.h>
+#include <linux/module.h>
+
+static unsigned int tests_run;
+static unsigned int tests_passed;
+
+#ifdef __KERNEL__
+void ida_dump(struct ida *ida) { }
+#endif
+#define IDA_BUG_ON(ida, x) do {                                                \
+       tests_run++;                                                    \
+       if (x) {                                                        \
+               ida_dump(ida);                                          \
+               dump_stack();                                           \
+       } else {                                                        \
+               tests_passed++;                                         \
+       }                                                               \
+} while (0)
+
+/*
+ * Straightforward checks that allocating and freeing IDs work.
+ */
+static void ida_check_alloc(struct ida *ida)
+{
+       int i, id;
+
+       for (i = 0; i < 10000; i++)
+               IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != i);
+
+       ida_free(ida, 20);
+       ida_free(ida, 21);
+       for (i = 0; i < 3; i++) {
+               id = ida_alloc(ida, GFP_KERNEL);
+               IDA_BUG_ON(ida, id < 0);
+               if (i == 2)
+                       IDA_BUG_ON(ida, id != 10000);
+       }
+
+       for (i = 0; i < 5000; i++)
+               ida_free(ida, i);
+
+       IDA_BUG_ON(ida, ida_alloc_min(ida, 5000, GFP_KERNEL) != 10001);
+       ida_destroy(ida);
+
+       IDA_BUG_ON(ida, !ida_is_empty(ida));
+}
+
+/* Destroy an IDA with a single entry at @base */
+static void ida_check_destroy_1(struct ida *ida, unsigned int base)
+{
+       IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) != base);
+       IDA_BUG_ON(ida, ida_is_empty(ida));
+       ida_destroy(ida);
+       IDA_BUG_ON(ida, !ida_is_empty(ida));
+}
+
+/* Check that ida_destroy and ida_is_empty work */
+static void ida_check_destroy(struct ida *ida)
+{
+       /* Destroy an already-empty IDA */
+       IDA_BUG_ON(ida, !ida_is_empty(ida));
+       ida_destroy(ida);
+       IDA_BUG_ON(ida, !ida_is_empty(ida));
+
+       ida_check_destroy_1(ida, 0);
+       ida_check_destroy_1(ida, 1);
+       ida_check_destroy_1(ida, 1023);
+       ida_check_destroy_1(ida, 1024);
+       ida_check_destroy_1(ida, 12345678);
+}
+
+/*
+ * Check what happens when we fill a leaf and then delete it.  This may
+ * discover mishandling of IDR_FREE.
+ */
+static void ida_check_leaf(struct ida *ida, unsigned int base)
+{
+       unsigned long i;
+
+       for (i = 0; i < IDA_BITMAP_BITS; i++) {
+               IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) !=
+                               base + i);
+       }
+
+       ida_destroy(ida);
+       IDA_BUG_ON(ida, !ida_is_empty(ida));
+
+       IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != 0);
+       IDA_BUG_ON(ida, ida_is_empty(ida));
+       ida_free(ida, 0);
+       IDA_BUG_ON(ida, !ida_is_empty(ida));
+}
+
+/*
+ * Check allocations up to and slightly above the maximum allowed (2^31-1) ID.
+ * Allocating up to 2^31-1 should succeed, and then allocating the next one
+ * should fail.
+ */
+static void ida_check_max(struct ida *ida)
+{
+       unsigned long i, j;
+
+       for (j = 1; j < 65537; j *= 2) {
+               unsigned long base = (1UL << 31) - j;
+               for (i = 0; i < j; i++) {
+                       IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) !=
+                                       base + i);
+               }
+               IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) !=
+                               -ENOSPC);
+               ida_destroy(ida);
+               IDA_BUG_ON(ida, !ida_is_empty(ida));
+       }
+}
+
+/*
+ * Check handling of conversions between exceptional entries and full bitmaps.
+ */
+static void ida_check_conv(struct ida *ida)
+{
+       unsigned long i;
+
+       for (i = 0; i < IDA_BITMAP_BITS * 2; i += IDA_BITMAP_BITS) {
+               IDA_BUG_ON(ida, ida_alloc_min(ida, i + 1, GFP_KERNEL) != i + 1);
+               IDA_BUG_ON(ida, ida_alloc_min(ida, i + BITS_PER_LONG,
+                                       GFP_KERNEL) != i + BITS_PER_LONG);
+               ida_free(ida, i + 1);
+               ida_free(ida, i + BITS_PER_LONG);
+               IDA_BUG_ON(ida, !ida_is_empty(ida));
+       }
+
+       for (i = 0; i < IDA_BITMAP_BITS * 2; i++)
+               IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != i);
+       for (i = IDA_BITMAP_BITS * 2; i > 0; i--)
+               ida_free(ida, i - 1);
+       IDA_BUG_ON(ida, !ida_is_empty(ida));
+
+       for (i = 0; i < IDA_BITMAP_BITS + BITS_PER_LONG - 4; i++)
+               IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != i);
+       for (i = IDA_BITMAP_BITS + BITS_PER_LONG - 4; i > 0; i--)
+               ida_free(ida, i - 1);
+       IDA_BUG_ON(ida, !ida_is_empty(ida));
+}
+
+static int ida_checks(void)
+{
+       DEFINE_IDA(ida);
+
+       IDA_BUG_ON(&ida, !ida_is_empty(&ida));
+       ida_check_alloc(&ida);
+       ida_check_destroy(&ida);
+       ida_check_leaf(&ida, 0);
+       ida_check_leaf(&ida, 1024);
+       ida_check_leaf(&ida, 1024 * 64);
+       ida_check_max(&ida);
+       ida_check_conv(&ida);
+
+       printk("IDA: %u of %u tests passed\n", tests_passed, tests_run);
+       return (tests_run != tests_passed) ? 0 : -EINVAL;
+}
+
+static void ida_exit(void)
+{
+}
+
+module_init(ida_checks);
+module_exit(ida_exit);
+MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
+MODULE_LICENSE("GPL");
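The new self-test exercises the API through the IDA_BUG_ON() counter macro defined above. An additional check written in the same style might look like the sketch below; it is illustrative only (ida_check_alloc_max is an invented name) and assumes the ida_alloc_max() helper introduced with the rest of this API.

	/* Illustrative only: a further check in the IDA_BUG_ON() style,
	 * exercising ida_alloc_max().  Not part of the patch. */
	static void ida_check_alloc_max(struct ida *ida)
	{
		int i;

		/* Only IDs 0..5 may be handed out, so the seventh call must fail. */
		for (i = 0; i <= 5; i++)
			IDA_BUG_ON(ida, ida_alloc_max(ida, 5, GFP_KERNEL) != i);
		IDA_BUG_ON(ida, ida_alloc_max(ida, 5, GFP_KERNEL) != -ENOSPC);

		ida_destroy(ida);
		IDA_BUG_ON(ida, !ida_is_empty(ida));
	}
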
index a550635ea5c353a8f226f7d1b429706a379a46a7..de64ea658716a8f1c63fcc03da45711c5d59f137 100644 (file)
@@ -637,6 +637,7 @@ config DEFERRED_STRUCT_PAGE_INIT
        depends on NO_BOOTMEM
        depends on SPARSEMEM
        depends on !NEED_PER_CPU_KM
+       depends on 64BIT
        help
          Ordinarily all struct pages are initialised during early boot in a
          single thread. On very large machines this can take a considerable
index 8716bdabe1e69f4758d1eedf9c6c8d9d98d2bec2..26ef77a3883b5c708659425229e975ac74069875 100644 (file)
@@ -32,7 +32,7 @@ ifdef CONFIG_CROSS_MEMORY_ATTACH
 mmu-$(CONFIG_MMU)      += process_vm_access.o
 endif
 
-obj-y                  := filemap.o mempool.o oom_kill.o \
+obj-y                  := filemap.o mempool.o oom_kill.o fadvise.o \
                           maccess.o page_alloc.o page-writeback.o \
                           readahead.o swap.o truncate.o vmscan.o shmem.o \
                           util.o mmzone.o vmstat.o backing-dev.o \
@@ -49,7 +49,6 @@ else
        obj-y           += bootmem.o
 endif
 
-obj-$(CONFIG_ADVISE_SYSCALLS)  += fadvise.o
 ifdef CONFIG_MMU
        obj-$(CONFIG_ADVISE_SYSCALLS)   += madvise.o
 endif
index 38c926520c9718b8929a72829931080a3a53502d..bd10aad8539a42bf5a1263518534946eeac6e050 100644 (file)
@@ -114,7 +114,7 @@ EXPORT_SYMBOL(dump_vma);
 
 void dump_mm(const struct mm_struct *mm)
 {
-       pr_emerg("mm %px mmap %px seqnum %d task_size %lu\n"
+       pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
 #ifdef CONFIG_MMU
                "get_unmapped_area %px\n"
 #endif
@@ -142,7 +142,7 @@ void dump_mm(const struct mm_struct *mm)
                "tlb_flush_pending %d\n"
                "def_flags: %#lx(%pGv)\n",
 
-               mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
+               mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
 #ifdef CONFIG_MMU
                mm->get_unmapped_area,
 #endif
index 2d8376e3c6408bf1ea058570dad64079cae7632d..467bcd032037a905e991cefe9fe48922f2031036 100644 (file)
@@ -27,9 +27,9 @@
  * deactivate the pages and clear PG_Referenced.
  */
 
-int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
+static int generic_fadvise(struct file *file, loff_t offset, loff_t len,
+                          int advice)
 {
-       struct fd f = fdget(fd);
        struct inode *inode;
        struct address_space *mapping;
        struct backing_dev_info *bdi;
@@ -37,22 +37,14 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
        pgoff_t start_index;
        pgoff_t end_index;
        unsigned long nrpages;
-       int ret = 0;
-
-       if (!f.file)
-               return -EBADF;
 
-       inode = file_inode(f.file);
-       if (S_ISFIFO(inode->i_mode)) {
-               ret = -ESPIPE;
-               goto out;
-       }
+       inode = file_inode(file);
+       if (S_ISFIFO(inode->i_mode))
+               return -ESPIPE;
 
-       mapping = f.file->f_mapping;
-       if (!mapping || len < 0) {
-               ret = -EINVAL;
-               goto out;
-       }
+       mapping = file->f_mapping;
+       if (!mapping || len < 0)
+               return -EINVAL;
 
        bdi = inode_to_bdi(mapping->host);
 
@@ -67,9 +59,9 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
                        /* no bad return value, but ignore advice */
                        break;
                default:
-                       ret = -EINVAL;
+                       return -EINVAL;
                }
-               goto out;
+               return 0;
        }
 
        /*
@@ -85,21 +77,21 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
 
        switch (advice) {
        case POSIX_FADV_NORMAL:
-               f.file->f_ra.ra_pages = bdi->ra_pages;
-               spin_lock(&f.file->f_lock);
-               f.file->f_mode &= ~FMODE_RANDOM;
-               spin_unlock(&f.file->f_lock);
+               file->f_ra.ra_pages = bdi->ra_pages;
+               spin_lock(&file->f_lock);
+               file->f_mode &= ~FMODE_RANDOM;
+               spin_unlock(&file->f_lock);
                break;
        case POSIX_FADV_RANDOM:
-               spin_lock(&f.file->f_lock);
-               f.file->f_mode |= FMODE_RANDOM;
-               spin_unlock(&f.file->f_lock);
+               spin_lock(&file->f_lock);
+               file->f_mode |= FMODE_RANDOM;
+               spin_unlock(&file->f_lock);
                break;
        case POSIX_FADV_SEQUENTIAL:
-               f.file->f_ra.ra_pages = bdi->ra_pages * 2;
-               spin_lock(&f.file->f_lock);
-               f.file->f_mode &= ~FMODE_RANDOM;
-               spin_unlock(&f.file->f_lock);
+               file->f_ra.ra_pages = bdi->ra_pages * 2;
+               spin_lock(&file->f_lock);
+               file->f_mode &= ~FMODE_RANDOM;
+               spin_unlock(&file->f_lock);
                break;
        case POSIX_FADV_WILLNEED:
                /* First and last PARTIAL page! */
@@ -115,8 +107,7 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
                 * Ignore return value because fadvise() shall return
                 * success even if filesystem can't retrieve a hint,
                 */
-               force_page_cache_readahead(mapping, f.file, start_index,
-                                          nrpages);
+               force_page_cache_readahead(mapping, file, start_index, nrpages);
                break;
        case POSIX_FADV_NOREUSE:
                break;
@@ -183,9 +174,32 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
                }
                break;
        default:
-               ret = -EINVAL;
+               return -EINVAL;
        }
-out:
+       return 0;
+}
+
+int vfs_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
+{
+       if (file->f_op->fadvise)
+               return file->f_op->fadvise(file, offset, len, advice);
+
+       return generic_fadvise(file, offset, len, advice);
+}
+EXPORT_SYMBOL(vfs_fadvise);
+
+#ifdef CONFIG_ADVISE_SYSCALLS
+
+int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
+{
+       struct fd f = fdget(fd);
+       int ret;
+
+       if (!f.file)
+               return -EBADF;
+
+       ret = vfs_fadvise(f.file, offset, len, advice);
+
        fdput(f);
        return ret;
 }
@@ -203,3 +217,4 @@ SYSCALL_DEFINE4(fadvise64, int, fd, loff_t, offset, size_t, len, int, advice)
 }
 
 #endif
+#endif
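The refactor above splits the old ksys_fadvise64_64() body into generic_fadvise() and a vfs_fadvise() dispatcher, so a filesystem can take over POSIX_FADV_* handling through a ->fadvise() file operation (added elsewhere in this series). A hedged sketch of how a stacking filesystem might use that hook; every examplefs_* name is invented, and stashing the wrapped file in ->private_data is an assumed convention, not something this patch defines.

	#include <linux/fs.h>

	/* Invented convention: the wrapped file is stashed in ->private_data. */
	static struct file *examplefs_real_file(struct file *file)
	{
		return file->private_data;
	}

	/* Forward advice to the file this filesystem stacks on top of. */
	static int examplefs_fadvise(struct file *file, loff_t offset,
				     loff_t len, int advice)
	{
		return vfs_fadvise(examplefs_real_file(file), offset, len, advice);
	}

	static const struct file_operations examplefs_file_ops = {
		.fadvise	= examplefs_fadvise,
		/* .read_iter, .write_iter, .mmap, ... elided */
	};
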
index 0b05545916106cad2f5bd490af19514107a6b9c5..c968e49f7a0c527258a85b8c0259467e3b2924de 100644 (file)
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -968,6 +968,8 @@ static void hmm_devmem_free(struct page *page, void *data)
 {
        struct hmm_devmem *devmem = data;
 
+       page->mapping = NULL;
+
        devmem->ops->free(devmem, page);
 }
 
index 08b544383d7467df273eac5b97c7c4e07d2e7e4a..533f9b00147d267644bcbf98da717329fb07c38f 100644 (file)
@@ -752,7 +752,7 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
        spin_unlock(ptl);
 }
 
-int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
+vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
                        pmd_t *pmd, pfn_t pfn, bool write)
 {
        pgprot_t pgprot = vma->vm_page_prot;
@@ -812,7 +812,7 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
        spin_unlock(ptl);
 }
 
-int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
+vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
                        pud_t *pud, pfn_t pfn, bool write)
 {
        pgprot_t pgprot = vma->vm_page_prot;
@@ -821,11 +821,11 @@ int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
         * but we need to be consistent with PTEs and architectures that
         * can't support a 'special' bit.
         */
-       BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
+       BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
+                       !pfn_t_devmap(pfn));
        BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
                                                (VM_PFNMAP|VM_MIXEDMAP));
        BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
-       BUG_ON(!pfn_t_devmap(pfn));
 
        if (addr < vma->vm_start || addr >= vma->vm_end)
                return VM_FAULT_SIGBUS;
index 9a085d525bbce1bd4aabb4599236d2874c964850..17dd883198aeab0dccebe762a6b432dad2f14c9d 100644 (file)
@@ -2097,6 +2097,11 @@ static int __init kmemleak_late_init(void)
 
        kmemleak_initialized = 1;
 
+       dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL,
+                                    &kmemleak_fops);
+       if (!dentry)
+               pr_warn("Failed to create the debugfs kmemleak file\n");
+
        if (kmemleak_error) {
                /*
                 * Some error occurred and kmemleak was disabled. There is a
@@ -2108,10 +2113,6 @@ static int __init kmemleak_late_init(void)
                return -ENOMEM;
        }
 
-       dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL,
-                                    &kmemleak_fops);
-       if (!dentry)
-               pr_warn("Failed to create the debugfs kmemleak file\n");
        mutex_lock(&scan_mutex);
        start_scan_thread();
        mutex_unlock(&scan_mutex);
index 4d3c922ea1a1cb7558e1b69a3e32000833442489..972a9eaa898b6ad889a4647ed207ad82bd5d0f4b 100644 (file)
@@ -631,11 +631,13 @@ static int madvise_inject_error(int behavior,
 
 
        for (; start < end; start += PAGE_SIZE << order) {
+               unsigned long pfn;
                int ret;
 
                ret = get_user_pages_fast(start, 1, 0, &page);
                if (ret != 1)
                        return ret;
+               pfn = page_to_pfn(page);
 
                /*
                 * When soft offlining hugepages, after migrating the page
@@ -651,17 +653,25 @@ static int madvise_inject_error(int behavior,
 
                if (behavior == MADV_SOFT_OFFLINE) {
                        pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
-                                               page_to_pfn(page), start);
+                                       pfn, start);
 
                        ret = soft_offline_page(page, MF_COUNT_INCREASED);
                        if (ret)
                                return ret;
                        continue;
                }
+
                pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
-                                               page_to_pfn(page), start);
+                               pfn, start);
 
-               ret = memory_failure(page_to_pfn(page), MF_COUNT_INCREASED);
+               /*
+                * Drop the page reference taken by get_user_pages_fast(). In
+                * the absence of MF_COUNT_INCREASED the memory_failure()
+                * routine is responsible for pinning the page to prevent it
+                * from being released back to the page allocator.
+                */
+               put_page(page);
+               ret = memory_failure(pfn, 0);
                if (ret)
                        return ret;
        }
index 4ead5a4817de3ffbf1477cfe77b8f4f4802281c8..e79cb59552d9b0076a2c9cbfec3af9ebfa5c06e2 100644 (file)
@@ -1701,8 +1701,6 @@ static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int
        if (mem_cgroup_out_of_memory(memcg, mask, order))
                return OOM_SUCCESS;
 
-       WARN(1,"Memory cgroup charge failed because of no reclaimable memory! "
-               "This looks like a misconfiguration or a kernel bug.");
        return OOM_FAILED;
 }
 
index 192d0bbfc9ea58823b41f14e7a5a515932398eba..0cd3de3550f0830f507d286b0499789d7961171e 100644 (file)
@@ -55,6 +55,7 @@
 #include <linux/hugetlb.h>
 #include <linux/memory_hotplug.h>
 #include <linux/mm_inline.h>
+#include <linux/memremap.h>
 #include <linux/kfifo.h>
 #include <linux/ratelimit.h>
 #include <linux/page-isolation.h>
@@ -174,23 +175,52 @@ int hwpoison_filter(struct page *p)
 
 EXPORT_SYMBOL_GPL(hwpoison_filter);
 
+/*
+ * Kill all processes that have a poisoned page mapped and then isolate
+ * the page.
+ *
+ * General strategy:
+ * Find all processes having the page mapped and kill them.
+ * But we keep a page reference around so that the page is not
+ * actually freed yet.
+ * Then stash the page away
+ *
+ * There's no convenient way to get back to mapped processes
+ * from the VMAs. So do a brute-force search over all
+ * running processes.
+ *
+ * Remember that machine checks are not common (or rather
+ * if they are common you have other problems), so this shouldn't
+ * be a performance issue.
+ *
+ * Also there are some races possible while we get from the
+ * error detection to actually handle it.
+ */
+
+struct to_kill {
+       struct list_head nd;
+       struct task_struct *tsk;
+       unsigned long addr;
+       short size_shift;
+       char addr_valid;
+};
+
 /*
  * Send all the processes who have the page mapped a signal.
  * ``action optional'' if they are not immediately affected by the error
  * ``action required'' if error happened in current execution context
  */
-static int kill_proc(struct task_struct *t, unsigned long addr,
-                       unsigned long pfn, struct page *page, int flags)
+static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
 {
-       short addr_lsb;
+       struct task_struct *t = tk->tsk;
+       short addr_lsb = tk->size_shift;
        int ret;
 
        pr_err("Memory failure: %#lx: Killing %s:%d due to hardware memory corruption\n",
                pfn, t->comm, t->pid);
-       addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;
 
        if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) {
-               ret = force_sig_mceerr(BUS_MCEERR_AR, (void __user *)addr,
+               ret = force_sig_mceerr(BUS_MCEERR_AR, (void __user *)tk->addr,
                                       addr_lsb, current);
        } else {
                /*
@@ -199,7 +229,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr,
                 * This could cause a loop when the user sets SIGBUS
                 * to SIG_IGN, but hopefully no one will do that?
                 */
-               ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)addr,
+               ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
                                      addr_lsb, t);  /* synchronous? */
        }
        if (ret < 0)
@@ -235,34 +265,39 @@ void shake_page(struct page *p, int access)
 }
 EXPORT_SYMBOL_GPL(shake_page);
 
-/*
- * Kill all processes that have a poisoned page mapped and then isolate
- * the page.
- *
- * General strategy:
- * Find all processes having the page mapped and kill them.
- * But we keep a page reference around so that the page is not
- * actually freed yet.
- * Then stash the page away
- *
- * There's no convenient way to get back to mapped processes
- * from the VMAs. So do a brute-force search over all
- * running processes.
- *
- * Remember that machine checks are not common (or rather
- * if they are common you have other problems), so this shouldn't
- * be a performance issue.
- *
- * Also there are some races possible while we get from the
- * error detection to actually handle it.
- */
-
-struct to_kill {
-       struct list_head nd;
-       struct task_struct *tsk;
-       unsigned long addr;
-       char addr_valid;
-};
+static unsigned long dev_pagemap_mapping_shift(struct page *page,
+               struct vm_area_struct *vma)
+{
+       unsigned long address = vma_address(page, vma);
+       pgd_t *pgd;
+       p4d_t *p4d;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+
+       pgd = pgd_offset(vma->vm_mm, address);
+       if (!pgd_present(*pgd))
+               return 0;
+       p4d = p4d_offset(pgd, address);
+       if (!p4d_present(*p4d))
+               return 0;
+       pud = pud_offset(p4d, address);
+       if (!pud_present(*pud))
+               return 0;
+       if (pud_devmap(*pud))
+               return PUD_SHIFT;
+       pmd = pmd_offset(pud, address);
+       if (!pmd_present(*pmd))
+               return 0;
+       if (pmd_devmap(*pmd))
+               return PMD_SHIFT;
+       pte = pte_offset_map(pmd, address);
+       if (!pte_present(*pte))
+               return 0;
+       if (pte_devmap(*pte))
+               return PAGE_SHIFT;
+       return 0;
+}
 
 /*
  * Failure handling: if we can't find or can't kill a process there's
@@ -293,6 +328,10 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
        }
        tk->addr = page_address_in_vma(p, vma);
        tk->addr_valid = 1;
+       if (is_zone_device_page(p))
+               tk->size_shift = dev_pagemap_mapping_shift(p, vma);
+       else
+               tk->size_shift = compound_order(compound_head(p)) + PAGE_SHIFT;
 
        /*
         * In theory we don't have to kill when the page was
@@ -300,7 +339,7 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
         * likely very rare kill anyways just out of paranoia, but use
         * a SIGKILL because the error is not contained anymore.
         */
-       if (tk->addr == -EFAULT) {
+       if (tk->addr == -EFAULT || tk->size_shift == 0) {
                pr_info("Memory failure: Unable to find user space address %lx in %s\n",
                        page_to_pfn(p), tsk->comm);
                tk->addr_valid = 0;
@@ -318,9 +357,8 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
  * Also when FAIL is set do a force kill because something went
  * wrong earlier.
  */
-static void kill_procs(struct list_head *to_kill, int forcekill,
-                         bool fail, struct page *page, unsigned long pfn,
-                         int flags)
+static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
+               unsigned long pfn, int flags)
 {
        struct to_kill *tk, *next;
 
@@ -343,8 +381,7 @@ static void kill_procs(struct list_head *to_kill, int forcekill,
                         * check for that, but we need to tell the
                         * process anyways.
                         */
-                       else if (kill_proc(tk->tsk, tk->addr,
-                                             pfn, page, flags) < 0)
+                       else if (kill_proc(tk, pfn, flags) < 0)
                                pr_err("Memory failure: %#lx: Cannot send advisory machine check signal to %s:%d\n",
                                       pfn, tk->tsk->comm, tk->tsk->pid);
                }
@@ -516,6 +553,7 @@ static const char * const action_page_types[] = {
        [MF_MSG_TRUNCATED_LRU]          = "already truncated LRU page",
        [MF_MSG_BUDDY]                  = "free buddy page",
        [MF_MSG_BUDDY_2ND]              = "free buddy page (2nd try)",
+       [MF_MSG_DAX]                    = "dax page",
        [MF_MSG_UNKNOWN]                = "unknown page",
 };
 
@@ -1013,7 +1051,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
         * any accesses to the poisoned memory.
         */
        forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
-       kill_procs(&tokill, forcekill, !unmap_success, p, pfn, flags);
+       kill_procs(&tokill, forcekill, !unmap_success, pfn, flags);
 
        return unmap_success;
 }
@@ -1113,6 +1151,83 @@ out:
        return res;
 }
 
+static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
+               struct dev_pagemap *pgmap)
+{
+       struct page *page = pfn_to_page(pfn);
+       const bool unmap_success = true;
+       unsigned long size = 0;
+       struct to_kill *tk;
+       LIST_HEAD(tokill);
+       int rc = -EBUSY;
+       loff_t start;
+
+       /*
+        * Prevent the inode from being freed while we are interrogating
+        * the address_space, typically this would be handled by
+        * lock_page(), but dax pages do not use the page lock. This
+        * also prevents changes to the mapping of this pfn until
+        * poison signaling is complete.
+        */
+       if (!dax_lock_mapping_entry(page))
+               goto out;
+
+       if (hwpoison_filter(page)) {
+               rc = 0;
+               goto unlock;
+       }
+
+       switch (pgmap->type) {
+       case MEMORY_DEVICE_PRIVATE:
+       case MEMORY_DEVICE_PUBLIC:
+               /*
+                * TODO: Handle HMM pages which may need coordination
+                * with device-side memory.
+                */
+               goto unlock;
+       default:
+               break;
+       }
+
+       /*
+        * Use this flag as an indication that the dax page has been
+        * remapped UC to prevent speculative consumption of poison.
+        */
+       SetPageHWPoison(page);
+
+       /*
+        * Unlike System-RAM there is no possibility to swap in a
+        * different physical page at a given virtual address, so all
+        * userspace consumption of ZONE_DEVICE memory necessitates
+        * SIGBUS (i.e. MF_MUST_KILL)
+        */
+       flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
+       collect_procs(page, &tokill, flags & MF_ACTION_REQUIRED);
+
+       list_for_each_entry(tk, &tokill, nd)
+               if (tk->size_shift)
+                       size = max(size, 1UL << tk->size_shift);
+       if (size) {
+               /*
+                * Unmap the largest mapping to avoid breaking up
+                * device-dax mappings which are constant size. The
+                * actual size of the mapping being torn down is
+                * communicated in siginfo, see kill_proc()
+                */
+               start = (page->index << PAGE_SHIFT) & ~(size - 1);
+               unmap_mapping_range(page->mapping, start, start + size, 0);
+       }
+       kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags);
+       rc = 0;
+unlock:
+       dax_unlock_mapping_entry(page);
+out:
+       /* drop pgmap ref acquired in caller */
+       put_dev_pagemap(pgmap);
+       action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED);
+       return rc;
+}
+
 /**
  * memory_failure - Handle memory failure of a page.
  * @pfn: Page Number of the corrupted page
@@ -1135,6 +1250,7 @@ int memory_failure(unsigned long pfn, int flags)
        struct page *p;
        struct page *hpage;
        struct page *orig_head;
+       struct dev_pagemap *pgmap;
        int res;
        unsigned long page_flags;
 
@@ -1147,6 +1263,10 @@ int memory_failure(unsigned long pfn, int flags)
                return -ENXIO;
        }
 
+       pgmap = get_dev_pagemap(pfn, NULL);
+       if (pgmap)
+               return memory_failure_dev_pagemap(pfn, flags, pgmap);
+
        p = pfn_to_page(pfn);
        if (PageHuge(p))
                return memory_failure_hugetlb(pfn, flags);
@@ -1777,6 +1897,14 @@ int soft_offline_page(struct page *page, int flags)
        int ret;
        unsigned long pfn = page_to_pfn(page);
 
+       if (is_zone_device_page(page)) {
+               pr_debug_ratelimited("soft_offline: %#lx page is device page\n",
+                               pfn);
+               if (flags & MF_COUNT_INCREASED)
+                       put_page(page);
+               return -EIO;
+       }
+
        if (PageHWPoison(page)) {
                pr_info("soft offline: %#lx page already poisoned\n", pfn);
                if (flags & MF_COUNT_INCREASED)
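One practical consequence of the memory_failure_dev_pagemap() path above is that the SIGBUS delivered for a poisoned DAX mapping carries the mapping granularity chosen via tk->size_shift in siginfo's si_addr_lsb field. A hedged userspace sketch of observing that signal (error handling trimmed; fprintf is used only for brevity and is not async-signal-safe):

	#define _GNU_SOURCE
	#include <signal.h>
	#include <stdio.h>
	#include <unistd.h>

	static void bus_handler(int sig, siginfo_t *info, void *ctx)
	{
		(void)sig; (void)ctx;
		if (info->si_code == BUS_MCEERR_AR || info->si_code == BUS_MCEERR_AO)
			fprintf(stderr, "poison at %p, granularity 2^%d bytes\n",
				info->si_addr, info->si_addr_lsb);
		_exit(1);
	}

	int main(void)
	{
		struct sigaction sa = {
			.sa_sigaction	= bus_handler,
			.sa_flags	= SA_SIGINFO,
		};

		sigaction(SIGBUS, &sa, NULL);
		/* ... mmap() a DAX-backed file and touch a poisoned page ... */
		return 0;
	}
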
index 9eea6e809a4e99fcf11125d47f28d7493af8b0ab..38d94b703e9d4932279b5657a2c889dfaaf1b749 100644 (file)
@@ -1333,7 +1333,8 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
                        if (__PageMovable(page))
                                return pfn;
                        if (PageHuge(page)) {
-                               if (page_huge_active(page))
+                               if (hugepage_migration_supported(page_hstate(page)) &&
+                                   page_huge_active(page))
                                        return pfn;
                                else
                                        pfn = round_up(pfn + 1,
index b5b25e4dcbbb707e5d7ad0ae588a88ca2dc49e88..f10aa5360616ea3247b6e508ed28ace237d70ad9 100644 (file)
@@ -522,6 +522,7 @@ bool __oom_reap_task_mm(struct mm_struct *mm)
 
                        tlb_gather_mmu(&tlb, mm, start, end);
                        if (mmu_notifier_invalidate_range_start_nonblock(mm, start, end)) {
+                               tlb_finish_mmu(&tlb, start, end);
                                ret = false;
                                continue;
                        }
@@ -1103,10 +1104,17 @@ bool out_of_memory(struct oom_control *oc)
        }
 
        select_bad_process(oc);
-       /* Found nothing?!?! Either we hang forever, or we panic. */
-       if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) {
+       /* Found nothing?!?! */
+       if (!oc->chosen) {
                dump_header(oc, NULL);
-               panic("Out of memory and no killable processes...\n");
+               pr_warn("Out of memory and no killable processes...\n");
+               /*
+                * If we got here due to an actual allocation at the
+                * system level, we cannot survive this and will enter
+                * an endless loop in the allocator. Bail out now.
+                */
+               if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))
+                       panic("System is deadlocked on memory\n");
        }
        if (oc->chosen && oc->chosen != (void *)-1UL)
                oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
index 6551d3b0dc30a5e5a951f24f27a5586cbe920237..84ae9bf5858ac9dcf7e1155e514a9b9062e0f10d 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/mpage.h>
 #include <linux/rmap.h>
 #include <linux/percpu.h>
-#include <linux/notifier.h>
 #include <linux/smp.h>
 #include <linux/sysctl.h>
 #include <linux/cpu.h>
index e75865d58ba70c42d2dd47e74fd27cb6484c1d6e..89d2a2ab3fe68c3ae46104074c519c7500dd86cb 100644 (file)
@@ -32,7 +32,6 @@
 #include <linux/slab.h>
 #include <linux/ratelimit.h>
 #include <linux/oom.h>
-#include <linux/notifier.h>
 #include <linux/topology.h>
 #include <linux/sysctl.h>
 #include <linux/cpu.h>
@@ -7709,6 +7708,10 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
                 * handle each tail page individually in migration.
                 */
                if (PageHuge(page)) {
+
+                       if (!hugepage_migration_supported(page_hstate(page)))
+                               goto unmovable;
+
                        iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
                        continue;
                }
index a59ea70527b9bc46b4fde403a7f19b93493351fb..4e630143a0ba8549c9a63f106c7dd18b7aaa4d18 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/file.h>
 #include <linux/mm_inline.h>
 #include <linux/blk-cgroup.h>
+#include <linux/fadvise.h>
 
 #include "internal.h"
 
@@ -575,24 +576,6 @@ page_cache_async_readahead(struct address_space *mapping,
 }
 EXPORT_SYMBOL_GPL(page_cache_async_readahead);
 
-static ssize_t
-do_readahead(struct address_space *mapping, struct file *filp,
-            pgoff_t index, unsigned long nr)
-{
-       if (!mapping || !mapping->a_ops)
-               return -EINVAL;
-
-       /*
-        * Readahead doesn't make sense for DAX inodes, but we don't want it
-        * to report a failure either.  Instead, we just return success and
-        * don't do any work.
-        */
-       if (dax_mapping(mapping))
-               return 0;
-
-       return force_page_cache_readahead(mapping, filp, index, nr);
-}
-
 ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
 {
        ssize_t ret;
@@ -600,16 +583,22 @@ ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
 
        ret = -EBADF;
        f = fdget(fd);
-       if (f.file) {
-               if (f.file->f_mode & FMODE_READ) {
-                       struct address_space *mapping = f.file->f_mapping;
-                       pgoff_t start = offset >> PAGE_SHIFT;
-                       pgoff_t end = (offset + count - 1) >> PAGE_SHIFT;
-                       unsigned long len = end - start + 1;
-                       ret = do_readahead(mapping, f.file, start, len);
-               }
-               fdput(f);
-       }
+       if (!f.file || !(f.file->f_mode & FMODE_READ))
+               goto out;
+
+       /*
+        * The readahead() syscall is intended to run only on files
+        * that can execute readahead. If readahead is not possible
+        * on this file, then we must return -EINVAL.
+        */
+       ret = -EINVAL;
+       if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
+           !S_ISREG(file_inode(f.file)->i_mode))
+               goto out;
+
+       ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
+out:
+       fdput(f);
        return ret;
 }
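With the conversion above, readahead(2) is serviced by the same vfs_fadvise(POSIX_FADV_WILLNEED) path as posix_fadvise(), and files that cannot perform readahead now get -EINVAL rather than an ad-hoc result. A hedged userspace sketch showing the two now-equivalent requests; the file path is whatever regular file the caller chooses.

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		int fd;

		if (argc < 2)
			return 1;
		fd = open(argv[1], O_RDONLY);
		if (fd < 0)
			return 1;

		/* Both requests now go through vfs_fadvise(POSIX_FADV_WILLNEED). */
		if (readahead(fd, 0, 1 << 20) < 0)
			perror("readahead");	/* e.g. EINVAL if readahead is impossible */
		posix_fadvise(fd, 0, 1 << 20, POSIX_FADV_WILLNEED);

		close(fd);
		return 0;
	}
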
 
index 0376c124b043be4d9be9f9fc233c3e03092ce428..446942677cd4b88fad61e9a869a901bc66fadc41 100644 (file)
@@ -2227,6 +2227,8 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
                        mpol_shared_policy_init(&info->policy, NULL);
                        break;
                }
+
+               lockdep_annotate_inode_mutex_key(inode);
        } else
                shmem_free_inode(sb);
        return inode;
index ce2b9e5cea771cb9f06e9bc6bb65151fd7281a95..8da34a8af53d58754ff4a394f26f6f252cbafbe0 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -19,7 +19,6 @@
 #include <linux/slab.h>
 #include "slab.h"
 #include <linux/proc_fs.h>
-#include <linux/notifier.h>
 #include <linux/seq_file.h>
 #include <linux/kasan.h>
 #include <linux/cpu.h>
index d2890a4073321c65a5f85f94c505c31bc3b7edb5..9e3ebd2ef65f8925b0a1f894567c0e3d98665b34 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -435,11 +435,14 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
 EXPORT_SYMBOL(kvmalloc_node);
 
 /**
- * kvfree - free memory allocated with kvmalloc
- * @addr: pointer returned by kvmalloc
+ * kvfree() - Free memory.
+ * @addr: Pointer to allocated memory.
  *
- * If the memory is allocated from vmalloc area it is freed with vfree().
- * Otherwise kfree() is used.
+ * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
+ * It is slightly more efficient to use kfree() or vfree() if you are certain
+ * that you know which one to use.
+ *
+ * Context: Any context except NMI.
  */
 void kvfree(const void *addr)
 {
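The reworded kernel-doc above is the whole contract: a caller of kvfree() does not need to remember which allocator kvmalloc() ended up using. A small hedged sketch with an invented example_copy() helper:

	#include <linux/mm.h>
	#include <linux/string.h>

	/* Invented helper: duplicate and process a buffer of arbitrary size. */
	static int example_copy(const void *src, size_t len)
	{
		void *buf = kvmalloc(len, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;
		memcpy(buf, src, len);
		/* ... use buf ... */
		kvfree(buf);	/* correct whether kmalloc() or vmalloc() backed it */
		return 0;
	}
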
index ea517bef7dc552a10ac6ed424ca06aa50670c303..cdc32a3b02fa0d7efc9d874b6cb6a26d82bf24c1 100644 (file)
 #endif
 #define VMACACHE_HASH(addr) ((addr >> VMACACHE_SHIFT) & VMACACHE_MASK)
 
-/*
- * Flush vma caches for threads that share a given mm.
- *
- * The operation is safe because the caller holds the mmap_sem
- * exclusively and other threads accessing the vma cache will
- * have mmap_sem held at least for read, so no extra locking
- * is required to maintain the vma cache.
- */
-void vmacache_flush_all(struct mm_struct *mm)
-{
-       struct task_struct *g, *p;
-
-       count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
-
-       /*
-        * Single threaded tasks need not iterate the entire
-        * list of process. We can avoid the flushing as well
-        * since the mm's seqnum was increased and don't have
-        * to worry about other threads' seqnum. Current's
-        * flush will occur upon the next lookup.
-        */
-       if (atomic_read(&mm->mm_users) == 1)
-               return;
-
-       rcu_read_lock();
-       for_each_process_thread(g, p) {
-               /*
-                * Only flush the vmacache pointers as the
-                * mm seqnum is already set and curr's will
-                * be set upon invalidation when the next
-                * lookup is done.
-                */
-               if (mm == p->mm)
-                       vmacache_flush(p);
-       }
-       rcu_read_unlock();
-}
-
 /*
  * This task may be accessing a foreign mm via (for example)
  * get_user_pages()->find_vma().  The vmacache is task-local and this
index 7e7d25504651d5b666f1706ff9a0743b3c62079f..c7ce2c1612259c45896bae9739a288966a28c970 100644 (file)
@@ -476,6 +476,17 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
        delta = freeable >> priority;
        delta *= 4;
        do_div(delta, shrinker->seeks);
+
+       /*
+        * Make sure we apply some minimal pressure on default priority
+        * even on small cgroups. Stale objects are not only consuming memory
+        * by themselves, but can also hold a reference to a dying cgroup,
+        * preventing it from being reclaimed. A dying cgroup with all
+        * corresponding structures like per-cpu stats and kmem caches
+        * can be really big, so it may lead to a significant waste of memory.
+        */
+       delta = max_t(unsigned long long, delta, min(freeable, batch_size));
+
        total_scan += delta;
        if (total_scan < 0) {
                pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
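To see why the clamp above matters, take the in-tree defaults at the time of this change (DEF_PRIORITY = 12, DEFAULT_SEEKS = 2, SHRINK_BATCH = 128) and a small cgroup whose shrinker reports freeable = 100: previously delta = (100 >> 12) * 4 / 2 = 0, so those objects were effectively never scanned; with the new line, delta = max(0, min(100, 128)) = 100, so even a tiny cgroup receives some reclaim pressure. The numbers are an illustrative reading of the hunk, not part of the patch.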
index ae91e2d40056b7ad12db2d934c8f76c565769ec3..3a7b0773536b8e226546ebe417463fcba5c92ba4 100644 (file)
@@ -83,6 +83,7 @@ enum {
 
 struct smp_dev {
        /* Secure Connections OOB data */
+       bool                    local_oob;
        u8                      local_pk[64];
        u8                      local_rand[16];
        bool                    debug_key;
@@ -599,6 +600,8 @@ int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16])
 
        memcpy(rand, smp->local_rand, 16);
 
+       smp->local_oob = true;
+
        return 0;
 }
 
@@ -1785,7 +1788,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
         * successfully received our local OOB data - therefore set the
         * flag to indicate that local OOB is in use.
         */
-       if (req->oob_flag == SMP_OOB_PRESENT)
+       if (req->oob_flag == SMP_OOB_PRESENT && SMP_DEV(hdev)->local_oob)
                set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags);
 
        /* SMP over BR/EDR requires special treatment */
@@ -1967,7 +1970,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
         * successfully received our local OOB data - therefore set the
         * flag to indicate that local OOB is in use.
         */
-       if (rsp->oob_flag == SMP_OOB_PRESENT)
+       if (rsp->oob_flag == SMP_OOB_PRESENT && SMP_DEV(hdev)->local_oob)
                set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags);
 
        smp->prsp[0] = SMP_CMD_PAIRING_RSP;
@@ -2697,7 +2700,13 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
         * key was set/generated.
         */
        if (test_bit(SMP_FLAG_LOCAL_OOB, &smp->flags)) {
-               struct smp_dev *smp_dev = chan->data;
+               struct l2cap_chan *hchan = hdev->smp_data;
+               struct smp_dev *smp_dev;
+
+               if (!hchan || !hchan->data)
+                       return SMP_UNSPECIFIED;
+
+               smp_dev = hchan->data;
 
                tfm_ecdh = smp_dev->tfm_ecdh;
        } else {
@@ -3230,6 +3239,7 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid)
                return ERR_CAST(tfm_ecdh);
        }
 
+       smp->local_oob = false;
        smp->tfm_aes = tfm_aes;
        smp->tfm_cmac = tfm_cmac;
        smp->tfm_ecdh = tfm_ecdh;
index 325fc5088370b5b0f06daaaf990c5cee86dfecce..82114e1111e6558d5b8ecc2207aac679e21698c6 100644 (file)
@@ -93,7 +93,6 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
-#include <linux/notifier.h>
 #include <linux/skbuff.h>
 #include <linux/bpf.h>
 #include <linux/bpf_trace.h>
index c25eb36f13204e0df620e6d9eb76783fe67ad243..5e00f2b85a5681a32cd905f0c02a303c5ef209af 100644 (file)
@@ -2282,14 +2282,21 @@ static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
        .arg2_type      = ARG_ANYTHING,
 };
 
+#define sk_msg_iter_var(var)                   \
+       do {                                    \
+               var++;                          \
+               if (var == MAX_SKB_FRAGS)       \
+                       var = 0;                \
+       } while (0)
+
 BPF_CALL_4(bpf_msg_pull_data,
           struct sk_msg_buff *, msg, u32, start, u32, end, u64, flags)
 {
-       unsigned int len = 0, offset = 0, copy = 0;
+       unsigned int len = 0, offset = 0, copy = 0, poffset = 0;
+       int bytes = end - start, bytes_sg_total;
        struct scatterlist *sg = msg->sg_data;
        int first_sg, last_sg, i, shift;
        unsigned char *p, *to, *from;
-       int bytes = end - start;
        struct page *page;
 
        if (unlikely(flags || end <= start))
@@ -2299,21 +2306,22 @@ BPF_CALL_4(bpf_msg_pull_data,
        i = msg->sg_start;
        do {
                len = sg[i].length;
-               offset += len;
                if (start < offset + len)
                        break;
-               i++;
-               if (i == MAX_SKB_FRAGS)
-                       i = 0;
+               offset += len;
+               sk_msg_iter_var(i);
        } while (i != msg->sg_end);
 
        if (unlikely(start >= offset + len))
                return -EINVAL;
 
-       if (!msg->sg_copy[i] && bytes <= len)
-               goto out;
-
        first_sg = i;
+       /* The start may point into the sg element so we need to also
+        * account for the headroom.
+        */
+       bytes_sg_total = start - offset + bytes;
+       if (!msg->sg_copy[i] && bytes_sg_total <= len)
+               goto out;
 
        /* At this point we need to linearize multiple scatterlist
         * elements or a single shared page. Either way we need to
@@ -2327,37 +2335,33 @@ BPF_CALL_4(bpf_msg_pull_data,
         */
        do {
                copy += sg[i].length;
-               i++;
-               if (i == MAX_SKB_FRAGS)
-                       i = 0;
-               if (bytes < copy)
+               sk_msg_iter_var(i);
+               if (bytes_sg_total <= copy)
                        break;
        } while (i != msg->sg_end);
        last_sg = i;
 
-       if (unlikely(copy < end - start))
+       if (unlikely(bytes_sg_total > copy))
                return -EINVAL;
 
-       page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC, get_order(copy));
+       page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
+                          get_order(copy));
        if (unlikely(!page))
                return -ENOMEM;
        p = page_address(page);
-       offset = 0;
 
        i = first_sg;
        do {
                from = sg_virt(&sg[i]);
                len = sg[i].length;
-               to = p + offset;
+               to = p + poffset;
 
                memcpy(to, from, len);
-               offset += len;
+               poffset += len;
                sg[i].length = 0;
                put_page(sg_page(&sg[i]));
 
-               i++;
-               if (i == MAX_SKB_FRAGS)
-                       i = 0;
+               sk_msg_iter_var(i);
        } while (i != last_sg);
 
        sg[first_sg].length = copy;
@@ -2367,11 +2371,15 @@ BPF_CALL_4(bpf_msg_pull_data,
         * had a single entry though we can just replace it and
         * be done. Otherwise walk the ring and shift the entries.
         */
-       shift = last_sg - first_sg - 1;
+       WARN_ON_ONCE(last_sg == first_sg);
+       shift = last_sg > first_sg ?
+               last_sg - first_sg - 1 :
+               MAX_SKB_FRAGS - first_sg + last_sg - 1;
        if (!shift)
                goto out;
 
-       i = first_sg + 1;
+       i = first_sg;
+       sk_msg_iter_var(i);
        do {
                int move_from;
 
@@ -2388,15 +2396,13 @@ BPF_CALL_4(bpf_msg_pull_data,
                sg[move_from].page_link = 0;
                sg[move_from].offset = 0;
 
-               i++;
-               if (i == MAX_SKB_FRAGS)
-                       i = 0;
+               sk_msg_iter_var(i);
        } while (1);
        msg->sg_end -= shift;
        if (msg->sg_end < 0)
                msg->sg_end += MAX_SKB_FRAGS;
 out:
-       msg->data = sg_virt(&sg[i]) + start - offset;
+       msg->data = sg_virt(&sg[first_sg]) + start - offset;
        msg->data_end = msg->data + bytes;
 
        return 0;
@@ -7281,7 +7287,7 @@ static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
                break;
 
        case offsetof(struct sk_reuseport_md, ip_protocol):
-               BUILD_BUG_ON(hweight_long(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
+               BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
                SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset,
                                                    BPF_W, 0);
                *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
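The bpf_msg_pull_data() rework above hinges on treating sg_data[] as a ring of MAX_SKB_FRAGS slots: every index increment must wrap, which is what the new sk_msg_iter_var() macro encapsulates, and the first_sg/last_sg arithmetic now also handles the wrapped case. A simplified, hedged sketch of the walk that macro enables; msg_ring_length() is an invented function, not the actual bpf_msg_pull_data() logic.

	/* Illustrative only: total bytes queued between sg_start and sg_end,
	 * walking the scatterlist ring with the wrapping iterator. */
	static unsigned int msg_ring_length(struct sk_msg_buff *msg)
	{
		unsigned int total = 0;
		int i = msg->sg_start;

		do {
			total += msg->sg_data[i].length;
			sk_msg_iter_var(i);	/* i = (i + 1) % MAX_SKB_FRAGS */
		} while (i != msg->sg_end);

		return total;
	}
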
index aa19d86937afabf5fb7dd29ce7c2379241281c06..91592fceeaad7225ed695aebef01fbc1fa4e284b 100644 (file)
@@ -1180,6 +1180,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
                lladdr = neigh->ha;
        }
 
+       /* Update confirmed timestamp for neighbour entry after we
+        * received ARP packet even if it doesn't change IP to MAC binding.
+        */
+       if (new & NUD_CONNECTED)
+               neigh->confirmed = jiffies;
+
        /* If entry was valid and address is not changed,
           do not change entry state, if new one is STALE.
         */
@@ -1201,15 +1207,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
                }
        }
 
-       /* Update timestamps only once we know we will make a change to the
+       /* Update timestamp only once we know we will make a change to the
         * neighbour entry. Otherwise we risk to move the locktime window with
         * noop updates and ignore relevant ARP updates.
         */
-       if (new != old || lladdr != neigh->ha) {
-               if (new & NUD_CONNECTED)
-                       neigh->confirmed = jiffies;
+       if (new != old || lladdr != neigh->ha)
                neigh->updated = jiffies;
-       }
 
        if (new != old) {
                neigh_del_timer(neigh);
index 738871af5efaa246b55c1803259e98f67a60ae54..670c84b1bfc23bbb9dde13b4df21a503350d390e 100644 (file)
@@ -1001,22 +1001,18 @@ static int register_pernet_operations(struct list_head *list,
        int error;
 
        if (ops->id) {
-again:
-               error = ida_get_new_above(&net_generic_ids, MIN_PERNET_OPS_ID, ops->id);
-               if (error < 0) {
-                       if (error == -EAGAIN) {
-                               ida_pre_get(&net_generic_ids, GFP_KERNEL);
-                               goto again;
-                       }
+               error = ida_alloc_min(&net_generic_ids, MIN_PERNET_OPS_ID,
+                               GFP_KERNEL);
+               if (error < 0)
                        return error;
-               }
+               *ops->id = error;
                max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
        }
        error = __register_pernet_operations(list, ops);
        if (error) {
                rcu_barrier();
                if (ops->id)
-                       ida_remove(&net_generic_ids, *ops->id);
+                       ida_free(&net_generic_ids, *ops->id);
        }
 
        return error;
@@ -1027,7 +1023,7 @@ static void unregister_pernet_operations(struct pernet_operations *ops)
        __unregister_pernet_operations(ops);
        rcu_barrier();
        if (ops->id)
-               ida_remove(&net_generic_ids, *ops->id);
+               ida_free(&net_generic_ids, *ops->id);
 }
 
 /**
index 24431e578310cb1602c05ea26a24069d49309c74..63ce2283a456a4aeabf3c10b681fe63195209101 100644 (file)
@@ -324,6 +324,10 @@ void rtnl_unregister_all(int protocol)
 
        rtnl_lock();
        tab = rtnl_msg_handlers[protocol];
+       if (!tab) {
+               rtnl_unlock();
+               return;
+       }
        RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
        for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
                link = tab[msgindex];
@@ -2806,7 +2810,7 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
        }
 
        if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
-               __dev_notify_flags(dev, old_flags, 0U);
+               __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags));
        } else {
                dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
                __dev_notify_flags(dev, old_flags, ~0U);
index c996c09d095f59c6a54ce28ff4c71bb0ac467c64..b2c807f67aba5847fa0c9f07adabbff7cf1afd22 100644 (file)
@@ -939,9 +939,6 @@ struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
 
        WARN_ON_ONCE(!in_task());
 
-       if (!sock_flag(sk, SOCK_ZEROCOPY))
-               return NULL;
-
        skb = sock_omalloc(sk, 0, GFP_KERNEL);
        if (!skb)
                return NULL;
index e63c554e0623e54971ed90af69ff0d0c35ecebad..9f3209ff7ffde754230720ca9117111fdbd7c3bc 100644 (file)
 #include <linux/of_mdio.h>
 #include <linux/of_platform.h>
 #include <linux/of_net.h>
-#include <linux/of_gpio.h>
 #include <linux/netdevice.h>
 #include <linux/sysfs.h>
 #include <linux/phy_fixed.h>
 #include <linux/ptp_classify.h>
-#include <linux/gpio/consumer.h>
 #include <linux/etherdevice.h>
 
 #include "dsa_priv.h"
index 962c4fd338ba57e4344eeb62a648de59a5460c36..1c45c1d6d241dbcca8542c51eb0f25b903131756 100644 (file)
@@ -767,7 +767,6 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev,
        const struct tc_action *a;
        struct dsa_port *to_dp;
        int err = -EOPNOTSUPP;
-       LIST_HEAD(actions);
 
        if (!ds->ops->port_mirror_add)
                return err;
@@ -775,8 +774,7 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev,
        if (!tcf_exts_has_one_action(cls->exts))
                return err;
 
-       tcf_exts_to_list(cls->exts, &actions);
-       a = list_first_entry(&actions, struct tc_action, list);
+       a = tcf_exts_first_action(cls->exts);
 
        if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
                struct dsa_mall_mirror_tc_entry *mirror;
index 20fda8fb8ffda40a9ec61dead5ebacdbc4c1bc4a..1fbe2f815474cd09331990d49afb140932cdd21b 100644 (file)
@@ -1377,6 +1377,7 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                if (encap)
                        skb_reset_inner_headers(skb);
                skb->network_header = (u8 *)iph - skb->head;
+               skb_reset_mac_len(skb);
        } while ((skb = skb->next));
 
 out:
index cf75f8944b05eb302c11cd8e2e0e9f762ab7e35c..4da39446da2d89b529973eb33902577a0e6cbb54 100644 (file)
@@ -820,10 +820,9 @@ static void igmp_timer_expire(struct timer_list *t)
        spin_lock(&im->lock);
        im->tm_running = 0;
 
-       if (im->unsolicit_count) {
-               im->unsolicit_count--;
+       if (im->unsolicit_count && --im->unsolicit_count)
                igmp_start_timer(im, unsolicited_report_interval(in_dev));
-       }
+
        im->reporter = 1;
        spin_unlock(&im->lock);
 
@@ -1308,6 +1307,8 @@ static void igmp_group_added(struct ip_mc_list *im)
 
        if (in_dev->dead)
                return;
+
+       im->unsolicit_count = net->ipv4.sysctl_igmp_qrv;
        if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
                spin_lock_bh(&im->lock);
                igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY);
@@ -1391,9 +1392,6 @@ static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
                              unsigned int mode)
 {
        struct ip_mc_list *im;
-#ifdef CONFIG_IP_MULTICAST
-       struct net *net = dev_net(in_dev->dev);
-#endif
 
        ASSERT_RTNL();
 
@@ -1420,7 +1418,6 @@ static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
        spin_lock_init(&im->lock);
 #ifdef CONFIG_IP_MULTICAST
        timer_setup(&im->timer, igmp_timer_expire, 0);
-       im->unsolicit_count = net->ipv4.sysctl_igmp_qrv;
 #endif
 
        im->next_rcu = in_dev->mc_list;
index 88281fbce88ce8f1062b99594665766c2a5f5b74..e7227128df2c8fd54727c234f76043133809bd1e 100644 (file)
@@ -599,6 +599,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
                        nextp = &fp->next;
                        fp->prev = NULL;
                        memset(&fp->rbnode, 0, sizeof(fp->rbnode));
+                       fp->sk = NULL;
                        head->data_len += fp->len;
                        head->len += fp->len;
                        if (head->ip_summed != fp->ip_summed)
index 51a5d06085ac44777c622a58476d7700d7bc0a97..8cce0e9ea08cb454f28b4dfdd17bb2875470a7f6 100644 (file)
@@ -178,6 +178,9 @@ static void ipgre_err(struct sk_buff *skb, u32 info,
 
        if (tpi->proto == htons(ETH_P_TEB))
                itn = net_generic(net, gre_tap_net_id);
+       else if (tpi->proto == htons(ETH_P_ERSPAN) ||
+                tpi->proto == htons(ETH_P_ERSPAN2))
+               itn = net_generic(net, erspan_net_id);
        else
                itn = net_generic(net, ipgre_net_id);
 
@@ -328,6 +331,8 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
                return PACKET_RCVD;
        }
+       return PACKET_REJECT;
+
 drop:
        kfree_skb(skb);
        return PACKET_RCVD;
@@ -1508,11 +1513,14 @@ nla_put_failure:
 
 static void erspan_setup(struct net_device *dev)
 {
+       struct ip_tunnel *t = netdev_priv(dev);
+
        ether_setup(dev);
        dev->netdev_ops = &erspan_netdev_ops;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        ip_tunnel_setup(dev, erspan_net_id);
+       t->erspan_ver = 1;
 }
 
 static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
index d9504adc47b3df6e3311cd5860e34deb8efe6c95..184bf2e0a1edfce0dea43ef6d06fd8c8573f71a8 100644 (file)
@@ -106,6 +106,10 @@ config NF_NAT_IPV4
 
 if NF_NAT_IPV4
 
+config NF_NAT_MASQUERADE_IPV4
+       bool
+
+if NF_TABLES
 config NFT_CHAIN_NAT_IPV4
        depends on NF_TABLES_IPV4
        tristate "IPv4 nf_tables nat chain support"
@@ -115,9 +119,6 @@ config NFT_CHAIN_NAT_IPV4
          packet transformations such as the source, destination address and
          source and destination ports.
 
-config NF_NAT_MASQUERADE_IPV4
-       bool
-
 config NFT_MASQ_IPV4
        tristate "IPv4 masquerading support for nf_tables"
        depends on NF_TABLES_IPV4
@@ -135,6 +136,7 @@ config NFT_REDIR_IPV4
        help
          This is the expression that provides IPv4 redirect support for
          nf_tables.
+endif # NF_TABLES
 
 config NF_NAT_SNMP_BASIC
        tristate "Basic SNMP-ALG support"
index b8af2fec5ad59a0ddd2590499e2c1e977646485c..10c6246396cc016b59fc3e368602684dd68566a2 100644 (file)
@@ -1185,7 +1185,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
 
        flags = msg->msg_flags;
 
-       if (flags & MSG_ZEROCOPY && size) {
+       if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
                if (sk->sk_state != TCP_ESTABLISHED) {
                        err = -EINVAL;
                        goto out_err;
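
Taken together, the skbuff.c and tcp.c hunks above move the SOCK_ZEROCOPY check out of sock_zerocopy_alloc() and into the tcp_sendmsg_locked() condition, so MSG_ZEROCOPY on a socket that never set SO_ZEROCOPY simply falls back to an ordinary copied send. For context, a userspace sender opts in roughly like this (illustrative sketch; the fallback constants are the asm-generic values):

#include <sys/types.h>
#include <sys/socket.h>

#ifndef SO_ZEROCOPY
#define SO_ZEROCOPY 60			/* asm-generic/socket.h */
#endif
#ifndef MSG_ZEROCOPY
#define MSG_ZEROCOPY 0x4000000		/* linux/socket.h */
#endif

/* Enable zerocopy on the socket, then request it per call. */
static ssize_t send_zerocopy(int fd, const void *buf, size_t len)
{
	int one = 1;

	if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)) < 0)
		return -1;
	return send(fd, buf, len, MSG_ZEROCOPY);
}

A real caller also has to reap completion notifications from the socket error queue, which is omitted here.
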
index 13d34427ca3dd5bee810395ba4a1ab9759863182..02ff2dde96094cf33b662a20994424a7adea509e 100644 (file)
@@ -95,11 +95,10 @@ struct bbr {
        u32     mode:3,              /* current bbr_mode in state machine */
                prev_ca_state:3,     /* CA state on previous ACK */
                packet_conservation:1,  /* use packet conservation? */
-               restore_cwnd:1,      /* decided to revert cwnd to old value */
                round_start:1,       /* start of packet-timed tx->ack round? */
                idle_restart:1,      /* restarting after idle? */
                probe_rtt_round_done:1,  /* a BBR_PROBE_RTT round at 4 pkts? */
-               unused:12,
+               unused:13,
                lt_is_sampling:1,    /* taking long-term ("LT") samples now? */
                lt_rtt_cnt:7,        /* round trips in long-term interval */
                lt_use_bw:1;         /* use lt_bw as our bw estimate? */
@@ -175,6 +174,8 @@ static const u32 bbr_lt_bw_diff = 4000 / 8;
 /* If we estimate we're policed, use lt_bw for this many round trips: */
 static const u32 bbr_lt_bw_max_rtts = 48;
 
+static void bbr_check_probe_rtt_done(struct sock *sk);
+
 /* Do we estimate that STARTUP filled the pipe? */
 static bool bbr_full_bw_reached(const struct sock *sk)
 {
@@ -309,6 +310,8 @@ static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
                 */
                if (bbr->mode == BBR_PROBE_BW)
                        bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT);
+               else if (bbr->mode == BBR_PROBE_RTT)
+                       bbr_check_probe_rtt_done(sk);
        }
 }
 
@@ -396,17 +399,11 @@ static bool bbr_set_cwnd_to_recover_or_restore(
                cwnd = tcp_packets_in_flight(tp) + acked;
        } else if (prev_state >= TCP_CA_Recovery && state < TCP_CA_Recovery) {
                /* Exiting loss recovery; restore cwnd saved before recovery. */
-               bbr->restore_cwnd = 1;
+               cwnd = max(cwnd, bbr->prior_cwnd);
                bbr->packet_conservation = 0;
        }
        bbr->prev_ca_state = state;
 
-       if (bbr->restore_cwnd) {
-               /* Restore cwnd after exiting loss recovery or PROBE_RTT. */
-               cwnd = max(cwnd, bbr->prior_cwnd);
-               bbr->restore_cwnd = 0;
-       }
-
        if (bbr->packet_conservation) {
                *new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked);
                return true;    /* yes, using packet conservation */
@@ -423,10 +420,10 @@ static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);
-       u32 cwnd = 0, target_cwnd = 0;
+       u32 cwnd = tp->snd_cwnd, target_cwnd = 0;
 
        if (!acked)
-               return;
+               goto done;  /* no packet fully ACKed; just apply caps */
 
        if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
                goto done;
@@ -748,6 +745,20 @@ static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs)
                bbr_reset_probe_bw_mode(sk);  /* we estimate queue is drained */
 }
 
+static void bbr_check_probe_rtt_done(struct sock *sk)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct bbr *bbr = inet_csk_ca(sk);
+
+       if (!(bbr->probe_rtt_done_stamp &&
+             after(tcp_jiffies32, bbr->probe_rtt_done_stamp)))
+               return;
+
+       bbr->min_rtt_stamp = tcp_jiffies32;  /* wait a while until PROBE_RTT */
+       tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd);
+       bbr_reset_mode(sk);
+}
+
 /* The goal of PROBE_RTT mode is to have BBR flows cooperatively and
  * periodically drain the bottleneck queue, to converge to measure the true
  * min_rtt (unloaded propagation delay). This allows the flows to keep queues
@@ -806,12 +817,8 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
                } else if (bbr->probe_rtt_done_stamp) {
                        if (bbr->round_start)
                                bbr->probe_rtt_round_done = 1;
-                       if (bbr->probe_rtt_round_done &&
-                           after(tcp_jiffies32, bbr->probe_rtt_done_stamp)) {
-                               bbr->min_rtt_stamp = tcp_jiffies32;
-                               bbr->restore_cwnd = 1;  /* snap to prior_cwnd */
-                               bbr_reset_mode(sk);
-                       }
+                       if (bbr->probe_rtt_round_done)
+                               bbr_check_probe_rtt_done(sk);
                }
        }
        /* Restart after idle ends only once we process a new S/ACK for data */
@@ -862,7 +869,6 @@ static void bbr_init(struct sock *sk)
        bbr->has_seen_rtt = 0;
        bbr_init_pacing_rate_from_rtt(sk);
 
-       bbr->restore_cwnd = 0;
        bbr->round_start = 0;
        bbr->idle_restart = 0;
        bbr->full_bw_reached = 0;
index 4c2dd9f863f76eb6e55202b19e1acebf4fbac887..4cf2f7bb2802ad4ae968b5a6dfb9d005ed619c76 100644 (file)
@@ -6367,8 +6367,8 @@ static bool tcp_syn_flood_action(const struct sock *sk,
        if (!queue->synflood_warned &&
            net->ipv4.sysctl_tcp_syncookies != 2 &&
            xchg(&queue->synflood_warned, 1) == 0)
-               pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
-                       proto, ntohs(tcp_hdr(skb)->dest), msg);
+               net_info_ratelimited("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
+                                    proto, ntohs(tcp_hdr(skb)->dest), msg);
 
        return want_cookie;
 }
index 9e041fa5c545367961f03fa8a9124aebbc1b6c69..44c09eddbb781c03da2417aaa925e360de01a6e9 100644 (file)
@@ -2517,6 +2517,12 @@ static int __net_init tcp_sk_init(struct net *net)
                if (res)
                        goto fail;
                sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
+
+               /* Please enforce IP_DF and IPID==0 for RST and
+                * ACK sent in SYN-RECV and TIME-WAIT state.
+                */
+               inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
+
                *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
        }
 
index 75ef332a7caf44de619acf030977eba01565c70a..12affb7864d981a6494059232c4965aaee756803 100644 (file)
@@ -184,8 +184,9 @@ kill:
                                inet_twsk_deschedule_put(tw);
                                return TCP_TW_SUCCESS;
                        }
+               } else {
+                       inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
                }
-               inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
 
                if (tmp_opt.saw_tstamp) {
                        tcptw->tw_ts_recent       = tmp_opt.rcv_tsval;
index f4e35b2ff8b8dc3956da415cb6a29bb147e25f04..7d69dd6fa7e8c63929a27edad74fb0d6f9f3ee31 100644 (file)
@@ -2124,6 +2124,28 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
                                                         inet_compute_pseudo);
 }
 
+/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
+ * return code conversion for ip layer consumption
+ */
+static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
+                              struct udphdr *uh)
+{
+       int ret;
+
+       if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
+               skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
+                                        inet_compute_pseudo);
+
+       ret = udp_queue_rcv_skb(sk, skb);
+
+       /* a return value > 0 means to resubmit the input, but
+        * it wants the return to be -protocol, or 0
+        */
+       if (ret > 0)
+               return -ret;
+       return 0;
+}
+
 /*
  *     All we need to do is get the socket, and then do a checksum.
  */
@@ -2170,14 +2192,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                if (unlikely(sk->sk_rx_dst != dst))
                        udp_sk_rx_dst_set(sk, dst);
 
-               ret = udp_queue_rcv_skb(sk, skb);
+               ret = udp_unicast_rcv_skb(sk, skb, uh);
                sock_put(sk);
-               /* a return value > 0 means to resubmit the input, but
-                * it wants the return to be -protocol, or 0
-                */
-               if (ret > 0)
-                       return -ret;
-               return 0;
+               return ret;
        }
 
        if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
@@ -2185,22 +2202,8 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                                                saddr, daddr, udptable, proto);
 
        sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
-       if (sk) {
-               int ret;
-
-               if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
-                       skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
-                                                inet_compute_pseudo);
-
-               ret = udp_queue_rcv_skb(sk, skb);
-
-               /* a return value > 0 means to resubmit the input, but
-                * it wants the return to be -protocol, or 0
-                */
-               if (ret > 0)
-                       return -ret;
-               return 0;
-       }
+       if (sk)
+               return udp_unicast_rcv_skb(sk, skb, uh);
 
        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto drop;
index 2fac4ad748672cd62de6653d3fdedebe743c6ad0..d51a8c0b3372d09ad1c78b76f94b4ebaf7ca3f61 100644 (file)
@@ -2398,7 +2398,7 @@ static void addrconf_add_mroute(struct net_device *dev)
 
        ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
 
-       ip6_route_add(&cfg, GFP_ATOMIC, NULL);
+       ip6_route_add(&cfg, GFP_KERNEL, NULL);
 }
 
 static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
@@ -3062,7 +3062,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
        if (addr.s6_addr32[3]) {
                add_addr(idev, &addr, plen, scope);
                addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags,
-                                     GFP_ATOMIC);
+                                     GFP_KERNEL);
                return;
        }
 
@@ -3087,7 +3087,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
 
                                add_addr(idev, &addr, plen, flag);
                                addrconf_prefix_route(&addr, plen, 0, idev->dev,
-                                                     0, pflags, GFP_ATOMIC);
+                                                     0, pflags, GFP_KERNEL);
                        }
                }
        }
index 673bba31eb1807eb04ee0d0c072333000bde4305..9a4261e502727005b3ea5e26795afd747533ee9e 100644 (file)
@@ -938,14 +938,14 @@ static int __init inet6_init(void)
 
        err = proto_register(&pingv6_prot, 1);
        if (err)
-               goto out_unregister_ping_proto;
+               goto out_unregister_raw_proto;
 
        /* We MUST register RAW sockets before we create the ICMP6,
         * IGMP6, or NDISC control sockets.
         */
        err = rawv6_init();
        if (err)
-               goto out_unregister_raw_proto;
+               goto out_unregister_ping_proto;
 
        /* Register the family here so that the init calls below will
         * be able to create sockets. (?? is this dangerous ??)
@@ -1113,11 +1113,11 @@ netfilter_fail:
 igmp_fail:
        ndisc_cleanup();
 ndisc_fail:
-       ip6_mr_cleanup();
+       icmpv6_cleanup();
 icmp_fail:
-       unregister_pernet_subsys(&inet6_net_ops);
+       ip6_mr_cleanup();
 ipmr_fail:
-       icmpv6_cleanup();
+       unregister_pernet_subsys(&inet6_net_ops);
 register_pernet_fail:
        sock_unregister(PF_INET6);
        rtnl_unregister_all(PF_INET6);
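
The inet6_init() hunks above repair the error-unwind ordering: each failure now jumps to a label that undoes only the steps that had already succeeded, in reverse order. A generic, self-contained sketch of that idiom (hypothetical init/teardown names, unrelated to the actual IPv6 setup steps):

#include <stdio.h>

static int init_a(void)  { return 0; }
static int init_b(void)  { return 0; }
static int init_c(void)  { return -1; }	/* pretend the last step fails */
static void teardown_a(void) { puts("undo a"); }
static void teardown_b(void) { puts("undo b"); }

static int example_init(void)
{
	int err;

	err = init_a();
	if (err)
		goto out;
	err = init_b();
	if (err)
		goto undo_a;
	err = init_c();
	if (err)
		goto undo_b;
	return 0;

undo_b:
	teardown_b();
undo_a:
	teardown_a();
out:
	return err;
}

int main(void)
{
	return example_init() ? 1 : 0;
}
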
index d212738e9d100d4e3270f9188466da6b8a3d186c..5516f55e214bd85ff7a07cf8c24648db327902c2 100644 (file)
@@ -198,6 +198,8 @@ void fib6_info_destroy_rcu(struct rcu_head *head)
                }
        }
 
+       lwtstate_put(f6i->fib6_nh.nh_lwtstate);
+
        if (f6i->fib6_nh.nh_dev)
                dev_put(f6i->fib6_nh.nh_dev);
 
@@ -987,7 +989,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
                                        fib6_clean_expires(iter);
                                else
                                        fib6_set_expires(iter, rt->expires);
-                               fib6_metric_set(iter, RTAX_MTU, rt->fib6_pmtu);
+
+                               if (rt->fib6_pmtu)
+                                       fib6_metric_set(iter, RTAX_MTU,
+                                                       rt->fib6_pmtu);
                                return -EEXIST;
                        }
                        /* If we have the same destination and the same metric,
index 18a3794b0f52e1dd7b8bf5179bcdcfdbd882f158..e493b041d4ac9900d44972a22ebdc898def323fe 100644 (file)
@@ -1778,6 +1778,7 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
        if (data[IFLA_GRE_COLLECT_METADATA])
                parms->collect_md = true;
 
+       parms->erspan_ver = 1;
        if (data[IFLA_GRE_ERSPAN_VER])
                parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
 
index 37ff4805b20c73fd3c404a0904985bae68f21f23..c7e495f1201105f1ac1724a7b8fd82399efcce32 100644 (file)
@@ -115,6 +115,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
                        payload_len = skb->len - nhoff - sizeof(*ipv6h);
                ipv6h->payload_len = htons(payload_len);
                skb->network_header = (u8 *)ipv6h - skb->head;
+               skb_reset_mac_len(skb);
 
                if (udpfrag) {
                        int err = ip6_find_1stfragopt(skb, &prevhdr);
index 16f200f06500758c4cae84ea16229d5dbce912cb..f9f8f554d141676a7d342f85088d12d9a6815e9d 100644 (file)
@@ -219,12 +219,10 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
                                kfree_skb(skb);
                                return -ENOBUFS;
                        }
+                       if (skb->sk)
+                               skb_set_owner_w(skb2, skb->sk);
                        consume_skb(skb);
                        skb = skb2;
-                       /* skb_set_owner_w() changes sk->sk_wmem_alloc atomically,
-                        * it is safe to call in our context (socket lock not held)
-                        */
-                       skb_set_owner_w(skb, (struct sock *)sk);
                }
                if (opt->opt_flen)
                        ipv6_push_frag_opts(skb, opt, &proto);
index 5df2a58d945cc2d37fc1a30c7b850d928dcd2ccf..419960b0ba16370ec9c47bee16aad15d9819f349 100644 (file)
@@ -1188,7 +1188,15 @@ route_lookup:
                init_tel_txopt(&opt, encap_limit);
                ipv6_push_frag_opts(skb, &opt.ops, &proto);
        }
-       hop_limit = hop_limit ? : ip6_dst_hoplimit(dst);
+
+       if (hop_limit == 0) {
+               if (skb->protocol == htons(ETH_P_IP))
+                       hop_limit = ip_hdr(skb)->ttl;
+               else if (skb->protocol == htons(ETH_P_IPV6))
+                       hop_limit = ipv6_hdr(skb)->hop_limit;
+               else
+                       hop_limit = ip6_dst_hoplimit(dst);
+       }
 
        /* Calculate max headroom for all the headers and adjust
         * needed_headroom if necessary.
index 38dec9da90d338b4704a8acd4586536b467f75b9..eeaf7455d51e52f12b62ffd0c1c82e2ee09e4fc1 100644 (file)
@@ -481,7 +481,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
        }
 
        mtu = dst_mtu(dst);
-       if (!skb->ignore_df && skb->len > mtu) {
+       if (skb->len > mtu) {
                skb_dst_update_pmtu(skb, mtu);
 
                if (skb->protocol == htons(ETH_P_IPV6)) {
@@ -1094,7 +1094,8 @@ static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n,
        }
 
        t = rtnl_dereference(ip6n->tnls_wc[0]);
-       unregister_netdevice_queue(t->dev, list);
+       if (t)
+               unregister_netdevice_queue(t->dev, list);
 }
 
 static int __net_init vti6_init_net(struct net *net)
index 2a14d8b65924d56c79380fed6d867cf62db42e33..8f68a518d9db7a6f4d489972be3efa68315059a6 100644 (file)
@@ -445,6 +445,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev,  struct net_devic
                else if (head->ip_summed == CHECKSUM_COMPLETE)
                        head->csum = csum_add(head->csum, fp->csum);
                head->truesize += fp->truesize;
+               fp->sk = NULL;
        }
        sub_frag_mem_limit(fq->q.net, head->truesize);
 
index 7208c16302f61adc15636f6a332ff0c02325cfcf..480a79f47c52383e3e254b7ca3e7bd16e473da2f 100644 (file)
@@ -946,8 +946,6 @@ static void ip6_rt_init_dst_reject(struct rt6_info *rt, struct fib6_info *ort)
 
 static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
 {
-       rt->dst.flags |= fib6_info_dst_flags(ort);
-
        if (ort->fib6_flags & RTF_REJECT) {
                ip6_rt_init_dst_reject(rt, ort);
                return;
@@ -956,7 +954,7 @@ static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
        rt->dst.error = 0;
        rt->dst.output = ip6_output;
 
-       if (ort->fib6_type == RTN_LOCAL) {
+       if (ort->fib6_type == RTN_LOCAL || ort->fib6_type == RTN_ANYCAST) {
                rt->dst.input = ip6_input;
        } else if (ipv6_addr_type(&ort->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
                rt->dst.input = ip6_mc_input;
@@ -996,7 +994,6 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort)
        rt->rt6i_src = ort->fib6_src;
 #endif
        rt->rt6i_prefsrc = ort->fib6_prefsrc;
-       rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.nh_lwtstate);
 }
 
 static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
@@ -4671,20 +4668,31 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
                         int iif, int type, u32 portid, u32 seq,
                         unsigned int flags)
 {
-       struct rtmsg *rtm;
+       struct rt6_info *rt6 = (struct rt6_info *)dst;
+       struct rt6key *rt6_dst, *rt6_src;
+       u32 *pmetrics, table, rt6_flags;
        struct nlmsghdr *nlh;
+       struct rtmsg *rtm;
        long expires = 0;
-       u32 *pmetrics;
-       u32 table;
 
        nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
        if (!nlh)
                return -EMSGSIZE;
 
+       if (rt6) {
+               rt6_dst = &rt6->rt6i_dst;
+               rt6_src = &rt6->rt6i_src;
+               rt6_flags = rt6->rt6i_flags;
+       } else {
+               rt6_dst = &rt->fib6_dst;
+               rt6_src = &rt->fib6_src;
+               rt6_flags = rt->fib6_flags;
+       }
+
        rtm = nlmsg_data(nlh);
        rtm->rtm_family = AF_INET6;
-       rtm->rtm_dst_len = rt->fib6_dst.plen;
-       rtm->rtm_src_len = rt->fib6_src.plen;
+       rtm->rtm_dst_len = rt6_dst->plen;
+       rtm->rtm_src_len = rt6_src->plen;
        rtm->rtm_tos = 0;
        if (rt->fib6_table)
                table = rt->fib6_table->tb6_id;
@@ -4699,7 +4707,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
        rtm->rtm_scope = RT_SCOPE_UNIVERSE;
        rtm->rtm_protocol = rt->fib6_protocol;
 
-       if (rt->fib6_flags & RTF_CACHE)
+       if (rt6_flags & RTF_CACHE)
                rtm->rtm_flags |= RTM_F_CLONED;
 
        if (dest) {
@@ -4707,7 +4715,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
                        goto nla_put_failure;
                rtm->rtm_dst_len = 128;
        } else if (rtm->rtm_dst_len)
-               if (nla_put_in6_addr(skb, RTA_DST, &rt->fib6_dst.addr))
+               if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
                        goto nla_put_failure;
 #ifdef CONFIG_IPV6_SUBTREES
        if (src) {
@@ -4715,12 +4723,12 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
                        goto nla_put_failure;
                rtm->rtm_src_len = 128;
        } else if (rtm->rtm_src_len &&
-                  nla_put_in6_addr(skb, RTA_SRC, &rt->fib6_src.addr))
+                  nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
                goto nla_put_failure;
 #endif
        if (iif) {
 #ifdef CONFIG_IPV6_MROUTE
-               if (ipv6_addr_is_multicast(&rt->fib6_dst.addr)) {
+               if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
                        int err = ip6mr_get_route(net, skb, rtm, portid);
 
                        if (err == 0)
@@ -4755,7 +4763,14 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
        /* For multipath routes, walk the siblings list and add
         * each as a nexthop within RTA_MULTIPATH.
         */
-       if (rt->fib6_nsiblings) {
+       if (rt6) {
+               if (rt6_flags & RTF_GATEWAY &&
+                   nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
+                       goto nla_put_failure;
+
+               if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
+                       goto nla_put_failure;
+       } else if (rt->fib6_nsiblings) {
                struct fib6_info *sibling, *next_sibling;
                struct nlattr *mp;
 
@@ -4778,7 +4793,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
                        goto nla_put_failure;
        }
 
-       if (rt->fib6_flags & RTF_EXPIRES) {
+       if (rt6_flags & RTF_EXPIRES) {
                expires = dst ? dst->expires : rt->expires;
                expires -= jiffies;
        }
@@ -4786,7 +4801,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
        if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
                goto nla_put_failure;
 
-       if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->fib6_flags)))
+       if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
                goto nla_put_failure;
 
 
index 83f4c77c79d86eda12ec76b572c4f64546f43ae4..28c4aa5078fcb34b773875d28790e367a5c98ad6 100644 (file)
@@ -752,6 +752,28 @@ static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
        }
 }
 
+/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
+ * return code conversion for ip layer consumption
+ */
+static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
+                               struct udphdr *uh)
+{
+       int ret;
+
+       if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
+               skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
+                                        ip6_compute_pseudo);
+
+       ret = udpv6_queue_rcv_skb(sk, skb);
+
+       /* a return value > 0 means to resubmit the input, but
+        * it wants the return to be -protocol, or 0
+        */
+       if (ret > 0)
+               return -ret;
+       return 0;
+}
+
 int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                   int proto)
 {
@@ -803,13 +825,14 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                if (unlikely(sk->sk_rx_dst != dst))
                        udp6_sk_rx_dst_set(sk, dst);
 
-               ret = udpv6_queue_rcv_skb(sk, skb);
-               sock_put(sk);
+               if (!uh->check && !udp_sk(sk)->no_check6_rx) {
+                       sock_put(sk);
+                       goto report_csum_error;
+               }
 
-               /* a return value > 0 means to resubmit the input */
-               if (ret > 0)
-                       return ret;
-               return 0;
+               ret = udp6_unicast_rcv_skb(sk, skb, uh);
+               sock_put(sk);
+               return ret;
        }
 
        /*
@@ -822,30 +845,13 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
        /* Unicast */
        sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
        if (sk) {
-               int ret;
-
-               if (!uh->check && !udp_sk(sk)->no_check6_rx) {
-                       udp6_csum_zero_error(skb);
-                       goto csum_error;
-               }
-
-               if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
-                       skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
-                                                ip6_compute_pseudo);
-
-               ret = udpv6_queue_rcv_skb(sk, skb);
-
-               /* a return value > 0 means to resubmit the input */
-               if (ret > 0)
-                       return ret;
-
-               return 0;
+               if (!uh->check && !udp_sk(sk)->no_check6_rx)
+                       goto report_csum_error;
+               return udp6_unicast_rcv_skb(sk, skb, uh);
        }
 
-       if (!uh->check) {
-               udp6_csum_zero_error(skb);
-               goto csum_error;
-       }
+       if (!uh->check)
+               goto report_csum_error;
 
        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto discard;
@@ -866,6 +872,9 @@ short_packet:
                            ulen, skb->len,
                            daddr, ntohs(uh->dest));
        goto discard;
+
+report_csum_error:
+       udp6_csum_zero_error(skb);
 csum_error:
        __UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
 discard:
index a21d8ed0a325a5ddafd316b3fc9d972489cb8b7a..e2f16a0173a93bc876e293a68878d83d78cda7ef 100644 (file)
@@ -351,20 +351,28 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
                memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
 
        skb->dev = iucv->hs_dev;
-       if (!skb->dev)
-               return -ENODEV;
-       if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
-               return -ENETDOWN;
+       if (!skb->dev) {
+               err = -ENODEV;
+               goto err_free;
+       }
+       if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) {
+               err = -ENETDOWN;
+               goto err_free;
+       }
        if (skb->len > skb->dev->mtu) {
-               if (sock->sk_type == SOCK_SEQPACKET)
-                       return -EMSGSIZE;
-               else
-                       skb_trim(skb, skb->dev->mtu);
+               if (sock->sk_type == SOCK_SEQPACKET) {
+                       err = -EMSGSIZE;
+                       goto err_free;
+               }
+               skb_trim(skb, skb->dev->mtu);
        }
        skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
        nskb = skb_clone(skb, GFP_ATOMIC);
-       if (!nskb)
-               return -ENOMEM;
+       if (!nskb) {
+               err = -ENOMEM;
+               goto err_free;
+       }
+
        skb_queue_tail(&iucv->send_skb_q, nskb);
        err = dev_queue_xmit(skb);
        if (net_xmit_eval(err)) {
@@ -375,6 +383,10 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
                WARN_ON(atomic_read(&iucv->msg_recv) < 0);
        }
        return net_xmit_eval(err);
+
+err_free:
+       kfree_skb(skb);
+       return err;
 }
 
 static struct sock *__iucv_get_sock_by_name(char *nm)
@@ -1167,7 +1179,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
                err = afiucv_hs_send(&txmsg, sk, skb, 0);
                if (err) {
                        atomic_dec(&iucv->msg_sent);
-                       goto fail;
+                       goto out;
                }
        } else { /* Classic VM IUCV transport */
                skb_queue_tail(&iucv->send_skb_q, skb);
@@ -2155,8 +2167,8 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
        struct sock *sk;
        struct iucv_sock *iucv;
        struct af_iucv_trans_hdr *trans_hdr;
+       int err = NET_RX_SUCCESS;
        char nullstring[8];
-       int err = 0;
 
        if (skb->len < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) {
                WARN_ONCE(1, "AF_IUCV too short skb, len=%d, min=%d",
@@ -2254,7 +2266,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
                err = afiucv_hs_callback_rx(sk, skb);
                break;
        default:
-               ;
+               kfree_skb(skb);
        }
 
        return err;
index 8f7ef167c45a75223dcb00b74174112fafdd0c34..eb502c6290c2ad1668897aedef08e1fc92e299e9 100644 (file)
@@ -1874,7 +1874,7 @@ static void iucv_pm_complete(struct device *dev)
  * Returns 0 if there are still iucv paths defined
  *        1 if there are no iucv paths defined
  */
-int iucv_path_table_empty(void)
+static int iucv_path_table_empty(void)
 {
        int i;
 
index 6449a1c2283bf236728f36dc627de7e91c886cda..f0f5fedb8caacd164d6575ebcca9e27b19cfd516 100644 (file)
@@ -947,8 +947,8 @@ static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata,
        if (len < IEEE80211_DEAUTH_FRAME_LEN)
                return;
 
-       ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM BSSID=%pM (reason: %d)\n",
-                mgmt->sa, mgmt->da, mgmt->bssid, reason);
+       ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+       ibss_dbg(sdata, "\tBSSID=%pM (reason: %d)\n", mgmt->bssid, reason);
        sta_info_destroy_addr(sdata, mgmt->sa);
 }
 
@@ -966,9 +966,9 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
        auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
        auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
 
-       ibss_dbg(sdata,
-                "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n",
-                mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
+       ibss_dbg(sdata, "RX Auth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+       ibss_dbg(sdata, "\tBSSID=%pM (auth_transaction=%d)\n",
+                mgmt->bssid, auth_transaction);
 
        if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
                return;
@@ -1175,10 +1175,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                rx_timestamp = drv_get_tsf(local, sdata);
        }
 
-       ibss_dbg(sdata,
-                "RX beacon SA=%pM BSSID=%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n",
+       ibss_dbg(sdata, "RX beacon SA=%pM BSSID=%pM TSF=0x%llx\n",
                 mgmt->sa, mgmt->bssid,
-                (unsigned long long)rx_timestamp,
+                (unsigned long long)rx_timestamp);
+       ibss_dbg(sdata, "\tBCN=0x%llx diff=%lld @%lu\n",
                 (unsigned long long)beacon_timestamp,
                 (unsigned long long)(rx_timestamp - beacon_timestamp),
                 jiffies);
@@ -1537,9 +1537,9 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
 
        tx_last_beacon = drv_tx_last_beacon(local);
 
-       ibss_dbg(sdata,
-                "RX ProbeReq SA=%pM DA=%pM BSSID=%pM (tx_last_beacon=%d)\n",
-                mgmt->sa, mgmt->da, mgmt->bssid, tx_last_beacon);
+       ibss_dbg(sdata, "RX ProbeReq SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+       ibss_dbg(sdata, "\tBSSID=%pM (tx_last_beacon=%d)\n",
+                mgmt->bssid, tx_last_beacon);
 
        if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
                return;
index 4fb2709cb52796c752f052a746bd5c420d6caf08..513627896204938485ef01f0f2d76606d4418af4 100644 (file)
@@ -256,8 +256,27 @@ static void ieee80211_restart_work(struct work_struct *work)
 
        flush_work(&local->radar_detected_work);
        rtnl_lock();
-       list_for_each_entry(sdata, &local->interfaces, list)
+       list_for_each_entry(sdata, &local->interfaces, list) {
+               /*
+                * XXX: there may be more work for other vif types and even
+                * for station mode: a good thing would be to run most of
+                * the iface type's dependent _stop (ieee80211_mg_stop,
+                * ieee80211_ibss_stop) etc...
+                * For now, fix only the specific bug that was seen: race
+                * between csa_connection_drop_work and us.
+                */
+               if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+                       /*
+                        * This worker is scheduled from the iface worker that
+                        * runs on mac80211's workqueue, so we can't be
+                        * scheduling this worker after the cancel right here.
+                        * The exception is ieee80211_chswitch_done.
+                        * Then we can have a race...
+                        */
+                       cancel_work_sync(&sdata->u.mgd.csa_connection_drop_work);
+               }
                flush_delayed_work(&sdata->dec_tailroom_needed_wk);
+       }
        ieee80211_scan_cancel(local);
 
        /* make sure any new ROC will consider local->in_reconfig */
@@ -471,10 +490,7 @@ static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = {
                cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC |
                            IEEE80211_VHT_CAP_SHORT_GI_80 |
                            IEEE80211_VHT_CAP_SHORT_GI_160 |
-                           IEEE80211_VHT_CAP_RXSTBC_1 |
-                           IEEE80211_VHT_CAP_RXSTBC_2 |
-                           IEEE80211_VHT_CAP_RXSTBC_3 |
-                           IEEE80211_VHT_CAP_RXSTBC_4 |
+                           IEEE80211_VHT_CAP_RXSTBC_MASK |
                            IEEE80211_VHT_CAP_TXSTBC |
                            IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
                            IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
@@ -1208,6 +1224,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 #if IS_ENABLED(CONFIG_IPV6)
        unregister_inet6addr_notifier(&local->ifa6_notifier);
 #endif
+       ieee80211_txq_teardown_flows(local);
 
        rtnl_lock();
 
@@ -1236,7 +1253,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
        skb_queue_purge(&local->skb_queue);
        skb_queue_purge(&local->skb_queue_unreliable);
        skb_queue_purge(&local->skb_queue_tdls_chsw);
-       ieee80211_txq_teardown_flows(local);
 
        destroy_workqueue(local->workqueue);
        wiphy_unregister(local->hw.wiphy);
index 35ad3983ae4b6b26fb13f6718b692c9b9b0d6e98..daf9db3c8f24f389df84d95ae973c969d65622f1 100644 (file)
@@ -572,6 +572,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
                forward = false;
                reply = true;
                target_metric = 0;
+
+               if (SN_GT(target_sn, ifmsh->sn))
+                       ifmsh->sn = target_sn;
+
                if (time_after(jiffies, ifmsh->last_sn_update +
                                        net_traversal_jiffies(sdata)) ||
                    time_before(jiffies, ifmsh->last_sn_update)) {
index 7fb9957359a3c1be557e577ba5b76cc4c1177105..3dbecae4be73cb1aae05752807b717a329b80980 100644 (file)
@@ -1073,6 +1073,10 @@ static void ieee80211_chswitch_work(struct work_struct *work)
         */
 
        if (sdata->reserved_chanctx) {
+               struct ieee80211_supported_band *sband = NULL;
+               struct sta_info *mgd_sta = NULL;
+               enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_20;
+
                /*
                 * with multi-vif csa driver may call ieee80211_csa_finish()
                 * many times while waiting for other interfaces to use their
@@ -1081,6 +1085,48 @@ static void ieee80211_chswitch_work(struct work_struct *work)
                if (sdata->reserved_ready)
                        goto out;
 
+               if (sdata->vif.bss_conf.chandef.width !=
+                   sdata->csa_chandef.width) {
+                       /*
+                        * For managed interface, we need to also update the AP
+                        * station bandwidth and align the rate scale algorithm
+                        * on the bandwidth change. Here we only consider the
+                        * bandwidth of the new channel definition (as channel
+                        * switch flow does not have the full HT/VHT/HE
+                        * information), assuming that if additional changes are
+                        * required they would be done as part of the processing
+                        * of the next beacon from the AP.
+                        */
+                       switch (sdata->csa_chandef.width) {
+                       case NL80211_CHAN_WIDTH_20_NOHT:
+                       case NL80211_CHAN_WIDTH_20:
+                       default:
+                               bw = IEEE80211_STA_RX_BW_20;
+                               break;
+                       case NL80211_CHAN_WIDTH_40:
+                               bw = IEEE80211_STA_RX_BW_40;
+                               break;
+                       case NL80211_CHAN_WIDTH_80:
+                               bw = IEEE80211_STA_RX_BW_80;
+                               break;
+                       case NL80211_CHAN_WIDTH_80P80:
+                       case NL80211_CHAN_WIDTH_160:
+                               bw = IEEE80211_STA_RX_BW_160;
+                               break;
+                       }
+
+                       mgd_sta = sta_info_get(sdata, ifmgd->bssid);
+                       sband =
+                               local->hw.wiphy->bands[sdata->csa_chandef.chan->band];
+               }
+
+               if (sdata->vif.bss_conf.chandef.width >
+                   sdata->csa_chandef.width) {
+                       mgd_sta->sta.bandwidth = bw;
+                       rate_control_rate_update(local, sband, mgd_sta,
+                                                IEEE80211_RC_BW_CHANGED);
+               }
+
                ret = ieee80211_vif_use_reserved_context(sdata);
                if (ret) {
                        sdata_info(sdata,
@@ -1091,6 +1137,13 @@ static void ieee80211_chswitch_work(struct work_struct *work)
                        goto out;
                }
 
+               if (sdata->vif.bss_conf.chandef.width <
+                   sdata->csa_chandef.width) {
+                       mgd_sta->sta.bandwidth = bw;
+                       rate_control_rate_update(local, sband, mgd_sta,
+                                                IEEE80211_RC_BW_CHANGED);
+               }
+
                goto out;
        }
 
@@ -1312,6 +1365,16 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
                                         cbss->beacon_interval));
        return;
  drop_connection:
+       /*
+        * This is just so that the disconnect flow will know that
+        * we were trying to switch channel and failed. In case the
+        * mode is 1 (we are not allowed to Tx), we will know not to
+        * send a deauthentication frame. Those two fields will be
+        * reset when the disconnection worker runs.
+        */
+       sdata->vif.csa_active = true;
+       sdata->csa_block_tx = csa_ie.mode;
+
        ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
        mutex_unlock(&local->chanctx_mtx);
        mutex_unlock(&local->mtx);
@@ -2522,6 +2585,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
+       bool tx;
 
        sdata_lock(sdata);
        if (!ifmgd->associated) {
@@ -2529,6 +2593,8 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
                return;
        }
 
+       tx = !sdata->csa_block_tx;
+
        /* AP is probably out of range (or not reachable for another reason) so
         * remove the bss struct for that AP.
         */
@@ -2536,7 +2602,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
 
        ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
                               WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
-                              true, frame_buf);
+                              tx, frame_buf);
        mutex_lock(&local->mtx);
        sdata->vif.csa_active = false;
        ifmgd->csa_waiting_bcn = false;
@@ -2547,7 +2613,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
        }
        mutex_unlock(&local->mtx);
 
-       ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true,
+       ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx,
                                    WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
 
        sdata_unlock(sdata);
index 64742f2765c4846c36d3f9304314059023215d0a..96611d5dfadb0ce37effd4adcce80454bb69f285 100644 (file)
@@ -1728,6 +1728,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
         */
        if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
            !ieee80211_has_morefrags(hdr->frame_control) &&
+           !is_multicast_ether_addr(hdr->addr1) &&
            (ieee80211_is_mgmt(hdr->frame_control) ||
             ieee80211_is_data(hdr->frame_control)) &&
            !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
index cd332e3e1134bed3efb89838ac245b0402e7a604..f353d9db54bc1f049e14b20713af88bde1da3c62 100644 (file)
@@ -3078,27 +3078,18 @@ void ieee80211_clear_fast_xmit(struct sta_info *sta)
 }
 
 static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local,
-                                       struct sk_buff *skb, int headroom,
-                                       int *subframe_len)
+                                       struct sk_buff *skb, int headroom)
 {
-       int amsdu_len = *subframe_len + sizeof(struct ethhdr);
-       int padding = (4 - amsdu_len) & 3;
-
-       if (skb_headroom(skb) < headroom || skb_tailroom(skb) < padding) {
+       if (skb_headroom(skb) < headroom) {
                I802_DEBUG_INC(local->tx_expand_skb_head);
 
-               if (pskb_expand_head(skb, headroom, padding, GFP_ATOMIC)) {
+               if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
                        wiphy_debug(local->hw.wiphy,
                                    "failed to reallocate TX buffer\n");
                        return false;
                }
        }
 
-       if (padding) {
-               *subframe_len += padding;
-               skb_put_zero(skb, padding);
-       }
-
        return true;
 }
 
@@ -3122,8 +3113,7 @@ static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata,
        if (info->control.flags & IEEE80211_TX_CTRL_AMSDU)
                return true;
 
-       if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr),
-                                        &subframe_len))
+       if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr)))
                return false;
 
        data = skb_push(skb, sizeof(*amsdu_hdr));
@@ -3189,7 +3179,8 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        void *data;
        bool ret = false;
        unsigned int orig_len;
-       int n = 1, nfrags;
+       int n = 2, nfrags, pad = 0;
+       u16 hdrlen;
 
        if (!ieee80211_hw_check(&local->hw, TX_AMSDU))
                return false;
@@ -3222,9 +3213,6 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        if (skb->len + head->len > max_amsdu_len)
                goto out;
 
-       if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
-               goto out;
-
        nfrags = 1 + skb_shinfo(skb)->nr_frags;
        nfrags += 1 + skb_shinfo(head)->nr_frags;
        frag_tail = &skb_shinfo(head)->frag_list;
@@ -3240,10 +3228,24 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        if (max_frags && nfrags > max_frags)
                goto out;
 
-       if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) + 2,
-                                        &subframe_len))
+       if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
                goto out;
 
+       /*
+        * Pad out the previous subframe to a multiple of 4 by adding the
+        * padding to the next one, that's being added. Note that head->len
+        * is the length of the full A-MSDU, but that works since each time
+        * we add a new subframe we pad out the previous one to a multiple
+        * of 4 and thus it no longer matters in the next round.
+        */
+       hdrlen = fast_tx->hdr_len - sizeof(rfc1042_header);
+       if ((head->len - hdrlen) & 3)
+               pad = 4 - ((head->len - hdrlen) & 3);
+
+       if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) +
+                                                    2 + pad))
+               goto out_recalc;
+
        ret = true;
        data = skb_push(skb, ETH_ALEN + 2);
        memmove(data, data + ETH_ALEN + 2, 2 * ETH_ALEN);
@@ -3253,15 +3255,19 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        memcpy(data, &len, 2);
        memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header));
 
+       memset(skb_push(skb, pad), 0, pad);
+
        head->len += skb->len;
        head->data_len += skb->len;
        *frag_tail = skb;
 
-       flow->backlog += head->len - orig_len;
-       tin->backlog_bytes += head->len - orig_len;
-
-       fq_recalc_backlog(fq, tin, flow);
+out_recalc:
+       if (head->len != orig_len) {
+               flow->backlog += head->len - orig_len;
+               tin->backlog_bytes += head->len - orig_len;
 
+               fq_recalc_backlog(fq, tin, flow);
+       }
 out:
        spin_unlock_bh(&fq->lock);
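
The A-MSDU aggregation change above stops tail-padding each subframe and instead pads the previous subframe by pushing zeros in front of the next one; the pad is whatever brings the running length up to a multiple of 4. A tiny illustration of that arithmetic (illustrative helper, not part of mac80211):

#include <stdio.h>

/* Bytes needed to round len up to the next multiple of 4 (0 if aligned). */
static unsigned int amsdu_pad_bytes(unsigned int len)
{
	return (4 - (len & 3)) & 3;
}

int main(void)
{
	printf("%u %u %u %u\n",
	       amsdu_pad_bytes(60), amsdu_pad_bytes(61),
	       amsdu_pad_bytes(62), amsdu_pad_bytes(63));	/* prints: 0 3 2 1 */
	return 0;
}
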
 
index 88efda7c9f8a78737538a355b1b499104ab55aea..716cd6442d86c85b6507ba1aaa8e28e56f58e65b 100644 (file)
@@ -1135,7 +1135,7 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_chanctx_conf *chanctx_conf;
        const struct ieee80211_reg_rule *rrule;
-       struct ieee80211_wmm_ac *wmm_ac;
+       const struct ieee80211_wmm_ac *wmm_ac;
        u16 center_freq = 0;
 
        if (sdata->vif.type != NL80211_IFTYPE_AP &&
@@ -1154,20 +1154,19 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
 
        rrule = freq_reg_info(sdata->wdev.wiphy, MHZ_TO_KHZ(center_freq));
 
-       if (IS_ERR_OR_NULL(rrule) || !rrule->wmm_rule) {
+       if (IS_ERR_OR_NULL(rrule) || !rrule->has_wmm) {
                rcu_read_unlock();
                return;
        }
 
        if (sdata->vif.type == NL80211_IFTYPE_AP)
-               wmm_ac = &rrule->wmm_rule->ap[ac];
+               wmm_ac = &rrule->wmm_rule.ap[ac];
        else
-               wmm_ac = &rrule->wmm_rule->client[ac];
+               wmm_ac = &rrule->wmm_rule.client[ac];
        qparam->cw_min = max_t(u16, qparam->cw_min, wmm_ac->cw_min);
        qparam->cw_max = max_t(u16, qparam->cw_max, wmm_ac->cw_max);
        qparam->aifs = max_t(u8, qparam->aifs, wmm_ac->aifsn);
-       qparam->txop = !qparam->txop ? wmm_ac->cot / 32 :
-               min_t(u16, qparam->txop, wmm_ac->cot / 32);
+       qparam->txop = min_t(u16, qparam->txop, wmm_ac->cot / 32);
        rcu_read_unlock();
 }
 
index 82e6edf9c5d9c8454d17c53a2e0c461ff399a881..45f33d6dedf771c7d9f4a21496201ad3198aba8b 100644 (file)
@@ -100,7 +100,7 @@ static int ncsi_write_package_info(struct sk_buff *skb,
        bool found;
        int rc;
 
-       if (id > ndp->package_num) {
+       if (id > ndp->package_num - 1) {
                netdev_info(ndp->ndev.dev, "NCSI: No package with id %u\n", id);
                return -ENODEV;
        }
@@ -240,7 +240,7 @@ static int ncsi_pkg_info_all_nl(struct sk_buff *skb,
                return 0; /* done */
 
        hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
-                         &ncsi_genl_family, 0,  NCSI_CMD_PKG_INFO);
+                         &ncsi_genl_family, NLM_F_MULTI,  NCSI_CMD_PKG_INFO);
        if (!hdr) {
                rc = -EMSGSIZE;
                goto err;
index 71709c104081448f3d6bc50305ad344cb815167e..f61c306de1d089358ede089f87f97c72c8927670 100644 (file)
@@ -771,13 +771,13 @@ config NETFILTER_XT_TARGET_CHECKSUM
        depends on NETFILTER_ADVANCED
        ---help---
          This option adds a `CHECKSUM' target, which can be used in the iptables mangle
-         table.
+         table to work around buggy DHCP clients in virtualized environments.
 
-         You can use this target to compute and fill in the checksum in
-         a packet that lacks a checksum.  This is particularly useful,
-         if you need to work around old applications such as dhcp clients,
-         that do not work well with checksum offloads, but don't want to disable
-         checksum offload in your device.
+         Some old DHCP clients drop packets because they are not aware
+         that the checksum would normally be offloaded to hardware and
+         thus should be considered valid.
+         This target can be used to fill in the checksum using iptables
+         when such packets are sent via a virtual network device.
 
          To compile it as a module, choose M here.  If unsure, say N.
 
index 9f14b0df6960d167a0980cc3896e4dbc19679607..51c5d7eec0a3517518a38cce411bf7b09189c15e 100644 (file)
@@ -776,9 +776,26 @@ static const struct nf_hook_ops ipv6_conntrack_ops[] = {
 };
 #endif
 
+static int nf_ct_tcp_fixup(struct nf_conn *ct, void *_nfproto)
+{
+       u8 nfproto = (unsigned long)_nfproto;
+
+       if (nf_ct_l3num(ct) != nfproto)
+               return 0;
+
+       if (nf_ct_protonum(ct) == IPPROTO_TCP &&
+           ct->proto.tcp.state == TCP_CONNTRACK_ESTABLISHED) {
+               ct->proto.tcp.seen[0].td_maxwin = 0;
+               ct->proto.tcp.seen[1].td_maxwin = 0;
+       }
+
+       return 0;
+}
+
 static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
 {
        struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);
+       bool fixup_needed = false;
        int err = 0;
 
        mutex_lock(&nf_ct_proto_mutex);
@@ -798,6 +815,8 @@ static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
                                            ARRAY_SIZE(ipv4_conntrack_ops));
                if (err)
                        cnet->users4 = 0;
+               else
+                       fixup_needed = true;
                break;
 #if IS_ENABLED(CONFIG_IPV6)
        case NFPROTO_IPV6:
@@ -814,6 +833,8 @@ static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
                                            ARRAY_SIZE(ipv6_conntrack_ops));
                if (err)
                        cnet->users6 = 0;
+               else
+                       fixup_needed = true;
                break;
 #endif
        default:
@@ -822,6 +843,11 @@ static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
        }
  out_unlock:
        mutex_unlock(&nf_ct_proto_mutex);
+
+       if (fixup_needed)
+               nf_ct_iterate_cleanup_net(net, nf_ct_tcp_fixup,
+                                         (void *)(unsigned long)nfproto, 0, 0);
+
        return err;
 }
 
index 8c58f96b59e701422cd60303dfc6a2f3db8c1b04..f3f91ed2c21adce5dcc5e9ba06d6a843a6af39bb 100644 (file)
@@ -675,7 +675,7 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
 }
 #endif
 
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -697,6 +697,8 @@ static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[],
                        timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
                }
        }
+
+       timeouts[CTA_TIMEOUT_DCCP_UNSPEC] = timeouts[CTA_TIMEOUT_DCCP_REQUEST];
        return 0;
 }
 
@@ -726,7 +728,7 @@ dccp_timeout_nla_policy[CTA_TIMEOUT_DCCP_MAX+1] = {
        [CTA_TIMEOUT_DCCP_CLOSING]      = { .type = NLA_U32 },
        [CTA_TIMEOUT_DCCP_TIMEWAIT]     = { .type = NLA_U32 },
 };
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
 
 #ifdef CONFIG_SYSCTL
 /* template, data assigned later */
@@ -827,6 +829,11 @@ static int dccp_init_net(struct net *net, u_int16_t proto)
                dn->dccp_timeout[CT_DCCP_CLOSEREQ]      = 64 * HZ;
                dn->dccp_timeout[CT_DCCP_CLOSING]       = 64 * HZ;
                dn->dccp_timeout[CT_DCCP_TIMEWAIT]      = 2 * DCCP_MSL;
+
+               /* timeouts[0] is unused, make it same as SYN_SENT so
+                * ->timeouts[0] contains 'new' timeout, like udp or icmp.
+                */
+               dn->dccp_timeout[CT_DCCP_NONE] = dn->dccp_timeout[CT_DCCP_REQUEST];
        }
 
        return dccp_kmemdup_sysctl_table(net, pn, dn);
@@ -856,7 +863,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = {
        .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
        .nla_policy             = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = dccp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = dccp_timeout_obj_to_nlattr,
@@ -864,7 +871,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = {
                .obj_size       = sizeof(unsigned int) * CT_DCCP_MAX,
                .nla_policy     = dccp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = dccp_init_net,
        .get_net_proto          = dccp_get_net_proto,
 };
@@ -889,7 +896,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = {
        .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
        .nla_policy             = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = dccp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = dccp_timeout_obj_to_nlattr,
@@ -897,7 +904,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = {
                .obj_size       = sizeof(unsigned int) * CT_DCCP_MAX,
                .nla_policy     = dccp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = dccp_init_net,
        .get_net_proto          = dccp_get_net_proto,
 };
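
Note (not part of the patch): the CT_DCCP_NONE initialisation above is the first instance of a pattern this series repeats for SCTP and TCP below: slot 0 of the per-protocol timeout array, which no real state uses, is aliased to the protocol's 'new connection' timeout so callers can read ->timeouts[0] uniformly, as UDP and ICMP already allow. A standalone sketch of the idea, with hypothetical names and made-up values:

    enum { CT_STATE_UNSPEC, CT_STATE_NEW, CT_STATE_ESTABLISHED, CT_STATE_MAX };

    static unsigned int timeouts[CT_STATE_MAX] = {
            [CT_STATE_NEW]         = 120,       /* seconds, made-up values */
            [CT_STATE_ESTABLISHED] = 432000,
    };

    static void init_unspec_slot(void)
    {
            /* slot 0 is otherwise unused; alias it to the 'new' timeout so
             * generic code can always look at timeouts[0] */
            timeouts[CT_STATE_UNSPEC] = timeouts[CT_STATE_NEW];
    }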
index ac4a0b296dcda7c7bbc51727fd0d57974813f37a..1df3244ecd07fc573538cccb83da67efaa69a5be 100644 (file)
@@ -70,7 +70,7 @@ static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
        return ret;
 }
 
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -113,7 +113,7 @@ static const struct nla_policy
 generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = {
        [CTA_TIMEOUT_GENERIC_TIMEOUT]   = { .type = NLA_U32 },
 };
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table generic_sysctl_table[] = {
@@ -164,7 +164,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic =
        .pkt_to_tuple           = generic_pkt_to_tuple,
        .packet                 = generic_packet,
        .new                    = generic_new,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = generic_timeout_nlattr_to_obj,
                .obj_to_nlattr  = generic_timeout_obj_to_nlattr,
@@ -172,7 +172,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic =
                .obj_size       = sizeof(unsigned int),
                .nla_policy     = generic_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = generic_init_net,
        .get_net_proto          = generic_get_net_proto,
 };
index d1632252bf5b65611e0a5afc7c87c96607fe6a71..650eb4fba2c5418951b4d22e62325726a745af22 100644 (file)
@@ -285,7 +285,7 @@ static void gre_destroy(struct nf_conn *ct)
                nf_ct_gre_keymap_destroy(master);
 }
 
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -334,7 +334,7 @@ gre_timeout_nla_policy[CTA_TIMEOUT_GRE_MAX+1] = {
        [CTA_TIMEOUT_GRE_UNREPLIED]     = { .type = NLA_U32 },
        [CTA_TIMEOUT_GRE_REPLIED]       = { .type = NLA_U32 },
 };
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
 
 static int gre_init_net(struct net *net, u_int16_t proto)
 {
@@ -367,7 +367,7 @@ static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = {
        .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
        .nla_policy      = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout    = {
                .nlattr_to_obj  = gre_timeout_nlattr_to_obj,
                .obj_to_nlattr  = gre_timeout_obj_to_nlattr,
@@ -375,7 +375,7 @@ static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = {
                .obj_size       = sizeof(unsigned int) * GRE_CT_MAX,
                .nla_policy     = gre_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .net_id         = &proto_gre_net_id,
        .init_net       = gre_init_net,
 };
index 036670b38282672d3760bd374d190a5df5af8393..43c7e1a217b98682a1abd5d3cdb6f63fa1febec2 100644 (file)
@@ -273,7 +273,7 @@ static unsigned int icmp_nlattr_tuple_size(void)
 }
 #endif
 
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -313,7 +313,7 @@ static const struct nla_policy
 icmp_timeout_nla_policy[CTA_TIMEOUT_ICMP_MAX+1] = {
        [CTA_TIMEOUT_ICMP_TIMEOUT]      = { .type = NLA_U32 },
 };
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table icmp_sysctl_table[] = {
@@ -374,7 +374,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp =
        .nlattr_to_tuple        = icmp_nlattr_to_tuple,
        .nla_policy             = icmp_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = icmp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = icmp_timeout_obj_to_nlattr,
@@ -382,7 +382,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp =
                .obj_size       = sizeof(unsigned int),
                .nla_policy     = icmp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = icmp_init_net,
        .get_net_proto          = icmp_get_net_proto,
 };
index bed07b998a10d06593be06bcdf590f2853fbf0b2..97e40f77d678a64204cfdd660b92bb1960752305 100644 (file)
@@ -274,7 +274,7 @@ static unsigned int icmpv6_nlattr_tuple_size(void)
 }
 #endif
 
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -314,7 +314,7 @@ static const struct nla_policy
 icmpv6_timeout_nla_policy[CTA_TIMEOUT_ICMPV6_MAX+1] = {
        [CTA_TIMEOUT_ICMPV6_TIMEOUT]    = { .type = NLA_U32 },
 };
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table icmpv6_sysctl_table[] = {
@@ -373,7 +373,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 =
        .nlattr_to_tuple        = icmpv6_nlattr_to_tuple,
        .nla_policy             = icmpv6_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = icmpv6_timeout_nlattr_to_obj,
                .obj_to_nlattr  = icmpv6_timeout_obj_to_nlattr,
@@ -381,7 +381,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 =
                .obj_size       = sizeof(unsigned int),
                .nla_policy     = icmpv6_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = icmpv6_init_net,
        .get_net_proto          = icmpv6_get_net_proto,
 };
index 8d1e085fc14a4a3776e8d67ca06b7cc82519fcb0..e4d738d34cd030fe5b2cd1629d436e3fa0fa4557 100644 (file)
@@ -591,7 +591,7 @@ static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct)
 }
 #endif
 
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -613,6 +613,8 @@ static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[],
                        timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
                }
        }
+
+       timeouts[CTA_TIMEOUT_SCTP_UNSPEC] = timeouts[CTA_TIMEOUT_SCTP_CLOSED];
        return 0;
 }
 
@@ -644,7 +646,7 @@ sctp_timeout_nla_policy[CTA_TIMEOUT_SCTP_MAX+1] = {
        [CTA_TIMEOUT_SCTP_HEARTBEAT_SENT]       = { .type = NLA_U32 },
        [CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED]      = { .type = NLA_U32 },
 };
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
 
 
 #ifdef CONFIG_SYSCTL
@@ -743,6 +745,11 @@ static int sctp_init_net(struct net *net, u_int16_t proto)
 
                for (i = 0; i < SCTP_CONNTRACK_MAX; i++)
                        sn->timeouts[i] = sctp_timeouts[i];
+
+               /* timeouts[0] is unused, init it so ->timeouts[0] contains
+                * 'new' timeout, like udp or icmp.
+                */
+               sn->timeouts[0] = sctp_timeouts[SCTP_CONNTRACK_CLOSED];
        }
 
        return sctp_kmemdup_sysctl_table(pn, sn);
@@ -773,7 +780,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = {
        .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
        .nla_policy             = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = sctp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = sctp_timeout_obj_to_nlattr,
@@ -781,7 +788,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = {
                .obj_size       = sizeof(unsigned int) * SCTP_CONNTRACK_MAX,
                .nla_policy     = sctp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = sctp_init_net,
        .get_net_proto          = sctp_get_net_proto,
 };
@@ -806,7 +813,8 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = {
        .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
        .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
        .nla_policy             = nf_ct_port_nla_policy,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = sctp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = sctp_timeout_obj_to_nlattr,
@@ -814,8 +822,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = {
                .obj_size       = sizeof(unsigned int) * SCTP_CONNTRACK_MAX,
                .nla_policy     = sctp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#endif
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = sctp_init_net,
        .get_net_proto          = sctp_get_net_proto,
 };
index d80d322b9d8ba0fcaec08a87a5e7ddcb6c533fa4..b4bdf9eda7b740dccb6501c5b0155c32d17d7e5b 100644 (file)
@@ -1279,7 +1279,7 @@ static unsigned int tcp_nlattr_tuple_size(void)
 }
 #endif
 
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -1301,6 +1301,7 @@ static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
                timeouts[TCP_CONNTRACK_SYN_SENT] =
                        ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ;
        }
+
        if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) {
                timeouts[TCP_CONNTRACK_SYN_RECV] =
                        ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ;
@@ -1341,6 +1342,8 @@ static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
                timeouts[TCP_CONNTRACK_UNACK] =
                        ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ;
        }
+
+       timeouts[CTA_TIMEOUT_TCP_UNSPEC] = timeouts[CTA_TIMEOUT_TCP_SYN_SENT];
        return 0;
 }
 
@@ -1391,7 +1394,7 @@ static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
        [CTA_TIMEOUT_TCP_RETRANS]       = { .type = NLA_U32 },
        [CTA_TIMEOUT_TCP_UNACK]         = { .type = NLA_U32 },
 };
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table tcp_sysctl_table[] = {
@@ -1518,6 +1521,10 @@ static int tcp_init_net(struct net *net, u_int16_t proto)
                for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
                        tn->timeouts[i] = tcp_timeouts[i];
 
+               /* timeouts[0] is unused, make it same as SYN_SENT so
+                * ->timeouts[0] contains 'new' timeout, like udp or icmp.
+                */
+               tn->timeouts[0] = tcp_timeouts[TCP_CONNTRACK_SYN_SENT];
                tn->tcp_loose = nf_ct_tcp_loose;
                tn->tcp_be_liberal = nf_ct_tcp_be_liberal;
                tn->tcp_max_retrans = nf_ct_tcp_max_retrans;
@@ -1551,7 +1558,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 =
        .nlattr_size            = TCP_NLATTR_SIZE,
        .nla_policy             = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = tcp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = tcp_timeout_obj_to_nlattr,
@@ -1560,7 +1567,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 =
                                        TCP_CONNTRACK_TIMEOUT_MAX,
                .nla_policy     = tcp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = tcp_init_net,
        .get_net_proto          = tcp_get_net_proto,
 };
@@ -1586,7 +1593,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 =
        .nlattr_tuple_size      = tcp_nlattr_tuple_size,
        .nla_policy             = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = tcp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = tcp_timeout_obj_to_nlattr,
@@ -1595,7 +1602,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 =
                                        TCP_CONNTRACK_TIMEOUT_MAX,
                .nla_policy     = tcp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = tcp_init_net,
        .get_net_proto          = tcp_get_net_proto,
 };
index 7a1b8988a931ab15145d6371288549da51222076..3065fb8ef91b74348f95900a7b3b0cfc92970469 100644 (file)
@@ -171,7 +171,7 @@ static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
        return NF_ACCEPT;
 }
 
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -221,7 +221,7 @@ udp_timeout_nla_policy[CTA_TIMEOUT_UDP_MAX+1] = {
        [CTA_TIMEOUT_UDP_UNREPLIED]     = { .type = NLA_U32 },
        [CTA_TIMEOUT_UDP_REPLIED]       = { .type = NLA_U32 },
 };
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table udp_sysctl_table[] = {
@@ -292,7 +292,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 =
        .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
        .nla_policy             = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = udp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = udp_timeout_obj_to_nlattr,
@@ -300,7 +300,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 =
                .obj_size       = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
                .nla_policy     = udp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = udp_init_net,
        .get_net_proto          = udp_get_net_proto,
 };
@@ -321,7 +321,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 =
        .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
        .nla_policy             = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = udp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = udp_timeout_obj_to_nlattr,
@@ -329,7 +329,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 =
                .obj_size       = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
                .nla_policy     = udp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = udp_init_net,
        .get_net_proto          = udp_get_net_proto,
 };
@@ -350,7 +350,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 =
        .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
        .nla_policy             = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = udp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = udp_timeout_obj_to_nlattr,
@@ -358,7 +358,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 =
                .obj_size       = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
                .nla_policy     = udp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = udp_init_net,
        .get_net_proto          = udp_get_net_proto,
 };
@@ -379,7 +379,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 =
        .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
        .nla_policy             = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = udp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = udp_timeout_obj_to_nlattr,
@@ -387,10 +387,9 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 =
                .obj_size       = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
                .nla_policy     = udp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = udp_init_net,
        .get_net_proto          = udp_get_net_proto,
 };
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite6);
 #endif
-#include <net/netfilter/nf_conntrack_timeout.h>
index 1dca5683f59f1d3ae08af245ce3807e0d50bc3da..2cfb173cd0b2d8a5e99e5165edcecffcd610af33 100644 (file)
@@ -4637,6 +4637,7 @@ static int nft_flush_set(const struct nft_ctx *ctx,
        }
        set->ndeact++;
 
+       nft_set_elem_deactivate(ctx->net, set, elem);
        nft_trans_elem_set(trans) = set;
        nft_trans_elem(trans) = *elem;
        list_add_tail(&trans->list, &ctx->net->nft.commit_list);
index d46a236cdf31b612f808b33867cd5ee68e70af33..a30f8ba4b89ac427053281936ad3e70750e3a2f0 100644 (file)
@@ -489,8 +489,8 @@ err:
        return err;
 }
 
-static struct ctnl_timeout *
-ctnl_timeout_find_get(struct net *net, const char *name)
+static struct nf_ct_timeout *ctnl_timeout_find_get(struct net *net,
+                                                  const char *name)
 {
        struct ctnl_timeout *timeout, *matching = NULL;
 
@@ -509,7 +509,7 @@ ctnl_timeout_find_get(struct net *net, const char *name)
                break;
        }
 err:
-       return matching;
+       return matching ? &matching->timeout : NULL;
 }
 
 static void ctnl_timeout_put(struct nf_ct_timeout *t)
index ea4ba551abb28cb25c833dc408e23d1313b21bb4..d33094f4ec41e380c57b57b26975f263cfe6bf8f 100644 (file)
@@ -233,6 +233,7 @@ static void nfqnl_reinject(struct nf_queue_entry *entry, unsigned int verdict)
        int err;
 
        if (verdict == NF_ACCEPT ||
+           verdict == NF_REPEAT ||
            verdict == NF_STOP) {
                rcu_read_lock();
                ct_hook = rcu_dereference(nf_ct_hook);
index 26a8baebd0722c30746edf8f4c91308efb83373a..5dd87748afa8a9185de95c439881f72d3f96e798 100644 (file)
@@ -799,7 +799,7 @@ err:
 }
 
 struct nft_ct_timeout_obj {
-       struct nf_conn          *tmpl;
+       struct nf_ct_timeout    *timeout;
        u8                      l4proto;
 };
 
@@ -809,26 +809,42 @@ static void nft_ct_timeout_obj_eval(struct nft_object *obj,
 {
        const struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
        struct nf_conn *ct = (struct nf_conn *)skb_nfct(pkt->skb);
-       struct sk_buff *skb = pkt->skb;
+       struct nf_conn_timeout *timeout;
+       const unsigned int *values;
+
+       if (priv->l4proto != pkt->tprot)
+               return;
 
-       if (ct ||
-           priv->l4proto != pkt->tprot)
+       if (!ct || nf_ct_is_template(ct) || nf_ct_is_confirmed(ct))
                return;
 
-       nf_ct_set(skb, priv->tmpl, IP_CT_NEW);
+       timeout = nf_ct_timeout_find(ct);
+       if (!timeout) {
+               timeout = nf_ct_timeout_ext_add(ct, priv->timeout, GFP_ATOMIC);
+               if (!timeout) {
+                       regs->verdict.code = NF_DROP;
+                       return;
+               }
+       }
+
+       rcu_assign_pointer(timeout->timeout, priv->timeout);
+
+       /* adjust the timeout as per 'new' state. ct is unconfirmed,
+        * so the current timestamp must not be added.
+        */
+       values = nf_ct_timeout_data(timeout);
+       if (values)
+               nf_ct_refresh(ct, pkt->skb, values[0]);
 }
 
 static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx,
                                   const struct nlattr * const tb[],
                                   struct nft_object *obj)
 {
-       const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
        struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
        const struct nf_conntrack_l4proto *l4proto;
-       struct nf_conn_timeout *timeout_ext;
        struct nf_ct_timeout *timeout;
        int l3num = ctx->family;
-       struct nf_conn *tmpl;
        __u8 l4num;
        int ret;
 
@@ -863,28 +879,14 @@ static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx,
 
        timeout->l3num = l3num;
        timeout->l4proto = l4proto;
-       tmpl = nf_ct_tmpl_alloc(ctx->net, zone, GFP_ATOMIC);
-       if (!tmpl) {
-               ret = -ENOMEM;
-               goto err_free_timeout;
-       }
-
-       timeout_ext = nf_ct_timeout_ext_add(tmpl, timeout, GFP_ATOMIC);
-       if (!timeout_ext) {
-               ret = -ENOMEM;
-               goto err_free_tmpl;
-       }
 
        ret = nf_ct_netns_get(ctx->net, ctx->family);
        if (ret < 0)
-               goto err_free_tmpl;
-
-       priv->tmpl = tmpl;
+               goto err_free_timeout;
 
+       priv->timeout = timeout;
        return 0;
 
-err_free_tmpl:
-       nf_ct_tmpl_free(tmpl);
 err_free_timeout:
        kfree(timeout);
 err_proto_put:
@@ -896,22 +898,19 @@ static void nft_ct_timeout_obj_destroy(const struct nft_ctx *ctx,
                                       struct nft_object *obj)
 {
        struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
-       struct nf_conn_timeout *t = nf_ct_timeout_find(priv->tmpl);
-       struct nf_ct_timeout *timeout;
+       struct nf_ct_timeout *timeout = priv->timeout;
 
-       timeout = rcu_dereference_raw(t->timeout);
        nf_ct_untimeout(ctx->net, timeout);
        nf_ct_l4proto_put(timeout->l4proto);
        nf_ct_netns_put(ctx->net, ctx->family);
-       nf_ct_tmpl_free(priv->tmpl);
+       kfree(priv->timeout);
 }
 
 static int nft_ct_timeout_obj_dump(struct sk_buff *skb,
                                   struct nft_object *obj, bool reset)
 {
        const struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
-       const struct nf_conn_timeout *t = nf_ct_timeout_find(priv->tmpl);
-       const struct nf_ct_timeout *timeout = rcu_dereference_raw(t->timeout);
+       const struct nf_ct_timeout *timeout = priv->timeout;
        struct nlattr *nest_params;
        int ret;
 
index 9f4151ec3e06e73460243f2b0c5c49762b37bb6d..6c7aa6a0a0d2523377f97e911cf3dcba459de752 100644 (file)
@@ -16,6 +16,9 @@
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_CHECKSUM.h>
 
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Michael S. Tsirkin <mst@redhat.com>");
 MODULE_DESCRIPTION("Xtables: checksum modification");
@@ -25,7 +28,7 @@ MODULE_ALIAS("ip6t_CHECKSUM");
 static unsigned int
 checksum_tg(struct sk_buff *skb, const struct xt_action_param *par)
 {
-       if (skb->ip_summed == CHECKSUM_PARTIAL)
+       if (skb->ip_summed == CHECKSUM_PARTIAL && !skb_is_gso(skb))
                skb_checksum_help(skb);
 
        return XT_CONTINUE;
@@ -34,6 +37,8 @@ checksum_tg(struct sk_buff *skb, const struct xt_action_param *par)
 static int checksum_tg_check(const struct xt_tgchk_param *par)
 {
        const struct xt_CHECKSUM_info *einfo = par->targinfo;
+       const struct ip6t_ip6 *i6 = par->entryinfo;
+       const struct ipt_ip *i4 = par->entryinfo;
 
        if (einfo->operation & ~XT_CHECKSUM_OP_FILL) {
                pr_info_ratelimited("unsupported CHECKSUM operation %x\n",
@@ -43,6 +48,21 @@ static int checksum_tg_check(const struct xt_tgchk_param *par)
        if (!einfo->operation)
                return -EINVAL;
 
+       switch (par->family) {
+       case NFPROTO_IPV4:
+               if (i4->proto == IPPROTO_UDP &&
+                   (i4->invflags & XT_INV_PROTO) == 0)
+                       return 0;
+               break;
+       case NFPROTO_IPV6:
+               if ((i6->flags & IP6T_F_PROTO) &&
+                   i6->proto == IPPROTO_UDP &&
+                   (i6->invflags & XT_INV_PROTO) == 0)
+                       return 0;
+               break;
+       }
+
+       pr_warn_once("CHECKSUM should be avoided.  If really needed, restrict with \"-p udp\" and only use in OUTPUT\n");
        return 0;
 }
 
index dfbdbb2fc0ed8b024122ef1c3371c1bba697d7f8..51d0c257e7a57948e1c1c033aab63614b827ca2e 100644 (file)
@@ -125,6 +125,7 @@ xt_cluster_mt(const struct sk_buff *skb, struct xt_action_param *par)
 static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
 {
        struct xt_cluster_match_info *info = par->matchinfo;
+       int ret;
 
        if (info->total_nodes > XT_CLUSTER_NODES_MAX) {
                pr_info_ratelimited("you have exceeded the maximum number of cluster nodes (%u > %u)\n",
@@ -135,7 +136,17 @@ static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
                pr_info_ratelimited("node mask cannot exceed total number of nodes\n");
                return -EDOM;
        }
-       return 0;
+
+       ret = nf_ct_netns_get(par->net, par->family);
+       if (ret < 0)
+               pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
+                                   par->family);
+       return ret;
+}
+
+static void xt_cluster_mt_destroy(const struct xt_mtdtor_param *par)
+{
+       nf_ct_netns_put(par->net, par->family);
 }
 
 static struct xt_match xt_cluster_match __read_mostly = {
@@ -144,6 +155,7 @@ static struct xt_match xt_cluster_match __read_mostly = {
        .match          = xt_cluster_mt,
        .checkentry     = xt_cluster_mt_checkentry,
        .matchsize      = sizeof(struct xt_cluster_match_info),
+       .destroy        = xt_cluster_mt_destroy,
        .me             = THIS_MODULE,
 };
 
index 9b16402f29af82a9fd594099bd199531c5d69ded..3e7d259e5d8de01148729022f55c44310f7d6ed7 100644 (file)
@@ -1057,7 +1057,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = {
 static void *dl_seq_start(struct seq_file *s, loff_t *pos)
        __acquires(htable->lock)
 {
-       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
        unsigned int *bucket;
 
        spin_lock_bh(&htable->lock);
@@ -1074,7 +1074,7 @@ static void *dl_seq_start(struct seq_file *s, loff_t *pos)
 
 static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
 {
-       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
        unsigned int *bucket = v;
 
        *pos = ++(*bucket);
@@ -1088,7 +1088,7 @@ static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
 static void dl_seq_stop(struct seq_file *s, void *v)
        __releases(htable->lock)
 {
-       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
        unsigned int *bucket = v;
 
        if (!IS_ERR(bucket))
@@ -1130,7 +1130,7 @@ static void dl_seq_print(struct dsthash_ent *ent, u_int8_t family,
 static int dl_seq_real_show_v2(struct dsthash_ent *ent, u_int8_t family,
                               struct seq_file *s)
 {
-       struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private));
+       struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file));
 
        spin_lock(&ent->lock);
        /* recalculate to show accurate numbers */
@@ -1145,7 +1145,7 @@ static int dl_seq_real_show_v2(struct dsthash_ent *ent, u_int8_t family,
 static int dl_seq_real_show_v1(struct dsthash_ent *ent, u_int8_t family,
                               struct seq_file *s)
 {
-       struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private));
+       struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file));
 
        spin_lock(&ent->lock);
        /* recalculate to show accurate numbers */
@@ -1160,7 +1160,7 @@ static int dl_seq_real_show_v1(struct dsthash_ent *ent, u_int8_t family,
 static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
                            struct seq_file *s)
 {
-       struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private));
+       struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file));
 
        spin_lock(&ent->lock);
        /* recalculate to show accurate numbers */
@@ -1174,7 +1174,7 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
 
 static int dl_seq_show_v2(struct seq_file *s, void *v)
 {
-       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
        unsigned int *bucket = (unsigned int *)v;
        struct dsthash_ent *ent;
 
@@ -1188,7 +1188,7 @@ static int dl_seq_show_v2(struct seq_file *s, void *v)
 
 static int dl_seq_show_v1(struct seq_file *s, void *v)
 {
-       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
        unsigned int *bucket = v;
        struct dsthash_ent *ent;
 
@@ -1202,7 +1202,7 @@ static int dl_seq_show_v1(struct seq_file *s, void *v)
 
 static int dl_seq_show(struct seq_file *s, void *v)
 {
-       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
        unsigned int *bucket = v;
        struct dsthash_ent *ent;
 
index 5610061e7f2e00b935ce44dd9cf82d10eb77a7bf..75c92a87e7b2481141161c8945f5e7eef8e0abf8 100644 (file)
@@ -4137,36 +4137,52 @@ static const struct vm_operations_struct packet_mmap_ops = {
        .close  =       packet_mm_close,
 };
 
-static void free_pg_vec(struct pgv *pg_vec, unsigned int len)
+static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
+                       unsigned int len)
 {
        int i;
 
        for (i = 0; i < len; i++) {
                if (likely(pg_vec[i].buffer)) {
-                       kvfree(pg_vec[i].buffer);
+                       if (is_vmalloc_addr(pg_vec[i].buffer))
+                               vfree(pg_vec[i].buffer);
+                       else
+                               free_pages((unsigned long)pg_vec[i].buffer,
+                                          order);
                        pg_vec[i].buffer = NULL;
                }
        }
        kfree(pg_vec);
 }
 
-static char *alloc_one_pg_vec_page(unsigned long size)
+static char *alloc_one_pg_vec_page(unsigned long order)
 {
        char *buffer;
+       gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
+                         __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
 
-       buffer = kvzalloc(size, GFP_KERNEL);
+       buffer = (char *) __get_free_pages(gfp_flags, order);
        if (buffer)
                return buffer;
 
-       buffer = kvzalloc(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
+       /* __get_free_pages failed, fall back to vmalloc */
+       buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
+       if (buffer)
+               return buffer;
 
-       return buffer;
+       /* vmalloc failed, lets dig into swap here */
+       gfp_flags &= ~__GFP_NORETRY;
+       buffer = (char *) __get_free_pages(gfp_flags, order);
+       if (buffer)
+               return buffer;
+
+       /* complete and utter failure */
+       return NULL;
 }
 
-static struct pgv *alloc_pg_vec(struct tpacket_req *req)
+static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
 {
        unsigned int block_nr = req->tp_block_nr;
-       unsigned long size = req->tp_block_size;
        struct pgv *pg_vec;
        int i;
 
@@ -4175,7 +4191,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req)
                goto out;
 
        for (i = 0; i < block_nr; i++) {
-               pg_vec[i].buffer = alloc_one_pg_vec_page(size);
+               pg_vec[i].buffer = alloc_one_pg_vec_page(order);
                if (unlikely(!pg_vec[i].buffer))
                        goto out_free_pgvec;
        }
@@ -4184,7 +4200,7 @@ out:
        return pg_vec;
 
 out_free_pgvec:
-       free_pg_vec(pg_vec, block_nr);
+       free_pg_vec(pg_vec, order, block_nr);
        pg_vec = NULL;
        goto out;
 }
@@ -4194,9 +4210,9 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 {
        struct pgv *pg_vec = NULL;
        struct packet_sock *po = pkt_sk(sk);
+       int was_running, order = 0;
        struct packet_ring_buffer *rb;
        struct sk_buff_head *rb_queue;
-       int was_running;
        __be16 num;
        int err = -EINVAL;
        /* Added to avoid minimal code churn */
@@ -4258,7 +4274,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                        goto out;
 
                err = -ENOMEM;
-               pg_vec = alloc_pg_vec(req);
+               order = get_order(req->tp_block_size);
+               pg_vec = alloc_pg_vec(req, order);
                if (unlikely(!pg_vec))
                        goto out;
                switch (po->tp_version) {
@@ -4312,6 +4329,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                rb->frame_size = req->tp_frame_size;
                spin_unlock_bh(&rb_queue->lock);
 
+               swap(rb->pg_vec_order, order);
                swap(rb->pg_vec_len, req->tp_block_nr);
 
                rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
@@ -4337,7 +4355,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
        }
 
        if (pg_vec)
-               free_pg_vec(pg_vec, req->tp_block_nr);
+               free_pg_vec(pg_vec, order, req->tp_block_nr);
 out:
        return err;
 }
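
Note (not part of the patch): alloc_pg_vec() now receives a page order computed with get_order(req->tp_block_size), and the same order is remembered in the ring buffer so free_pg_vec() can return the pages correctly. get_order(size) is roughly the smallest n such that PAGE_SIZE << n covers size; a hypothetical userspace equivalent, assuming 4 KiB pages:

    #include <stddef.h>

    static int block_order(size_t size)
    {
            const size_t page_size = 4096;   /* assumption: 4 KiB pages */
            int order = 0;

            while ((page_size << order) < size)
                    order++;
            return order;                    /* block fits in 1 << order pages */
    }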
index 8f50036f62f05c76632dea82491d6a60dba39f0f..3bb7c5fb3bff2fd5d91c3d973d006d0cdde29a0b 100644 (file)
@@ -64,6 +64,7 @@ struct packet_ring_buffer {
        unsigned int            frame_size;
        unsigned int            frame_max;
 
+       unsigned int            pg_vec_order;
        unsigned int            pg_vec_pages;
        unsigned int            pg_vec_len;
 
index 01b3bd6a3708dece9e1e064c58e21c9d61ad8138..b9092111bc459d1b309c84a713005e7df66b02cf 100644 (file)
@@ -1,6 +1,6 @@
 
 config RDS
-       tristate "The RDS Protocol"
+       tristate "The Reliable Datagram Sockets Protocol"
        depends on INET
        ---help---
          The RDS (Reliable Datagram Sockets) protocol provides reliable,
index 3ab55784b637e3b30bd5dca1a19814943aae42e7..762d2c6788a385631a312a39625d67a1154ef596 100644 (file)
@@ -76,11 +76,13 @@ struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port,
        struct rds_sock *rs;
 
        __rds_create_bind_key(key, addr, port, scope_id);
-       rs = rhashtable_lookup_fast(&bind_hash_table, key, ht_parms);
+       rcu_read_lock();
+       rs = rhashtable_lookup(&bind_hash_table, key, ht_parms);
        if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD))
                rds_sock_addref(rs);
        else
                rs = NULL;
+       rcu_read_unlock();
 
        rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr,
                 ntohs(port));
@@ -235,6 +237,7 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                goto out;
        }
 
+       sock_set_flag(sk, SOCK_RCU_FREE);
        ret = rds_add_bound(rs, binding_addr, &port, scope_id);
        if (ret)
                goto out;
index c1d97640c0be7bf6307806e2e6b112111756b7a1..eba75c1ba359446ca7e39e5aaa88287129531055 100644 (file)
@@ -341,15 +341,10 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
 
        if (rds_conn_state(conn) == RDS_CONN_UP) {
                struct rds_ib_device *rds_ibdev;
-               struct rdma_dev_addr *dev_addr;
 
                ic = conn->c_transport_data;
-               dev_addr = &ic->i_cm_id->route.addr.dev_addr;
-               rdma_addr_get_sgid(dev_addr,
-                                  (union ib_gid *)&iinfo6->src_gid);
-               rdma_addr_get_dgid(dev_addr,
-                                  (union ib_gid *)&iinfo6->dst_gid);
-
+               rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid,
+                              (union ib_gid *)&iinfo6->dst_gid);
                rds_ibdev = ic->rds_ibdev;
                iinfo6->max_send_wr = ic->i_send_ring.w_nr;
                iinfo6->max_recv_wr = ic->i_recv_ring.w_nr;
index 2c7b7c352d3e8cf77c7aed4d29b61d481249827f..b9bbcf3d6c63976912433109d5c450830fb779f4 100644 (file)
@@ -37,7 +37,6 @@
 #include <net/tcp.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
-#include <net/tcp.h>
 #include <net/addrconf.h>
 
 #include "rds.h"
index 00192a996be0eab95be229f801ada687cca0f71a..0f846585225431c6ae903bd8308884729c99bb3c 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/mod_devicetable.h>
 #include <linux/rfkill.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
index 229d63c99be23b2329caa84b6912c8f770ccca64..e12f8ef7baa438997da79c86eedec3efde1babcd 100644 (file)
@@ -300,21 +300,17 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(tcf_generic_walker);
 
-static bool __tcf_idr_check(struct tc_action_net *tn, u32 index,
-                           struct tc_action **a, int bind)
+int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
 {
        struct tcf_idrinfo *idrinfo = tn->idrinfo;
        struct tc_action *p;
 
        spin_lock(&idrinfo->lock);
        p = idr_find(&idrinfo->action_idr, index);
-       if (IS_ERR(p)) {
+       if (IS_ERR(p))
                p = NULL;
-       } else if (p) {
+       else if (p)
                refcount_inc(&p->tcfa_refcnt);
-               if (bind)
-                       atomic_inc(&p->tcfa_bindcnt);
-       }
        spin_unlock(&idrinfo->lock);
 
        if (p) {
@@ -323,23 +319,10 @@ static bool __tcf_idr_check(struct tc_action_net *tn, u32 index,
        }
        return false;
 }
-
-int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
-{
-       return __tcf_idr_check(tn, index, a, 0);
-}
 EXPORT_SYMBOL(tcf_idr_search);
 
-bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a,
-                  int bind)
+static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
 {
-       return __tcf_idr_check(tn, index, a, bind);
-}
-EXPORT_SYMBOL(tcf_idr_check);
-
-int tcf_idr_delete_index(struct tc_action_net *tn, u32 index)
-{
-       struct tcf_idrinfo *idrinfo = tn->idrinfo;
        struct tc_action *p;
        int ret = 0;
 
@@ -370,7 +353,6 @@ int tcf_idr_delete_index(struct tc_action_net *tn, u32 index)
        spin_unlock(&idrinfo->lock);
        return ret;
 }
-EXPORT_SYMBOL(tcf_idr_delete_index);
 
 int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
                   struct tc_action **a, const struct tc_action_ops *ops,
@@ -409,7 +391,6 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
 
        p->idrinfo = idrinfo;
        p->ops = ops;
-       INIT_LIST_HEAD(&p->list);
        *a = p;
        return 0;
 err3:
@@ -681,19 +662,30 @@ int tcf_action_destroy(struct tc_action *actions[], int bind)
        return ret;
 }
 
+static int tcf_action_destroy_1(struct tc_action *a, int bind)
+{
+       struct tc_action *actions[] = { a, NULL };
+
+       return tcf_action_destroy(actions, bind);
+}
+
 static int tcf_action_put(struct tc_action *p)
 {
        return __tcf_action_put(p, false);
 }
 
+/* Put all actions in this array, skip those NULL's. */
 static void tcf_action_put_many(struct tc_action *actions[])
 {
        int i;
 
-       for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
+       for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
                struct tc_action *a = actions[i];
-               const struct tc_action_ops *ops = a->ops;
+               const struct tc_action_ops *ops;
 
+               if (!a)
+                       continue;
+               ops = a->ops;
                if (tcf_action_put(a))
                        module_put(ops->owner);
        }
@@ -896,17 +888,16 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
        if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) {
                err = tcf_action_goto_chain_init(a, tp);
                if (err) {
-                       struct tc_action *actions[] = { a, NULL };
-
-                       tcf_action_destroy(actions, bind);
+                       tcf_action_destroy_1(a, bind);
                        NL_SET_ERR_MSG(extack, "Failed to init TC action chain");
                        return ERR_PTR(err);
                }
        }
 
        if (!tcf_action_valid(a->tcfa_action)) {
-               NL_SET_ERR_MSG(extack, "invalid action value, using TC_ACT_UNSPEC instead");
-               a->tcfa_action = TC_ACT_UNSPEC;
+               tcf_action_destroy_1(a, bind);
+               NL_SET_ERR_MSG(extack, "Invalid control action value");
+               return ERR_PTR(-EINVAL);
        }
 
        return a;
@@ -1175,41 +1166,38 @@ err_out:
        return err;
 }
 
-static int tcf_action_delete(struct net *net, struct tc_action *actions[],
-                            int *acts_deleted, struct netlink_ext_ack *extack)
+static int tcf_action_delete(struct net *net, struct tc_action *actions[])
 {
-       u32 act_index;
-       int ret, i;
+       int i;
 
        for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
                struct tc_action *a = actions[i];
                const struct tc_action_ops *ops = a->ops;
-
                /* Actions can be deleted concurrently so we must save their
                 * type and id to search again after reference is released.
                 */
-               act_index = a->tcfa_index;
+               struct tcf_idrinfo *idrinfo = a->idrinfo;
+               u32 act_index = a->tcfa_index;
 
+               actions[i] = NULL;
                if (tcf_action_put(a)) {
                        /* last reference, action was deleted concurrently */
                        module_put(ops->owner);
                } else  {
+                       int ret;
+
                        /* now do the delete */
-                       ret = ops->delete(net, act_index);
-                       if (ret < 0) {
-                               *acts_deleted = i + 1;
+                       ret = tcf_idr_delete_index(idrinfo, act_index);
+                       if (ret < 0)
                                return ret;
-                       }
                }
        }
-       *acts_deleted = i;
        return 0;
 }
 
 static int
 tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
-              int *acts_deleted, u32 portid, size_t attr_size,
-              struct netlink_ext_ack *extack)
+              u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
 {
        int ret;
        struct sk_buff *skb;
@@ -1227,7 +1215,7 @@ tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
        }
 
        /* now do the delete */
-       ret = tcf_action_delete(net, actions, acts_deleted, extack);
+       ret = tcf_action_delete(net, actions);
        if (ret < 0) {
                NL_SET_ERR_MSG(extack, "Failed to delete TC action");
                kfree_skb(skb);
@@ -1249,8 +1237,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
        struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
        struct tc_action *act;
        size_t attr_size = 0;
-       struct tc_action *actions[TCA_ACT_MAX_PRIO + 1] = {};
-       int acts_deleted = 0;
+       struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
 
        ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, extack);
        if (ret < 0)
@@ -1280,14 +1267,13 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
        if (event == RTM_GETACTION)
                ret = tcf_get_notify(net, portid, n, actions, event, extack);
        else { /* delete */
-               ret = tcf_del_notify(net, n, actions, &acts_deleted, portid,
-                                    attr_size, extack);
+               ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);
                if (ret)
                        goto err;
-               return ret;
+               return 0;
        }
 err:
-       tcf_action_put_many(&actions[acts_deleted]);
+       tcf_action_put_many(actions);
        return ret;
 }
 
index d30b23e424364300f5440da022abb2129b9cf5f8..0c68bc9cf0b4df540a223e14dfa8ff569f96a40c 100644 (file)
@@ -395,13 +395,6 @@ static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_bpf_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, bpf_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_bpf_ops __read_mostly = {
        .kind           =       "bpf",
        .type           =       TCA_ACT_BPF,
@@ -412,7 +405,6 @@ static struct tc_action_ops act_bpf_ops __read_mostly = {
        .init           =       tcf_bpf_init,
        .walk           =       tcf_bpf_walker,
        .lookup         =       tcf_bpf_search,
-       .delete         =       tcf_bpf_delete,
        .size           =       sizeof(struct tcf_bpf),
 };
 
index 54c0bf54f2acf71cd63ff17ef0cb74898bc0d478..6f0f273f1139f83ef1a45f017c37d200c36fcae1 100644 (file)
@@ -198,13 +198,6 @@ static int tcf_connmark_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_connmark_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, connmark_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_connmark_ops = {
        .kind           =       "connmark",
        .type           =       TCA_ACT_CONNMARK,
@@ -214,7 +207,6 @@ static struct tc_action_ops act_connmark_ops = {
        .init           =       tcf_connmark_init,
        .walk           =       tcf_connmark_walker,
        .lookup         =       tcf_connmark_search,
-       .delete         =       tcf_connmark_delete,
        .size           =       sizeof(struct tcf_connmark_info),
 };
 
index e698d3fe2080d1051d70c6ab92ff111bcd42d36d..b8a67ae3105ad10f645bfcb65a503dafcbe5cbb2 100644 (file)
@@ -659,13 +659,6 @@ static size_t tcf_csum_get_fill_size(const struct tc_action *act)
        return nla_total_size(sizeof(struct tc_csum));
 }
 
-static int tcf_csum_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, csum_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_csum_ops = {
        .kind           = "csum",
        .type           = TCA_ACT_CSUM,
@@ -677,7 +670,6 @@ static struct tc_action_ops act_csum_ops = {
        .walk           = tcf_csum_walker,
        .lookup         = tcf_csum_search,
        .get_fill_size  = tcf_csum_get_fill_size,
-       .delete         = tcf_csum_delete,
        .size           = sizeof(struct tcf_csum),
 };
 
index 6a3f25a8ffb30b8998fc2a7c37aaccc2c29c466d..cd1d9bd32ef9af4c5789e0331b6d1c1b7e6820f3 100644 (file)
@@ -243,13 +243,6 @@ static size_t tcf_gact_get_fill_size(const struct tc_action *act)
        return sz;
 }
 
-static int tcf_gact_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, gact_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_gact_ops = {
        .kind           =       "gact",
        .type           =       TCA_ACT_GACT,
@@ -261,7 +254,6 @@ static struct tc_action_ops act_gact_ops = {
        .walk           =       tcf_gact_walker,
        .lookup         =       tcf_gact_search,
        .get_fill_size  =       tcf_gact_get_fill_size,
-       .delete         =       tcf_gact_delete,
        .size           =       sizeof(struct tcf_gact),
 };
 
index d1081bdf1bdb5565660e41130e541800392e3889..06a3d48018782e5d35981fdcfc3208a5f11ad276 100644 (file)
@@ -167,16 +167,16 @@ static struct tcf_meta_ops *find_ife_oplist(u16 metaid)
 {
        struct tcf_meta_ops *o;
 
-       read_lock_bh(&ife_mod_lock);
+       read_lock(&ife_mod_lock);
        list_for_each_entry(o, &ifeoplist, list) {
                if (o->metaid == metaid) {
                        if (!try_module_get(o->owner))
                                o = NULL;
-                       read_unlock_bh(&ife_mod_lock);
+                       read_unlock(&ife_mod_lock);
                        return o;
                }
        }
-       read_unlock_bh(&ife_mod_lock);
+       read_unlock(&ife_mod_lock);
 
        return NULL;
 }
@@ -190,12 +190,12 @@ int register_ife_op(struct tcf_meta_ops *mops)
            !mops->get || !mops->alloc)
                return -EINVAL;
 
-       write_lock_bh(&ife_mod_lock);
+       write_lock(&ife_mod_lock);
 
        list_for_each_entry(m, &ifeoplist, list) {
                if (m->metaid == mops->metaid ||
                    (strcmp(mops->name, m->name) == 0)) {
-                       write_unlock_bh(&ife_mod_lock);
+                       write_unlock(&ife_mod_lock);
                        return -EEXIST;
                }
        }
@@ -204,7 +204,7 @@ int register_ife_op(struct tcf_meta_ops *mops)
                mops->release = ife_release_meta_gen;
 
        list_add_tail(&mops->list, &ifeoplist);
-       write_unlock_bh(&ife_mod_lock);
+       write_unlock(&ife_mod_lock);
        return 0;
 }
 EXPORT_SYMBOL_GPL(unregister_ife_op);
@@ -214,7 +214,7 @@ int unregister_ife_op(struct tcf_meta_ops *mops)
        struct tcf_meta_ops *m;
        int err = -ENOENT;
 
-       write_lock_bh(&ife_mod_lock);
+       write_lock(&ife_mod_lock);
        list_for_each_entry(m, &ifeoplist, list) {
                if (m->metaid == mops->metaid) {
                        list_del(&mops->list);
@@ -222,7 +222,7 @@ int unregister_ife_op(struct tcf_meta_ops *mops)
                        break;
                }
        }
-       write_unlock_bh(&ife_mod_lock);
+       write_unlock(&ife_mod_lock);
 
        return err;
 }
@@ -265,11 +265,8 @@ static const char *ife_meta_id2name(u32 metaid)
 #endif
 
 /* called when adding new meta information
- * under ife->tcf_lock for existing action
 */
-static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
-                               void *val, int len, bool exists,
-                               bool rtnl_held)
+static int load_metaops_and_vet(u32 metaid, void *val, int len, bool rtnl_held)
 {
        struct tcf_meta_ops *ops = find_ife_oplist(metaid);
        int ret = 0;
@@ -277,15 +274,11 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
        if (!ops) {
                ret = -ENOENT;
 #ifdef CONFIG_MODULES
-               if (exists)
-                       spin_unlock_bh(&ife->tcf_lock);
                if (rtnl_held)
                        rtnl_unlock();
                request_module("ife-meta-%s", ife_meta_id2name(metaid));
                if (rtnl_held)
                        rtnl_lock();
-               if (exists)
-                       spin_lock_bh(&ife->tcf_lock);
                ops = find_ife_oplist(metaid);
 #endif
        }
@@ -302,24 +295,17 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
 }
 
 /* called when adding new meta information
- * under ife->tcf_lock for existing action
 */
-static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
-                       int len, bool atomic)
+static int __add_metainfo(const struct tcf_meta_ops *ops,
+                         struct tcf_ife_info *ife, u32 metaid, void *metaval,
+                         int len, bool atomic, bool exists)
 {
        struct tcf_meta_info *mi = NULL;
-       struct tcf_meta_ops *ops = find_ife_oplist(metaid);
        int ret = 0;
 
-       if (!ops)
-               return -ENOENT;
-
        mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
-       if (!mi) {
-               /*put back what find_ife_oplist took */
-               module_put(ops->owner);
+       if (!mi)
                return -ENOMEM;
-       }
 
        mi->metaid = metaid;
        mi->ops = ops;
@@ -327,29 +313,61 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
                ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
                if (ret != 0) {
                        kfree(mi);
-                       module_put(ops->owner);
                        return ret;
                }
        }
 
+       if (exists)
+               spin_lock_bh(&ife->tcf_lock);
        list_add_tail(&mi->metalist, &ife->metalist);
+       if (exists)
+               spin_unlock_bh(&ife->tcf_lock);
+
+       return ret;
+}
+
+static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops,
+                                   struct tcf_ife_info *ife, u32 metaid,
+                                   bool exists)
+{
+       int ret;
+
+       if (!try_module_get(ops->owner))
+               return -ENOENT;
+       ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists);
+       if (ret)
+               module_put(ops->owner);
+       return ret;
+}
+
+static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
+                       int len, bool exists)
+{
+       const struct tcf_meta_ops *ops = find_ife_oplist(metaid);
+       int ret;
 
+       if (!ops)
+               return -ENOENT;
+       ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists);
+       if (ret)
+               /*put back what find_ife_oplist took */
+               module_put(ops->owner);
        return ret;
 }
 
-static int use_all_metadata(struct tcf_ife_info *ife)
+static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
 {
        struct tcf_meta_ops *o;
        int rc = 0;
        int installed = 0;
 
-       read_lock_bh(&ife_mod_lock);
+       read_lock(&ife_mod_lock);
        list_for_each_entry(o, &ifeoplist, list) {
-               rc = add_metainfo(ife, o->metaid, NULL, 0, true);
+               rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists);
                if (rc == 0)
                        installed += 1;
        }
-       read_unlock_bh(&ife_mod_lock);
+       read_unlock(&ife_mod_lock);
 
        if (installed)
                return 0;
@@ -396,7 +414,6 @@ static void _tcf_ife_cleanup(struct tc_action *a)
        struct tcf_meta_info *e, *n;
 
        list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
-               module_put(e->ops->owner);
                list_del(&e->metalist);
                if (e->metaval) {
                        if (e->ops->release)
@@ -404,6 +421,7 @@ static void _tcf_ife_cleanup(struct tc_action *a)
                        else
                                kfree(e->metaval);
                }
+               module_put(e->ops->owner);
                kfree(e);
        }
 }
@@ -422,7 +440,6 @@ static void tcf_ife_cleanup(struct tc_action *a)
                kfree_rcu(p, rcu);
 }
 
-/* under ife->tcf_lock for existing action */
 static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
                             bool exists, bool rtnl_held)
 {
@@ -436,8 +453,7 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
                        val = nla_data(tb[i]);
                        len = nla_len(tb[i]);
 
-                       rc = load_metaops_and_vet(ife, i, val, len, exists,
-                                                 rtnl_held);
+                       rc = load_metaops_and_vet(i, val, len, rtnl_held);
                        if (rc != 0)
                                return rc;
 
@@ -540,8 +556,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
                p->eth_type = ife_type;
        }
 
-       if (exists)
-               spin_lock_bh(&ife->tcf_lock);
 
        if (ret == ACT_P_CREATED)
                INIT_LIST_HEAD(&ife->metalist);
@@ -551,10 +565,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
                                       NULL, NULL);
                if (err) {
 metadata_parse_err:
-                       if (exists)
-                               spin_unlock_bh(&ife->tcf_lock);
                        tcf_idr_release(*a, bind);
-
                        kfree(p);
                        return err;
                }
@@ -569,17 +580,16 @@ metadata_parse_err:
                 * as we can. You better have at least one else we are
                 * going to bail out
                 */
-               err = use_all_metadata(ife);
+               err = use_all_metadata(ife, exists);
                if (err) {
-                       if (exists)
-                               spin_unlock_bh(&ife->tcf_lock);
                        tcf_idr_release(*a, bind);
-
                        kfree(p);
                        return err;
                }
        }
 
+       if (exists)
+               spin_lock_bh(&ife->tcf_lock);
        ife->tcf_action = parm->action;
        /* protected by tcf_lock when modifying existing action */
        rcu_swap_protected(ife->params, p, 1);
@@ -853,13 +863,6 @@ static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_ife_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, ife_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_ife_ops = {
        .kind = "ife",
        .type = TCA_ACT_IFE,
@@ -870,7 +873,6 @@ static struct tc_action_ops act_ife_ops = {
        .init = tcf_ife_init,
        .walk = tcf_ife_walker,
        .lookup = tcf_ife_search,
-       .delete = tcf_ife_delete,
        .size = sizeof(struct tcf_ife_info),
 };
 
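Editor's note on the act_ife hunks above: the cleanup path now calls the per-entry release hook before dropping the module reference, and add_metainfo_and_get_ops() takes try_module_get() before inserting and puts it back only on failure, so the ops provider can never be unloaded while its release code may still run. Below is a minimal standalone sketch of that ordering rule; all names (fake_module, meta_ops, cleanup_entry) are hypothetical stand-ins, not kernel APIs.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for a module refcount and per-entry ops. */
struct fake_module { int refcnt; };

struct meta_ops {
        struct fake_module *owner;
        void (*release)(void *val);
};

static int module_get(struct fake_module *m) { m->refcnt++; return 1; }
static void module_put(struct fake_module *m) { m->refcnt--; }

static void release_val(void *val) { free(val); }

/* Entry holding a value whose release routine lives in ops->owner. */
struct meta_info {
        struct meta_ops *ops;
        void *val;
};

static void cleanup_entry(struct meta_info *mi)
{
        /* Call the release hook first: it is code provided by the owner,
         * so the reference pinning the owner must still be held here.
         */
        if (mi->val)
                mi->ops->release(mi->val);
        /* Only now is it safe to drop the reference on the owner. */
        module_put(mi->ops->owner);
}

int main(void)
{
        struct fake_module mod = { .refcnt = 0 };
        struct meta_ops ops = { .owner = &mod, .release = release_val };
        struct meta_info mi = { .ops = &ops, .val = malloc(16) };

        module_get(&mod);       /* taken when the entry was created */
        cleanup_entry(&mi);
        printf("refcnt after cleanup: %d\n", mod.refcnt); /* 0 */
        return 0;
}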
index 51f235bbeb5bccb4267fd70eeec8399494c9f246..23273b5303fd9dcc68cf09ee6f701defe50580b5 100644 (file)
@@ -337,13 +337,6 @@ static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_ipt_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, ipt_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_ipt_ops = {
        .kind           =       "ipt",
        .type           =       TCA_ACT_IPT,
@@ -354,7 +347,6 @@ static struct tc_action_ops act_ipt_ops = {
        .init           =       tcf_ipt_init,
        .walk           =       tcf_ipt_walker,
        .lookup         =       tcf_ipt_search,
-       .delete         =       tcf_ipt_delete,
        .size           =       sizeof(struct tcf_ipt),
 };
 
@@ -395,13 +387,6 @@ static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_xt_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, xt_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_xt_ops = {
        .kind           =       "xt",
        .type           =       TCA_ACT_XT,
@@ -412,7 +397,6 @@ static struct tc_action_ops act_xt_ops = {
        .init           =       tcf_xt_init,
        .walk           =       tcf_xt_walker,
        .lookup         =       tcf_xt_search,
-       .delete         =       tcf_xt_delete,
        .size           =       sizeof(struct tcf_ipt),
 };
 
index 38fd20f10f6796eaba1402ee572d7712837f1a2c..8bf66d0a6800006e070eacc77519257410696097 100644 (file)
@@ -395,13 +395,6 @@ static void tcf_mirred_put_dev(struct net_device *dev)
        dev_put(dev);
 }
 
-static int tcf_mirred_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, mirred_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_mirred_ops = {
        .kind           =       "mirred",
        .type           =       TCA_ACT_MIRRED,
@@ -416,7 +409,6 @@ static struct tc_action_ops act_mirred_ops = {
        .size           =       sizeof(struct tcf_mirred),
        .get_dev        =       tcf_mirred_get_dev,
        .put_dev        =       tcf_mirred_put_dev,
-       .delete         =       tcf_mirred_delete,
 };
 
 static __net_init int mirred_init_net(struct net *net)
index 822e903bfc25f008d9754d1340a5b4352f4fe59e..4313aa102440e9b55fb0ba200406801217d65284 100644 (file)
@@ -300,13 +300,6 @@ static int tcf_nat_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_nat_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, nat_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_nat_ops = {
        .kind           =       "nat",
        .type           =       TCA_ACT_NAT,
@@ -316,7 +309,6 @@ static struct tc_action_ops act_nat_ops = {
        .init           =       tcf_nat_init,
        .walk           =       tcf_nat_walker,
        .lookup         =       tcf_nat_search,
-       .delete         =       tcf_nat_delete,
        .size           =       sizeof(struct tcf_nat),
 };
 
index 8a7a7cb94e8308e8b4d240f957b64080b269f485..ad99a99f11f6de4ad452b501aa8b23a01f156790 100644 (file)
@@ -109,16 +109,18 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb,
 {
        struct nlattr *keys_start = nla_nest_start(skb, TCA_PEDIT_KEYS_EX);
 
+       if (!keys_start)
+               goto nla_failure;
        for (; n > 0; n--) {
                struct nlattr *key_start;
 
                key_start = nla_nest_start(skb, TCA_PEDIT_KEY_EX);
+               if (!key_start)
+                       goto nla_failure;
 
                if (nla_put_u16(skb, TCA_PEDIT_KEY_EX_HTYPE, keys_ex->htype) ||
-                   nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd)) {
-                       nlmsg_trim(skb, keys_start);
-                       return -EINVAL;
-               }
+                   nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd))
+                       goto nla_failure;
 
                nla_nest_end(skb, key_start);
 
@@ -128,6 +130,9 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb,
        nla_nest_end(skb, keys_start);
 
        return 0;
+nla_failure:
+       nla_nest_cancel(skb, keys_start);
+       return -EINVAL;
 }
 
 static int tcf_pedit_init(struct net *net, struct nlattr *nla,
@@ -418,7 +423,10 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
        opt->bindcnt = atomic_read(&p->tcf_bindcnt) - bind;
 
        if (p->tcfp_keys_ex) {
-               tcf_pedit_key_ex_dump(skb, p->tcfp_keys_ex, p->tcfp_nkeys);
+               if (tcf_pedit_key_ex_dump(skb,
+                                         p->tcfp_keys_ex,
+                                         p->tcfp_nkeys))
+                       goto nla_put_failure;
 
                if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt))
                        goto nla_put_failure;
@@ -460,13 +468,6 @@ static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_pedit_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, pedit_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_pedit_ops = {
        .kind           =       "pedit",
        .type           =       TCA_ACT_PEDIT,
@@ -477,7 +478,6 @@ static struct tc_action_ops act_pedit_ops = {
        .init           =       tcf_pedit_init,
        .walk           =       tcf_pedit_walker,
        .lookup         =       tcf_pedit_search,
-       .delete         =       tcf_pedit_delete,
        .size           =       sizeof(struct tcf_pedit),
 };
 
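Editor's note on the act_pedit hunks above: the dump helper now rolls a partially built TCA_PEDIT_KEYS_EX nest back through a single nla_nest_cancel() label, and tcf_pedit_dump() checks the helper's return value instead of ignoring it. The idea is "remember a mark, roll back to it on failure"; here is a standalone sketch of that pattern over a plain byte buffer, with hypothetical helpers standing in for the netlink nesting API.

#include <stdio.h>
#include <string.h>

/* Toy append-only message buffer standing in for an sk_buff. */
struct msgbuf {
        char data[64];
        size_t len;
};

/* Append a record; fail if it does not fit (mirrors nla_put* failing). */
static int put_rec(struct msgbuf *m, const char *rec)
{
        size_t n = strlen(rec);

        if (m->len + n > sizeof(m->data))
                return -1;
        memcpy(m->data + m->len, rec, n);
        m->len += n;
        return 0;
}

/* Roll the buffer back to a saved mark (mirrors nla_nest_cancel). */
static void cancel_to(struct msgbuf *m, size_t mark)
{
        m->len = mark;
}

int main(void)
{
        struct msgbuf m = { .len = 0 };
        size_t nest_start;

        put_rec(&m, "hdr|");
        nest_start = m.len;             /* mark before the nested records */

        if (put_rec(&m, "key1|") ||
            put_rec(&m, "this-record-is-far-too-long-to-fit-in-the-remaining-space|")) {
                /* Drop the partial nest, keep everything before the mark. */
                cancel_to(&m, nest_start);
                printf("nest cancelled, len=%zu\n", m.len);
        }
        return 0;
}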
index 06f0742db593166370cfb69f1ef8a687e29007be..5d8bfa878477e8e738be55d2c9b818423c3c8ccc 100644 (file)
@@ -320,13 +320,6 @@ static int tcf_police_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_police_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, police_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 MODULE_AUTHOR("Alexey Kuznetsov");
 MODULE_DESCRIPTION("Policing actions");
 MODULE_LICENSE("GPL");
@@ -340,7 +333,6 @@ static struct tc_action_ops act_police_ops = {
        .init           =       tcf_police_init,
        .walk           =       tcf_police_walker,
        .lookup         =       tcf_police_search,
-       .delete         =       tcf_police_delete,
        .size           =       sizeof(struct tcf_police),
 };
 
index 207b4132d1b066a394467ace6cf454dbce53a33f..6b67aa13d2dd09bc2a8fe8d6749ca570f94abe37 100644 (file)
@@ -69,7 +69,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
 
        if (!exists) {
                ret = tcf_idr_create(tn, parm->index, est, a,
-                                    &act_sample_ops, bind, false);
+                                    &act_sample_ops, bind, true);
                if (ret) {
                        tcf_idr_cleanup(tn, parm->index);
                        return ret;
@@ -232,13 +232,6 @@ static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_sample_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, sample_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_sample_ops = {
        .kind     = "sample",
        .type     = TCA_ACT_SAMPLE,
@@ -249,7 +242,6 @@ static struct tc_action_ops act_sample_ops = {
        .cleanup  = tcf_sample_cleanup,
        .walk     = tcf_sample_walker,
        .lookup   = tcf_sample_search,
-       .delete   = tcf_sample_delete,
        .size     = sizeof(struct tcf_sample),
 };
 
index e616523ba3c15f9126146240d440d96849ac4ded..52400d49f81f233a572de8cdffff9c94099e614a 100644 (file)
@@ -196,13 +196,6 @@ static int tcf_simp_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_simp_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, simp_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_simp_ops = {
        .kind           =       "simple",
        .type           =       TCA_ACT_SIMP,
@@ -213,7 +206,6 @@ static struct tc_action_ops act_simp_ops = {
        .init           =       tcf_simp_init,
        .walk           =       tcf_simp_walker,
        .lookup         =       tcf_simp_search,
-       .delete         =       tcf_simp_delete,
        .size           =       sizeof(struct tcf_defact),
 };
 
index 926d7bc4a89d9db85078677f1bdb9d6c57a767fc..73e44ce2a8837d9151b77de1e1bf619872af217c 100644 (file)
@@ -299,13 +299,6 @@ static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_skbedit_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, skbedit_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_skbedit_ops = {
        .kind           =       "skbedit",
        .type           =       TCA_ACT_SKBEDIT,
@@ -316,7 +309,6 @@ static struct tc_action_ops act_skbedit_ops = {
        .cleanup        =       tcf_skbedit_cleanup,
        .walk           =       tcf_skbedit_walker,
        .lookup         =       tcf_skbedit_search,
-       .delete         =       tcf_skbedit_delete,
        .size           =       sizeof(struct tcf_skbedit),
 };
 
index d6a1af0c41712cf6e61bcdaefc06959e616efa21..588077fafd6cc58473b1d0de85b1aa3103b3bf69 100644 (file)
@@ -259,13 +259,6 @@ static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_skbmod_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, skbmod_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_skbmod_ops = {
        .kind           =       "skbmod",
        .type           =       TCA_ACT_SKBMOD,
@@ -276,7 +269,6 @@ static struct tc_action_ops act_skbmod_ops = {
        .cleanup        =       tcf_skbmod_cleanup,
        .walk           =       tcf_skbmod_walker,
        .lookup         =       tcf_skbmod_search,
-       .delete         =       tcf_skbmod_delete,
        .size           =       sizeof(struct tcf_skbmod),
 };
 
index 8f09cf08d8fe1242cdbbc1951124e0f19ace18ae..681f6f04e7dae38685b7abd8e4d3a313803a4517 100644 (file)
@@ -317,7 +317,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
                                                  &metadata->u.tun_info,
                                                  opts_len, extack);
                        if (ret < 0)
-                               goto err_out;
+                               goto release_tun_meta;
                }
 
                metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
@@ -333,23 +333,24 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
                                     &act_tunnel_key_ops, bind, true);
                if (ret) {
                        NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
-                       goto err_out;
+                       goto release_tun_meta;
                }
 
                ret = ACT_P_CREATED;
        } else if (!ovr) {
-               tcf_idr_release(*a, bind);
                NL_SET_ERR_MSG(extack, "TC IDR already exists");
-               return -EEXIST;
+               ret = -EEXIST;
+               goto release_tun_meta;
        }
 
        t = to_tunnel_key(*a);
 
        params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
        if (unlikely(!params_new)) {
-               tcf_idr_release(*a, bind);
                NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               exists = true;
+               goto release_tun_meta;
        }
        params_new->tcft_action = parm->t_action;
        params_new->tcft_enc_metadata = metadata;
@@ -367,6 +368,9 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
 
        return ret;
 
+release_tun_meta:
+       dst_release(&metadata->dst);
+
 err_out:
        if (exists)
                tcf_idr_release(*a, bind);
@@ -408,8 +412,10 @@ static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
                    nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE,
                               opt->type) ||
                    nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA,
-                           opt->length * 4, opt + 1))
+                           opt->length * 4, opt + 1)) {
+                       nla_nest_cancel(skb, start);
                        return -EMSGSIZE;
+               }
 
                len -= sizeof(struct geneve_opt) + opt->length * 4;
                src += sizeof(struct geneve_opt) + opt->length * 4;
@@ -423,7 +429,7 @@ static int tunnel_key_opts_dump(struct sk_buff *skb,
                                const struct ip_tunnel_info *info)
 {
        struct nlattr *start;
-       int err;
+       int err = -EINVAL;
 
        if (!info->options_len)
                return 0;
@@ -435,9 +441,11 @@ static int tunnel_key_opts_dump(struct sk_buff *skb,
        if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
                err = tunnel_key_geneve_opts_dump(skb, info);
                if (err)
-                       return err;
+                       goto err_out;
        } else {
-               return -EINVAL;
+err_out:
+               nla_nest_cancel(skb, start);
+               return err;
        }
 
        nla_nest_end(skb, start);
@@ -548,13 +556,6 @@ static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
-static int tunnel_key_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_tunnel_key_ops = {
        .kind           =       "tunnel_key",
        .type           =       TCA_ACT_TUNNEL_KEY,
@@ -565,7 +566,6 @@ static struct tc_action_ops act_tunnel_key_ops = {
        .cleanup        =       tunnel_key_release,
        .walk           =       tunnel_key_walker,
        .lookup         =       tunnel_key_search,
-       .delete         =       tunnel_key_delete,
        .size           =       sizeof(struct tcf_tunnel_key),
 };
 
index 209e70ad2c09a0b0e9567f24b33108f7b5e1f1e4..033d273afe50236a090fd4caddd0a8328ce82157 100644 (file)
@@ -296,13 +296,6 @@ static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_vlan_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, vlan_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_vlan_ops = {
        .kind           =       "vlan",
        .type           =       TCA_ACT_VLAN,
@@ -313,7 +306,6 @@ static struct tc_action_ops act_vlan_ops = {
        .cleanup        =       tcf_vlan_cleanup,
        .walk           =       tcf_vlan_walker,
        .lookup         =       tcf_vlan_search,
-       .delete         =       tcf_vlan_delete,
        .size           =       sizeof(struct tcf_vlan),
 };
 
index 31bd1439cf6059fbcb3e99525e4a964154ef7907..0a75cb2e5e7ba2d83b063956c0ce91af25679cb1 100644 (file)
@@ -1252,7 +1252,7 @@ replay:
        }
        chain = tcf_chain_get(block, chain_index, true);
        if (!chain) {
-               NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
+               NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
                err = -ENOMEM;
                goto errout;
        }
@@ -1399,7 +1399,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
                        goto errout;
                }
                NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
-               err = -EINVAL;
+               err = -ENOENT;
                goto errout;
        }
 
@@ -1902,6 +1902,8 @@ replay:
                                RTM_NEWCHAIN, false);
                break;
        case RTM_DELCHAIN:
+               tfilter_notify_chain(net, skb, block, q, parent, n,
+                                    chain, RTM_DELTFILTER);
                /* Flush the chain first as the user requested chain removal. */
                tcf_chain_flush(chain);
                /* In case the chain was successfully deleted, put a reference
index d5d2a6dc39216b0ca28bd11094f0b64fda5c5964..f218ccf1e2d9a651ad07c2a6276742b97d3b2102 100644 (file)
@@ -914,6 +914,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_U32_MAX + 1];
        u32 htid, flags = 0;
+       size_t sel_size;
        int err;
 #ifdef CONFIG_CLS_U32_PERF
        size_t size;
@@ -1076,8 +1077,13 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
        }
 
        s = nla_data(tb[TCA_U32_SEL]);
+       sel_size = struct_size(s, keys, s->nkeys);
+       if (nla_len(tb[TCA_U32_SEL]) < sel_size) {
+               err = -EINVAL;
+               goto erridr;
+       }
 
-       n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
+       n = kzalloc(offsetof(typeof(*n), sel) + sel_size, GFP_KERNEL);
        if (n == NULL) {
                err = -ENOBUFS;
                goto erridr;
@@ -1092,7 +1098,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
        }
 #endif
 
-       memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
+       memcpy(&n->sel, s, sel_size);
        RCU_INIT_POINTER(n->ht_up, ht);
        n->handle = handle;
        n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
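Editor's note on the cls_u32 hunk above: the selector copy switches from an open-coded sizeof(*s) + s->nkeys * sizeof(struct tc_u32_key) to struct_size()/offsetof(), and the netlink payload is rejected if it is shorter than the size implied by s->nkeys. A standalone sketch of that flexible-array sizing follows; the local STRUCT_SIZE macro is only an illustration and, unlike the kernel's struct_size(), does no overflow checking.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>

struct key { unsigned int mask, val; };

/* Header followed by a flexible array of keys, like tc_u32_sel. */
struct sel {
        unsigned int nkeys;
        struct key keys[];
};

/* Size of a struct sel carrying n keys; the kernel helper additionally
 * guards against multiplication/addition overflow. */
#define STRUCT_SIZE(n) (offsetof(struct sel, keys) + (n) * sizeof(struct key))

int main(void)
{
        size_t payload_len = sizeof(struct sel) + 1 * sizeof(struct key);
        struct sel *src = calloc(1, payload_len);
        struct sel *copy;
        size_t sel_size;

        src->nkeys = 2;                 /* claims more keys than the payload holds */
        sel_size = STRUCT_SIZE(src->nkeys);

        if (payload_len < sel_size) {   /* the added validation step */
                printf("rejected: payload %zu < needed %zu\n",
                       payload_len, sel_size);
                free(src);
                return 1;
        }

        copy = malloc(sel_size);
        memcpy(copy, src, sel_size);    /* copy header plus all declared keys */
        free(copy);
        free(src);
        return 0;
}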
index 35fc7252187c1f54fbcbceb827d7270381b2da5b..c07c30b916d5e4d7b7fef5586f92df366194da85 100644 (file)
@@ -64,7 +64,6 @@
 #include <linux/vmalloc.h>
 #include <linux/reciprocal_div.h>
 #include <net/netlink.h>
-#include <linux/version.h>
 #include <linux/if_vlan.h>
 #include <net/pkt_sched.h>
 #include <net/pkt_cls.h>
@@ -621,15 +620,20 @@ static bool cake_ddst(int flow_mode)
 }
 
 static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
-                    int flow_mode)
+                    int flow_mode, u16 flow_override, u16 host_override)
 {
-       u32 flow_hash = 0, srchost_hash, dsthost_hash;
+       u32 flow_hash = 0, srchost_hash = 0, dsthost_hash = 0;
        u16 reduced_hash, srchost_idx, dsthost_idx;
        struct flow_keys keys, host_keys;
 
        if (unlikely(flow_mode == CAKE_FLOW_NONE))
                return 0;
 
+       /* If both overrides are set we can skip packet dissection entirely */
+       if ((flow_override || !(flow_mode & CAKE_FLOW_FLOWS)) &&
+           (host_override || !(flow_mode & CAKE_FLOW_HOSTS)))
+               goto skip_hash;
+
        skb_flow_dissect_flow_keys(skb, &keys,
                                   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
 
@@ -676,6 +680,14 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
        if (flow_mode & CAKE_FLOW_FLOWS)
                flow_hash = flow_hash_from_keys(&keys);
 
+skip_hash:
+       if (flow_override)
+               flow_hash = flow_override - 1;
+       if (host_override) {
+               dsthost_hash = host_override - 1;
+               srchost_hash = host_override - 1;
+       }
+
        if (!(flow_mode & CAKE_FLOW_FLOWS)) {
                if (flow_mode & CAKE_FLOW_SRC_IP)
                        flow_hash ^= srchost_hash;
@@ -1571,7 +1583,7 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
        struct cake_sched_data *q = qdisc_priv(sch);
        struct tcf_proto *filter;
        struct tcf_result res;
-       u32 flow = 0;
+       u16 flow = 0, host = 0;
        int result;
 
        filter = rcu_dereference_bh(q->filter_list);
@@ -1595,10 +1607,12 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
 #endif
                if (TC_H_MIN(res.classid) <= CAKE_QUEUES)
                        flow = TC_H_MIN(res.classid);
+               if (TC_H_MAJ(res.classid) <= (CAKE_QUEUES << 16))
+                       host = TC_H_MAJ(res.classid) >> 16;
        }
 hash:
        *t = cake_select_tin(sch, skb);
-       return flow ?: cake_hash(*t, skb, flow_mode) + 1;
+       return cake_hash(*t, skb, flow_mode, flow, host) + 1;
 }
 
 static void cake_reconfigure(struct Qdisc *sch);
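Editor's note on the sch_cake hunks above: a tc filter result can now override the flow and host hashes independently, with the minor part of the classid selecting the flow queue and the major part the host, and packet dissection is skipped when both are forced. A small standalone sketch of splitting the classid and applying the "non-zero override wins, stored as value minus one" rule; TC_H_MAJ/TC_H_MIN are redefined locally for illustration.

#include <stdio.h>
#include <stdint.h>

#define TC_H_MAJ(h) ((h) & 0xFFFF0000U)
#define TC_H_MIN(h) ((h) & 0x0000FFFFU)

/* Apply the cake-style override rule: a non-zero override replaces the
 * computed hash, stored as override-1 so that queue 0 stays reachable. */
static uint32_t pick_queue(uint32_t computed_hash, uint16_t override)
{
        return override ? (uint32_t)(override - 1) : computed_hash;
}

int main(void)
{
        uint32_t classid = 0x00020005;  /* major 2 -> host, minor 5 -> flow */
        uint16_t flow_override = (uint16_t)TC_H_MIN(classid);
        uint16_t host_override = (uint16_t)(TC_H_MAJ(classid) >> 16);

        printf("flow queue:  %u\n", pick_queue(1234, flow_override)); /* 4 */
        printf("host bucket: %u\n", pick_queue(777, host_override));  /* 1 */
        printf("fallback:    %u\n", pick_queue(1234, 0));             /* 1234 */
        return 0;
}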
index ef5c9a82d4e896b506271cb229b07ee570c917f3..a644292f9fafd6cff9219c7fe6b64bfcc7bc898b 100644 (file)
@@ -215,7 +215,6 @@ static const struct seq_operations sctp_eps_ops = {
 struct sctp_ht_iter {
        struct seq_net_private p;
        struct rhashtable_iter hti;
-       int start_fail;
 };
 
 static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
@@ -224,7 +223,6 @@ static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
 
        sctp_transport_walk_start(&iter->hti);
 
-       iter->start_fail = 0;
        return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos);
 }
 
@@ -232,8 +230,6 @@ static void sctp_transport_seq_stop(struct seq_file *seq, void *v)
 {
        struct sctp_ht_iter *iter = seq->private;
 
-       if (iter->start_fail)
-               return;
        sctp_transport_walk_stop(&iter->hti);
 }
 
@@ -264,8 +260,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
        }
 
        transport = (struct sctp_transport *)v;
-       if (!sctp_transport_hold(transport))
-               return 0;
        assoc = transport->asoc;
        epb = &assoc->base;
        sk = epb->sk;
@@ -322,8 +316,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
        }
 
        transport = (struct sctp_transport *)v;
-       if (!sctp_transport_hold(transport))
-               return 0;
        assoc = transport->asoc;
 
        list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list,
index e96b15a66abaa481b64db1ebd34f75d59d724c3d..f73e9d38d5ba734d7ee3347e4015fd30d355bbfa 100644 (file)
@@ -2658,20 +2658,23 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
        }
 
        if (params->spp_flags & SPP_IPV6_FLOWLABEL) {
-               if (trans && trans->ipaddr.sa.sa_family == AF_INET6) {
-                       trans->flowlabel = params->spp_ipv6_flowlabel &
-                                          SCTP_FLOWLABEL_VAL_MASK;
-                       trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
-               } else if (asoc) {
-                       list_for_each_entry(trans,
-                                           &asoc->peer.transport_addr_list,
-                                           transports) {
-                               if (trans->ipaddr.sa.sa_family != AF_INET6)
-                                       continue;
+               if (trans) {
+                       if (trans->ipaddr.sa.sa_family == AF_INET6) {
                                trans->flowlabel = params->spp_ipv6_flowlabel &
                                                   SCTP_FLOWLABEL_VAL_MASK;
                                trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
                        }
+               } else if (asoc) {
+                       struct sctp_transport *t;
+
+                       list_for_each_entry(t, &asoc->peer.transport_addr_list,
+                                           transports) {
+                               if (t->ipaddr.sa.sa_family != AF_INET6)
+                                       continue;
+                               t->flowlabel = params->spp_ipv6_flowlabel &
+                                              SCTP_FLOWLABEL_VAL_MASK;
+                               t->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
+                       }
                        asoc->flowlabel = params->spp_ipv6_flowlabel &
                                          SCTP_FLOWLABEL_VAL_MASK;
                        asoc->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
@@ -2687,12 +2690,13 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
                        trans->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
                        trans->dscp |= SCTP_DSCP_SET_MASK;
                } else if (asoc) {
-                       list_for_each_entry(trans,
-                                           &asoc->peer.transport_addr_list,
+                       struct sctp_transport *t;
+
+                       list_for_each_entry(t, &asoc->peer.transport_addr_list,
                                            transports) {
-                               trans->dscp = params->spp_dscp &
-                                             SCTP_DSCP_VAL_MASK;
-                               trans->dscp |= SCTP_DSCP_SET_MASK;
+                               t->dscp = params->spp_dscp &
+                                         SCTP_DSCP_VAL_MASK;
+                               t->dscp |= SCTP_DSCP_SET_MASK;
                        }
                        asoc->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
                        asoc->dscp |= SCTP_DSCP_SET_MASK;
@@ -5005,9 +5009,14 @@ struct sctp_transport *sctp_transport_get_next(struct net *net,
                        break;
                }
 
+               if (!sctp_transport_hold(t))
+                       continue;
+
                if (net_eq(sock_net(t->asoc->base.sk), net) &&
                    t->asoc->peer.primary_path == t)
                        break;
+
+               sctp_transport_put(t);
        }
 
        return t;
@@ -5017,13 +5026,18 @@ struct sctp_transport *sctp_transport_get_idx(struct net *net,
                                              struct rhashtable_iter *iter,
                                              int pos)
 {
-       void *obj = SEQ_START_TOKEN;
+       struct sctp_transport *t;
 
-       while (pos && (obj = sctp_transport_get_next(net, iter)) &&
-              !IS_ERR(obj))
-               pos--;
+       if (!pos)
+               return SEQ_START_TOKEN;
 
-       return obj;
+       while ((t = sctp_transport_get_next(net, iter)) && !IS_ERR(t)) {
+               if (!--pos)
+                       break;
+               sctp_transport_put(t);
+       }
+
+       return t;
 }
 
 int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
@@ -5082,8 +5096,6 @@ again:
 
        tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
        for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
-               if (!sctp_transport_hold(tsp))
-                       continue;
                ret = cb(tsp, p);
                if (ret)
                        break;
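Editor's note on the sctp hunks above: refcounting moves into the iterator itself, so sctp_transport_get_next()/get_idx() return each transport with a reference already held and drop the temporary reference on entries they skip, instead of the /proc and walk callers taking the hold afterwards. A standalone sketch of that ownership rule over a refcounted array; the types and helpers here are hypothetical.

#include <stdio.h>

struct node { int refcnt; int wanted; };

static int node_hold(struct node *n) { n->refcnt++; return 1; }
static void node_put(struct node *n)  { n->refcnt--; }

/* Return the next wanted node with a reference held, or NULL at the end.
 * Skipped nodes have their temporary reference dropped before moving on. */
static struct node *get_next(struct node *arr, int len, int *pos)
{
        while (*pos < len) {
                struct node *n = &arr[(*pos)++];

                if (!node_hold(n))
                        continue;
                if (n->wanted)
                        return n;       /* caller now owns this reference */
                node_put(n);            /* not interesting: release it here */
        }
        return NULL;
}

int main(void)
{
        struct node nodes[3] = { {0, 0}, {0, 1}, {0, 0} };
        int pos = 0;
        struct node *n;

        while ((n = get_next(nodes, 3, &pos))) {
                printf("visiting node with refcnt %d\n", n->refcnt);
                node_put(n);            /* caller drops it when done */
        }
        for (pos = 0; pos < 3; pos++)
                printf("final refcnt[%d] = %d\n", pos, nodes[pos].refcnt);
        return 0;
}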
index e6945e318f0285441c0095903e099cb272fa78ec..01f3f8f32d6f927fd77bac5920ab5d8339c2f6af 100644 (file)
@@ -941,7 +941,8 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
 EXPORT_SYMBOL(dlci_ioctl_set);
 
 static long sock_do_ioctl(struct net *net, struct socket *sock,
-                                unsigned int cmd, unsigned long arg)
+                         unsigned int cmd, unsigned long arg,
+                         unsigned int ifreq_size)
 {
        int err;
        void __user *argp = (void __user *)arg;
@@ -967,11 +968,11 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
        } else {
                struct ifreq ifr;
                bool need_copyout;
-               if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
+               if (copy_from_user(&ifr, argp, ifreq_size))
                        return -EFAULT;
                err = dev_ioctl(net, cmd, &ifr, &need_copyout);
                if (!err && need_copyout)
-                       if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
+                       if (copy_to_user(argp, &ifr, ifreq_size))
                                return -EFAULT;
        }
        return err;
@@ -1070,7 +1071,8 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
                        err = open_related_ns(&net->ns, get_net_ns);
                        break;
                default:
-                       err = sock_do_ioctl(net, sock, cmd, arg);
+                       err = sock_do_ioctl(net, sock, cmd, arg,
+                                           sizeof(struct ifreq));
                        break;
                }
        return err;
@@ -2750,7 +2752,8 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
        int err;
 
        set_fs(KERNEL_DS);
-       err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
+       err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv,
+                           sizeof(struct compat_ifreq));
        set_fs(old_fs);
        if (!err)
                err = compat_put_timeval(&ktv, up);
@@ -2766,7 +2769,8 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
        int err;
 
        set_fs(KERNEL_DS);
-       err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
+       err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts,
+                           sizeof(struct compat_ifreq));
        set_fs(old_fs);
        if (!err)
                err = compat_put_timespec(&kts, up);
@@ -3072,7 +3076,8 @@ static int routing_ioctl(struct net *net, struct socket *sock,
        }
 
        set_fs(KERNEL_DS);
-       ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r);
+       ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r,
+                           sizeof(struct compat_ifreq));
        set_fs(old_fs);
 
 out:
@@ -3185,7 +3190,8 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
        case SIOCBONDSETHWADDR:
        case SIOCBONDCHANGEACTIVE:
        case SIOCGIFNAME:
-               return sock_do_ioctl(net, sock, cmd, arg);
+               return sock_do_ioctl(net, sock, cmd, arg,
+                                    sizeof(struct compat_ifreq));
        }
 
        return -ENOIOCTLCMD;
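Editor's note on the net/socket.c hunks above: sock_do_ioctl() now takes an explicit ifreq_size, so the compat entry points copy only sizeof(struct compat_ifreq) to and from user space rather than the larger native struct ifreq. Below is a standalone sketch of why the caller must supply the size; the two struct layouts are purely illustrative (in the kernel the size difference comes from pointers and unions inside struct ifreq).

#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins only: the native layout is larger than the
 * 32-bit-compat one. */
struct ifreq_native { char name[16]; char data[24]; };
struct ifreq_compat { char name[16]; char data[8]; };

/* Shared handler: it copies exactly as many bytes as the caller's
 * user-space buffer holds, which is why the real sock_do_ioctl() now
 * takes ifreq_size instead of assuming sizeof(struct ifreq). */
static int do_ioctl(void *user_buf, size_t ifreq_size)
{
        struct ifreq_native ifr;

        memset(&ifr, 0, sizeof(ifr));
        memcpy(&ifr, user_buf, ifreq_size);     /* copy_from_user analogue */
        /* ... device-specific handling on ifr ... */
        memcpy(user_buf, &ifr, ifreq_size);     /* copy_to_user analogue */
        return 0;
}

int main(void)
{
        struct ifreq_compat creq;

        memset(&creq, 0, sizeof(creq));
        strcpy(creq.name, "eth0");

        /* Passing sizeof(struct ifreq_native) here would read and write
         * past the end of creq; the compat caller passes its own size. */
        do_ioctl(&creq, sizeof(creq));
        printf("handled %zu-byte compat request for %s\n",
               sizeof(creq), creq.name);
        return 0;
}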
index 9ee6cfea56dd015851302f1702f9e068803d01dd..d8026543bf4ce53634cb2d6fc114e57cdb74b21f 100644 (file)
@@ -51,12 +51,12 @@ const char tipc_bclink_name[] = "broadcast-link";
  * struct tipc_bc_base - base structure for keeping broadcast send state
  * @link: broadcast send link structure
  * @inputq: data input queue; will only carry SOCK_WAKEUP messages
- * @dest: array keeping number of reachable destinations per bearer
+ * @dests: array keeping number of reachable destinations per bearer
  * @primary_bearer: a bearer having links to all broadcast destinations, if any
  * @bcast_support: indicates if primary bearer, if any, supports broadcast
  * @rcast_support: indicates if all peer nodes support replicast
  * @rc_ratio: dest count as percentage of cluster size where send method changes
- * @bc_threshold: calculated drom rc_ratio; if dests > threshold use broadcast
+ * @bc_threshold: calculated from rc_ratio; if dests > threshold use broadcast
  */
 struct tipc_bc_base {
        struct tipc_link *link;
index aaabb0b776dda6041defb902804e08388e39f9c5..73137f4aeb68f95677e34a3b2d5e823565024483 100644 (file)
@@ -84,7 +84,9 @@ static int tipc_sock_diag_handler_dump(struct sk_buff *skb,
 
        if (h->nlmsg_flags & NLM_F_DUMP) {
                struct netlink_dump_control c = {
+                       .start = tipc_dump_start,
                        .dump = tipc_diag_dump,
+                       .done = tipc_dump_done,
                };
                netlink_dump_start(net->diag_nlsk, skb, h, &c);
                return 0;
index 88f027b502f6f5ed955b03bd645321ea8e79eec3..66d5b2c5987ad6a2e05ca45f236ade58459ac0bf 100644 (file)
@@ -980,20 +980,17 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
 struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port)
 {
-       u64 value = (u64)node << 32 | port;
        struct tipc_dest *dst;
 
        list_for_each_entry(dst, l, list) {
-               if (dst->value != value)
-                       continue;
-               return dst;
+               if (dst->node == node && dst->port == port)
+                       return dst;
        }
        return NULL;
 }
 
 bool tipc_dest_push(struct list_head *l, u32 node, u32 port)
 {
-       u64 value = (u64)node << 32 | port;
        struct tipc_dest *dst;
 
        if (tipc_dest_find(l, node, port))
@@ -1002,7 +999,8 @@ bool tipc_dest_push(struct list_head *l, u32 node, u32 port)
        dst = kmalloc(sizeof(*dst), GFP_ATOMIC);
        if (unlikely(!dst))
                return false;
-       dst->value = value;
+       dst->node = node;
+       dst->port = port;
        list_add(&dst->list, l);
        return true;
 }
index 0febba41da86ed64df54f447e7eb9b63cbea30cf..892bd750b85fa48639b15af9bbeb6d437a7660a7 100644 (file)
@@ -133,13 +133,8 @@ void tipc_nametbl_stop(struct net *net);
 
 struct tipc_dest {
        struct list_head list;
-       union {
-               struct {
-                       u32 port;
-                       u32 node;
-               };
-               u64 value;
-       };
+       u32 port;
+       u32 node;
 };
 
 struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port);
index 6ff2254088f647d4f7410c3335ccdae2e68ec522..99ee419210baf4374d438b138ab922034950ba7e 100644 (file)
@@ -167,7 +167,9 @@ static const struct genl_ops tipc_genl_v2_ops[] = {
        },
        {
                .cmd    = TIPC_NL_SOCK_GET,
+               .start = tipc_dump_start,
                .dumpit = tipc_nl_sk_dump,
+               .done   = tipc_dump_done,
                .policy = tipc_nl_policy,
        },
        {
index a2f76743c73af07bdfe3ed524aebb6d5bb000dbc..6376467e78f862c25ffae531848bd7aded07609e 100644 (file)
@@ -185,6 +185,10 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
                return -ENOMEM;
 
        buf->sk = msg->dst_sk;
+       if (__tipc_dump_start(&cb, msg->net)) {
+               kfree_skb(buf);
+               return -ENOMEM;
+       }
 
        do {
                int rem;
@@ -216,6 +220,7 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
        err = 0;
 
 err_out:
+       tipc_dump_done(&cb);
        kfree_skb(buf);
 
        if (err == -EMSGSIZE) {
index c1e93c9515bca5abaae647f18402a8bc25bfa2b4..3f03ddd0e35b2f1b6acad1c788faee1976924b3b 100644 (file)
@@ -576,6 +576,7 @@ static int tipc_release(struct socket *sock)
        sk_stop_timer(sk, &sk->sk_timer);
        tipc_sk_remove(tsk);
 
+       sock_orphan(sk);
        /* Reject any messages that accumulated in backlog queue */
        release_sock(sk);
        tipc_dest_list_purge(&tsk->cong_links);
@@ -2672,6 +2673,8 @@ void tipc_sk_reinit(struct net *net)
 
                rhashtable_walk_stop(&iter);
        } while (tsk == ERR_PTR(-EAGAIN));
+
+       rhashtable_walk_exit(&iter);
 }
 
 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
@@ -3227,45 +3230,74 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
                                       struct netlink_callback *cb,
                                       struct tipc_sock *tsk))
 {
-       struct net *net = sock_net(skb->sk);
-       struct tipc_net *tn = tipc_net(net);
-       const struct bucket_table *tbl;
-       u32 prev_portid = cb->args[1];
-       u32 tbl_id = cb->args[0];
-       struct rhash_head *pos;
+       struct rhashtable_iter *iter = (void *)cb->args[4];
        struct tipc_sock *tsk;
        int err;
 
-       rcu_read_lock();
-       tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
-       for (; tbl_id < tbl->size; tbl_id++) {
-               rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
-                       spin_lock_bh(&tsk->sk.sk_lock.slock);
-                       if (prev_portid && prev_portid != tsk->portid) {
-                               spin_unlock_bh(&tsk->sk.sk_lock.slock);
+       rhashtable_walk_start(iter);
+       while ((tsk = rhashtable_walk_next(iter)) != NULL) {
+               if (IS_ERR(tsk)) {
+                       err = PTR_ERR(tsk);
+                       if (err == -EAGAIN) {
+                               err = 0;
                                continue;
                        }
+                       break;
+               }
 
-                       err = skb_handler(skb, cb, tsk);
-                       if (err) {
-                               prev_portid = tsk->portid;
-                               spin_unlock_bh(&tsk->sk.sk_lock.slock);
-                               goto out;
-                       }
-
-                       prev_portid = 0;
-                       spin_unlock_bh(&tsk->sk.sk_lock.slock);
+               sock_hold(&tsk->sk);
+               rhashtable_walk_stop(iter);
+               lock_sock(&tsk->sk);
+               err = skb_handler(skb, cb, tsk);
+               if (err) {
+                       release_sock(&tsk->sk);
+                       sock_put(&tsk->sk);
+                       goto out;
                }
+               release_sock(&tsk->sk);
+               rhashtable_walk_start(iter);
+               sock_put(&tsk->sk);
        }
+       rhashtable_walk_stop(iter);
 out:
-       rcu_read_unlock();
-       cb->args[0] = tbl_id;
-       cb->args[1] = prev_portid;
-
        return skb->len;
 }
 EXPORT_SYMBOL(tipc_nl_sk_walk);
 
+int tipc_dump_start(struct netlink_callback *cb)
+{
+       return __tipc_dump_start(cb, sock_net(cb->skb->sk));
+}
+EXPORT_SYMBOL(tipc_dump_start);
+
+int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
+{
+       /* tipc_nl_name_table_dump() uses cb->args[0...3]. */
+       struct rhashtable_iter *iter = (void *)cb->args[4];
+       struct tipc_net *tn = tipc_net(net);
+
+       if (!iter) {
+               iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+               if (!iter)
+                       return -ENOMEM;
+
+               cb->args[4] = (long)iter;
+       }
+
+       rhashtable_walk_enter(&tn->sk_rht, iter);
+       return 0;
+}
+
+int tipc_dump_done(struct netlink_callback *cb)
+{
+       struct rhashtable_iter *hti = (void *)cb->args[4];
+
+       rhashtable_walk_exit(hti);
+       kfree(hti);
+       return 0;
+}
+EXPORT_SYMBOL(tipc_dump_done);
+
 int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
                           struct tipc_sock *tsk, u32 sk_filter_state,
                           u64 (*tipc_diag_gen_cookie)(struct sock *sk))
index aff9b2ae5a1f448d64d72927686d28146dfd5d7d..5e575f205afe4088f94f6f0c60f4200cc83b9b75 100644 (file)
@@ -68,4 +68,7 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
                    int (*skb_handler)(struct sk_buff *skb,
                                       struct netlink_callback *cb,
                                       struct tipc_sock *tsk));
+int tipc_dump_start(struct netlink_callback *cb);
+int __tipc_dump_start(struct netlink_callback *cb, struct net *net);
+int tipc_dump_done(struct netlink_callback *cb);
 #endif
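Editor's note on the tipc hunks above: the socket dump now keeps a heap-allocated rhashtable iterator in cb->args[4], set up by tipc_dump_start() (or __tipc_dump_start() on the compat path), resumed by tipc_nl_sk_walk(), and freed by tipc_dump_done(). Here is a standalone sketch of that start/walk/done lifecycle, with a plain array iterator standing in for the rhashtable walker; all names are hypothetical.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for netlink_callback: args[] persists across dump invocations. */
struct callback { long args[6]; };

struct iter { int pos; };

/* .start: allocate the iterator once and stash it in the callback state. */
static int dump_start(struct callback *cb)
{
        struct iter *it = (struct iter *)cb->args[4];

        if (!it) {
                it = calloc(1, sizeof(*it));
                if (!it)
                        return -1;
                cb->args[4] = (long)it;
        }
        return 0;
}

/* .dumpit: resume from the stored iterator, emit a bounded batch. */
static int dump_batch(struct callback *cb, const int *items, int n)
{
        struct iter *it = (struct iter *)cb->args[4];
        int emitted = 0;

        while (it->pos < n && emitted < 2) {    /* pretend the skb fills after 2 */
                printf("item %d\n", items[it->pos++]);
                emitted++;
        }
        return it->pos < n;                     /* non-zero: call again */
}

/* .done: free whatever .start allocated. */
static void dump_done(struct callback *cb)
{
        free((struct iter *)cb->args[4]);
}

int main(void)
{
        struct callback cb = { { 0 } };
        int items[] = { 10, 20, 30, 40, 50 };

        if (dump_start(&cb))
                return 1;
        while (dump_batch(&cb, items, 5))
                ;                               /* kernel re-invokes .dumpit */
        dump_done(&cb);
        return 0;
}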
index c8e34ef22c30223d57ea8254d239331402040742..2627b5d812e9040b11396f34f8bc31fb5da3956a 100644 (file)
@@ -313,8 +313,8 @@ static void tipc_conn_send_work(struct work_struct *work)
        conn_put(con);
 }
 
-/* tipc_conn_queue_evt() - interrupt level call from a subscription instance
- * The queued work is launched into tipc_send_work()->tipc_send_to_sock()
+/* tipc_topsrv_queue_evt() - interrupt level call from a subscription instance
+ * The queued work is launched into tipc_conn_send_work()->tipc_conn_send_to_sock()
  */
 void tipc_topsrv_queue_evt(struct net *net, int conid,
                           u32 event, struct tipc_event *evt)
index 292742e50bfa4b3a540cbaa3eb2b07400e7141be..961b07d4d41ca7a677b1fd0d24f490888d784e0a 100644 (file)
@@ -686,7 +686,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
                goto free_marker_record;
        }
 
-       crypto_info = &ctx->crypto_send;
+       crypto_info = &ctx->crypto_send.info;
        switch (crypto_info->cipher_type) {
        case TLS_CIPHER_AES_GCM_128:
                nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
@@ -780,7 +780,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
 
        ctx->priv_ctx_tx = offload_ctx;
        rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
-                                            &ctx->crypto_send,
+                                            &ctx->crypto_send.info,
                                             tcp_sk(sk)->write_seq);
        if (rc)
                goto release_netdev;
@@ -862,7 +862,7 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
                goto release_ctx;
 
        rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
-                                            &ctx->crypto_recv,
+                                            &ctx->crypto_recv.info,
                                             tcp_sk(sk)->copied_seq);
        if (rc) {
                pr_err_ratelimited("%s: The netdev has refused to offload this socket\n",
index 6102169239d1343487a5a0c676384cd3ae4228e5..450a6dbc5a888421c36f71f85c31365d6138562b 100644 (file)
@@ -320,7 +320,7 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
                goto free_req;
 
        iv = buf;
-       memcpy(iv, tls_ctx->crypto_send_aes_gcm_128.salt,
+       memcpy(iv, tls_ctx->crypto_send.aes_gcm_128.salt,
               TLS_CIPHER_AES_GCM_128_SALT_SIZE);
        aad = buf + TLS_CIPHER_AES_GCM_128_SALT_SIZE +
              TLS_CIPHER_AES_GCM_128_IV_SIZE;
index 93c0c225ab340ae0f0de3c5c2e6b2a149579c19e..523622dc74f8b969113b0435b39f5d0f3d070304 100644 (file)
@@ -213,9 +213,14 @@ static void tls_write_space(struct sock *sk)
 {
        struct tls_context *ctx = tls_get_ctx(sk);
 
-       /* We are already sending pages, ignore notification */
-       if (ctx->in_tcp_sendpages)
+       /* If in_tcp_sendpages call lower protocol write space handler
+        * to ensure we wake up any waiting operations there. For example
+        * if do_tcp_sendpages where to call sk_wait_event.
+        */
+       if (ctx->in_tcp_sendpages) {
+               ctx->sk_write_space(sk);
                return;
+       }
 
        if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) {
                gfp_t sk_allocation = sk->sk_allocation;
@@ -236,6 +241,16 @@ static void tls_write_space(struct sock *sk)
        ctx->sk_write_space(sk);
 }
 
+static void tls_ctx_free(struct tls_context *ctx)
+{
+       if (!ctx)
+               return;
+
+       memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
+       memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
+       kfree(ctx);
+}
+
 static void tls_sk_proto_close(struct sock *sk, long timeout)
 {
        struct tls_context *ctx = tls_get_ctx(sk);
@@ -289,7 +304,7 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
 #else
        {
 #endif
-               kfree(ctx);
+               tls_ctx_free(ctx);
                ctx = NULL;
        }
 
@@ -300,7 +315,7 @@ skip_tx_cleanup:
         * for sk->sk_prot->unhash [tls_hw_unhash]
         */
        if (free_ctx)
-               kfree(ctx);
+               tls_ctx_free(ctx);
 }
 
 static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
@@ -325,7 +340,7 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
        }
 
        /* get user crypto info */
-       crypto_info = &ctx->crypto_send;
+       crypto_info = &ctx->crypto_send.info;
 
        if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
                rc = -EBUSY;
@@ -412,9 +427,9 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
        }
 
        if (tx)
-               crypto_info = &ctx->crypto_send;
+               crypto_info = &ctx->crypto_send.info;
        else
-               crypto_info = &ctx->crypto_recv;
+               crypto_info = &ctx->crypto_recv.info;
 
        /* Currently we don't support set crypto info more than one time */
        if (TLS_CRYPTO_INFO_READY(crypto_info)) {
@@ -494,7 +509,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
        goto out;
 
 err_crypto_info:
-       memset(crypto_info, 0, sizeof(*crypto_info));
+       memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
 out:
        return rc;
 }
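Editor's note on the tls_main.c hunks above: tls_ctx_free() scrubs crypto_send and crypto_recv with memzero_explicit() before kfree(), and the setsockopt error path zeroes the whole union the same way, so key material is not left behind in freed memory and the compiler cannot drop the write as dead. A standalone userspace sketch of the same idea follows; calling memset through a volatile function pointer is one common userspace substitute for the kernel's memzero_explicit(), which uses a compiler barrier instead.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Calling memset through a volatile pointer prevents the compiler from
 * proving the buffer is dead and eliding the scrub. */
static void *(*const volatile memset_v)(void *, int, size_t) = memset;

struct crypto_ctx {
        unsigned char key[16];
        unsigned char iv[12];
};

static void ctx_free(struct crypto_ctx *ctx)
{
        if (!ctx)
                return;
        memset_v(ctx->key, 0, sizeof(ctx->key));  /* scrub secrets first */
        memset_v(ctx->iv, 0, sizeof(ctx->iv));
        free(ctx);                                /* then release the memory */
}

int main(void)
{
        struct crypto_ctx *ctx = malloc(sizeof(*ctx));

        if (!ctx)
                return 1;
        memset(ctx->key, 0xAA, sizeof(ctx->key)); /* pretend this is a real key */
        memset(ctx->iv, 0x55, sizeof(ctx->iv));
        ctx_free(ctx);
        puts("context freed with secrets scrubbed");
        return 0;
}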
index 52fbe727d7c10c7e723238ee28ce05a11a720e77..b9c6ecfbcfea722b71c7c048cf19388527e00bb7 100644 (file)
@@ -125,6 +125,9 @@ static int alloc_encrypted_sg(struct sock *sk, int len)
                         &ctx->sg_encrypted_num_elem,
                         &ctx->sg_encrypted_size, 0);
 
+       if (rc == -ENOSPC)
+               ctx->sg_encrypted_num_elem = ARRAY_SIZE(ctx->sg_encrypted_data);
+
        return rc;
 }
 
@@ -138,6 +141,9 @@ static int alloc_plaintext_sg(struct sock *sk, int len)
                         &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
                         tls_ctx->pending_open_record_frags);
 
+       if (rc == -ENOSPC)
+               ctx->sg_plaintext_num_elem = ARRAY_SIZE(ctx->sg_plaintext_data);
+
        return rc;
 }
 
@@ -925,7 +931,15 @@ int tls_sw_recvmsg(struct sock *sk,
                                if (control != TLS_RECORD_TYPE_DATA)
                                        goto recv_end;
                        }
+               } else {
+                       /* MSG_PEEK right now cannot look beyond current skb
+                        * from strparser, meaning we cannot advance skb here
+                        * and thus unpause strparser since we'd loose original
+                        * one.
+                        */
+                       break;
                }
+
                /* If we have a new message from strparser, continue now. */
                if (copied >= target && !ctx->recv_pkt)
                        break;
@@ -1049,8 +1063,8 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
                goto read_failure;
        }
 
-       if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.version) ||
-           header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.version)) {
+       if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.info.version) ||
+           header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.info.version)) {
                ret = -EINVAL;
                goto read_failure;
        }
@@ -1130,7 +1144,6 @@ void tls_sw_free_resources_rx(struct sock *sk)
 
 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 {
-       char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
        struct tls_crypto_info *crypto_info;
        struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
        struct tls_sw_context_tx *sw_ctx_tx = NULL;
@@ -1175,12 +1188,12 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 
        if (tx) {
                crypto_init_wait(&sw_ctx_tx->async_wait);
-               crypto_info = &ctx->crypto_send;
+               crypto_info = &ctx->crypto_send.info;
                cctx = &ctx->tx;
                aead = &sw_ctx_tx->aead_send;
        } else {
                crypto_init_wait(&sw_ctx_rx->async_wait);
-               crypto_info = &ctx->crypto_recv;
+               crypto_info = &ctx->crypto_recv.info;
                cctx = &ctx->rx;
                aead = &sw_ctx_rx->aead_recv;
        }
@@ -1259,9 +1272,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 
        ctx->push_pending_record = tls_sw_push_pending_record;
 
-       memcpy(keyval, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
-
-       rc = crypto_aead_setkey(*aead, keyval,
+       rc = crypto_aead_setkey(*aead, gcm_128_info->key,
                                TLS_CIPHER_AES_GCM_128_KEY_SIZE);
        if (rc)
                goto free_aead;
index 5fb9b7dd98318b6e9d4842474361104855b42983..4b8ec659e797ff743267773e315c6220b90993d0 100644 (file)
@@ -669,13 +669,13 @@ static int nl80211_msg_put_wmm_rules(struct sk_buff *msg,
                        goto nla_put_failure;
 
                if (nla_put_u16(msg, NL80211_WMMR_CW_MIN,
-                               rule->wmm_rule->client[j].cw_min) ||
+                               rule->wmm_rule.client[j].cw_min) ||
                    nla_put_u16(msg, NL80211_WMMR_CW_MAX,
-                               rule->wmm_rule->client[j].cw_max) ||
+                               rule->wmm_rule.client[j].cw_max) ||
                    nla_put_u8(msg, NL80211_WMMR_AIFSN,
-                              rule->wmm_rule->client[j].aifsn) ||
-                   nla_put_u8(msg, NL80211_WMMR_TXOP,
-                              rule->wmm_rule->client[j].cot))
+                              rule->wmm_rule.client[j].aifsn) ||
+                   nla_put_u16(msg, NL80211_WMMR_TXOP,
+                               rule->wmm_rule.client[j].cot))
                        goto nla_put_failure;
 
                nla_nest_end(msg, nl_wmm_rule);
@@ -766,9 +766,9 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy,
 
        if (large) {
                const struct ieee80211_reg_rule *rule =
-                       freq_reg_info(wiphy, chan->center_freq);
+                       freq_reg_info(wiphy, MHZ_TO_KHZ(chan->center_freq));
 
-               if (!IS_ERR(rule) && rule->wmm_rule) {
+               if (!IS_ERR_OR_NULL(rule) && rule->has_wmm) {
                        if (nl80211_msg_put_wmm_rules(msg, rule))
                                goto nla_put_failure;
                }
@@ -12205,6 +12205,7 @@ static int nl80211_update_ft_ies(struct sk_buff *skb, struct genl_info *info)
                return -EOPNOTSUPP;
 
        if (!info->attrs[NL80211_ATTR_MDID] ||
+           !info->attrs[NL80211_ATTR_IE] ||
            !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
                return -EINVAL;
 
index 4fc66a117b7d74f86a1589a7a02b88f02f203b7d..2f702adf2912105947560d07d79ff86119333bbf 100644 (file)
@@ -425,36 +425,23 @@ static const struct ieee80211_regdomain *
 reg_copy_regd(const struct ieee80211_regdomain *src_regd)
 {
        struct ieee80211_regdomain *regd;
-       int size_of_regd, size_of_wmms;
+       int size_of_regd;
        unsigned int i;
-       struct ieee80211_wmm_rule *d_wmm, *s_wmm;
 
        size_of_regd =
                sizeof(struct ieee80211_regdomain) +
                src_regd->n_reg_rules * sizeof(struct ieee80211_reg_rule);
-       size_of_wmms = src_regd->n_wmm_rules *
-               sizeof(struct ieee80211_wmm_rule);
 
-       regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL);
+       regd = kzalloc(size_of_regd, GFP_KERNEL);
        if (!regd)
                return ERR_PTR(-ENOMEM);
 
        memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain));
 
-       d_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
-       s_wmm = (struct ieee80211_wmm_rule *)((u8 *)src_regd + size_of_regd);
-       memcpy(d_wmm, s_wmm, size_of_wmms);
-
-       for (i = 0; i < src_regd->n_reg_rules; i++) {
+       for (i = 0; i < src_regd->n_reg_rules; i++)
                memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i],
                       sizeof(struct ieee80211_reg_rule));
-               if (!src_regd->reg_rules[i].wmm_rule)
-                       continue;
 
-               regd->reg_rules[i].wmm_rule = d_wmm +
-                       (src_regd->reg_rules[i].wmm_rule - s_wmm) /
-                       sizeof(struct ieee80211_wmm_rule);
-       }
        return regd;
 }
 
@@ -860,9 +847,10 @@ static bool valid_regdb(const u8 *data, unsigned int size)
        return true;
 }
 
-static void set_wmm_rule(struct ieee80211_wmm_rule *rule,
+static void set_wmm_rule(struct ieee80211_reg_rule *rrule,
                         struct fwdb_wmm_rule *wmm)
 {
+       struct ieee80211_wmm_rule *rule = &rrule->wmm_rule;
        unsigned int i;
 
        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
@@ -876,11 +864,13 @@ static void set_wmm_rule(struct ieee80211_wmm_rule *rule,
                rule->ap[i].aifsn = wmm->ap[i].aifsn;
                rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot);
        }
+
+       rrule->has_wmm = true;
 }
 
 static int __regdb_query_wmm(const struct fwdb_header *db,
                             const struct fwdb_country *country, int freq,
-                            u32 *dbptr, struct ieee80211_wmm_rule *rule)
+                            struct ieee80211_reg_rule *rule)
 {
        unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2;
        struct fwdb_collection *coll = (void *)((u8 *)db + ptr);
@@ -901,8 +891,6 @@ static int __regdb_query_wmm(const struct fwdb_header *db,
                        wmm_ptr = be16_to_cpu(rrule->wmm_ptr) << 2;
                        wmm = (void *)((u8 *)db + wmm_ptr);
                        set_wmm_rule(rule, wmm);
-                       if (dbptr)
-                               *dbptr = wmm_ptr;
                        return 0;
                }
        }
@@ -910,8 +898,7 @@ static int __regdb_query_wmm(const struct fwdb_header *db,
        return -ENODATA;
 }
 
-int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr,
-                       struct ieee80211_wmm_rule *rule)
+int reg_query_regdb_wmm(char *alpha2, int freq, struct ieee80211_reg_rule *rule)
 {
        const struct fwdb_header *hdr = regdb;
        const struct fwdb_country *country;
@@ -925,8 +912,7 @@ int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr,
        country = &hdr->country[0];
        while (country->coll_ptr) {
                if (alpha2_equal(alpha2, country->alpha2))
-                       return __regdb_query_wmm(regdb, country, freq, dbptr,
-                                                rule);
+                       return __regdb_query_wmm(regdb, country, freq, rule);
 
                country++;
        }
@@ -935,32 +921,13 @@ int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr,
 }
 EXPORT_SYMBOL(reg_query_regdb_wmm);
 
-struct wmm_ptrs {
-       struct ieee80211_wmm_rule *rule;
-       u32 ptr;
-};
-
-static struct ieee80211_wmm_rule *find_wmm_ptr(struct wmm_ptrs *wmm_ptrs,
-                                              u32 wmm_ptr, int n_wmms)
-{
-       int i;
-
-       for (i = 0; i < n_wmms; i++) {
-               if (wmm_ptrs[i].ptr == wmm_ptr)
-                       return wmm_ptrs[i].rule;
-       }
-       return NULL;
-}
-
 static int regdb_query_country(const struct fwdb_header *db,
                               const struct fwdb_country *country)
 {
        unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2;
        struct fwdb_collection *coll = (void *)((u8 *)db + ptr);
        struct ieee80211_regdomain *regdom;
-       struct ieee80211_regdomain *tmp_rd;
-       unsigned int size_of_regd, i, n_wmms = 0;
-       struct wmm_ptrs *wmm_ptrs;
+       unsigned int size_of_regd, i;
 
        size_of_regd = sizeof(struct ieee80211_regdomain) +
                coll->n_rules * sizeof(struct ieee80211_reg_rule);
@@ -969,12 +936,6 @@ static int regdb_query_country(const struct fwdb_header *db,
        if (!regdom)
                return -ENOMEM;
 
-       wmm_ptrs = kcalloc(coll->n_rules, sizeof(*wmm_ptrs), GFP_KERNEL);
-       if (!wmm_ptrs) {
-               kfree(regdom);
-               return -ENOMEM;
-       }
-
        regdom->n_reg_rules = coll->n_rules;
        regdom->alpha2[0] = country->alpha2[0];
        regdom->alpha2[1] = country->alpha2[1];
@@ -1013,37 +974,11 @@ static int regdb_query_country(const struct fwdb_header *db,
                                1000 * be16_to_cpu(rule->cac_timeout);
                if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) {
                        u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2;
-                       struct ieee80211_wmm_rule *wmm_pos =
-                               find_wmm_ptr(wmm_ptrs, wmm_ptr, n_wmms);
-                       struct fwdb_wmm_rule *wmm;
-                       struct ieee80211_wmm_rule *wmm_rule;
-
-                       if (wmm_pos) {
-                               rrule->wmm_rule = wmm_pos;
-                               continue;
-                       }
-                       wmm = (void *)((u8 *)db + wmm_ptr);
-                       tmp_rd = krealloc(regdom, size_of_regd + (n_wmms + 1) *
-                                         sizeof(struct ieee80211_wmm_rule),
-                                         GFP_KERNEL);
-
-                       if (!tmp_rd) {
-                               kfree(regdom);
-                               kfree(wmm_ptrs);
-                               return -ENOMEM;
-                       }
-                       regdom = tmp_rd;
-
-                       wmm_rule = (struct ieee80211_wmm_rule *)
-                               ((u8 *)regdom + size_of_regd + n_wmms *
-                               sizeof(struct ieee80211_wmm_rule));
+                       struct fwdb_wmm_rule *wmm = (void *)((u8 *)db + wmm_ptr);
 
-                       set_wmm_rule(wmm_rule, wmm);
-                       wmm_ptrs[n_wmms].ptr = wmm_ptr;
-                       wmm_ptrs[n_wmms++].rule = wmm_rule;
+                       set_wmm_rule(rrule, wmm);
                }
        }
-       kfree(wmm_ptrs);
 
        return reg_schedule_apply(regdom);
 }
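
With the WMM parameters embedded in each rule, reg_copy_regd() and regdb_query_country() above no longer need the trailing wmm-rule array, the krealloc() growth, or the wmm_ptrs deduplication table: the regdomain becomes a single allocation sized from the rule count, copied rule by rule. A small stand-alone sketch of that flexible-array copy, with stand-in types:

#include <stdlib.h>
#include <string.h>

struct demo_rule { int lo, hi; };

struct demo_regdom {
	unsigned int n_rules;
	struct demo_rule rules[];	/* flexible array member */
};

static struct demo_regdom *demo_copy_regdom(const struct demo_regdom *src)
{
	size_t size = sizeof(*src) + src->n_rules * sizeof(struct demo_rule);
	struct demo_regdom *dst = malloc(size);

	if (!dst)
		return NULL;
	memcpy(dst, src, size);	/* one pass, no pointer relocation afterwards */
	return dst;
}
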
index e0825a019e9fb255adc2f4f749b08e241b2c2dde..959ed3acd2407c8c4beb4ce91b625315bf926109 100644 (file)
@@ -1456,7 +1456,7 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
                                          u8 *op_class)
 {
        u8 vht_opclass;
-       u16 freq = chandef->center_freq1;
+       u32 freq = chandef->center_freq1;
 
        if (freq >= 2412 && freq <= 2472) {
                if (chandef->width > NL80211_CHAN_WIDTH_40)
index 911ca6d3cb5a6cd7d056a04cf3df57d5833bafeb..bfe2dbea480ba8ef54e0cc24659aa15d267aea2a 100644 (file)
@@ -74,14 +74,14 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
                return 0;
 
        if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit)
-               return force_zc ? -ENOTSUPP : 0; /* fail or fallback */
+               return force_zc ? -EOPNOTSUPP : 0; /* fail or fallback */
 
        bpf.command = XDP_QUERY_XSK_UMEM;
 
        rtnl_lock();
        err = xdp_umem_query(dev, queue_id);
        if (err) {
-               err = err < 0 ? -ENOTSUPP : -EBUSY;
+               err = err < 0 ? -EOPNOTSUPP : -EBUSY;
                goto err_rtnl_unlock;
        }
 
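
The xdp_umem hunks swap ENOTSUPP for EOPNOTSUPP because ENOTSUPP (524) is a kernel-internal value with no libc string, so user space would see "Unknown error 524", while EOPNOTSUPP maps to a proper errno. A quick userspace illustration of the difference (glibc wording assumed):

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	printf("EOPNOTSUPP: %s\n", strerror(EOPNOTSUPP));	/* "Operation not supported" */
	printf("524:        %s\n", strerror(524));		/* "Unknown error 524" */
	return 0;
}
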
index 7c98f60e226677ca8c0c81a1c665d150a0906f2a..ce53639a864a14fa13d85710af4b875fd477217f 100644 (file)
@@ -153,18 +153,14 @@ cc-fullversion = $(shell $(CONFIG_SHELL) \
 # Usage:  EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1)
 cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4))
 
-# cc-if-fullversion
-# Usage:  EXTRA_CFLAGS += $(call cc-if-fullversion, -lt, 040502, -O1)
-cc-if-fullversion = $(shell [ $(cc-fullversion) $(1) $(2) ] && echo $(3) || echo $(4))
-
 # cc-ldoption
 # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both)
 cc-ldoption = $(call try-run,\
        $(CC) $(1) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
 
 # ld-option
-# Usage: LDFLAGS += $(call ld-option, -X, -Y)
-ld-option = $(call try-run, $(LD) $(LDFLAGS) $(1) -v,$(1),$(2),$(3))
+# Usage: KBUILD_LDFLAGS += $(call ld-option, -X, -Y)
+ld-option = $(call try-run, $(LD) $(KBUILD_LDFLAGS) $(1) -v,$(1),$(2),$(3))
 
 # ar-option
 # Usage: KBUILD_ARFLAGS := $(call ar-option,D)
index 93b8e24b0e15e5049f0edeb9d8a47e7fa72345b3..5a2d1c9578a0ce19d764dcc8d84740234feb58c6 100644 (file)
@@ -190,7 +190,7 @@ cmd_modversions_c =                                                         \
                $(call cmd_gensymtypes_c,$(KBUILD_SYMTYPES),$(@:.o=.symtypes))  \
                    > $(@D)/.tmp_$(@F:.o=.ver);                                 \
                                                                                \
-               $(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F)                      \
+               $(LD) $(KBUILD_LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F)               \
                        -T $(@D)/.tmp_$(@F:.o=.ver);                            \
                rm -f $(@D)/.tmp_$(@F) $(@D)/.tmp_$(@F:.o=.ver);                \
        else                                                                    \
@@ -220,7 +220,7 @@ sub_cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH
        "$(if $(CONFIG_CPU_BIG_ENDIAN),big,little)" \
        "$(if $(CONFIG_64BIT),64,32)" \
        "$(OBJDUMP)" "$(OBJCOPY)" "$(CC) $(KBUILD_CFLAGS)" \
-       "$(LD)" "$(NM)" "$(RM)" "$(MV)" \
+       "$(LD) $(KBUILD_LDFLAGS)" "$(NM)" "$(RM)" "$(MV)" \
        "$(if $(part-of-module),1,0)" "$(@)";
 recordmcount_source := $(srctree)/scripts/recordmcount.pl
 endif # BUILD_C_RECORDMCOUNT
@@ -246,8 +246,6 @@ objtool_args += --no-fp
 endif
 ifdef CONFIG_GCOV_KERNEL
 objtool_args += --no-unreachable
-else
-objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable)
 endif
 ifdef CONFIG_RETPOLINE
 ifneq ($(RETPOLINE_CFLAGS),)
@@ -394,7 +392,7 @@ cmd_modversions_S =                                                         \
                $(call cmd_gensymtypes_S,$(KBUILD_SYMTYPES),$(@:.o=.symtypes))  \
                    > $(@D)/.tmp_$(@F:.o=.ver);                                 \
                                                                                \
-               $(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F)                      \
+               $(LD) $(KBUILD_LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F)               \
                        -T $(@D)/.tmp_$(@F:.o=.ver);                            \
                rm -f $(@D)/.tmp_$(@F) $(@D)/.tmp_$(@F:.o=.ver);                \
        else                                                                    \
index df0fff2526194d37641b4bf34a865c2a3a867635..61e596650ed314c7d655fbabb3fce675f41bb4ed 100644 (file)
@@ -162,7 +162,7 @@ a_flags        = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE)     \
 cpp_flags      = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE)     \
                 $(__cpp_flags)
 
-ld_flags       = $(LDFLAGS) $(ldflags-y) $(LDFLAGS_$(@F))
+ld_flags       = $(KBUILD_LDFLAGS) $(ldflags-y) $(LDFLAGS_$(@F))
 
 DTC_INCLUDE    := $(srctree)/scripts/dtc/include-prefixes
 
index dd92dbbbaa687b73f31b922187c0da15f66266e0..7d4af0d0accb34a990098a80062d289168c8fe5e 100644 (file)
@@ -120,7 +120,7 @@ ARCH_POSTLINK := $(wildcard $(srctree)/arch/$(SRCARCH)/Makefile.postlink)
 # Step 6), final link of the modules with optional arch pass after final link
 quiet_cmd_ld_ko_o = LD [M]  $@
       cmd_ld_ko_o =                                                     \
-       $(LD) -r $(LDFLAGS)                                             \
+       $(LD) -r $(KBUILD_LDFLAGS)                                      \
                  $(KBUILD_LDFLAGS_MODULE) $(LDFLAGS_MODULE)             \
                  -o $@ $(filter-out FORCE,$^) ;                         \
        $(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true)
index 5219280bf7ff351a742db19e6a35748f77efbeb8..161b0224d6ae9ca927269664eaded3bcce41e91f 100755 (executable)
@@ -380,6 +380,7 @@ our $Attribute      = qr{
                        __noclone|
                        __deprecated|
                        __read_mostly|
+                       __ro_after_init|
                        __kprobes|
                        $InitAttribute|
                        ____cacheline_aligned|
@@ -3311,7 +3312,7 @@ sub process {
                        # known declaration macros
                      $sline =~ /^\+\s+$declaration_macros/ ||
                        # start of struct or union or enum
-                     $sline =~ /^\+\s+(?:union|struct|enum|typedef)\b/ ||
+                     $sline =~ /^\+\s+(?:static\s+)?(?:const\s+)?(?:union|struct|enum|typedef)\b/ ||
                        # start or end of block or continuation of declaration
                      $sline =~ /^\+\s+(?:$|[\{\}\.\#\"\?\:\(\[])/ ||
                        # bitfield continuation
index dbf0a31eb111c9ea166f090f78932a0b51d68faa..e65fbc3079d47bec7474aba6f3fbf96d258f2d99 100755 (executable)
@@ -12,7 +12,7 @@
 
 compiler="$*"
 
-if !( $compiler --version | grep -q clang) ; then
+if ! ( $compiler --version | grep -q clang) ; then
        echo 0
        exit 1
 fi
index 92b20913055f9c911451a3a4ff0205da29237b21..d819275b7fde99601de25f7afc7cf841e69778a6 100644 (file)
@@ -35,8 +35,7 @@ statement S;
 
 * x = (T)\(kmalloc(E1, ...)\|vmalloc(E1)\|dma_alloc_coherent(...,E1,...)\|
   kmalloc_node(E1, ...)\|kmem_cache_alloc(...)\|kmem_alloc(E1, ...)\|
-  devm_kmalloc(...,E1,...)\|kvmalloc(E1, ...)\|pci_alloc_consistent(...,E1,...)\|
-  kvmalloc_node(E1,...)\);
+  devm_kmalloc(...,E1,...)\|kvmalloc(E1, ...)\|kvmalloc_node(E1,...)\);
   if ((x==NULL) || ...) S
 * memset((T2)x,0,E1);
 
@@ -124,15 +123,6 @@ statement S;
 - x = (T)kvmalloc(E1,E2);
 + x = (T)kvzalloc(E1,E2);
 |
-- x = pci_alloc_consistent(E2,E1,E3);
-+ x = pci_zalloc_consistent(E2,E1,E3);
-|
-- x = (T *)pci_alloc_consistent(E2,E1,E3);
-+ x = pci_zalloc_consistent(E2,E1,E3);
-|
-- x = (T)pci_alloc_consistent(E2,E1,E3);
-+ x = (T)pci_zalloc_consistent(E2,E1,E3);
-|
 - x = kvmalloc_node(E1,E2,E3);
 + x = kvzalloc_node(E1,E2,E3);
 |
@@ -388,35 +378,6 @@ x << r7.x;
 msg="WARNING: kvzalloc should be used for %s, instead of kvmalloc/memset" % (x)
 coccilib.report.print_report(p[0], msg)
 
-//-----------------------------------------------------------------
-@r8 depends on org || report@
-type T, T2;
-expression x;
-expression E1,E2,E3;
-statement S;
-position p;
-@@
-
- x = (T)pci_alloc_consistent@p(E2,E1,E3);
- if ((x==NULL) || ...) S
- memset((T2)x,0,E1);
-
-@script:python depends on org@
-p << r8.p;
-x << r8.x;
-@@
-
-msg="%s" % (x)
-msg_safe=msg.replace("[","@(").replace("]",")")
-coccilib.org.print_todo(p[0], msg_safe)
-
-@script:python depends on report@
-p << r8.p;
-x << r8.x;
-@@
-
-msg="WARNING: pci_zalloc_consistent should be used for %s, instead of pci_alloc_consistent/memset" % (x)
-coccilib.report.print_report(p[0], msg)
 //-----------------------------------------------------------------
 @r9 depends on org || report@
 type T, T2;
index 999d585eaa7358dd813b8b0d5a204fc1ea48ca3c..e083bcae343f3e71290e433b7fd50861fd819ca4 100755 (executable)
@@ -11,13 +11,14 @@ DEPMOD=$1
 KERNELRELEASE=$2
 
 if ! test -r System.map ; then
+       echo "Warning: modules_install: missing 'System.map' file. Skipping depmod." >&2
        exit 0
 fi
 
 if [ -z $(command -v $DEPMOD) ]; then
-       echo "'make modules_install' requires $DEPMOD. Please install it." >&2
+       echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2
        echo "This is probably in the kmod package." >&2
-       exit 1
+       exit 0
 fi
 
 # older versions of depmod require the version string to start with three
index 9cac65b7419c6aeb93252e7016d7cbfda070cb8d..1c943e03eaf2f838dee90edc7e908476f3b2eced 100644 (file)
@@ -9,21 +9,11 @@ dtc-objs      := dtc.o flattree.o fstree.o data.o livetree.o treesource.o \
 dtc-objs       += dtc-lexer.lex.o dtc-parser.tab.o
 
 # Source files need to get at the userspace version of libfdt_env.h to compile
+HOST_EXTRACFLAGS := -I$(src)/libfdt
 
-HOSTCFLAGS_DTC := -I$(src) -I$(src)/libfdt
-
-HOSTCFLAGS_checks.o := $(HOSTCFLAGS_DTC)
-HOSTCFLAGS_data.o := $(HOSTCFLAGS_DTC)
-HOSTCFLAGS_dtc.o := $(HOSTCFLAGS_DTC)
-HOSTCFLAGS_flattree.o := $(HOSTCFLAGS_DTC)
-HOSTCFLAGS_fstree.o := $(HOSTCFLAGS_DTC)
-HOSTCFLAGS_livetree.o := $(HOSTCFLAGS_DTC)
-HOSTCFLAGS_srcpos.o := $(HOSTCFLAGS_DTC)
-HOSTCFLAGS_treesource.o := $(HOSTCFLAGS_DTC)
-HOSTCFLAGS_util.o := $(HOSTCFLAGS_DTC)
-
-HOSTCFLAGS_dtc-lexer.lex.o := $(HOSTCFLAGS_DTC)
-HOSTCFLAGS_dtc-parser.tab.o := $(HOSTCFLAGS_DTC)
+# Generated files need one more search path to include headers in source tree
+HOSTCFLAGS_dtc-lexer.lex.o := -I$(src)
+HOSTCFLAGS_dtc-parser.tab.o := -I$(src)
 
 # dependencies on generated files need to be listed explicitly
 $(obj)/dtc-lexer.lex.o: $(obj)/dtc-parser.tab.h
index 7430a7c77a4a1ddb586519d5d38f215cd300c675..cb0c889e13aa05818e222bbfd2cef9f082a29655 100644 (file)
@@ -2,7 +2,7 @@ preferred-plugin-hostcc := $(if-success,[ $(gcc-version) -ge 40800 ],$(HOSTCXX),
 
 config PLUGIN_HOSTCC
        string
-       default "$(shell,$(srctree)/scripts/gcc-plugin.sh "$(preferred-plugin-hostcc)" "$(HOSTCXX)" "$(CC)")"
+       default "$(shell,$(srctree)/scripts/gcc-plugin.sh "$(preferred-plugin-hostcc)" "$(HOSTCXX)" "$(CC)")" if CC_IS_GCC
        help
          Host compiler used to build GCC plugins.  This can be $(HOSTCXX),
          $(HOSTCC), or a null string if GCC plugin is unsupported.
index 8d8791069abf8626209ccc6ccb40714b8a3ef82a..67ed9f6ccdf8f09d8cb193d864950ed4e14ed9f7 100644 (file)
@@ -3,7 +3,8 @@
 # Kernel configuration targets
 # These targets are used from top-level makefile
 
-PHONY += xconfig gconfig menuconfig config localmodconfig localyesconfig
+PHONY += xconfig gconfig menuconfig config localmodconfig localyesconfig \
+       build_menuconfig build_nconfig build_gconfig build_xconfig
 
 ifdef KBUILD_KCONFIG
 Kconfig := $(KBUILD_KCONFIG)
@@ -33,6 +34,14 @@ config: $(obj)/conf
 nconfig: $(obj)/nconf
        $< $(silent) $(Kconfig)
 
+build_menuconfig: $(obj)/mconf
+
+build_nconfig: $(obj)/nconf
+
+build_gconfig: $(obj)/gconf
+
+build_xconfig: $(obj)/qconf
+
 localyesconfig localmodconfig: $(obj)/conf
        $(Q)perl $(srctree)/$(src)/streamline_config.pl --$@ $(srctree) $(Kconfig) > .tmp.config
        $(Q)if [ -f .config ]; then                                     \
@@ -169,7 +178,7 @@ HOSTLDLIBS_nconf    = $(shell . $(obj)/.nconf-cfg && echo $$libs)
 HOSTCFLAGS_nconf.o     = $(shell . $(obj)/.nconf-cfg && echo $$cflags)
 HOSTCFLAGS_nconf.gui.o = $(shell . $(obj)/.nconf-cfg && echo $$cflags)
 
-$(obj)/nconf.o: $(obj)/.nconf-cfg
+$(obj)/nconf.o $(obj)/nconf.gui.o: $(obj)/.nconf-cfg
 
 # mconf: Used for the menuconfig target based on lxdialog
 hostprogs-y    += mconf
@@ -180,7 +189,8 @@ HOSTLDLIBS_mconf = $(shell . $(obj)/.mconf-cfg && echo $$libs)
 $(foreach f, mconf.o $(lxdialog), \
   $(eval HOSTCFLAGS_$f = $$(shell . $(obj)/.mconf-cfg && echo $$$$cflags)))
 
-$(addprefix $(obj)/, mconf.o $(lxdialog)): $(obj)/.mconf-cfg
+$(obj)/mconf.o: $(obj)/.mconf-cfg
+$(addprefix $(obj)/lxdialog/, $(lxdialog)): $(obj)/.mconf-cfg
 
 # qconf: Used for the xconfig target based on Qt
 hostprogs-y    += qconf
@@ -211,7 +221,6 @@ $(obj)/zconf.tab.o: $(obj)/zconf.lex.c
 
 # check if necessary packages are available, and configure build flags
 define filechk_conf_cfg
-       $(CONFIG_SHELL) $(srctree)/scripts/kconfig/check-pkgconfig.sh; \
        $(CONFIG_SHELL) $<
 endef
 
diff --git a/scripts/kconfig/check-pkgconfig.sh b/scripts/kconfig/check-pkgconfig.sh
deleted file mode 100644 (file)
index 7a1c40b..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-# Check for pkg-config presence
-
-if [ -z $(command -v pkg-config) ]; then
-       echo "'make *config' requires 'pkg-config'. Please install it." 1>&2
-       exit 1
-fi
index b35cc93039791a1383e3d0d790ff7837be569a7d..7b2b37260669e333390f0c995990fe0fb65c9f06 100644 (file)
@@ -508,6 +508,11 @@ int main(int ac, char **av)
                input_mode = (enum input_mode)opt;
                switch (opt) {
                case syncconfig:
+                       /*
+                        * syncconfig is invoked during the build stage.
+                        * Suppress distracting "configuration written to ..."
+                        */
+                       conf_set_message_callback(NULL);
                        sync_kconfig = 1;
                        break;
                case defconfig:
index 533b3d8f8f08dbab20fda2b3488d342f47ede5a2..480ecd8b9f415693c0747478b2021b5e972a42bb 100755 (executable)
@@ -3,6 +3,13 @@
 
 PKG="gtk+-2.0 gmodule-2.0 libglade-2.0"
 
+if [ -z "$(command -v pkg-config)" ]; then
+       echo >&2 "*"
+       echo >&2 "* 'make gconfig' requires 'pkg-config'. Please install it."
+       echo >&2 "*"
+       exit 1
+fi
+
 if ! pkg-config --exists $PKG; then
        echo >&2 "*"
        echo >&2 "* Unable to find the GTK+ installation. Please make sure that"
index e6f9facd00772f3779c6072d88f174c7302aa6dc..c812872d7f9d78697d40debbe636674419e646a4 100755 (executable)
@@ -4,20 +4,23 @@
 PKG="ncursesw"
 PKG2="ncurses"
 
-if pkg-config --exists $PKG; then
-       echo cflags=\"$(pkg-config --cflags $PKG)\"
-       echo libs=\"$(pkg-config --libs $PKG)\"
-       exit 0
-fi
+if [ -n "$(command -v pkg-config)" ]; then
+       if pkg-config --exists $PKG; then
+               echo cflags=\"$(pkg-config --cflags $PKG)\"
+               echo libs=\"$(pkg-config --libs $PKG)\"
+               exit 0
+       fi
 
-if pkg-config --exists $PKG2; then
-       echo cflags=\"$(pkg-config --cflags $PKG2)\"
-       echo libs=\"$(pkg-config --libs $PKG2)\"
-       exit 0
+       if pkg-config --exists $PKG2; then
+               echo cflags=\"$(pkg-config --cflags $PKG2)\"
+               echo libs=\"$(pkg-config --libs $PKG2)\"
+               exit 0
+       fi
 fi
 
-# Unfortunately, some distributions (e.g. openSUSE) cannot find ncurses
-# by pkg-config.
+# Check the default paths in case pkg-config is not installed.
+# (Even if it is installed, some distributions such as openSUSE cannot
+# find ncurses by pkg-config.)
 if [ -f /usr/include/ncursesw/ncurses.h ]; then
        echo cflags=\"-D_GNU_SOURCE -I/usr/include/ncursesw\"
        echo libs=\"-lncursesw\"
index 83b5836615fb04a9cf389a2c978b75d4f5aa4f50..143c05fec1614bafe2f5f70dec85da3849e8225e 100644 (file)
@@ -490,7 +490,6 @@ static void build_conf(struct menu *menu)
                        switch (prop->type) {
                        case P_MENU:
                                child_count++;
-                               prompt = prompt;
                                if (single_menu_mode) {
                                        item_make("%s%*c%s",
                                                  menu->data ? "-->" : "++>",
index 42f5ac73548e4ba002506f4d38db920f8368e3c7..001559ef0a60b539c8615c23d4bcc94be5eaf59f 100644 (file)
@@ -4,20 +4,23 @@
 PKG="ncursesw menuw panelw"
 PKG2="ncurses menu panel"
 
-if pkg-config --exists $PKG; then
-       echo cflags=\"$(pkg-config --cflags $PKG)\"
-       echo libs=\"$(pkg-config --libs $PKG)\"
-       exit 0
-fi
+if [ -n "$(command -v pkg-config)" ]; then
+       if pkg-config --exists $PKG; then
+               echo cflags=\"$(pkg-config --cflags $PKG)\"
+               echo libs=\"$(pkg-config --libs $PKG)\"
+               exit 0
+       fi
 
-if pkg-config --exists $PKG2; then
-       echo cflags=\"$(pkg-config --cflags $PKG2)\"
-       echo libs=\"$(pkg-config --libs $PKG2)\"
-       exit 0
+       if pkg-config --exists $PKG2; then
+               echo cflags=\"$(pkg-config --cflags $PKG2)\"
+               echo libs=\"$(pkg-config --libs $PKG2)\"
+               exit 0
+       fi
 fi
 
-# Unfortunately, some distributions (e.g. openSUSE) cannot find ncurses
-# by pkg-config.
+# Check the default paths in case pkg-config is not installed.
+# (Even if it is installed, some distributions such as openSUSE cannot
+# find ncurses by pkg-config.)
 if [ -f /usr/include/ncursesw/ncurses.h ]; then
        echo cflags=\"-D_GNU_SOURCE -I/usr/include/ncursesw\"
        echo libs=\"-lncursesw -lmenuw -lpanelw\"
index 0862e15625366058f421d8b4346eb7e795d5a1e5..02ccc0ae103187a4a1f9c56bac3eca08823087b8 100755 (executable)
@@ -4,6 +4,13 @@
 PKG="Qt5Core Qt5Gui Qt5Widgets"
 PKG2="QtCore QtGui"
 
+if [ -z "$(command -v pkg-config)" ]; then
+       echo >&2 "*"
+       echo >&2 "* 'make xconfig' requires 'pkg-config'. Please install it."
+       echo >&2 "*"
+       exit 1
+fi
+
 if pkg-config --exists $PKG; then
        echo cflags=\"-std=c++11 -fPIC $(pkg-config --cflags Qt5Core Qt5Gui Qt5Widgets)\"
        echo libs=\"$(pkg-config --libs $PKG)\"
index 4ec8b1f0d42c533480054136088b3d157b1be545..703b9b899ee9c6fd0b2f2de9411d8be5b4413cbe 100644 (file)
@@ -1011,7 +1011,7 @@ static struct dep_stack {
        struct dep_stack *prev, *next;
        struct symbol *sym;
        struct property *prop;
-       struct expr *expr;
+       struct expr **expr;
 } *check_top;
 
 static void dep_stack_insert(struct dep_stack *stack, struct symbol *sym)
@@ -1076,31 +1076,42 @@ static void sym_check_print_recursive(struct symbol *last_sym)
                        fprintf(stderr, "%s:%d:error: recursive dependency detected!\n",
                                prop->file->name, prop->lineno);
 
-               if (stack->expr) {
-                       fprintf(stderr, "%s:%d:\tsymbol %s %s value contains %s\n",
-                               prop->file->name, prop->lineno,
+               if (sym_is_choice(sym)) {
+                       fprintf(stderr, "%s:%d:\tchoice %s contains symbol %s\n",
+                               menu->file->name, menu->lineno,
                                sym->name ? sym->name : "<choice>",
-                               prop_get_type_name(prop->type),
                                next_sym->name ? next_sym->name : "<choice>");
-               } else if (stack->prop) {
+               } else if (sym_is_choice_value(sym)) {
+                       fprintf(stderr, "%s:%d:\tsymbol %s is part of choice %s\n",
+                               menu->file->name, menu->lineno,
+                               sym->name ? sym->name : "<choice>",
+                               next_sym->name ? next_sym->name : "<choice>");
+               } else if (stack->expr == &sym->dir_dep.expr) {
                        fprintf(stderr, "%s:%d:\tsymbol %s depends on %s\n",
                                prop->file->name, prop->lineno,
                                sym->name ? sym->name : "<choice>",
                                next_sym->name ? next_sym->name : "<choice>");
-               } else if (sym_is_choice(sym)) {
-                       fprintf(stderr, "%s:%d:\tchoice %s contains symbol %s\n",
-                               menu->file->name, menu->lineno,
+               } else if (stack->expr == &sym->rev_dep.expr) {
+                       fprintf(stderr, "%s:%d:\tsymbol %s is selected by %s\n",
+                               prop->file->name, prop->lineno,
                                sym->name ? sym->name : "<choice>",
                                next_sym->name ? next_sym->name : "<choice>");
-               } else if (sym_is_choice_value(sym)) {
-                       fprintf(stderr, "%s:%d:\tsymbol %s is part of choice %s\n",
-                               menu->file->name, menu->lineno,
+               } else if (stack->expr == &sym->implied.expr) {
+                       fprintf(stderr, "%s:%d:\tsymbol %s is implied by %s\n",
+                               prop->file->name, prop->lineno,
+                               sym->name ? sym->name : "<choice>",
+                               next_sym->name ? next_sym->name : "<choice>");
+               } else if (stack->expr) {
+                       fprintf(stderr, "%s:%d:\tsymbol %s %s value contains %s\n",
+                               prop->file->name, prop->lineno,
                                sym->name ? sym->name : "<choice>",
+                               prop_get_type_name(prop->type),
                                next_sym->name ? next_sym->name : "<choice>");
                } else {
-                       fprintf(stderr, "%s:%d:\tsymbol %s is selected by %s\n",
+                       fprintf(stderr, "%s:%d:\tsymbol %s %s is visible depending on %s\n",
                                prop->file->name, prop->lineno,
                                sym->name ? sym->name : "<choice>",
+                               prop_get_type_name(prop->type),
                                next_sym->name ? next_sym->name : "<choice>");
                }
        }
@@ -1157,12 +1168,26 @@ static struct symbol *sym_check_sym_deps(struct symbol *sym)
 
        dep_stack_insert(&stack, sym);
 
+       stack.expr = &sym->dir_dep.expr;
+       sym2 = sym_check_expr_deps(sym->dir_dep.expr);
+       if (sym2)
+               goto out;
+
+       stack.expr = &sym->rev_dep.expr;
        sym2 = sym_check_expr_deps(sym->rev_dep.expr);
        if (sym2)
                goto out;
 
+       stack.expr = &sym->implied.expr;
+       sym2 = sym_check_expr_deps(sym->implied.expr);
+       if (sym2)
+               goto out;
+
+       stack.expr = NULL;
+
        for (prop = sym->prop; prop; prop = prop->next) {
-               if (prop->type == P_CHOICE || prop->type == P_SELECT)
+               if (prop->type == P_CHOICE || prop->type == P_SELECT ||
+                   prop->type == P_IMPLY)
                        continue;
                stack.prop = prop;
                sym2 = sym_check_expr_deps(prop->visible.expr);
@@ -1170,7 +1195,7 @@ static struct symbol *sym_check_sym_deps(struct symbol *sym)
                        break;
                if (prop->type != P_DEFAULT || sym_is_choice(sym))
                        continue;
-               stack.expr = prop->expr;
+               stack.expr = &prop->expr;
                sym2 = sym_check_expr_deps(prop->expr);
                if (sym2)
                        break;
@@ -1248,9 +1273,6 @@ struct symbol *sym_check_deps(struct symbol *sym)
                sym->flags &= ~SYMBOL_CHECK;
        }
 
-       if (sym2 && sym2 == sym)
-               sym2 = NULL;
-
        return sym2;
 }
 
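
The symbol.c changes above let the recursive-dependency report name the relationship precisely: sym_check_sym_deps() now records the address of the expression list it is walking (&sym->dir_dep.expr, &sym->rev_dep.expr, &sym->implied.expr, or &prop->expr), and sym_check_print_recursive() compares that address to choose between "depends on", "is selected by", "is implied by", and the default-value wording. A toy illustration of the address-tagging idea, with invented names:

#include <stdio.h>

struct demo_sym { int dir_dep, rev_dep, implied; };

static const char *demo_relation(const struct demo_sym *s, const int *which)
{
	if (which == &s->dir_dep)
		return "depends on";
	if (which == &s->rev_dep)
		return "is selected by";
	if (which == &s->implied)
		return "is implied by";
	return "references";
}

int main(void)
{
	struct demo_sym s = { 0, 0, 0 };

	puts(demo_relation(&s, &s.rev_dep));	/* prints "is selected by" */
	return 0;
}
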
similarity index 93%
rename from scripts/kconfig/tests/warn_recursive_dep/Kconfig
rename to scripts/kconfig/tests/err_recursive_dep/Kconfig
index a65bfcb7137eea94c9dbb5053673367e4352f00d..ebdb3ffd87170a8b114efa168428bf1e87f9df6b 100644 (file)
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
 # depends on itself
 
 config A
@@ -31,7 +33,6 @@ config D2
        bool
 
 # depends on and imply
-# This is not recursive dependency
 
 config E1
        bool "E1"
diff --git a/scripts/kconfig/tests/err_recursive_dep/__init__.py b/scripts/kconfig/tests/err_recursive_dep/__init__.py
new file mode 100644 (file)
index 0000000..5f3821b
--- /dev/null
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+"""
+Detect recursive dependency error.
+
+Recursive dependency should be treated as an error.
+"""
+
+def test(conf):
+    assert conf.oldaskconfig() == 1
+    assert conf.stderr_contains('expected_stderr')
diff --git a/scripts/kconfig/tests/err_recursive_dep/expected_stderr b/scripts/kconfig/tests/err_recursive_dep/expected_stderr
new file mode 100644 (file)
index 0000000..84679b1
--- /dev/null
@@ -0,0 +1,38 @@
+Kconfig:11:error: recursive dependency detected!
+Kconfig:11:    symbol B is selected by B
+For a resolution refer to Documentation/kbuild/kconfig-language.txt
+subsection "Kconfig recursive dependency limitations"
+
+Kconfig:5:error: recursive dependency detected!
+Kconfig:5:     symbol A depends on A
+For a resolution refer to Documentation/kbuild/kconfig-language.txt
+subsection "Kconfig recursive dependency limitations"
+
+Kconfig:17:error: recursive dependency detected!
+Kconfig:17:    symbol C1 depends on C2
+Kconfig:21:    symbol C2 depends on C1
+For a resolution refer to Documentation/kbuild/kconfig-language.txt
+subsection "Kconfig recursive dependency limitations"
+
+Kconfig:32:error: recursive dependency detected!
+Kconfig:32:    symbol D2 is selected by D1
+Kconfig:27:    symbol D1 depends on D2
+For a resolution refer to Documentation/kbuild/kconfig-language.txt
+subsection "Kconfig recursive dependency limitations"
+
+Kconfig:37:error: recursive dependency detected!
+Kconfig:37:    symbol E1 depends on E2
+Kconfig:42:    symbol E2 is implied by E1
+For a resolution refer to Documentation/kbuild/kconfig-language.txt
+subsection "Kconfig recursive dependency limitations"
+
+Kconfig:60:error: recursive dependency detected!
+Kconfig:60:    symbol G depends on G
+For a resolution refer to Documentation/kbuild/kconfig-language.txt
+subsection "Kconfig recursive dependency limitations"
+
+Kconfig:51:error: recursive dependency detected!
+Kconfig:51:    symbol F2 depends on F1
+Kconfig:49:    symbol F1 default value contains F2
+For a resolution refer to Documentation/kbuild/kconfig-language.txt
+subsection "Kconfig recursive dependency limitations"
diff --git a/scripts/kconfig/tests/warn_recursive_dep/__init__.py b/scripts/kconfig/tests/warn_recursive_dep/__init__.py
deleted file mode 100644 (file)
index adb2195..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-"""
-Warn recursive inclusion.
-
-Recursive dependency should be warned.
-"""
-
-def test(conf):
-    assert conf.oldaskconfig() == 0
-    assert conf.stderr_contains('expected_stderr')
diff --git a/scripts/kconfig/tests/warn_recursive_dep/expected_stderr b/scripts/kconfig/tests/warn_recursive_dep/expected_stderr
deleted file mode 100644 (file)
index 3de807d..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-Kconfig:9:error: recursive dependency detected!
-Kconfig:9:     symbol B is selected by B
-For a resolution refer to Documentation/kbuild/kconfig-language.txt
-subsection "Kconfig recursive dependency limitations"
-
-Kconfig:3:error: recursive dependency detected!
-Kconfig:3:     symbol A depends on A
-For a resolution refer to Documentation/kbuild/kconfig-language.txt
-subsection "Kconfig recursive dependency limitations"
-
-Kconfig:15:error: recursive dependency detected!
-Kconfig:15:    symbol C1 depends on C2
-Kconfig:19:    symbol C2 depends on C1
-For a resolution refer to Documentation/kbuild/kconfig-language.txt
-subsection "Kconfig recursive dependency limitations"
-
-Kconfig:30:error: recursive dependency detected!
-Kconfig:30:    symbol D2 is selected by D1
-Kconfig:25:    symbol D1 depends on D2
-For a resolution refer to Documentation/kbuild/kconfig-language.txt
-subsection "Kconfig recursive dependency limitations"
-
-Kconfig:59:error: recursive dependency detected!
-Kconfig:59:    symbol G depends on G
-For a resolution refer to Documentation/kbuild/kconfig-language.txt
-subsection "Kconfig recursive dependency limitations"
-
-Kconfig:50:error: recursive dependency detected!
-Kconfig:50:    symbol F2 depends on F1
-Kconfig:48:    symbol F1 default value contains F2
index 4bf811c09f5940eb6b2b21821e38446884a3f413..c8cf45362bd6f223dc15be63438b1ce71dcbefa3 100755 (executable)
@@ -75,7 +75,7 @@ modpost_link()
                ${KBUILD_VMLINUX_LIBS}                          \
                --end-group"
 
-       ${LD} ${LDFLAGS} -r -o ${1} ${objects}
+       ${LD} ${KBUILD_LDFLAGS} -r -o ${1} ${objects}
 }
 
 # Link of vmlinux
@@ -95,7 +95,7 @@ vmlinux_link()
                        --end-group                             \
                        ${1}"
 
-               ${LD} ${LDFLAGS} ${LDFLAGS_vmlinux} -o ${2}     \
+               ${LD} ${KBUILD_LDFLAGS} ${LDFLAGS_vmlinux} -o ${2}      \
                        -T ${lds} ${objects}
        else
                objects="-Wl,--whole-archive                    \
index dc6d714e4dcb310817de378ec88e136263ab8030..0d998c54564dfdc5793faa968f72e8e985bce6c8 100644 (file)
@@ -672,7 +672,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
                        if (ELF_ST_TYPE(sym->st_info) == STT_SPARC_REGISTER)
                                break;
                        if (symname[0] == '.') {
-                               char *munged = strdup(symname);
+                               char *munged = NOFAIL(strdup(symname));
                                munged[0] = '_';
                                munged[1] = toupper(munged[1]);
                                symname = munged;
@@ -1318,7 +1318,7 @@ static Elf_Sym *find_elf_symbol2(struct elf_info *elf, Elf_Addr addr,
 static char *sec2annotation(const char *s)
 {
        if (match(s, init_exit_sections)) {
-               char *p = malloc(20);
+               char *p = NOFAIL(malloc(20));
                char *r = p;
 
                *p++ = '_';
@@ -1338,7 +1338,7 @@ static char *sec2annotation(const char *s)
                        strcat(p, " ");
                return r;
        } else {
-               return strdup("");
+               return NOFAIL(strdup(""));
        }
 }
 
@@ -2036,7 +2036,7 @@ void buf_write(struct buffer *buf, const char *s, int len)
 {
        if (buf->size - buf->pos < len) {
                buf->size += len + SZ;
-               buf->p = realloc(buf->p, buf->size);
+               buf->p = NOFAIL(realloc(buf->p, buf->size));
        }
        strncpy(buf->p + buf->pos, s, len);
        buf->pos += len;
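
The modpost hunks wrap bare strdup()/malloc()/realloc() calls in NOFAIL() so that an allocation failure stops the tool with a message instead of dereferencing NULL a few lines later. A rough userspace equivalent of the pattern (helper names here are invented for illustration, not the modpost implementation):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *demo_nofail(void *ptr, const char *what)
{
	if (!ptr) {
		fprintf(stderr, "out of memory: %s\n", what);
		exit(1);
	}
	return ptr;
}
#define DEMO_NOFAIL(expr) demo_nofail((expr), #expr)

int main(void)
{
	char *munged = DEMO_NOFAIL(strdup(".symbol"));

	munged[0] = '_';	/* safe: the pointer is known to be non-NULL */
	puts(munged);
	free(munged);
	return 0;
}
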
index fe06e77c15eb7072d1cb9c09d9481048984e03d7..f599031260d5178d10dd83554ac20d957c8b82af 100755 (executable)
@@ -389,6 +389,9 @@ if ($arch eq "x86_64") {
     $mcount_regex = "^\\s*([0-9a-fA-F]+):\\sR_RISCV_CALL\\s_mcount\$";
     $type = ".quad";
     $alignment = 2;
+} elsif ($arch eq "nds32") {
+    $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_NDS32_HI20_RELA\\s+_mcount\$";
+    $alignment = 2;
 } else {
     die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD";
 }
index 71f39410691b6be14774102000507c834c8ad493..79f7dd57d571e749dc3e36ed95a473cd599e0056 100755 (executable)
@@ -74,7 +74,7 @@ scm_version()
                fi
 
                # Check for uncommitted changes
-               if git diff-index --name-only HEAD | grep -qv "^scripts/package"; then
+               if git status -uno --porcelain | grep -qv '^.. scripts/package'; then
                        printf '%s' -dirty
                fi
 
diff --git a/scripts/subarch.include b/scripts/subarch.include
new file mode 100644 (file)
index 0000000..6506828
--- /dev/null
@@ -0,0 +1,13 @@
+# SUBARCH tells the usermode build what the underlying arch is.  That is set
+# first, and if a usermode build is happening, the "ARCH=um" on the command
+# line overrides the setting of ARCH below.  If a native build is happening,
+# then ARCH is assigned, getting whatever value it gets normally, and
+# SUBARCH is subsequently ignored.
+
+SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
+                                 -e s/sun4u/sparc64/ \
+                                 -e s/arm.*/arm/ -e s/sa110/arm/ \
+                                 -e s/s390x/s390/ -e s/parisc64/parisc/ \
+                                 -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
+                                 -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \
+                                 -e s/riscv.*/riscv/)
index 27d8b2688f755e974c0b168befc6c625529c3c66..d9aa521b52063501fce79062e35324a3c9046ec0 100644 (file)
@@ -57,7 +57,7 @@ config SECURITY_NETWORK
 config PAGE_TABLE_ISOLATION
        bool "Remove the kernel mapping in user mode"
        default y
-       depends on X86 && !UML
+       depends on (X86_64 || X86_PAE) && !UML
        help
          This feature reduces the number of hardware side channels by
          ensuring that the majority of kernel addresses are not mapped
index f2f22d00db18893a917006794c5a09e341aae2b0..4ccec1bcf6f54f261542a546458cab77c6af9e52 100644 (file)
@@ -79,7 +79,6 @@ int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
        struct aa_label *label = aa_secid_to_label(secid);
        int len;
 
-       AA_BUG(!secdata);
        AA_BUG(!seclen);
 
        if (!label)
index 711e89d8c4153b6123678772e83efe1092f4ae1f..3b602a1e27fa224e3a7628436ac6f93309a54c4a 100644 (file)
@@ -300,7 +300,7 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params,
        }
        dh_inputs.g_size = dlen;
 
-       dlen = dh_data_from_key(pcopy.private, &dh_inputs.key);
+       dlen = dh_data_from_key(pcopy.dh_private, &dh_inputs.key);
        if (dlen < 0) {
                ret = dlen;
                goto out2;
index 69517e18ef07fc241d18335e7f209a2b1ded96d1..08d5662039e38123e349bc5352c2b674280048f3 100644 (file)
@@ -129,7 +129,7 @@ static int snd_rawmidi_runtime_create(struct snd_rawmidi_substream *substream)
                runtime->avail = 0;
        else
                runtime->avail = runtime->buffer_size;
-       runtime->buffer = kvmalloc(runtime->buffer_size, GFP_KERNEL);
+       runtime->buffer = kvzalloc(runtime->buffer_size, GFP_KERNEL);
        if (!runtime->buffer) {
                kfree(runtime);
                return -ENOMEM;
@@ -655,7 +655,7 @@ static int resize_runtime_buffer(struct snd_rawmidi_runtime *runtime,
        if (params->avail_min < 1 || params->avail_min > params->buffer_size)
                return -EINVAL;
        if (params->buffer_size != runtime->buffer_size) {
-               newbuf = kvmalloc(params->buffer_size, GFP_KERNEL);
+               newbuf = kvzalloc(params->buffer_size, GFP_KERNEL);
                if (!newbuf)
                        return -ENOMEM;
                spin_lock_irq(&runtime->lock);
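
Both rawmidi allocations above move from kvmalloc() to kvzalloc() so the MIDI buffer starts out zeroed rather than holding stale heap contents that a reader could observe before the producer has written every byte. The same habit in userspace terms (illustrative only):

#include <stdlib.h>

/* Prefer a zeroing allocator when the buffer may be read before it has
 * been fully written. */
void *demo_alloc_buffer(size_t size)
{
	return calloc(1, size);		/* zeroed, unlike malloc(size) */
}
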
index 730ea91d9be886d9a16ff903ecc91f322c711e50..93676354f87f4e5c23d897c3d8cee568b6f69ac6 100644 (file)
@@ -263,6 +263,8 @@ do_registration(struct work_struct *work)
 error:
        mutex_unlock(&devices_mutex);
        snd_bebob_stream_destroy_duplex(bebob);
+       kfree(bebob->maudio_special_quirk);
+       bebob->maudio_special_quirk = NULL;
        snd_card_free(bebob->card);
        dev_info(&bebob->unit->device,
                 "Sound card registration failed: %d\n", err);
index bd55620c6a479315f6787eb26de2c73bf4913eb6..c266997ad299d93c71632bebba2c83db6cf9bc09 100644 (file)
@@ -96,17 +96,13 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit)
        struct fw_device *device = fw_parent_device(unit);
        int err, rcode;
        u64 date;
-       __le32 cues[3] = {
-               cpu_to_le32(MAUDIO_BOOTLOADER_CUE1),
-               cpu_to_le32(MAUDIO_BOOTLOADER_CUE2),
-               cpu_to_le32(MAUDIO_BOOTLOADER_CUE3)
-       };
+       __le32 *cues;
 
        /* check date of software used to build */
        err = snd_bebob_read_block(unit, INFO_OFFSET_SW_DATE,
                                   &date, sizeof(u64));
        if (err < 0)
-               goto end;
+               return err;
        /*
         * firmware version 5058 or later has date later than "20070401", but
         * 'date' is not null-terminated.
@@ -114,20 +110,28 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit)
        if (date < 0x3230303730343031LL) {
                dev_err(&unit->device,
                        "Use firmware version 5058 or later\n");
-               err = -ENOSYS;
-               goto end;
+               return -ENXIO;
        }
 
+       cues = kmalloc_array(3, sizeof(*cues), GFP_KERNEL);
+       if (!cues)
+               return -ENOMEM;
+
+       cues[0] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE1);
+       cues[1] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE2);
+       cues[2] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE3);
+
        rcode = fw_run_transaction(device->card, TCODE_WRITE_BLOCK_REQUEST,
                                   device->node_id, device->generation,
                                   device->max_speed, BEBOB_ADDR_REG_REQ,
-                                  cues, sizeof(cues));
+                                  cues, 3 * sizeof(*cues));
+       kfree(cues);
        if (rcode != RCODE_COMPLETE) {
                dev_err(&unit->device,
                        "Failed to send a cue to load firmware\n");
                err = -EIO;
        }
-end:
+
        return err;
 }
 
@@ -290,10 +294,6 @@ snd_bebob_maudio_special_discover(struct snd_bebob *bebob, bool is1814)
                bebob->midi_output_ports = 2;
        }
 end:
-       if (err < 0) {
-               kfree(params);
-               bebob->maudio_special_quirk = NULL;
-       }
        mutex_unlock(&bebob->mutex);
        return err;
 }
index 1f5e1d23f31a7132a5dfb5f3f34468b1517a89d0..ef689997d6a5b55f3d273ab4f8125fc87796cdd9 100644 (file)
@@ -49,6 +49,7 @@ static void dg00x_free(struct snd_dg00x *dg00x)
        fw_unit_put(dg00x->unit);
 
        mutex_destroy(&dg00x->mutex);
+       kfree(dg00x);
 }
 
 static void dg00x_card_free(struct snd_card *card)
index ad7a0a32557dc778e32acc6cb813cf4e56a54623..64c3cb0fb926ff8b9a51b21ec4d81cca464d3583 100644 (file)
@@ -146,6 +146,7 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable)
 {
        __le32 *reg;
        int i;
+       int err;
 
        reg = kcalloc(18, sizeof(__le32), GFP_KERNEL);
        if (reg == NULL)
@@ -163,9 +164,11 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable)
                        reg[i] = cpu_to_le32(0x00000001);
        }
 
-       return snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST,
-                                 FF400_FETCH_PCM_FRAMES, reg,
-                                 sizeof(__le32) * 18, 0);
+       err = snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST,
+                                FF400_FETCH_PCM_FRAMES, reg,
+                                sizeof(__le32) * 18, 0);
+       kfree(reg);
+       return err;
 }
 
 static void ff400_dump_sync_status(struct snd_ff *ff,
index 71a0613d3da040c49ef633f4877f98d8db219a99..f2d073365cf6365e5d3e5c841a7288a93c5b1ec5 100644 (file)
@@ -301,6 +301,8 @@ error:
        snd_efw_transaction_remove_instance(efw);
        snd_efw_stream_destroy_duplex(efw);
        snd_card_free(efw->card);
+       kfree(efw->resp_buf);
+       efw->resp_buf = NULL;
        dev_info(&efw->unit->device,
                 "Sound card registration failed: %d\n", err);
 }
index 1e5b2c8026355d9b788843e29990043249065b02..2ea8be6c858460dfb7c2b55fd380ca313d69c652 100644 (file)
@@ -130,6 +130,7 @@ static void oxfw_free(struct snd_oxfw *oxfw)
 
        kfree(oxfw->spec);
        mutex_destroy(&oxfw->mutex);
+       kfree(oxfw);
 }
 
 /*
@@ -207,6 +208,7 @@ static int detect_quirks(struct snd_oxfw *oxfw)
 static void do_registration(struct work_struct *work)
 {
        struct snd_oxfw *oxfw = container_of(work, struct snd_oxfw, dwork.work);
+       int i;
        int err;
 
        if (oxfw->registered)
@@ -269,7 +271,15 @@ error:
        snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
        if (oxfw->has_output)
                snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream);
+       for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; ++i) {
+               kfree(oxfw->tx_stream_formats[i]);
+               oxfw->tx_stream_formats[i] = NULL;
+               kfree(oxfw->rx_stream_formats[i]);
+               oxfw->rx_stream_formats[i] = NULL;
+       }
        snd_card_free(oxfw->card);
+       kfree(oxfw->spec);
+       oxfw->spec = NULL;
        dev_info(&oxfw->unit->device,
                 "Sound card registration failed: %d\n", err);
 }
index 44ad41fb7374070458de78e991330a744d3f0aff..d3fdc463a884e31e15518f2780c4ca080573a9de 100644 (file)
@@ -93,6 +93,7 @@ static void tscm_free(struct snd_tscm *tscm)
        fw_unit_put(tscm->unit);
 
        mutex_destroy(&tscm->mutex);
+       kfree(tscm);
 }
 
 static void tscm_card_free(struct snd_card *card)
index 1bd27576db98d50cc12382c209c3938fe9fe5e14..a835558ddbc9b560a5e456a1594693350263c3d9 100644 (file)
@@ -146,7 +146,8 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_decouple);
  */
 void snd_hdac_ext_link_stream_start(struct hdac_ext_stream *stream)
 {
-       snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL, 0, AZX_PPLCCTL_RUN);
+       snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL,
+                        AZX_PPLCCTL_RUN, AZX_PPLCCTL_RUN);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_ext_link_stream_start);
 
@@ -171,7 +172,8 @@ void snd_hdac_ext_link_stream_reset(struct hdac_ext_stream *stream)
 
        snd_hdac_ext_link_stream_clear(stream);
 
-       snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL, 0, AZX_PPLCCTL_STRST);
+       snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL,
+                        AZX_PPLCCTL_STRST, AZX_PPLCCTL_STRST);
        udelay(3);
        timeout = 50;
        do {
@@ -242,7 +244,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_link_set_stream_id);
 void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link,
                                 int stream)
 {
-       snd_hdac_updatew(link->ml_addr, AZX_REG_ML_LOSIDV, 0, (1 << stream));
+       snd_hdac_updatew(link->ml_addr, AZX_REG_ML_LOSIDV, (1 << stream), 0);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_ext_link_clear_stream_id);
 
@@ -415,7 +417,6 @@ void snd_hdac_ext_stream_spbcap_enable(struct hdac_bus *bus,
                                 bool enable, int index)
 {
        u32 mask = 0;
-       u32 register_mask = 0;
 
        if (!bus->spbcap) {
                dev_err(bus->dev, "Address of SPB capability is NULL\n");
@@ -424,12 +425,8 @@ void snd_hdac_ext_stream_spbcap_enable(struct hdac_bus *bus,
 
        mask |= (1 << index);
 
-       register_mask = readl(bus->spbcap + AZX_REG_SPB_SPBFCCTL);
-
-       mask |= register_mask;
-
        if (enable)
-               snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, 0, mask);
+               snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, mask, mask);
        else
                snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, mask, 0);
 }
@@ -503,7 +500,6 @@ void snd_hdac_ext_stream_drsm_enable(struct hdac_bus *bus,
                                bool enable, int index)
 {
        u32 mask = 0;
-       u32 register_mask = 0;
 
        if (!bus->drsmcap) {
                dev_err(bus->dev, "Address of DRSM capability is NULL\n");
@@ -512,12 +508,8 @@ void snd_hdac_ext_stream_drsm_enable(struct hdac_bus *bus,
 
        mask |= (1 << index);
 
-       register_mask = readl(bus->drsmcap + AZX_REG_SPB_SPBFCCTL);
-
-       mask |= register_mask;
-
        if (enable)
-               snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, 0, mask);
+               snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, mask, mask);
        else
                snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, mask, 0);
 }
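
All of the hdac_ext_stream fixes above come down to the (mask, value) semantics of snd_hdac_updatel()/snd_hdac_updatew(): the mask argument selects the bits being changed and the value argument supplies their new state, so setting a bit takes (bit, bit), clearing it takes (bit, 0), and the old (0, bit) form could only ever OR bits in, never clear them. A generic sketch of that read-modify-write behaviour (stand-in function, not the kernel macro):

/* reg holds the current register value; only the bits under mask change. */
static unsigned int demo_update_bits(unsigned int reg, unsigned int mask,
				     unsigned int val)
{
	return (reg & ~mask) | (val & mask);
}
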
index 560ec0986e1a56d46d9a5c7814cf4175a469249f..74244d8e2909090cfb3bfc54c57c9591332decdd 100644 (file)
@@ -40,6 +40,8 @@ static void azx_clear_corbrp(struct hdac_bus *bus)
  */
 void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus)
 {
+       WARN_ON_ONCE(!bus->rb.area);
+
        spin_lock_irq(&bus->reg_lock);
        /* CORB set up */
        bus->corb.addr = bus->rb.addr;
@@ -383,7 +385,7 @@ void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus)
 EXPORT_SYMBOL_GPL(snd_hdac_bus_exit_link_reset);
 
 /* reset codec link */
-static int azx_reset(struct hdac_bus *bus, bool full_reset)
+int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset)
 {
        if (!full_reset)
                goto skip_reset;
@@ -408,7 +410,7 @@ static int azx_reset(struct hdac_bus *bus, bool full_reset)
  skip_reset:
        /* check to see if controller is ready */
        if (!snd_hdac_chip_readb(bus, GCTL)) {
-               dev_dbg(bus->dev, "azx_reset: controller not ready!\n");
+               dev_dbg(bus->dev, "controller not ready!\n");
                return -EBUSY;
        }
 
@@ -423,6 +425,7 @@ static int azx_reset(struct hdac_bus *bus, bool full_reset)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(snd_hdac_bus_reset_link);
 
 /* enable interrupts */
 static void azx_int_enable(struct hdac_bus *bus)
@@ -477,15 +480,17 @@ bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset)
                return false;
 
        /* reset controller */
-       azx_reset(bus, full_reset);
+       snd_hdac_bus_reset_link(bus, full_reset);
 
-       /* initialize interrupts */
+       /* clear interrupts */
        azx_int_clear(bus);
-       azx_int_enable(bus);
 
        /* initialize the codec command I/O */
        snd_hdac_bus_init_cmd_io(bus);
 
+       /* enable interrupts after CORB/RIRB buffers are initialized above */
+       azx_int_enable(bus);
+
        /* program the position buffer */
        if (bus->use_posbuf && bus->posbuf.addr) {
                snd_hdac_chip_writel(bus, DPLBASE, (u32)bus->posbuf.addr);
index 90713741c2dcac91ba658a32ec2f8580394e90fb..6ebe817801ea9a2f5e0ef3c318ce1b2fad383446 100644 (file)
@@ -2540,7 +2540,7 @@ static int snd_emu10k1_fx8010_ioctl(struct snd_hwdep * hw, struct file *file, un
                emu->support_tlv = 1;
                return put_user(SNDRV_EMU10K1_VERSION, (int __user *)argp);
        case SNDRV_EMU10K1_IOCTL_INFO:
-               info = kmalloc(sizeof(*info), GFP_KERNEL);
+               info = kzalloc(sizeof(*info), GFP_KERNEL);
                if (!info)
                        return -ENOMEM;
                snd_emu10k1_fx8010_info(emu, info);
index 0a50855370348e37a52a2788463bc0a5ead4a5d1..26d348b47867d6d595aa8701a3cd779bf01d6204 100644 (file)
@@ -3935,7 +3935,8 @@ void snd_hda_bus_reset_codecs(struct hda_bus *bus)
 
        list_for_each_codec(codec, bus) {
                /* FIXME: maybe a better way needed for forced reset */
-               cancel_delayed_work_sync(&codec->jackpoll_work);
+               if (current_work() != &codec->jackpoll_work.work)
+                       cancel_delayed_work_sync(&codec->jackpoll_work);
 #ifdef CONFIG_PM
                if (hda_codec_is_power_on(codec)) {
                        hda_call_codec_suspend(codec);
index 1b2ce304152a65c52ce60322dbeec684965cf335..aa4c672dbaf73010aa26966173c403e6e8b0f4a7 100644 (file)
@@ -365,8 +365,10 @@ enum {
  */
 #ifdef SUPPORT_VGA_SWITCHEROO
 #define use_vga_switcheroo(chip)       ((chip)->use_vga_switcheroo)
+#define needs_eld_notify_link(chip)    ((chip)->need_eld_notify_link)
 #else
 #define use_vga_switcheroo(chip)       0
+#define needs_eld_notify_link(chip)    false
 #endif
 
 #define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
@@ -453,6 +455,7 @@ static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev,
 #endif
 
 static int azx_acquire_irq(struct azx *chip, int do_disconnect);
+static void set_default_power_save(struct azx *chip);
 
 /*
  * initialize the PCI registers
@@ -1201,6 +1204,10 @@ static int azx_runtime_idle(struct device *dev)
            azx_bus(chip)->codec_powered || !chip->running)
                return -EBUSY;
 
+       /* ELD notification gets broken when HD-audio bus is off */
+       if (needs_eld_notify_link(hda))
+               return -EBUSY;
+
        return 0;
 }
 
@@ -1298,6 +1305,36 @@ static bool azx_vs_can_switch(struct pci_dev *pci)
        return true;
 }
 
+/*
+ * The discrete GPU cannot power down unless the HDA controller runtime
+ * suspends, so activate runtime PM on codecs even if power_save == 0.
+ */
+static void setup_vga_switcheroo_runtime_pm(struct azx *chip)
+{
+       struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
+       struct hda_codec *codec;
+
+       if (hda->use_vga_switcheroo && !hda->need_eld_notify_link) {
+               list_for_each_codec(codec, &chip->bus)
+                       codec->auto_runtime_pm = 1;
+               /* reset the power save setup */
+               if (chip->running)
+                       set_default_power_save(chip);
+       }
+}
+
+static void azx_vs_gpu_bound(struct pci_dev *pci,
+                            enum vga_switcheroo_client_id client_id)
+{
+       struct snd_card *card = pci_get_drvdata(pci);
+       struct azx *chip = card->private_data;
+       struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
+
+       if (client_id == VGA_SWITCHEROO_DIS)
+               hda->need_eld_notify_link = 0;
+       setup_vga_switcheroo_runtime_pm(chip);
+}
+
 static void init_vga_switcheroo(struct azx *chip)
 {
        struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
@@ -1306,6 +1343,7 @@ static void init_vga_switcheroo(struct azx *chip)
                dev_info(chip->card->dev,
                         "Handle vga_switcheroo audio client\n");
                hda->use_vga_switcheroo = 1;
+               hda->need_eld_notify_link = 1; /* cleared in gpu_bound op */
                chip->driver_caps |= AZX_DCAPS_PM_RUNTIME;
                pci_dev_put(p);
        }
@@ -1314,6 +1352,7 @@ static void init_vga_switcheroo(struct azx *chip)
 static const struct vga_switcheroo_client_ops azx_vs_ops = {
        .set_gpu_state = azx_vs_set_state,
        .can_switch = azx_vs_can_switch,
+       .gpu_bound = azx_vs_gpu_bound,
 };
 
 static int register_vga_switcheroo(struct azx *chip)
@@ -1339,6 +1378,7 @@ static int register_vga_switcheroo(struct azx *chip)
 #define init_vga_switcheroo(chip)              /* NOP */
 #define register_vga_switcheroo(chip)          0
 #define check_hdmi_disabled(pci)       false
+#define setup_vga_switcheroo_runtime_pm(chip)  /* NOP */
 #endif /* SUPPORT_VGA_SWITCHER */
 
 /*
@@ -1352,6 +1392,7 @@ static int azx_free(struct azx *chip)
 
        if (azx_has_pm_runtime(chip) && chip->running)
                pm_runtime_get_noresume(&pci->dev);
+       chip->running = 0;
 
        azx_del_card_list(chip);
 
@@ -2230,6 +2271,25 @@ static struct snd_pci_quirk power_save_blacklist[] = {
 };
 #endif /* CONFIG_PM */
 
+static void set_default_power_save(struct azx *chip)
+{
+       int val = power_save;
+
+#ifdef CONFIG_PM
+       if (pm_blacklist) {
+               const struct snd_pci_quirk *q;
+
+               q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
+               if (q && val) {
+                       dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n",
+                                q->subvendor, q->subdevice);
+                       val = 0;
+               }
+       }
+#endif /* CONFIG_PM */
+       snd_hda_set_power_save(&chip->bus, val * 1000);
+}
+
 /* number of codec slots for each chipset: 0 = default slots (i.e. 4) */
 static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = {
        [AZX_DRIVER_NVIDIA] = 8,
@@ -2241,9 +2301,7 @@ static int azx_probe_continue(struct azx *chip)
        struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
        struct hdac_bus *bus = azx_bus(chip);
        struct pci_dev *pci = chip->pci;
-       struct hda_codec *codec;
        int dev = chip->dev_index;
-       int val;
        int err;
 
        hda->probe_continued = 1;
@@ -2322,31 +2380,13 @@ static int azx_probe_continue(struct azx *chip)
        if (err < 0)
                goto out_free;
 
+       setup_vga_switcheroo_runtime_pm(chip);
+
        chip->running = 1;
        azx_add_card_list(chip);
 
-       val = power_save;
-#ifdef CONFIG_PM
-       if (pm_blacklist) {
-               const struct snd_pci_quirk *q;
-
-               q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
-               if (q && val) {
-                       dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n",
-                                q->subvendor, q->subdevice);
-                       val = 0;
-               }
-       }
-#endif /* CONFIG_PM */
-       /*
-        * The discrete GPU cannot power down unless the HDA controller runtime
-        * suspends, so activate runtime PM on codecs even if power_save == 0.
-        */
-       if (use_vga_switcheroo(hda))
-               list_for_each_codec(codec, &chip->bus)
-                       codec->auto_runtime_pm = 1;
+       set_default_power_save(chip);
 
-       snd_hda_set_power_save(&chip->bus, val * 1000);
        if (azx_has_pm_runtime(chip))
                pm_runtime_put_autosuspend(&pci->dev);
 
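
Taken together, the hunks above move the power_save default computation (including the pm_blacklist quirk lookup) out of azx_probe_continue() into set_default_power_save(), so setup_vga_switcheroo_runtime_pm() can re-apply it once the gpu_bound callback reports that the audio function belongs to the discrete GPU and ELD notification no longer needs to keep the link awake. A compressed sketch of that shape; every name below is a hypothetical stand-in, not the real hda_intel code:

    static int power_save;                          /* stand-in module parameter */

    struct my_chip {
            unsigned int pm_blacklisted:1;          /* assumed quirk-table result */
            unsigned int need_eld_notify_link:1;
            unsigned int running:1;
    };

    static void my_set_power_save(struct my_chip *chip, int msec) { /* ... */ }

    static void apply_default_power_save(struct my_chip *chip)
    {
            int val = chip->pm_blacklisted ? 0 : power_save;

            my_set_power_save(chip, val * 1000);    /* seconds -> milliseconds */
    }

    static void my_gpu_bound(struct my_chip *chip, int discrete)
    {
            if (discrete)
                    chip->need_eld_notify_link = 0;
            if (!chip->need_eld_notify_link && chip->running)
                    apply_default_power_save(chip); /* safe to re-run after probe */
    }
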
index e3a3d318d2e5f9fc57dbe9530a2470ab955b1765..f59719e06b91a38492957195dfda7328edeb80db 100644 (file)
@@ -37,6 +37,7 @@ struct hda_intel {
 
        /* vga_switcheroo setup */
        unsigned int use_vga_switcheroo:1;
+       unsigned int need_eld_notify_link:1;
        unsigned int vga_switcheroo_registered:1;
        unsigned int init_failed:1; /* delayed init failed */
 
index e359938e3d7e16bfd3995084be4e1b20d6b1989e..77b265bd050531f59eeddb9eaee2cd75944f5649 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/sizes.h>
 #include <linux/pm_runtime.h>
 
@@ -184,6 +185,24 @@ static void config_dma_descriptor_in_sram(void __iomem *acp_mmio,
        acp_reg_write(descr_info->xfer_val, acp_mmio, mmACP_SRBM_Targ_Idx_Data);
 }
 
+static void pre_config_reset(void __iomem *acp_mmio, u16 ch_num)
+{
+       u32 dma_ctrl;
+       int ret;
+
+       /* clear the reset bit */
+       dma_ctrl = acp_reg_read(acp_mmio, mmACP_DMA_CNTL_0 + ch_num);
+       dma_ctrl &= ~ACP_DMA_CNTL_0__DMAChRst_MASK;
+       acp_reg_write(dma_ctrl, acp_mmio, mmACP_DMA_CNTL_0 + ch_num);
+       /* check the reset bit before programming configuration registers */
+       ret = readl_poll_timeout(acp_mmio + ((mmACP_DMA_CNTL_0 + ch_num) * 4),
+                                dma_ctrl,
+                                !(dma_ctrl & ACP_DMA_CNTL_0__DMAChRst_MASK),
+                                100, ACP_DMA_RESET_TIME);
+       if (ret < 0)
+               pr_err("Failed to clear reset of channel : %d\n", ch_num);
+}
+
 /*
  * Initialize the DMA descriptor information for transfer between
  * system memory <-> ACP SRAM
@@ -236,6 +255,7 @@ static void set_acp_sysmem_dma_descriptors(void __iomem *acp_mmio,
                config_dma_descriptor_in_sram(acp_mmio, dma_dscr_idx,
                                              &dmadscr[i]);
        }
+       pre_config_reset(acp_mmio, ch);
        config_acp_dma_channel(acp_mmio, ch,
                               dma_dscr_idx - 1,
                               NUM_DSCRS_PER_CHANNEL,
@@ -275,6 +295,7 @@ static void set_acp_to_i2s_dma_descriptors(void __iomem *acp_mmio, u32 size,
                config_dma_descriptor_in_sram(acp_mmio, dma_dscr_idx,
                                              &dmadscr[i]);
        }
+       pre_config_reset(acp_mmio, ch);
        /* Configure the DMA channel with the above descriptors */
        config_acp_dma_channel(acp_mmio, ch, dma_dscr_idx - 1,
                               NUM_DSCRS_PER_CHANNEL,
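
pre_config_reset() above clears the channel-reset bit and then uses readl_poll_timeout() to wait for the hardware to report the bit clear before any configuration registers are touched; the helper re-reads the register at the given interval and gives up with -ETIMEDOUT once the timeout expires. A rough userspace sketch of the same poll-until-condition loop, with read_reg() as an assumed register accessor rather than the kernel helper itself:

    #include <errno.h>
    #include <stdint.h>
    #include <time.h>
    #include <unistd.h>

    static uint32_t read_reg(void) { return 0; }    /* stand-in register read */

    static int poll_bit_clear(uint32_t mask, unsigned int sleep_us,
                              unsigned int timeout_us)
    {
            struct timespec start, now;

            clock_gettime(CLOCK_MONOTONIC, &start);
            for (;;) {
                    if (!(read_reg() & mask))
                            return 0;               /* condition met */
                    clock_gettime(CLOCK_MONOTONIC, &now);
                    uint64_t us = (now.tv_sec - start.tv_sec) * 1000000ULL +
                                  (now.tv_nsec - start.tv_nsec) / 1000;
                    if (us >= timeout_us)
                            return -ETIMEDOUT;      /* as readl_poll_timeout() does */
                    usleep(sleep_us);
            }
    }
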
index 275677de669f0562b8fea2fe82c4cf07f33a90cd..407554175282fffe6b68bbfb9aaf33b3f564c0a4 100644 (file)
@@ -157,8 +157,8 @@ static const struct snd_kcontrol_new cs4265_snd_controls[] = {
        SOC_SINGLE("Validity Bit Control Switch", CS4265_SPDIF_CTL2,
                                3, 1, 0),
        SOC_ENUM("SPDIF Mono/Stereo", spdif_mono_stereo_enum),
-       SOC_SINGLE("MMTLR Data Switch", 0,
-                               1, 1, 0),
+       SOC_SINGLE("MMTLR Data Switch", CS4265_SPDIF_CTL2,
+                               0, 1, 0),
        SOC_ENUM("Mono Channel Select", spdif_mono_select_enum),
        SND_SOC_BYTES("C Data Buffer", CS4265_C_DATA_BUFF, 24),
 };
index 92b7125ea16911b2de4dda0182cacef93d7cd4e2..1093f766d0d2c73fe647bac2449c7ac06d069ecd 100644 (file)
@@ -520,6 +520,7 @@ static bool max98373_volatile_reg(struct device *dev, unsigned int reg)
 {
        switch (reg) {
        case MAX98373_R2000_SW_RESET ... MAX98373_R2009_INT_FLAG3:
+       case MAX98373_R203E_AMP_PATH_GAIN:
        case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK:
        case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK:
        case MAX98373_R20B6_BDE_CUR_STATE_READBACK:
@@ -729,6 +730,7 @@ static int max98373_probe(struct snd_soc_component *component)
        /* Software Reset */
        regmap_write(max98373->regmap,
                MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET);
+       usleep_range(10000, 11000);
 
        /* IV default slot configuration */
        regmap_write(max98373->regmap,
@@ -817,6 +819,7 @@ static int max98373_resume(struct device *dev)
 
        regmap_write(max98373->regmap,
                MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET);
+       usleep_range(10000, 11000);
        regcache_cache_only(max98373->regmap, false);
        regcache_sync(max98373->regmap);
        return 0;
index dca82dd6e3bfeb7cb23d3e3a478983e3c4b36b2e..32fe76c3134ab41df5e8bbb443e2e5f1b7d4a54c 100644 (file)
@@ -64,8 +64,8 @@ static const struct reg_sequence rt5514_patch[] = {
        {RT5514_ANA_CTRL_LDO10,         0x00028604},
        {RT5514_ANA_CTRL_ADCFED,        0x00000800},
        {RT5514_ASRC_IN_CTRL1,          0x00000003},
-       {RT5514_DOWNFILTER0_CTRL3,      0x10000352},
-       {RT5514_DOWNFILTER1_CTRL3,      0x10000352},
+       {RT5514_DOWNFILTER0_CTRL3,      0x10000342},
+       {RT5514_DOWNFILTER1_CTRL3,      0x10000342},
 };
 
 static const struct reg_default rt5514_reg[] = {
@@ -92,10 +92,10 @@ static const struct reg_default rt5514_reg[] = {
        {RT5514_ASRC_IN_CTRL1,          0x00000003},
        {RT5514_DOWNFILTER0_CTRL1,      0x00020c2f},
        {RT5514_DOWNFILTER0_CTRL2,      0x00020c2f},
-       {RT5514_DOWNFILTER0_CTRL3,      0x10000352},
+       {RT5514_DOWNFILTER0_CTRL3,      0x10000342},
        {RT5514_DOWNFILTER1_CTRL1,      0x00020c2f},
        {RT5514_DOWNFILTER1_CTRL2,      0x00020c2f},
-       {RT5514_DOWNFILTER1_CTRL3,      0x10000352},
+       {RT5514_DOWNFILTER1_CTRL3,      0x10000342},
        {RT5514_ANA_CTRL_LDO10,         0x00028604},
        {RT5514_ANA_CTRL_LDO18_16,      0x02000345},
        {RT5514_ANA_CTRL_ADC12,         0x0000a2a8},
index 640d400ca01359bf06c3e0f90ea1bad008a811a0..afe7d5b193133a27bd6dd8470e166d6dc0fc07b2 100644 (file)
@@ -750,8 +750,8 @@ static bool rt5682_readable_register(struct device *dev, unsigned int reg)
 }
 
 static const DECLARE_TLV_DB_SCALE(hp_vol_tlv, -2250, 150, 0);
-static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0);
-static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
+static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -6525, 75, 0);
+static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -1725, 75, 0);
 static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
 
 /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
@@ -1114,7 +1114,7 @@ static const struct snd_kcontrol_new rt5682_snd_controls[] = {
 
        /* DAC Digital Volume */
        SOC_DOUBLE_TLV("DAC1 Playback Volume", RT5682_DAC1_DIG_VOL,
-               RT5682_L_VOL_SFT, RT5682_R_VOL_SFT, 175, 0, dac_vol_tlv),
+               RT5682_L_VOL_SFT + 1, RT5682_R_VOL_SFT + 1, 86, 0, dac_vol_tlv),
 
        /* IN Boost Volume */
        SOC_SINGLE_TLV("CBJ Boost Volume", RT5682_CBJ_BST_CTRL,
@@ -1124,7 +1124,7 @@ static const struct snd_kcontrol_new rt5682_snd_controls[] = {
        SOC_DOUBLE("STO1 ADC Capture Switch", RT5682_STO1_ADC_DIG_VOL,
                RT5682_L_MUTE_SFT, RT5682_R_MUTE_SFT, 1, 1),
        SOC_DOUBLE_TLV("STO1 ADC Capture Volume", RT5682_STO1_ADC_DIG_VOL,
-               RT5682_L_VOL_SFT, RT5682_R_VOL_SFT, 127, 0, adc_vol_tlv),
+               RT5682_L_VOL_SFT + 1, RT5682_R_VOL_SFT + 1, 63, 0, adc_vol_tlv),
 
        /* ADC Boost Volume Control */
        SOC_DOUBLE_TLV("STO1 ADC Boost Gain Volume", RT5682_STO1_ADC_BOOST,
index d53680ac78e42d0bb0979afcbea8e77046d181b9..6df158669420db700af2d6e0a11707319e091290 100644 (file)
@@ -117,8 +117,7 @@ static int sigmadsp_ctrl_write(struct sigmadsp *sigmadsp,
        struct sigmadsp_control *ctrl, void *data)
 {
        /* safeload loads up to 20 bytes in an atomic operation */
-       if (ctrl->num_bytes > 4 && ctrl->num_bytes <= 20 && sigmadsp->ops &&
-           sigmadsp->ops->safeload)
+       if (ctrl->num_bytes <= 20 && sigmadsp->ops && sigmadsp->ops->safeload)
                return sigmadsp->ops->safeload(sigmadsp, ctrl->addr, data,
                        ctrl->num_bytes);
        else
index 14999b999fd31a4b25e5a06779078d1cc462ea29..0d6145549a98d62b7d91474311150fd4ba082b45 100644 (file)
@@ -424,8 +424,10 @@ static void tas6424_fault_check_work(struct work_struct *work)
               TAS6424_FAULT_PVDD_UV |
               TAS6424_FAULT_VBAT_UV;
 
-       if (reg)
+       if (!reg) {
+               tas6424->last_fault1 = reg;
                goto check_global_fault2_reg;
+       }
 
        /*
         * Only flag errors once for a given occurrence. This is needed as
@@ -461,8 +463,10 @@ check_global_fault2_reg:
               TAS6424_FAULT_OTSD_CH3 |
               TAS6424_FAULT_OTSD_CH4;
 
-       if (!reg)
+       if (!reg) {
+               tas6424->last_fault2 = reg;
                goto check_warn_reg;
+       }
 
        if ((reg & TAS6424_FAULT_OTSD) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD))
                dev_crit(dev, "experienced a global overtemp shutdown\n");
@@ -497,8 +501,10 @@ check_warn_reg:
               TAS6424_WARN_VDD_OTW_CH3 |
               TAS6424_WARN_VDD_OTW_CH4;
 
-       if (!reg)
+       if (!reg) {
+               tas6424->last_warn = reg;
                goto out;
+       }
 
        if ((reg & TAS6424_WARN_VDD_UV) && !(tas6424->last_warn & TAS6424_WARN_VDD_UV))
                dev_warn(dev, "experienced a VDD under voltage condition\n");
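
In the fault-check work above, each register read is compared against the last recorded snapshot so a condition is logged only on its rising edge; before the fix, a read of 0 skipped the snapshot update, so a fault that cleared and later reasserted was never reported again. A minimal sketch of that latch, with FAULT_X as an illustrative bit rather than a real TAS6424 flag:

    #include <stdint.h>
    #include <stdio.h>

    #define FAULT_X 0x01U

    static uint32_t last_fault;

    static void check_faults(uint32_t reg)
    {
            if (!reg) {
                    last_fault = 0;                 /* faults cleared: forget them */
                    return;
            }
            if ((reg & FAULT_X) && !(last_fault & FAULT_X))
                    printf("fault X asserted\n");   /* report rising edge only */
            last_fault = reg;
    }
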
index f27464c2c5bad53ccca78fa8ba3bd0bf3f2cfef6..79541960f45d94066af3401f115a5d091a7b552c 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/i2c.h>
+#include <linux/acpi.h>
 
 #include "wm8804.h"
 
@@ -40,17 +41,29 @@ static const struct i2c_device_id wm8804_i2c_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, wm8804_i2c_id);
 
+#if defined(CONFIG_OF)
 static const struct of_device_id wm8804_of_match[] = {
        { .compatible = "wlf,wm8804", },
        { }
 };
 MODULE_DEVICE_TABLE(of, wm8804_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id wm8804_acpi_match[] = {
+       { "1AEC8804", 0 }, /* Wolfson PCI ID + part ID */
+       { "10138804", 0 }, /* Cirrus Logic PCI ID + part ID */
+       { },
+};
+MODULE_DEVICE_TABLE(acpi, wm8804_acpi_match);
+#endif
 
 static struct i2c_driver wm8804_i2c_driver = {
        .driver = {
                .name = "wm8804",
                .pm = &wm8804_pm,
-               .of_match_table = wm8804_of_match,
+               .of_match_table = of_match_ptr(wm8804_of_match),
+               .acpi_match_table = ACPI_PTR(wm8804_acpi_match),
        },
        .probe = wm8804_i2c_probe,
        .remove = wm8804_i2c_remove,
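
Wrapping the tables in of_match_ptr() and ACPI_PTR() lets them stay inside the #if blocks above without breaking builds that disable OF or ACPI: each macro evaluates to the pointer when the corresponding option is enabled and to NULL otherwise, roughly:

    #ifdef CONFIG_OF
    #define of_match_ptr(ptr)  (ptr)
    #else
    #define of_match_ptr(ptr)  NULL
    #endif

    #ifdef CONFIG_ACPI
    #define ACPI_PTR(ptr)      (ptr)
    #else
    #define ACPI_PTR(ptr)      NULL
    #endif
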
index 953d94d505864ddcefa04e78db83d96df0f19477..ade34c26ad2f3381d806ff069044d77c4186c3d8 100644 (file)
@@ -719,7 +719,7 @@ static int wm9712_probe(struct platform_device *pdev)
 
 static struct platform_driver wm9712_component_driver = {
        .driver = {
-               .name = "wm9712-component",
+               .name = "wm9712-codec",
        },
 
        .probe = wm9712_probe,
index d32844f94d74b87243e4a16d61311d2c92121dd4..b6dc524830b21a04371e79607e9b4ca1f902c108 100644 (file)
@@ -575,6 +575,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
                                        BYT_RT5640_MONO_SPEAKER |
                                        BYT_RT5640_MCLK_EN),
        },
+       {       /* Linx Linx7 tablet */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LINX"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LINX7"),
+               },
+               .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
+                                       BYT_RT5640_MONO_SPEAKER |
+                                       BYT_RT5640_JD_NOT_INV |
+                                       BYT_RT5640_SSP0_AIF1 |
+                                       BYT_RT5640_MCLK_EN),
+       },
        {       /* MSI S100 tablet */
                .matches = {
                        DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."),
@@ -602,6 +613,21 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
                                        BYT_RT5640_SSP0_AIF1 |
                                        BYT_RT5640_MCLK_EN),
        },
+       {       /* Onda v975w */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+                       DMI_EXACT_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+                       /* The above are too generic, also match BIOS info */
+                       DMI_EXACT_MATCH(DMI_BIOS_VERSION, "5.6.5"),
+                       DMI_EXACT_MATCH(DMI_BIOS_DATE, "07/25/2014"),
+               },
+               .driver_data = (void *)(BYT_RT5640_IN1_MAP |
+                                       BYT_RT5640_JD_SRC_JD2_IN4N |
+                                       BYT_RT5640_OVCD_TH_2000UA |
+                                       BYT_RT5640_OVCD_SF_0P75 |
+                                       BYT_RT5640_DIFF_MIC |
+                                       BYT_RT5640_MCLK_EN),
+       },
        {       /* Pipo W4 */
                .matches = {
                        DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
index dce64948564940ea7383e0f4975de4b4ec7293ec..1d17be0f78a089e38a95ffbc3e84f0604ad6c2e3 100644 (file)
@@ -834,7 +834,7 @@ static int skl_first_init(struct hdac_bus *bus)
                return -ENXIO;
        }
 
-       skl_init_chip(bus, true);
+       snd_hdac_bus_reset_link(bus, true);
 
        snd_hdac_bus_parse_capabilities(bus);
 
index dc94c5c53788b044b183f2406df7a5d9234cc5fc..c6b51571be945e5262dbf46966caab4e1615f529 100644 (file)
@@ -960,8 +960,10 @@ static int msm_routing_probe(struct snd_soc_component *c)
 {
        int i;
 
-       for (i = 0; i < MAX_SESSIONS; i++)
+       for (i = 0; i < MAX_SESSIONS; i++) {
                routing_data->sessions[i].port_id = -1;
+               routing_data->sessions[i].fedai_id = -1;
+       }
 
        return 0;
 }
index 3a3064dda57f329a96be5ef17b1dea8251f09f72..051f96405346b2f495c94ebc409656c98634a3c8 100644 (file)
@@ -462,6 +462,11 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
                goto rsnd_adg_get_clkout_end;
 
        req_size = prop->length / sizeof(u32);
+       if (req_size > REQ_SIZE) {
+               dev_err(dev,
+                       "too many clock-frequency entries, using the first %d\n", REQ_SIZE);
+               req_size = REQ_SIZE;
+       }
 
        of_property_read_u32_array(np, "clock-frequency", req_rate, req_size);
        req_48kHz_rate = 0;
index f8425d8b44d2b54aff876f8759e81c740cb6157a..d23c2bbff0cf4e666dcf5d3a4b26fccaf22a05ba 100644 (file)
@@ -478,7 +478,7 @@ static int rsnd_status_update(u32 *status,
                        (func_call && (mod)->ops->fn) ? #fn : "");      \
                if (func_call && (mod)->ops->fn)                        \
                        tmp = (mod)->ops->fn(mod, io, param);           \
-               if (tmp)                                                \
+               if (tmp && (tmp != -EPROBE_DEFER))                      \
                        dev_err(dev, "%s[%d] : %s error %d\n",          \
                                rsnd_mod_name(mod), rsnd_mod_id(mod),   \
                                                     #fn, tmp);         \
@@ -958,12 +958,23 @@ static void rsnd_soc_dai_shutdown(struct snd_pcm_substream *substream,
        rsnd_dai_stream_quit(io);
 }
 
+static int rsnd_soc_dai_prepare(struct snd_pcm_substream *substream,
+                               struct snd_soc_dai *dai)
+{
+       struct rsnd_priv *priv = rsnd_dai_to_priv(dai);
+       struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
+       struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream);
+
+       return rsnd_dai_call(prepare, io, priv);
+}
+
 static const struct snd_soc_dai_ops rsnd_soc_dai_ops = {
        .startup        = rsnd_soc_dai_startup,
        .shutdown       = rsnd_soc_dai_shutdown,
        .trigger        = rsnd_soc_dai_trigger,
        .set_fmt        = rsnd_soc_dai_set_fmt,
        .set_tdm_slot   = rsnd_soc_set_dai_tdm_slot,
+       .prepare        = rsnd_soc_dai_prepare,
 };
 
 void rsnd_parse_connect_common(struct rsnd_dai *rdai,
@@ -1550,6 +1561,14 @@ exit_snd_probe:
                rsnd_dai_call(remove, &rdai->capture, priv);
        }
 
+       /*
+        * adg is a special mod which can't use rsnd_dai_call(remove),
+        * and it registers the ADG clock on probe.
+        * That clock must be unregistered if probe failed,
+        * mainly for the -EPROBE_DEFER case.
+        */
+       rsnd_adg_remove(priv);
+
        return ret;
 }
 
index fe63ef8600d02f259743a8339ce7059ea2b1be25..d65ea7bc4dac2d2608cbcfbe3d3a4ebd915ad242 100644 (file)
@@ -241,6 +241,10 @@ static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
        /* try to get DMAEngine channel */
        chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
        if (IS_ERR_OR_NULL(chan)) {
+               /* Pass -EPROBE_DEFER up instead of falling back to PIO */
+               if (PTR_ERR(chan) == -EPROBE_DEFER)
+                       return PTR_ERR(chan);
+
                /*
                 * DMA failed. try to PIO mode
                 * see
index 96d93330b1e1a59ebba22668ff76fb4f8a325d3d..8f7a0abfa751ed7342223e406999d0166a10307f 100644 (file)
@@ -280,6 +280,9 @@ struct rsnd_mod_ops {
        int (*nolock_stop)(struct rsnd_mod *mod,
                    struct rsnd_dai_stream *io,
                    struct rsnd_priv *priv);
+       int (*prepare)(struct rsnd_mod *mod,
+                      struct rsnd_dai_stream *io,
+                      struct rsnd_priv *priv);
 };
 
 struct rsnd_dai_stream;
@@ -309,6 +312,7 @@ struct rsnd_mod {
  * H   0: fallback
  * H   0: hw_params
  * H   0: pointer
+ * H   0: prepare
  */
 #define __rsnd_mod_shift_nolock_start  0
 #define __rsnd_mod_shift_nolock_stop   0
@@ -323,6 +327,7 @@ struct rsnd_mod {
 #define __rsnd_mod_shift_fallback      28 /* always called */
 #define __rsnd_mod_shift_hw_params     28 /* always called */
 #define __rsnd_mod_shift_pointer       28 /* always called */
+#define __rsnd_mod_shift_prepare       28 /* always called */
 
 #define __rsnd_mod_add_probe           0
 #define __rsnd_mod_add_remove          0
@@ -337,6 +342,7 @@ struct rsnd_mod {
 #define __rsnd_mod_add_fallback                0
 #define __rsnd_mod_add_hw_params       0
 #define __rsnd_mod_add_pointer         0
+#define __rsnd_mod_add_prepare         0
 
 #define __rsnd_mod_call_probe          0
 #define __rsnd_mod_call_remove         0
@@ -351,6 +357,7 @@ struct rsnd_mod {
 #define __rsnd_mod_call_pointer                0
 #define __rsnd_mod_call_nolock_start   0
 #define __rsnd_mod_call_nolock_stop    1
+#define __rsnd_mod_call_prepare                0
 
 #define rsnd_mod_to_priv(mod)  ((mod)->priv)
 #define rsnd_mod_name(mod)     ((mod)->ops->name)
index 8304e4ec9242c68eeabeca2516938990678aabc8..3f880ec66459a989306ee38303692e35d354ca37 100644 (file)
@@ -283,7 +283,7 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
        if (rsnd_ssi_is_multi_slave(mod, io))
                return 0;
 
-       if (ssi->usrcnt > 1) {
+       if (ssi->rate) {
                if (ssi->rate != rate) {
                        dev_err(dev, "SSI parent/child should use same rate\n");
                        return -EINVAL;
@@ -434,7 +434,6 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
                         struct rsnd_priv *priv)
 {
        struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
-       int ret;
 
        if (!rsnd_ssi_is_run_mods(mod, io))
                return 0;
@@ -443,10 +442,6 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
 
        rsnd_mod_power_on(mod);
 
-       ret = rsnd_ssi_master_clk_start(mod, io);
-       if (ret < 0)
-               return ret;
-
        rsnd_ssi_config_init(mod, io);
 
        rsnd_ssi_register_setup(mod);
@@ -852,6 +847,13 @@ static int rsnd_ssi_pio_pointer(struct rsnd_mod *mod,
        return 0;
 }
 
+static int rsnd_ssi_prepare(struct rsnd_mod *mod,
+                           struct rsnd_dai_stream *io,
+                           struct rsnd_priv *priv)
+{
+       return rsnd_ssi_master_clk_start(mod, io);
+}
+
 static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
        .name   = SSI_NAME,
        .probe  = rsnd_ssi_common_probe,
@@ -864,6 +866,7 @@ static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
        .pointer = rsnd_ssi_pio_pointer,
        .pcm_new = rsnd_ssi_pcm_new,
        .hw_params = rsnd_ssi_hw_params,
+       .prepare = rsnd_ssi_prepare,
 };
 
 static int rsnd_ssi_dma_probe(struct rsnd_mod *mod,
@@ -940,6 +943,7 @@ static struct rsnd_mod_ops rsnd_ssi_dma_ops = {
        .pcm_new = rsnd_ssi_pcm_new,
        .fallback = rsnd_ssi_fallback,
        .hw_params = rsnd_ssi_hw_params,
+       .prepare = rsnd_ssi_prepare,
 };
 
 int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod)
index 9cfe10d8040cf8f0cea83d85d2dfb68a6b508f6d..473eefe8658e9a22af71b2f3af42f1f92cf00acd 100644 (file)
@@ -1447,7 +1447,7 @@ static int soc_link_dai_widgets(struct snd_soc_card *card,
        sink = codec_dai->playback_widget;
        source = cpu_dai->capture_widget;
        if (sink && source) {
-               ret = snd_soc_dapm_new_pcm(card, dai_link->params,
+               ret = snd_soc_dapm_new_pcm(card, rtd, dai_link->params,
                                           dai_link->num_params,
                                           source, sink);
                if (ret != 0) {
@@ -1460,7 +1460,7 @@ static int soc_link_dai_widgets(struct snd_soc_card *card,
        sink = cpu_dai->playback_widget;
        source = codec_dai->capture_widget;
        if (sink && source) {
-               ret = snd_soc_dapm_new_pcm(card, dai_link->params,
+               ret = snd_soc_dapm_new_pcm(card, rtd, dai_link->params,
                                           dai_link->num_params,
                                           source, sink);
                if (ret != 0) {
index 7e96793050c9b1a1b634d34e4e0ad9a658cf4111..461d951917c0569096eacd4274fd9eb1ade8f9c5 100644 (file)
@@ -3652,6 +3652,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
 {
        struct snd_soc_dapm_path *source_p, *sink_p;
        struct snd_soc_dai *source, *sink;
+       struct snd_soc_pcm_runtime *rtd = w->priv;
        const struct snd_soc_pcm_stream *config = w->params + w->params_select;
        struct snd_pcm_substream substream;
        struct snd_pcm_hw_params *params = NULL;
@@ -3711,6 +3712,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
                goto out;
        }
        substream.runtime = runtime;
+       substream.private_data = rtd;
 
        switch (event) {
        case SND_SOC_DAPM_PRE_PMU:
@@ -3895,6 +3897,7 @@ outfree_w_param:
 }
 
 int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
+                        struct snd_soc_pcm_runtime *rtd,
                         const struct snd_soc_pcm_stream *params,
                         unsigned int num_params,
                         struct snd_soc_dapm_widget *source,
@@ -3963,6 +3966,7 @@ int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
 
        w->params = params;
        w->num_params = num_params;
+       w->priv = rtd;
 
        ret = snd_soc_dapm_add_path(&card->dapm, source, w, NULL, NULL);
        if (ret)
index 16e006f708ca0cbd44a63135bb996b8db7c3ba9e..4602464ebdfbfccd9296593ed5dd8b15f55305ff 100644 (file)
@@ -27,6 +27,7 @@
 #define __KVM_HAVE_GUEST_DEBUG
 #define __KVM_HAVE_IRQ_LINE
 #define __KVM_HAVE_READONLY_MEM
+#define __KVM_HAVE_VCPU_EVENTS
 
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 
@@ -125,6 +126,18 @@ struct kvm_sync_regs {
 struct kvm_arch_memory_slot {
 };
 
+/* for KVM_GET/SET_VCPU_EVENTS */
+struct kvm_vcpu_events {
+       struct {
+               __u8 serror_pending;
+               __u8 serror_has_esr;
+               /* Align it to 8 bytes */
+               __u8 pad[6];
+               __u64 serror_esr;
+       } exception;
+       __u32 reserved[12];
+};
+
 /* If you need to interpret the index values, here is the key: */
 #define KVM_REG_ARM_COPROC_MASK                0x000000000FFF0000
 #define KVM_REG_ARM_COPROC_SHIFT       16
index 4e76630dd6554673d71ad647c1108bb54f1bcea2..97c3478ee6e718c8ac2de4c01edee6a0dd4cd27e 100644 (file)
@@ -39,6 +39,7 @@
 #define __KVM_HAVE_GUEST_DEBUG
 #define __KVM_HAVE_IRQ_LINE
 #define __KVM_HAVE_READONLY_MEM
+#define __KVM_HAVE_VCPU_EVENTS
 
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 
@@ -154,6 +155,18 @@ struct kvm_sync_regs {
 struct kvm_arch_memory_slot {
 };
 
+/* for KVM_GET/SET_VCPU_EVENTS */
+struct kvm_vcpu_events {
+       struct {
+               __u8 serror_pending;
+               __u8 serror_has_esr;
+               /* Align it to 8 bytes */
+               __u8 pad[6];
+               __u64 serror_esr;
+       } exception;
+       __u32 reserved[12];
+};
+
 /* If you need to interpret the index values, here is the key: */
 #define KVM_REG_ARM_COPROC_MASK                0x000000000FFF0000
 #define KVM_REG_ARM_COPROC_SHIFT       16
index 4cdaa55fabfe2658e054b5aaf1455b6f0556e584..9a50f02b98946eb49df6cb5f407b1a4a04b89e6c 100644 (file)
@@ -4,7 +4,7 @@
 /*
  * KVM s390 specific structures and definitions
  *
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008, 2018
  *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
@@ -225,6 +225,7 @@ struct kvm_guest_debug_arch {
 #define KVM_SYNC_FPRS   (1UL << 8)
 #define KVM_SYNC_GSCB   (1UL << 9)
 #define KVM_SYNC_BPBC   (1UL << 10)
+#define KVM_SYNC_ETOKEN (1UL << 11)
 /* length and alignment of the sdnx as a power of two */
 #define SDNXC 8
 #define SDNXL (1UL << SDNXC)
@@ -258,6 +259,8 @@ struct kvm_sync_regs {
                struct {
                        __u64 reserved1[2];
                        __u64 gscb[4];
+                       __u64 etoken;
+                       __u64 etoken_extension;
                };
        };
 };
index 64aaa3f5f36ca2c6cb43f7ab5d02c0e8510a010d..89a048c2faec7f8a818d1a461ccd7fa67eca0fd9 100644 (file)
 #define X86_FEATURE_STIBP              ( 7*32+27) /* Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_ZEN                        ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
 #define X86_FEATURE_L1TF_PTEINV                ( 7*32+29) /* "" L1TF workaround PTE inversion */
+#define X86_FEATURE_IBRS_ENHANCED      ( 7*32+30) /* Enhanced IBRS */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW         ( 8*32+ 0) /* Intel TPR Shadow */
 
 #define X86_FEATURE_VMMCALL            ( 8*32+15) /* Prefer VMMCALL to VMCALL */
 #define X86_FEATURE_XENPV              ( 8*32+16) /* "" Xen paravirtual guest */
-
+#define X86_FEATURE_EPT_AD             ( 8*32+17) /* Intel Extended Page Table access-dirty bit */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
 #define X86_FEATURE_FSGSBASE           ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
index c535c2fdea136a5e58725e72a2d867f545d393eb..86299efa804adbfc35d3338f9fe75083e6cdf5af 100644 (file)
@@ -378,4 +378,41 @@ struct kvm_sync_regs {
 #define KVM_X86_QUIRK_LINT0_REENABLED  (1 << 0)
 #define KVM_X86_QUIRK_CD_NW_CLEARED    (1 << 1)
 
+#define KVM_STATE_NESTED_GUEST_MODE    0x00000001
+#define KVM_STATE_NESTED_RUN_PENDING   0x00000002
+
+#define KVM_STATE_NESTED_SMM_GUEST_MODE        0x00000001
+#define KVM_STATE_NESTED_SMM_VMXON     0x00000002
+
+struct kvm_vmx_nested_state {
+       __u64 vmxon_pa;
+       __u64 vmcs_pa;
+
+       struct {
+               __u16 flags;
+       } smm;
+};
+
+/* for KVM_CAP_NESTED_STATE */
+struct kvm_nested_state {
+       /* KVM_STATE_* flags */
+       __u16 flags;
+
+       /* 0 for VMX, 1 for SVM.  */
+       __u16 format;
+
+       /* 128 for SVM, 128 + VMCS size for VMX.  */
+       __u32 size;
+
+       union {
+               /* VMXON, VMCS */
+               struct kvm_vmx_nested_state vmx;
+
+               /* Pad the header to 128 bytes.  */
+               __u8 pad[120];
+       };
+
+       __u8 data[0];
+};
+
 #endif /* _ASM_X86_KVM_H */
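
struct kvm_nested_state above is a 128-byte fixed header followed by a variable-length data[] payload. A hedged sketch of how userspace might use it: the buffer size is discovered via KVM_CHECK_EXTENSION on KVM_CAP_NESTED_STATE, and size is set to the allocated length before calling the ioctl. The kvm_fd/vcpu_fd descriptors and error handling are illustrative only:

    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    struct kvm_nested_state *get_nested_state(int kvm_fd, int vcpu_fd)
    {
            int max = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NESTED_STATE);
            struct kvm_nested_state *st;

            if (max <= 0)
                    return NULL;                   /* not supported */
            st = calloc(1, max);                   /* header + data[] */
            if (!st)
                    return NULL;
            st->size = max;                        /* buffer size for KVM to fill */
            if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, st) < 0) {
                    free(st);
                    return NULL;
            }
            return st;
    }
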
index 298ef1479240b6b899fb4185a5204d5ad56b8785..3b24dc05251c7ce908cc2be48befb971b5b8f564 100644 (file)
@@ -256,7 +256,7 @@ ENTRY(__memcpy_mcsafe)
 
        /* Copy successful. Return zero */
 .L_done_memcpy_trap:
-       xorq %rax, %rax
+       xorl %eax, %eax
        ret
 ENDPROC(__memcpy_mcsafe)
 EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
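
The xorq-to-xorl change is purely an encoding win: a write to a 32-bit register zero-extends into the full 64-bit register, so xorl %eax,%eax already clears all of %rax, and it drops the REX.W prefix (31 c0, two bytes, versus 48 31 c0 for the 64-bit form).
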
index b2ec20e562bd5a3b0adf77b5f40be365bf255efa..b455930a3eaf7fd9095e116cfd9536e621a5cd1f 100644 (file)
@@ -68,6 +68,7 @@ static const char * const map_type_name[] = {
        [BPF_MAP_TYPE_DEVMAP]           = "devmap",
        [BPF_MAP_TYPE_SOCKMAP]          = "sockmap",
        [BPF_MAP_TYPE_CPUMAP]           = "cpumap",
+       [BPF_MAP_TYPE_XSKMAP]           = "xskmap",
        [BPF_MAP_TYPE_SOCKHASH]         = "sockhash",
        [BPF_MAP_TYPE_CGROUP_STORAGE]   = "cgroup_storage",
 };
index 1832100d1b274db7b71045e4dfe9e7e00e534ae0..6d41323be291bbf7e18348375aa88006d36facda 100644 (file)
@@ -194,8 +194,10 @@ int do_event_pipe(int argc, char **argv)
        }
 
        while (argc) {
-               if (argc < 2)
+               if (argc < 2) {
                        BAD_ARG();
+                       goto err_close_map;
+               }
 
                if (is_prefix(*argv, "cpu")) {
                        char *endptr;
@@ -221,6 +223,7 @@ int do_event_pipe(int argc, char **argv)
                        NEXT_ARG();
                } else {
                        BAD_ARG();
+                       goto err_close_map;
                }
 
                do_all = false;
index dbf6e8bd98ba59be0519d284fdc7bf59b19f46d3..bbb2a8ef367cacd6cbd837898608d0de785c7d73 100644 (file)
@@ -286,7 +286,7 @@ static int kvp_key_delete(int pool, const __u8 *key, int key_size)
                 * Found a match; just move the remaining
                 * entries up.
                 */
-               if (i == num_records) {
+               if (i == (num_records - 1)) {
                        kvp_file_info[pool].num_records--;
                        kvp_update_file(pool);
                        return 0;
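
The KVP delete path shifts the remaining records down over the deleted slot; the early-return shortcut above is only correct for the final record (i == num_records - 1), where there is nothing left to move. A small sketch of the general delete-by-shift pattern, with an illustrative record layout:

    #include <string.h>

    struct record { char key[64]; char value[64]; };

    /* Remove rec[i] from an array of *n records. When i is the last index the
     * memmove length is zero, so only the count changes -- the case the fix
     * short-circuits above. */
    static void delete_record(struct record *rec, int *n, int i)
    {
            memmove(&rec[i], &rec[i + 1],
                    (size_t)(*n - 1 - i) * sizeof(rec[0]));
            (*n)--;
    }
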
index 6b0c36a58fcbc38b67157b53efe2044bd848f137..e56997288f2b07d273f860be50a830351ad128e1 100644 (file)
@@ -30,9 +30,12 @@ struct task_struct {
        struct held_lock held_locks[MAX_LOCK_DEPTH];
        gfp_t lockdep_reclaim_gfp;
        int pid;
+       int state;
        char comm[17];
 };
 
+#define TASK_RUNNING 0
+
 extern struct task_struct *__curr(void);
 
 #define current (__curr())
diff --git a/tools/include/linux/nmi.h b/tools/include/linux/nmi.h
new file mode 100644 (file)
index 0000000..e69de29
index 42990676a55e104ae85e7ea170c78c1e93427f1d..df4bedb9b01c281b7bf15048fef3063a35ede51c 100644 (file)
@@ -734,9 +734,11 @@ __SYSCALL(__NR_pkey_free,     sys_pkey_free)
 __SYSCALL(__NR_statx,     sys_statx)
 #define __NR_io_pgetevents 292
 __SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents)
+#define __NR_rseq 293
+__SYSCALL(__NR_rseq, sys_rseq)
 
 #undef __NR_syscalls
-#define __NR_syscalls 293
+#define __NR_syscalls 294
 
 /*
  * 32 bit systems traditionally used different
index 9c660e1688abe1cd6bf0e22bf709515e8a463e0d..300f336633f28ea20493570f80a73e30d87cd087 100644 (file)
@@ -687,6 +687,15 @@ struct drm_get_cap {
  */
 #define DRM_CLIENT_CAP_ASPECT_RATIO    4
 
+/**
+ * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
+ *
+ * If set to 1, the DRM core will expose special connectors to be used for
+ * writing back to memory the scene setup in the commit. Depends on client
+ * also supporting DRM_CLIENT_CAP_ATOMIC
+ */
+#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS    5
+
 /** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
 struct drm_set_client_cap {
        __u64 capability;
index cf01b68242448512416c1b1aa25f0904915aad0a..43391e2d1153adb701433d6794702b73f2d60297 100644 (file)
@@ -164,6 +164,8 @@ enum {
        IFLA_CARRIER_UP_COUNT,
        IFLA_CARRIER_DOWN_COUNT,
        IFLA_NEW_IFINDEX,
+       IFLA_MIN_MTU,
+       IFLA_MAX_MTU,
        __IFLA_MAX
 };
 
@@ -334,6 +336,7 @@ enum {
        IFLA_BRPORT_GROUP_FWD_MASK,
        IFLA_BRPORT_NEIGH_SUPPRESS,
        IFLA_BRPORT_ISOLATED,
+       IFLA_BRPORT_BACKUP_PORT,
        __IFLA_BRPORT_MAX
 };
 #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -459,6 +462,16 @@ enum {
 
 #define IFLA_MACSEC_MAX (__IFLA_MACSEC_MAX - 1)
 
+/* XFRM section */
+enum {
+       IFLA_XFRM_UNSPEC,
+       IFLA_XFRM_LINK,
+       IFLA_XFRM_IF_ID,
+       __IFLA_XFRM_MAX
+};
+
+#define IFLA_XFRM_MAX (__IFLA_XFRM_MAX - 1)
+
 enum macsec_validation_type {
        MACSEC_VALIDATE_DISABLED = 0,
        MACSEC_VALIDATE_CHECK = 1,
@@ -920,6 +933,7 @@ enum {
        XDP_ATTACHED_DRV,
        XDP_ATTACHED_SKB,
        XDP_ATTACHED_HW,
+       XDP_ATTACHED_MULTI,
 };
 
 enum {
@@ -928,6 +942,9 @@ enum {
        IFLA_XDP_ATTACHED,
        IFLA_XDP_FLAGS,
        IFLA_XDP_PROG_ID,
+       IFLA_XDP_DRV_PROG_ID,
+       IFLA_XDP_SKB_PROG_ID,
+       IFLA_XDP_HW_PROG_ID,
        __IFLA_XDP_MAX,
 };
 
index b6270a3b38e9f3fb410e8c80d8658b2c01a8ef96..07548de5c9889f5bcd425a7273b2732003bf3c30 100644 (file)
@@ -949,6 +949,9 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_GET_MSR_FEATURES 153
 #define KVM_CAP_HYPERV_EVENTFD 154
 #define KVM_CAP_HYPERV_TLBFLUSH 155
+#define KVM_CAP_S390_HPAGE_1M 156
+#define KVM_CAP_NESTED_STATE 157
+#define KVM_CAP_ARM_INJECT_SERROR_ESR 158
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -1391,6 +1394,9 @@ struct kvm_enc_region {
 /* Available with KVM_CAP_HYPERV_EVENTFD */
 #define KVM_HYPERV_EVENTFD        _IOW(KVMIO,  0xbd, struct kvm_hyperv_eventfd)
 
+/* Available with KVM_CAP_NESTED_STATE */
+#define KVM_GET_NESTED_STATE         _IOWR(KVMIO, 0xbe, struct kvm_nested_state)
+#define KVM_SET_NESTED_STATE         _IOW(KVMIO,  0xbf, struct kvm_nested_state)
 
 /* Secure Encrypted Virtualization command */
 enum sev_cmd_id {
index eeb787b1c53c72771c8d684154b7a87dc029a45b..f35eb72739c09e3ad0bd22e279fa4a33119c15f6 100644 (file)
@@ -144,7 +144,7 @@ enum perf_event_sample_format {
 
        PERF_SAMPLE_MAX = 1U << 20,             /* non-ABI */
 
-       __PERF_SAMPLE_CALLCHAIN_EARLY           = 1ULL << 63,
+       __PERF_SAMPLE_CALLCHAIN_EARLY           = 1ULL << 63, /* non-ABI; internal use */
 };
 
 /*
index c51f8e5cc6080c03028727cdb81343fc4cabb6e1..84c3de89696a15c1a23e7226dea456a6232e2043 100644 (file)
@@ -65,6 +65,7 @@ struct vhost_iotlb_msg {
 };
 
 #define VHOST_IOTLB_MSG 0x1
+#define VHOST_IOTLB_MSG_V2 0x2
 
 struct vhost_msg {
        int type;
@@ -74,6 +75,15 @@ struct vhost_msg {
        };
 };
 
+struct vhost_msg_v2 {
+       __u32 type;
+       __u32 reserved;
+       union {
+               struct vhost_iotlb_msg iotlb;
+               __u8 padding[64];
+       };
+};
+
 struct vhost_memory_region {
        __u64 guest_phys_addr;
        __u64 memory_size; /* bytes */
@@ -160,6 +170,14 @@ struct vhost_memory {
 #define VHOST_GET_VRING_BUSYLOOP_TIMEOUT _IOW(VHOST_VIRTIO, 0x24,      \
                                         struct vhost_vring_state)
 
+/* Set or get vhost backend capability */
+
+/* Use message type V2 */
+#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
+
+#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
+#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
+
 /* VHOST_NET specific defines */
 
 /* Attach virtio net ring to a raw socket, or tap device.
index 56c4b3f8a01beaa414e2526020dcfcfbfc142b9b..439b8a27488d371fe323f879ba225650df23a359 100755 (executable)
@@ -759,12 +759,18 @@ class DebugfsProvider(Provider):
             if len(vms) == 0:
                 self.do_read = False
 
-            self.paths = filter(lambda x: "{}-".format(pid) in x, vms)
+            self.paths = list(filter(lambda x: "{}-".format(pid) in x, vms))
 
         else:
             self.paths = []
             self.do_read = True
-        self.reset()
+
+    def _verify_paths(self):
+        """Remove invalid paths"""
+        for path in self.paths:
+            if not os.path.exists(os.path.join(PATH_DEBUGFS_KVM, path)):
+                self.paths.remove(path)
+                continue
 
     def read(self, reset=0, by_guest=0):
         """Returns a dict with format:'file name / field -> current value'.
@@ -780,6 +786,7 @@ class DebugfsProvider(Provider):
         # If no debugfs filtering support is available, then don't read.
         if not self.do_read:
             return results
+        self._verify_paths()
 
         paths = self.paths
         if self._pid == 0:
@@ -1096,15 +1103,16 @@ class Tui(object):
             pid = self.stats.pid_filter
         self.screen.erase()
         gname = self.get_gname_from_pid(pid)
+        self._gname = gname
         if gname:
             gname = ('({})'.format(gname[:MAX_GUEST_NAME_LEN] + '...'
                                    if len(gname) > MAX_GUEST_NAME_LEN
                                    else gname))
         if pid > 0:
-            self.screen.addstr(0, 0, 'kvm statistics - pid {0} {1}'
-                               .format(pid, gname), curses.A_BOLD)
+            self._headline = 'kvm statistics - pid {0} {1}'.format(pid, gname)
         else:
-            self.screen.addstr(0, 0, 'kvm statistics - summary', curses.A_BOLD)
+            self._headline = 'kvm statistics - summary'
+        self.screen.addstr(0, 0, self._headline, curses.A_BOLD)
         if self.stats.fields_filter:
             regex = self.stats.fields_filter
             if len(regex) > MAX_REGEX_LEN:
@@ -1162,6 +1170,19 @@ class Tui(object):
 
             return sorted_items
 
+        if not self._is_running_guest(self.stats.pid_filter):
+            if self._gname:
+                try: # ...to identify the guest by name in case it's back
+                    pids = self.get_pid_from_gname(self._gname)
+                    if len(pids) == 1:
+                        self._refresh_header(pids[0])
+                        self._update_pid(pids[0])
+                        return
+                except:
+                    pass
+            self._display_guest_dead()
+            # leave final data on screen
+            return
         row = 3
         self.screen.move(row, 0)
         self.screen.clrtobot()
@@ -1184,6 +1205,7 @@ class Tui(object):
         # print events
         tavg = 0
         tcur = 0
+        guest_removed = False
         for key, values in get_sorted_events(self, stats):
             if row >= self.screen.getmaxyx()[0] - 1 or values == (0, 0):
                 break
@@ -1191,7 +1213,10 @@ class Tui(object):
                 key = self.get_gname_from_pid(key)
                 if not key:
                     continue
-            cur = int(round(values.delta / sleeptime)) if values.delta else ''
+            cur = int(round(values.delta / sleeptime)) if values.delta else 0
+            if cur < 0:
+                guest_removed = True
+                continue
             if key[0] != ' ':
                 if values.delta:
                     tcur += values.delta
@@ -1204,13 +1229,21 @@ class Tui(object):
                                values.value * 100 / float(ltotal), cur))
             row += 1
         if row == 3:
-            self.screen.addstr(4, 1, 'No matching events reported yet')
+            if guest_removed:
+                self.screen.addstr(4, 1, 'Guest removed, updating...')
+            else:
+                self.screen.addstr(4, 1, 'No matching events reported yet')
         if row > 4:
             tavg = int(round(tcur / sleeptime)) if tcur > 0 else ''
             self.screen.addstr(row, 1, '%-40s %10d        %8s' %
                                ('Total', total, tavg), curses.A_BOLD)
         self.screen.refresh()
 
+    def _display_guest_dead(self):
+        marker = '   Guest is DEAD   '
+        y = min(len(self._headline), 80 - len(marker))
+        self.screen.addstr(0, y, marker, curses.A_BLINK | curses.A_STANDOUT)
+
     def _show_msg(self, text):
         """Display message centered text and exit on key press"""
         hint = 'Press any key to continue'
@@ -1219,10 +1252,10 @@ class Tui(object):
         (x, term_width) = self.screen.getmaxyx()
         row = 2
         for line in text:
-            start = (term_width - len(line)) / 2
+            start = (term_width - len(line)) // 2
             self.screen.addstr(row, start, line)
             row += 1
-        self.screen.addstr(row + 1, (term_width - len(hint)) / 2, hint,
+        self.screen.addstr(row + 1, (term_width - len(hint)) // 2, hint,
                            curses.A_STANDOUT)
         self.screen.getkey()
 
@@ -1319,6 +1352,12 @@ class Tui(object):
                 msg = '"' + str(val) + '": Invalid value'
         self._refresh_header()
 
+    def _is_running_guest(self, pid):
+        """Check if pid is still a running process."""
+        if not pid:
+            return True
+        return os.path.isdir(os.path.join('/proc/', str(pid)))
+
     def _show_vm_selection_by_guest(self):
         """Draws guest selection mask.
 
@@ -1346,7 +1385,7 @@ class Tui(object):
             if not guest or guest == '0':
                 break
             if guest.isdigit():
-                if not os.path.isdir(os.path.join('/proc/', guest)):
+                if not self._is_running_guest(guest):
                     msg = '"' + guest + '": Not a running process'
                     continue
                 pid = int(guest)
index 9b0ca3ad1ef3863a8d6f877a74e75a0ce40f6dd5..9dafb8cb752fe066284dd9ea6d34e59ee97c5741 100644 (file)
@@ -129,12 +129,12 @@ $(OUTPUT)liblockdep.a: $(LIB_IN)
 tags:  force
        $(RM) tags
        find . -name '*.[ch]' | xargs ctags --extra=+f --c-kinds=+px \
-       --regex-c++='/_PE\(([^,)]*).*/PEVENT_ERRNO__\1/'
+       --regex-c++='/_PE\(([^,)]*).*/TEP_ERRNO__\1/'
 
 TAGS:  force
        $(RM) TAGS
        find . -name '*.[ch]' | xargs etags \
-       --regex='/_PE(\([^,)]*\).*/PEVENT_ERRNO__\1/'
+       --regex='/_PE(\([^,)]*\).*/TEP_ERRNO__\1/'
 
 define do_install
        $(print_install)                                \
index 46cd5f871ad76d48bc7a34f725a31fa12ed5614c..0b4e833088a4dc9653fe065cba012141a815c014 100644 (file)
@@ -233,12 +233,12 @@ endef
 tags:  force
        $(RM) tags
        find . -name '*.[ch]' | xargs ctags --extra=+f --c-kinds=+px \
-       --regex-c++='/_PE\(([^,)]*).*/PEVENT_ERRNO__\1/'
+       --regex-c++='/_PE\(([^,)]*).*/TEP_ERRNO__\1/'
 
 TAGS:  force
        $(RM) TAGS
        find . -name '*.[ch]' | xargs etags \
-       --regex='/_PE(\([^,)]*\).*/PEVENT_ERRNO__\1/'
+       --regex='/_PE(\([^,)]*\).*/TEP_ERRNO__\1/'
 
 define do_install_mkdir
        if [ ! -d '$(DESTDIR_SQ)$1' ]; then             \
index e5f2acbb70cc1db3f7274e2b2846594b5ca6d14d..ce1e20227c64d4e0789b6dad21a407988de720d9 100644 (file)
@@ -1,21 +1,7 @@
+// SPDX-License-Identifier: LGPL-2.1
 /*
  * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
  *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not,  see <http://www.gnu.org/licenses>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
  *  The parts for function graph printing was taken and modified from the
  *  Linux Kernel that were written by
@@ -73,12 +59,12 @@ static void init_input_buf(const char *buf, unsigned long long size)
        input_buf_ptr = 0;
 }
 
-const char *pevent_get_input_buf(void)
+const char *tep_get_input_buf(void)
 {
        return input_buf;
 }
 
-unsigned long long pevent_get_input_buf_ptr(void)
+unsigned long long tep_get_input_buf_ptr(void)
 {
        return input_buf_ptr;
 }
@@ -88,21 +74,21 @@ struct event_handler {
        int                             id;
        const char                      *sys_name;
        const char                      *event_name;
-       pevent_event_handler_func       func;
+       tep_event_handler_func          func;
        void                            *context;
 };
 
-struct pevent_func_params {
-       struct pevent_func_params       *next;
-       enum pevent_func_arg_type       type;
+struct func_params {
+       struct func_params      *next;
+       enum tep_func_arg_type  type;
 };
 
-struct pevent_function_handler {
-       struct pevent_function_handler  *next;
-       enum pevent_func_arg_type       ret_type;
+struct tep_function_handler {
+       struct tep_function_handler     *next;
+       enum tep_func_arg_type          ret_type;
        char                            *name;
-       pevent_func_handler             func;
-       struct pevent_func_params       *params;
+       tep_func_handler                func;
+       struct func_params              *params;
        int                             nr_args;
 };
 
@@ -110,17 +96,17 @@ static unsigned long long
 process_defined_func(struct trace_seq *s, void *data, int size,
                     struct event_format *event, struct print_arg *arg);
 
-static void free_func_handle(struct pevent_function_handler *func);
+static void free_func_handle(struct tep_function_handler *func);
 
 /**
- * pevent_buffer_init - init buffer for parsing
+ * tep_buffer_init - init buffer for parsing
  * @buf: buffer to parse
  * @size: the size of the buffer
  *
- * For use with pevent_read_token(), this initializes the internal
- * buffer that pevent_read_token() will parse.
+ * For use with tep_read_token(), this initializes the internal
+ * buffer that tep_read_token() will parse.
  */
-void pevent_buffer_init(const char *buf, unsigned long long size)
+void tep_buffer_init(const char *buf, unsigned long long size)
 {
        init_input_buf(buf, size);
 }
@@ -160,7 +146,7 @@ struct cmdline_list {
        int                     pid;
 };
 
-static int cmdline_init(struct pevent *pevent)
+static int cmdline_init(struct tep_handle *pevent)
 {
        struct cmdline_list *cmdlist = pevent->cmdlist;
        struct cmdline_list *item;
@@ -189,7 +175,7 @@ static int cmdline_init(struct pevent *pevent)
        return 0;
 }
 
-static const char *find_cmdline(struct pevent *pevent, int pid)
+static const char *find_cmdline(struct tep_handle *pevent, int pid)
 {
        const struct cmdline *comm;
        struct cmdline key;
@@ -211,14 +197,14 @@ static const char *find_cmdline(struct pevent *pevent, int pid)
 }
 
 /**
- * pevent_pid_is_registered - return if a pid has a cmdline registered
+ * tep_pid_is_registered - return if a pid has a cmdline registered
  * @pevent: handle for the pevent
  * @pid: The pid to check if it has a cmdline registered with.
  *
  * Returns 1 if the pid has a cmdline mapped to it
  * 0 otherwise.
  */
-int pevent_pid_is_registered(struct pevent *pevent, int pid)
+int tep_pid_is_registered(struct tep_handle *pevent, int pid)
 {
        const struct cmdline *comm;
        struct cmdline key;
@@ -244,7 +230,7 @@ int pevent_pid_is_registered(struct pevent *pevent, int pid)
  * we must add this pid. This is much slower than when cmdlines
  * are added before the array is initialized.
  */
-static int add_new_comm(struct pevent *pevent, const char *comm, int pid)
+static int add_new_comm(struct tep_handle *pevent, const char *comm, int pid)
 {
        struct cmdline *cmdlines = pevent->cmdlines;
        const struct cmdline *cmdline;
@@ -288,7 +274,7 @@ static int add_new_comm(struct pevent *pevent, const char *comm, int pid)
 }
 
 /**
- * pevent_register_comm - register a pid / comm mapping
+ * tep_register_comm - register a pid / comm mapping
  * @pevent: handle for the pevent
  * @comm: the command line to register
  * @pid: the pid to map the command line to
@@ -296,7 +282,7 @@ static int add_new_comm(struct pevent *pevent, const char *comm, int pid)
  * This adds a mapping to search for command line names with
  * a given pid. The comm is duplicated.
  */
-int pevent_register_comm(struct pevent *pevent, const char *comm, int pid)
+int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid)
 {
        struct cmdline_list *item;
 
@@ -324,7 +310,7 @@ int pevent_register_comm(struct pevent *pevent, const char *comm, int pid)
        return 0;
 }
 
-int pevent_register_trace_clock(struct pevent *pevent, const char *trace_clock)
+int tep_register_trace_clock(struct tep_handle *pevent, const char *trace_clock)
 {
        pevent->trace_clock = strdup(trace_clock);
        if (!pevent->trace_clock) {
@@ -381,7 +367,7 @@ static int func_bcmp(const void *a, const void *b)
        return 1;
 }
 
-static int func_map_init(struct pevent *pevent)
+static int func_map_init(struct tep_handle *pevent)
 {
        struct func_list *funclist;
        struct func_list *item;
@@ -421,7 +407,7 @@ static int func_map_init(struct pevent *pevent)
 }
 
 static struct func_map *
-__find_func(struct pevent *pevent, unsigned long long addr)
+__find_func(struct tep_handle *pevent, unsigned long long addr)
 {
        struct func_map *func;
        struct func_map key;
@@ -438,13 +424,13 @@ __find_func(struct pevent *pevent, unsigned long long addr)
 }
 
 struct func_resolver {
-       pevent_func_resolver_t *func;
-       void                   *priv;
-       struct func_map        map;
+       tep_func_resolver_t     *func;
+       void                    *priv;
+       struct func_map         map;
 };
 
 /**
- * pevent_set_function_resolver - set an alternative function resolver
+ * tep_set_function_resolver - set an alternative function resolver
  * @pevent: handle for the pevent
  * @resolver: function to be used
  * @priv: resolver function private state.
@@ -453,8 +439,8 @@ struct func_resolver {
  * keep using it instead of duplicating all the entries inside
  * pevent->funclist.
  */
-int pevent_set_function_resolver(struct pevent *pevent,
-                                pevent_func_resolver_t *func, void *priv)
+int tep_set_function_resolver(struct tep_handle *pevent,
+                             tep_func_resolver_t *func, void *priv)
 {
        struct func_resolver *resolver = malloc(sizeof(*resolver));
 
@@ -471,20 +457,20 @@ int pevent_set_function_resolver(struct pevent *pevent,
 }
 
 /**
- * pevent_reset_function_resolver - reset alternative function resolver
+ * tep_reset_function_resolver - reset alternative function resolver
  * @pevent: handle for the pevent
  *
  * Stop using whatever alternative resolver was set, use the default
  * one instead.
  */
-void pevent_reset_function_resolver(struct pevent *pevent)
+void tep_reset_function_resolver(struct tep_handle *pevent)
 {
        free(pevent->func_resolver);
        pevent->func_resolver = NULL;
 }
 
 static struct func_map *
-find_func(struct pevent *pevent, unsigned long long addr)
+find_func(struct tep_handle *pevent, unsigned long long addr)
 {
        struct func_map *map;
 
@@ -503,7 +489,7 @@ find_func(struct pevent *pevent, unsigned long long addr)
 }
 
 /**
- * pevent_find_function - find a function by a given address
+ * tep_find_function - find a function by a given address
  * @pevent: handle for the pevent
  * @addr: the address to find the function with
  *
@@ -511,7 +497,7 @@ find_func(struct pevent *pevent, unsigned long long addr)
  * address. Note, the address does not have to be exact, it
  * will select the function that would contain the address.
  */
-const char *pevent_find_function(struct pevent *pevent, unsigned long long addr)
+const char *tep_find_function(struct tep_handle *pevent, unsigned long long addr)
 {
        struct func_map *map;
 
@@ -523,16 +509,16 @@ const char *pevent_find_function(struct pevent *pevent, unsigned long long addr)
 }
 
 /**
- * pevent_find_function_address - find a function address by a given address
+ * tep_find_function_address - find a function address by a given address
  * @pevent: handle for the pevent
  * @addr: the address to find the function with
  *
  * Returns the address the function starts at. This can be used in
- * conjunction with pevent_find_function to print both the function
+ * conjunction with tep_find_function to print both the function
  * name and the function offset.
  */
 unsigned long long
-pevent_find_function_address(struct pevent *pevent, unsigned long long addr)
+tep_find_function_address(struct tep_handle *pevent, unsigned long long addr)
 {
        struct func_map *map;
 
@@ -544,7 +530,7 @@ pevent_find_function_address(struct pevent *pevent, unsigned long long addr)
 }
 
 /**
- * pevent_register_function - register a function with a given address
+ * tep_register_function - register a function with a given address
  * @pevent: handle for the pevent
  * @function: the function name to register
  * @addr: the address the function starts at
@@ -553,8 +539,8 @@ pevent_find_function_address(struct pevent *pevent, unsigned long long addr)
  * This registers a function name with an address and module.
  * The @func passed in is duplicated.
  */
-int pevent_register_function(struct pevent *pevent, char *func,
-                            unsigned long long addr, char *mod)
+int tep_register_function(struct tep_handle *pevent, char *func,
+                         unsigned long long addr, char *mod)
 {
        struct func_list *item = malloc(sizeof(*item));
 
@@ -589,12 +575,12 @@ out_free:
 }
 
 /**
- * pevent_print_funcs - print out the stored functions
+ * tep_print_funcs - print out the stored functions
  * @pevent: handle for the pevent
  *
  * This prints out the stored functions.
  */
-void pevent_print_funcs(struct pevent *pevent)
+void tep_print_funcs(struct tep_handle *pevent)
 {
        int i;
 
@@ -636,7 +622,7 @@ static int printk_cmp(const void *a, const void *b)
        return 0;
 }
 
-static int printk_map_init(struct pevent *pevent)
+static int printk_map_init(struct tep_handle *pevent)
 {
        struct printk_list *printklist;
        struct printk_list *item;
@@ -668,7 +654,7 @@ static int printk_map_init(struct pevent *pevent)
 }
 
 static struct printk_map *
-find_printk(struct pevent *pevent, unsigned long long addr)
+find_printk(struct tep_handle *pevent, unsigned long long addr)
 {
        struct printk_map *printk;
        struct printk_map key;
@@ -685,7 +671,7 @@ find_printk(struct pevent *pevent, unsigned long long addr)
 }
 
 /**
- * pevent_register_print_string - register a string by its address
+ * tep_register_print_string - register a string by its address
  * @pevent: handle for the pevent
  * @fmt: the string format to register
  * @addr: the address the string was located at
@@ -693,8 +679,8 @@ find_printk(struct pevent *pevent, unsigned long long addr)
  * This registers a string by the address it was stored in the kernel.
  * The @fmt passed in is duplicated.
  */
-int pevent_register_print_string(struct pevent *pevent, const char *fmt,
-                                unsigned long long addr)
+int tep_register_print_string(struct tep_handle *pevent, const char *fmt,
+                             unsigned long long addr)
 {
        struct printk_list *item = malloc(sizeof(*item));
        char *p;
@@ -732,12 +718,12 @@ out_free:
 }
 
 /**
- * pevent_print_printk - print out the stored strings
+ * tep_print_printk - print out the stored strings
  * @pevent: handle for the pevent
  *
  * This prints the string formats that were stored.
  */
-void pevent_print_printk(struct pevent *pevent)
+void tep_print_printk(struct tep_handle *pevent)
 {
        int i;
 
@@ -756,7 +742,7 @@ static struct event_format *alloc_event(void)
        return calloc(1, sizeof(struct event_format));
 }
 
-static int add_event(struct pevent *pevent, struct event_format *event)
+static int add_event(struct tep_handle *pevent, struct event_format *event)
 {
        int i;
        struct event_format **events = realloc(pevent->events, sizeof(event) *
@@ -913,11 +899,11 @@ static int __peek_char(void)
 }
 
 /**
- * pevent_peek_char - peek at the next character that will be read
+ * tep_peek_char - peek at the next character that will be read
  *
  * Returns the next character read, or -1 if end of buffer.
  */
-int pevent_peek_char(void)
+int tep_peek_char(void)
 {
        return __peek_char();
 }
@@ -1157,24 +1143,24 @@ static enum event_type read_token(char **tok)
 }
 
 /**
- * pevent_read_token - access to utilities to use the pevent parser
+ * tep_read_token - access to utilities to use the pevent parser
  * @tok: The token to return
  *
  * This will parse tokens from the string given by
- * pevent_init_data().
+ * tep_init_data().
  *
  * Returns the token type.
  */
-enum event_type pevent_read_token(char **tok)
+enum event_type tep_read_token(char **tok)
 {
        return read_token(tok);
 }
 
 /**
- * pevent_free_token - free a token returned by pevent_read_token
+ * tep_free_token - free a token returned by tep_read_token
  * @token: the token to free
  */
-void pevent_free_token(char *token)
+void tep_free_token(char *token)
 {
        free_token(token);
 }
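
A token-walking sketch, assuming the parse buffer has already been primed by
the init routine referred to in the comment above:

static void dump_tokens(void)
{
        enum event_type type;
        char *tok;

        while ((type = tep_read_token(&tok)) != EVENT_NONE &&
               type != EVENT_ERROR) {
                printf("token type %d: '%s'\n", type, tok);
                tep_free_token(tok);
        }
}
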
@@ -2101,11 +2087,11 @@ process_entry(struct event_format *event __maybe_unused, struct print_arg *arg,
        arg->field.name = field;
 
        if (is_flag_field) {
-               arg->field.field = pevent_find_any_field(event, arg->field.name);
+               arg->field.field = tep_find_any_field(event, arg->field.name);
                arg->field.field->flags |= FIELD_IS_FLAG;
                is_flag_field = 0;
        } else if (is_symbolic_field) {
-               arg->field.field = pevent_find_any_field(event, arg->field.name);
+               arg->field.field = tep_find_any_field(event, arg->field.name);
                arg->field.field->flags |= FIELD_IS_SYMBOLIC;
                is_symbolic_field = 0;
        }
@@ -2714,7 +2700,7 @@ process_dynamic_array(struct event_format *event, struct print_arg *arg, char **
 
        /* Find the field */
 
-       field = pevent_find_field(event, token);
+       field = tep_find_field(event, token);
        if (!field)
                goto out_free;
 
@@ -2771,7 +2757,7 @@ process_dynamic_array_len(struct event_format *event, struct print_arg *arg,
        arg->type = PRINT_DYNAMIC_ARRAY_LEN;
 
        /* Find the field */
-       field = pevent_find_field(event, token);
+       field = tep_find_field(event, token);
        if (!field)
                goto out_free;
 
@@ -2914,10 +2900,10 @@ process_bitmask(struct event_format *event __maybe_unused, struct print_arg *arg
        return EVENT_ERROR;
 }
 
-static struct pevent_function_handler *
-find_func_handler(struct pevent *pevent, char *func_name)
+static struct tep_function_handler *
+find_func_handler(struct tep_handle *pevent, char *func_name)
 {
-       struct pevent_function_handler *func;
+       struct tep_function_handler *func;
 
        if (!pevent)
                return NULL;
@@ -2930,10 +2916,10 @@ find_func_handler(struct pevent *pevent, char *func_name)
        return func;
 }
 
-static void remove_func_handler(struct pevent *pevent, char *func_name)
+static void remove_func_handler(struct tep_handle *pevent, char *func_name)
 {
-       struct pevent_function_handler *func;
-       struct pevent_function_handler **next;
+       struct tep_function_handler *func;
+       struct tep_function_handler **next;
 
        next = &pevent->func_handlers;
        while ((func = *next)) {
@@ -2947,7 +2933,7 @@ static void remove_func_handler(struct pevent *pevent, char *func_name)
 }
 
 static enum event_type
-process_func_handler(struct event_format *event, struct pevent_function_handler *func,
+process_func_handler(struct event_format *event, struct tep_function_handler *func,
                     struct print_arg *arg, char **tok)
 {
        struct print_arg **next_arg;
@@ -3008,7 +2994,7 @@ static enum event_type
 process_function(struct event_format *event, struct print_arg *arg,
                 char *token, char **tok)
 {
-       struct pevent_function_handler *func;
+       struct tep_function_handler *func;
 
        if (strcmp(token, "__print_flags") == 0) {
                free_token(token);
@@ -3265,7 +3251,7 @@ static int event_read_print(struct event_format *event)
 }
 
 /**
- * pevent_find_common_field - return a common field by event
+ * tep_find_common_field - return a common field by event
  * @event: handle for the event
  * @name: the name of the common field to return
  *
@@ -3273,7 +3259,7 @@ static int event_read_print(struct event_format *event)
  * This only searches the common fields and not all fields.
  */
 struct format_field *
-pevent_find_common_field(struct event_format *event, const char *name)
+tep_find_common_field(struct event_format *event, const char *name)
 {
        struct format_field *format;
 
@@ -3287,7 +3273,7 @@ pevent_find_common_field(struct event_format *event, const char *name)
 }
 
 /**
- * pevent_find_field - find a non-common field
+ * tep_find_field - find a non-common field
  * @event: handle for the event
  * @name: the name of the non-common field
  *
@@ -3295,7 +3281,7 @@ pevent_find_common_field(struct event_format *event, const char *name)
  * This does not search common fields.
  */
 struct format_field *
-pevent_find_field(struct event_format *event, const char *name)
+tep_find_field(struct event_format *event, const char *name)
 {
        struct format_field *format;
 
@@ -3309,7 +3295,7 @@ pevent_find_field(struct event_format *event, const char *name)
 }
 
 /**
- * pevent_find_any_field - find any field by name
+ * tep_find_any_field - find any field by name
  * @event: handle for the event
  * @name: the name of the field
  *
@@ -3318,18 +3304,18 @@ pevent_find_field(struct event_format *event, const char *name)
  * the non-common ones if a common one was not found.
  */
 struct format_field *
-pevent_find_any_field(struct event_format *event, const char *name)
+tep_find_any_field(struct event_format *event, const char *name)
 {
        struct format_field *format;
 
-       format = pevent_find_common_field(event, name);
+       format = tep_find_common_field(event, name);
        if (format)
                return format;
-       return pevent_find_field(event, name);
+       return tep_find_field(event, name);
 }
 
 /**
- * pevent_read_number - read a number from data
+ * tep_read_number - read a number from data
  * @pevent: handle for the pevent
  * @ptr: the raw data
  * @size: the size of the data that holds the number
@@ -3337,8 +3323,8 @@ pevent_find_any_field(struct event_format *event, const char *name)
  * Returns the number (converted to host) from the
  * raw data.
  */
-unsigned long long pevent_read_number(struct pevent *pevent,
-                                     const void *ptr, int size)
+unsigned long long tep_read_number(struct tep_handle *pevent,
+                                  const void *ptr, int size)
 {
        switch (size) {
        case 1:
@@ -3356,7 +3342,7 @@ unsigned long long pevent_read_number(struct pevent *pevent,
 }
 
 /**
- * pevent_read_number_field - read a number from data
+ * tep_read_number_field - read a number from data
  * @field: a handle to the field
  * @data: the raw data to read
  * @value: the value to place the number in
@@ -3366,8 +3352,8 @@ unsigned long long pevent_read_number(struct pevent *pevent,
  *
  * Returns 0 on success, -1 otherwise.
  */
-int pevent_read_number_field(struct format_field *field, const void *data,
-                            unsigned long long *value)
+int tep_read_number_field(struct format_field *field, const void *data,
+                         unsigned long long *value)
 {
        if (!field)
                return -1;
@@ -3376,15 +3362,15 @@ int pevent_read_number_field(struct format_field *field, const void *data,
        case 2:
        case 4:
        case 8:
-               *value = pevent_read_number(field->event->pevent,
-                                           data + field->offset, field->size);
+               *value = tep_read_number(field->event->pevent,
+                                        data + field->offset, field->size);
                return 0;
        default:
                return -1;
        }
 }
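
Putting the two lookups together, a sketch that pulls a numeric field out of
raw record data:

static int read_pid_field(struct event_format *event, void *data,
                          unsigned long long *pid)
{
        struct format_field *field;

        /* common fields are searched first, then the event-specific ones */
        field = tep_find_any_field(event, "common_pid");
        if (!field)
                return -1;

        /* handles 1/2/4/8 byte fields and byte-order conversion */
        return tep_read_number_field(field, data, pid);
}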
 
-static int get_common_info(struct pevent *pevent,
+static int get_common_info(struct tep_handle *pevent,
                           const char *type, int *offset, int *size)
 {
        struct event_format *event;
@@ -3400,7 +3386,7 @@ static int get_common_info(struct pevent *pevent,
        }
 
        event = pevent->events[0];
-       field = pevent_find_common_field(event, type);
+       field = tep_find_common_field(event, type);
        if (!field)
                return -1;
 
@@ -3410,7 +3396,7 @@ static int get_common_info(struct pevent *pevent,
        return 0;
 }
 
-static int __parse_common(struct pevent *pevent, void *data,
+static int __parse_common(struct tep_handle *pevent, void *data,
                          int *size, int *offset, const char *name)
 {
        int ret;
@@ -3420,45 +3406,45 @@ static int __parse_common(struct pevent *pevent, void *data,
                if (ret < 0)
                        return ret;
        }
-       return pevent_read_number(pevent, data + *offset, *size);
+       return tep_read_number(pevent, data + *offset, *size);
 }
 
-static int trace_parse_common_type(struct pevent *pevent, void *data)
+static int trace_parse_common_type(struct tep_handle *pevent, void *data)
 {
        return __parse_common(pevent, data,
                              &pevent->type_size, &pevent->type_offset,
                              "common_type");
 }
 
-static int parse_common_pid(struct pevent *pevent, void *data)
+static int parse_common_pid(struct tep_handle *pevent, void *data)
 {
        return __parse_common(pevent, data,
                              &pevent->pid_size, &pevent->pid_offset,
                              "common_pid");
 }
 
-static int parse_common_pc(struct pevent *pevent, void *data)
+static int parse_common_pc(struct tep_handle *pevent, void *data)
 {
        return __parse_common(pevent, data,
                              &pevent->pc_size, &pevent->pc_offset,
                              "common_preempt_count");
 }
 
-static int parse_common_flags(struct pevent *pevent, void *data)
+static int parse_common_flags(struct tep_handle *pevent, void *data)
 {
        return __parse_common(pevent, data,
                              &pevent->flags_size, &pevent->flags_offset,
                              "common_flags");
 }
 
-static int parse_common_lock_depth(struct pevent *pevent, void *data)
+static int parse_common_lock_depth(struct tep_handle *pevent, void *data)
 {
        return __parse_common(pevent, data,
                              &pevent->ld_size, &pevent->ld_offset,
                              "common_lock_depth");
 }
 
-static int parse_common_migrate_disable(struct pevent *pevent, void *data)
+static int parse_common_migrate_disable(struct tep_handle *pevent, void *data)
 {
        return __parse_common(pevent, data,
                              &pevent->ld_size, &pevent->ld_offset,
@@ -3468,13 +3454,13 @@ static int parse_common_migrate_disable(struct pevent *pevent, void *data)
 static int events_id_cmp(const void *a, const void *b);
 
 /**
- * pevent_find_event - find an event by given id
+ * tep_find_event - find an event by given id
  * @pevent: a handle to the pevent
  * @id: the id of the event
  *
  * Returns an event that has a given @id.
  */
-struct event_format *pevent_find_event(struct pevent *pevent, int id)
+struct event_format *tep_find_event(struct tep_handle *pevent, int id)
 {
        struct event_format **eventptr;
        struct event_format key;
@@ -3498,7 +3484,7 @@ struct event_format *pevent_find_event(struct pevent *pevent, int id)
 }
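
A lookup sketch using the id finder just renamed together with the by-name
finder renamed right below; "sched"/"sched_switch" are only illustrative:

static int lookup_event_id(struct tep_handle *pevent)
{
        struct event_format *event;

        event = tep_find_event_by_name(pevent, "sched", "sched_switch");
        if (!event)
                return -1;

        /* the id round-trips back to the same event */
        return tep_find_event(pevent, event->id) ? event->id : -1;
}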
 
 /**
- * pevent_find_event_by_name - find an event by given name
+ * tep_find_event_by_name - find an event by given name
  * @pevent: a handle to the pevent
  * @sys: the system name to search for
  * @name: the name of the event to search for
@@ -3507,8 +3493,8 @@ struct event_format *pevent_find_event(struct pevent *pevent, int id)
  * @sys. If @sys is NULL the first event with @name is returned.
  */
 struct event_format *
-pevent_find_event_by_name(struct pevent *pevent,
-                         const char *sys, const char *name)
+tep_find_event_by_name(struct tep_handle *pevent,
+                      const char *sys, const char *name)
 {
        struct event_format *event;
        int i;
@@ -3537,7 +3523,7 @@ pevent_find_event_by_name(struct pevent *pevent,
 static unsigned long long
 eval_num_arg(void *data, int size, struct event_format *event, struct print_arg *arg)
 {
-       struct pevent *pevent = event->pevent;
+       struct tep_handle *pevent = event->pevent;
        unsigned long long val = 0;
        unsigned long long left, right;
        struct print_arg *typearg = NULL;
@@ -3553,14 +3539,14 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
                return strtoull(arg->atom.atom, NULL, 0);
        case PRINT_FIELD:
                if (!arg->field.field) {
-                       arg->field.field = pevent_find_any_field(event, arg->field.name);
+                       arg->field.field = tep_find_any_field(event, arg->field.name);
                        if (!arg->field.field)
                                goto out_warning_field;
                        
                }
                /* must be a number */
-               val = pevent_read_number(pevent, data + arg->field.field->offset,
-                               arg->field.field->size);
+               val = tep_read_number(pevent, data + arg->field.field->offset,
+                                     arg->field.field->size);
                break;
        case PRINT_FLAGS:
        case PRINT_SYMBOL:
@@ -3603,7 +3589,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
 
                        switch (larg->type) {
                        case PRINT_DYNAMIC_ARRAY:
-                               offset = pevent_read_number(pevent,
+                               offset = tep_read_number(pevent,
                                                   data + larg->dynarray.field->offset,
                                                   larg->dynarray.field->size);
                                if (larg->dynarray.field->elementsize)
@@ -3619,7 +3605,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
                        case PRINT_FIELD:
                                if (!larg->field.field) {
                                        larg->field.field =
-                                               pevent_find_any_field(event, larg->field.name);
+                                               tep_find_any_field(event, larg->field.name);
                                        if (!larg->field.field) {
                                                arg = larg;
                                                goto out_warning_field;
@@ -3632,8 +3618,8 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
                        default:
                                goto default_op; /* oops, all bets off */
                        }
-                       val = pevent_read_number(pevent,
-                                                data + offset, field_size);
+                       val = tep_read_number(pevent,
+                                             data + offset, field_size);
                        if (typearg)
                                val = eval_type(val, typearg, 1);
                        break;
@@ -3733,9 +3719,9 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
                }
                break;
        case PRINT_DYNAMIC_ARRAY_LEN:
-               offset = pevent_read_number(pevent,
-                                           data + arg->dynarray.field->offset,
-                                           arg->dynarray.field->size);
+               offset = tep_read_number(pevent,
+                                        data + arg->dynarray.field->offset,
+                                        arg->dynarray.field->size);
                /*
                 * The total allocated length of the dynamic array is
                 * stored in the top half of the field, and the offset
@@ -3745,9 +3731,9 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
                break;
        case PRINT_DYNAMIC_ARRAY:
                /* Without [], we pass the address to the dynamic data */
-               offset = pevent_read_number(pevent,
-                                           data + arg->dynarray.field->offset,
-                                           arg->dynarray.field->size);
+               offset = tep_read_number(pevent,
+                                        data + arg->dynarray.field->offset,
+                                        arg->dynarray.field->size);
                /*
                 * The total allocated length of the dynamic array is
                 * stored in the top half of the field, and the offset
@@ -3820,7 +3806,7 @@ static void print_str_to_seq(struct trace_seq *s, const char *format,
                trace_seq_printf(s, format, str);
 }
 
-static void print_bitmask_to_seq(struct pevent *pevent,
+static void print_bitmask_to_seq(struct tep_handle *pevent,
                                 struct trace_seq *s, const char *format,
                                 int len_arg, const void *data, int size)
 {
@@ -3878,7 +3864,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
                          struct event_format *event, const char *format,
                          int len_arg, struct print_arg *arg)
 {
-       struct pevent *pevent = event->pevent;
+       struct tep_handle *pevent = event->pevent;
        struct print_flag_sym *flag;
        struct format_field *field;
        struct printk_map *printk;
@@ -3899,7 +3885,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
        case PRINT_FIELD:
                field = arg->field.field;
                if (!field) {
-                       field = pevent_find_any_field(event, arg->field.name);
+                       field = tep_find_any_field(event, arg->field.name);
                        if (!field) {
                                str = arg->field.name;
                                goto out_warning_field;
@@ -3992,7 +3978,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
        case PRINT_HEX_STR:
                if (arg->hex.field->type == PRINT_DYNAMIC_ARRAY) {
                        unsigned long offset;
-                       offset = pevent_read_number(pevent,
+                       offset = tep_read_number(pevent,
                                data + arg->hex.field->dynarray.field->offset,
                                arg->hex.field->dynarray.field->size);
                        hex = data + (offset & 0xffff);
@@ -4000,7 +3986,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
                        field = arg->hex.field->field.field;
                        if (!field) {
                                str = arg->hex.field->field.name;
-                               field = pevent_find_any_field(event, str);
+                               field = tep_find_any_field(event, str);
                                if (!field)
                                        goto out_warning_field;
                                arg->hex.field->field.field = field;
@@ -4023,15 +4009,15 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
                        unsigned long offset;
                        struct format_field *field =
                                arg->int_array.field->dynarray.field;
-                       offset = pevent_read_number(pevent,
-                                                   data + field->offset,
-                                                   field->size);
+                       offset = tep_read_number(pevent,
+                                                data + field->offset,
+                                                field->size);
                        num = data + (offset & 0xffff);
                } else {
                        field = arg->int_array.field->field.field;
                        if (!field) {
                                str = arg->int_array.field->field.name;
-                               field = pevent_find_any_field(event, str);
+                               field = tep_find_any_field(event, str);
                                if (!field)
                                        goto out_warning_field;
                                arg->int_array.field->field.field = field;
@@ -4071,7 +4057,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
                if (arg->string.offset == -1) {
                        struct format_field *f;
 
-                       f = pevent_find_any_field(event, arg->string.string);
+                       f = tep_find_any_field(event, arg->string.string);
                        arg->string.offset = f->offset;
                }
                str_offset = data2host4(pevent, data + arg->string.offset);
@@ -4089,7 +4075,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
                if (arg->bitmask.offset == -1) {
                        struct format_field *f;
 
-                       f = pevent_find_any_field(event, arg->bitmask.bitmask);
+                       f = tep_find_any_field(event, arg->bitmask.bitmask);
                        arg->bitmask.offset = f->offset;
                }
                bitmask_offset = data2host4(pevent, data + arg->bitmask.offset);
@@ -4132,8 +4118,8 @@ static unsigned long long
 process_defined_func(struct trace_seq *s, void *data, int size,
                     struct event_format *event, struct print_arg *arg)
 {
-       struct pevent_function_handler *func_handle = arg->func.func;
-       struct pevent_func_params *param;
+       struct tep_function_handler *func_handle = arg->func.func;
+       struct func_params *param;
        unsigned long long *args;
        unsigned long long ret;
        struct print_arg *farg;
@@ -4159,12 +4145,12 @@ process_defined_func(struct trace_seq *s, void *data, int size,
 
        for (i = 0; i < func_handle->nr_args; i++) {
                switch (param->type) {
-               case PEVENT_FUNC_ARG_INT:
-               case PEVENT_FUNC_ARG_LONG:
-               case PEVENT_FUNC_ARG_PTR:
+               case TEP_FUNC_ARG_INT:
+               case TEP_FUNC_ARG_LONG:
+               case TEP_FUNC_ARG_PTR:
                        args[i] = eval_num_arg(data, size, event, farg);
                        break;
-               case PEVENT_FUNC_ARG_STRING:
+               case TEP_FUNC_ARG_STRING:
                        trace_seq_init(&str);
                        print_str_arg(&str, data, size, event, "%s", -1, farg);
                        trace_seq_terminate(&str);
@@ -4227,7 +4213,7 @@ static void free_args(struct print_arg *args)
 
 static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struct event_format *event)
 {
-       struct pevent *pevent = event->pevent;
+       struct tep_handle *pevent = event->pevent;
        struct format_field *field, *ip_field;
        struct print_arg *args, *arg, **next;
        unsigned long long ip, val;
@@ -4239,12 +4225,12 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
        ip_field = pevent->bprint_ip_field;
 
        if (!field) {
-               field = pevent_find_field(event, "buf");
+               field = tep_find_field(event, "buf");
                if (!field) {
                        do_warning_event(event, "can't find buffer field for binary printk");
                        return NULL;
                }
-               ip_field = pevent_find_field(event, "ip");
+               ip_field = tep_find_field(event, "ip");
                if (!ip_field) {
                        do_warning_event(event, "can't find ip field for binary printk");
                        return NULL;
@@ -4253,7 +4239,7 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
                pevent->bprint_ip_field = ip_field;
        }
 
-       ip = pevent_read_number(pevent, data + ip_field->offset, ip_field->size);
+       ip = tep_read_number(pevent, data + ip_field->offset, ip_field->size);
 
        /*
         * The first arg is the IP pointer.
@@ -4347,7 +4333,7 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
                                /* the pointers are always 4 bytes aligned */
                                bptr = (void *)(((unsigned long)bptr + 3) &
                                                ~3);
-                               val = pevent_read_number(pevent, bptr, vsize);
+                               val = tep_read_number(pevent, bptr, vsize);
                                bptr += vsize;
                                arg = alloc_arg();
                                if (!arg) {
@@ -4404,7 +4390,7 @@ static char *
 get_bprint_format(void *data, int size __maybe_unused,
                  struct event_format *event)
 {
-       struct pevent *pevent = event->pevent;
+       struct tep_handle *pevent = event->pevent;
        unsigned long long addr;
        struct format_field *field;
        struct printk_map *printk;
@@ -4413,7 +4399,7 @@ get_bprint_format(void *data, int size __maybe_unused,
        field = pevent->bprint_fmt_field;
 
        if (!field) {
-               field = pevent_find_field(event, "fmt");
+               field = tep_find_field(event, "fmt");
                if (!field) {
                        do_warning_event(event, "can't find format field for binary printk");
                        return NULL;
@@ -4421,7 +4407,7 @@ get_bprint_format(void *data, int size __maybe_unused,
                pevent->bprint_fmt_field = field;
        }
 
-       addr = pevent_read_number(pevent, data + field->offset, field->size);
+       addr = tep_read_number(pevent, data + field->offset, field->size);
 
        printk = find_printk(pevent, addr);
        if (!printk) {
@@ -4457,7 +4443,7 @@ static void print_mac_arg(struct trace_seq *s, int mac, void *data, int size,
                fmt = "%.2x%.2x%.2x%.2x%.2x%.2x";
        if (!arg->field.field) {
                arg->field.field =
-                       pevent_find_any_field(event, arg->field.name);
+                       tep_find_any_field(event, arg->field.name);
                if (!arg->field.field) {
                        do_warning_event(event, "%s: field %s not found",
                                         __func__, arg->field.name);
@@ -4607,7 +4593,7 @@ static int print_ipv4_arg(struct trace_seq *s, const char *ptr, char i,
 
        if (!arg->field.field) {
                arg->field.field =
-                       pevent_find_any_field(event, arg->field.name);
+                       tep_find_any_field(event, arg->field.name);
                if (!arg->field.field) {
                        do_warning("%s: field %s not found",
                                   __func__, arg->field.name);
@@ -4653,7 +4639,7 @@ static int print_ipv6_arg(struct trace_seq *s, const char *ptr, char i,
 
        if (!arg->field.field) {
                arg->field.field =
-                       pevent_find_any_field(event, arg->field.name);
+                       tep_find_any_field(event, arg->field.name);
                if (!arg->field.field) {
                        do_warning("%s: field %s not found",
                                   __func__, arg->field.name);
@@ -4711,7 +4697,7 @@ static int print_ipsa_arg(struct trace_seq *s, const char *ptr, char i,
 
        if (!arg->field.field) {
                arg->field.field =
-                       pevent_find_any_field(event, arg->field.name);
+                       tep_find_any_field(event, arg->field.name);
                if (!arg->field.field) {
                        do_warning("%s: field %s not found",
                                   __func__, arg->field.name);
@@ -4800,18 +4786,18 @@ static int is_printable_array(char *p, unsigned int len)
        return 1;
 }
 
-void pevent_print_field(struct trace_seq *s, void *data,
-                       struct format_field *field)
+void tep_print_field(struct trace_seq *s, void *data,
+                    struct format_field *field)
 {
        unsigned long long val;
        unsigned int offset, len, i;
-       struct pevent *pevent = field->event->pevent;
+       struct tep_handle *pevent = field->event->pevent;
 
        if (field->flags & FIELD_IS_ARRAY) {
                offset = field->offset;
                len = field->size;
                if (field->flags & FIELD_IS_DYNAMIC) {
-                       val = pevent_read_number(pevent, data + offset, len);
+                       val = tep_read_number(pevent, data + offset, len);
                        offset = val;
                        len = offset >> 16;
                        offset &= 0xffff;
@@ -4831,8 +4817,8 @@ void pevent_print_field(struct trace_seq *s, void *data,
                        field->flags &= ~FIELD_IS_STRING;
                }
        } else {
-               val = pevent_read_number(pevent, data + field->offset,
-                                        field->size);
+               val = tep_read_number(pevent, data + field->offset,
+                                     field->size);
                if (field->flags & FIELD_IS_POINTER) {
                        trace_seq_printf(s, "0x%llx", val);
                } else if (field->flags & FIELD_IS_SIGNED) {
@@ -4865,22 +4851,22 @@ void pevent_print_field(struct trace_seq *s, void *data,
        }
 }
 
-void pevent_print_fields(struct trace_seq *s, void *data,
-                        int size __maybe_unused, struct event_format *event)
+void tep_print_fields(struct trace_seq *s, void *data,
+                     int size __maybe_unused, struct event_format *event)
 {
        struct format_field *field;
 
        field = event->format.fields;
        while (field) {
                trace_seq_printf(s, " %s=", field->name);
-               pevent_print_field(s, data, field);
+               tep_print_field(s, data, field);
                field = field->next;
        }
 }
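
A sketch printing a single named field rather than the whole set, reusing the
field finder from earlier in this file:

static void print_named_field(struct trace_seq *s, struct event_format *event,
                              struct tep_record *record, const char *name)
{
        struct format_field *field = tep_find_any_field(event, name);

        if (!field)
                return;
        trace_seq_printf(s, " %s=", name);
        tep_print_field(s, record->data, field);        /* handles array/string/pointer flags */
}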
 
 static void pretty_print(struct trace_seq *s, void *data, int size, struct event_format *event)
 {
-       struct pevent *pevent = event->pevent;
+       struct tep_handle *pevent = event->pevent;
        struct print_fmt *print_fmt = &event->print_fmt;
        struct print_arg *arg = print_fmt->args;
        struct print_arg *args = NULL;
@@ -4899,7 +4885,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
 
        if (event->flags & EVENT_FL_FAILED) {
                trace_seq_printf(s, "[FAILED TO PARSE]");
-               pevent_print_fields(s, data, size, event);
+               tep_print_fields(s, data, size, event);
                return;
        }
 
@@ -5140,7 +5126,7 @@ out_failed:
 }
 
 /**
- * pevent_data_lat_fmt - parse the data for the latency format
+ * tep_data_lat_fmt - parse the data for the latency format
  * @pevent: a handle to the pevent
  * @s: the trace_seq to write to
  * @record: the record to read from
@@ -5149,8 +5135,8 @@ out_failed:
  * need rescheduling, in hard/soft interrupt, preempt count
  * and lock depth) and places it into the trace_seq.
  */
-void pevent_data_lat_fmt(struct pevent *pevent,
-                        struct trace_seq *s, struct pevent_record *record)
+void tep_data_lat_fmt(struct tep_handle *pevent,
+                     struct trace_seq *s, struct tep_record *record)
 {
        static int check_lock_depth = 1;
        static int check_migrate_disable = 1;
@@ -5223,55 +5209,55 @@ void pevent_data_lat_fmt(struct pevent *pevent,
 }
 
 /**
- * pevent_data_type - parse out the given event type
+ * tep_data_type - parse out the given event type
  * @pevent: a handle to the pevent
  * @rec: the record to read from
  *
  * This returns the event id from the @rec.
  */
-int pevent_data_type(struct pevent *pevent, struct pevent_record *rec)
+int tep_data_type(struct tep_handle *pevent, struct tep_record *rec)
 {
        return trace_parse_common_type(pevent, rec->data);
 }
 
 /**
- * pevent_data_event_from_type - find the event by a given type
+ * tep_data_event_from_type - find the event by a given type
  * @pevent: a handle to the pevent
  * @type: the type of the event.
  *
  * This returns the event from a given @type.
  */
-struct event_format *pevent_data_event_from_type(struct pevent *pevent, int type)
+struct event_format *tep_data_event_from_type(struct tep_handle *pevent, int type)
 {
-       return pevent_find_event(pevent, type);
+       return tep_find_event(pevent, type);
 }
 
 /**
- * pevent_data_pid - parse the PID from record
+ * tep_data_pid - parse the PID from record
  * @pevent: a handle to the pevent
  * @rec: the record to parse
  *
  * This returns the PID from a record.
  */
-int pevent_data_pid(struct pevent *pevent, struct pevent_record *rec)
+int tep_data_pid(struct tep_handle *pevent, struct tep_record *rec)
 {
        return parse_common_pid(pevent, rec->data);
 }
 
 /**
- * pevent_data_preempt_count - parse the preempt count from the record
+ * tep_data_preempt_count - parse the preempt count from the record
  * @pevent: a handle to the pevent
  * @rec: the record to parse
  *
  * This returns the preempt count from a record.
  */
-int pevent_data_preempt_count(struct pevent *pevent, struct pevent_record *rec)
+int tep_data_preempt_count(struct tep_handle *pevent, struct tep_record *rec)
 {
        return parse_common_pc(pevent, rec->data);
 }
 
 /**
- * pevent_data_flags - parse the latency flags from the record
+ * tep_data_flags - parse the latency flags from the record
  * @pevent: a handle to the pevent
  * @rec: the record to parse
  *
@@ -5279,20 +5265,20 @@ int pevent_data_preempt_count(struct pevent *pevent, struct pevent_record *rec)
  *
  *  Use trace_flag_type enum for the flags (see event-parse.h).
  */
-int pevent_data_flags(struct pevent *pevent, struct pevent_record *rec)
+int tep_data_flags(struct tep_handle *pevent, struct tep_record *rec)
 {
        return parse_common_flags(pevent, rec->data);
 }
 
 /**
- * pevent_data_comm_from_pid - return the command line from PID
+ * tep_data_comm_from_pid - return the command line from PID
  * @pevent: a handle to the pevent
  * @pid: the PID of the task to search for
  *
  * This returns a pointer to the command line that has the given
  * @pid.
  */
-const char *pevent_data_comm_from_pid(struct pevent *pevent, int pid)
+const char *tep_data_comm_from_pid(struct tep_handle *pevent, int pid)
 {
        const char *comm;
 
@@ -5301,7 +5287,7 @@ const char *pevent_data_comm_from_pid(struct pevent *pevent, int pid)
 }
 
 static struct cmdline *
-pid_from_cmdlist(struct pevent *pevent, const char *comm, struct cmdline *next)
+pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct cmdline *next)
 {
        struct cmdline_list *cmdlist = (struct cmdline_list *)next;
 
@@ -5317,7 +5303,7 @@ pid_from_cmdlist(struct pevent *pevent, const char *comm, struct cmdline *next)
 }
 
 /**
- * pevent_data_pid_from_comm - return the pid from a given comm
+ * tep_data_pid_from_comm - return the pid from a given comm
  * @pevent: a handle to the pevent
  * @comm: the cmdline to find the pid from
  * @next: the cmdline structure to find the next comm
@@ -5329,8 +5315,8 @@ pid_from_cmdlist(struct pevent *pevent, const char *comm, struct cmdline *next)
  * next pid.
  * Also, it does a linear search, so it may be slow.
  */
-struct cmdline *pevent_data_pid_from_comm(struct pevent *pevent, const char *comm,
-                                         struct cmdline *next)
+struct cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm,
+                                      struct cmdline *next)
 {
        struct cmdline *cmdline;
 
@@ -5365,13 +5351,13 @@ struct cmdline *pevent_data_pid_from_comm(struct pevent *pevent, const char *com
 }
 
 /**
- * pevent_cmdline_pid - return the pid associated to a given cmdline
+ * tep_cmdline_pid - return the pid associated to a given cmdline
  * @cmdline: The cmdline structure to get the pid from
  *
  * Returns the pid for a given cmdline. If @cmdline is NULL, then
  * -1 is returned.
  */
-int pevent_cmdline_pid(struct pevent *pevent, struct cmdline *cmdline)
+int tep_cmdline_pid(struct tep_handle *pevent, struct cmdline *cmdline)
 {
        struct cmdline_list *cmdlist = (struct cmdline_list *)cmdline;
 
@@ -5391,7 +5377,7 @@ int pevent_cmdline_pid(struct pevent *pevent, struct cmdline *cmdline)
 }
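
A sketch of the reverse lookup: NULL for @next returns the first match, and
tep_cmdline_pid turns it into a pid (-1 when nothing matched):

static int first_pid_of_comm(struct tep_handle *pevent, const char *comm)
{
        struct cmdline *cmdline;

        cmdline = tep_data_pid_from_comm(pevent, comm, NULL);
        return tep_cmdline_pid(pevent, cmdline);        /* -1 if cmdline is NULL */
}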
 
 /**
- * pevent_data_comm_from_pid - parse the data into the print format
+ * tep_event_info - parse the data into the print format
  * @s: the trace_seq to write to
  * @event: the handle to the event
  * @record: the record to read from
@@ -5399,13 +5385,13 @@ int pevent_cmdline_pid(struct pevent *pevent, struct cmdline *cmdline)
  * This parses the raw @data using the given @event information and
  * writes the print format into the trace_seq.
  */
-void pevent_event_info(struct trace_seq *s, struct event_format *event,
-                      struct pevent_record *record)
+void tep_event_info(struct trace_seq *s, struct event_format *event,
+                   struct tep_record *record)
 {
        int print_pretty = 1;
 
        if (event->pevent->print_raw || (event->flags & EVENT_FL_PRINTRAW))
-               pevent_print_fields(s, record->data, record->size, event);
+               tep_print_fields(s, record->data, record->size, event);
        else {
 
                if (event->handler && !(event->flags & EVENT_FL_NOHANDLE))
@@ -5433,7 +5419,7 @@ static bool is_timestamp_in_us(char *trace_clock, bool use_trace_clock)
 }
 
 /**
- * pevent_find_event_by_record - return the event from a given record
+ * tep_find_event_by_record - return the event from a given record
  * @pevent: a handle to the pevent
  * @record: The record to get the event from
  *
@@ -5441,7 +5427,7 @@ static bool is_timestamp_in_us(char *trace_clock, bool use_trace_clock)
  * is found.
  */
 struct event_format *
-pevent_find_event_by_record(struct pevent *pevent, struct pevent_record *record)
+tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record)
 {
        int type;
 
@@ -5452,11 +5438,11 @@ pevent_find_event_by_record(struct pevent *pevent, struct pevent_record *record)
 
        type = trace_parse_common_type(pevent, record->data);
 
-       return pevent_find_event(pevent, type);
+       return tep_find_event(pevent, type);
 }
 
 /**
- * pevent_print_event_task - Write the event task comm, pid and CPU
+ * tep_print_event_task - Write the event task comm, pid and CPU
  * @pevent: a handle to the pevent
  * @s: the trace_seq to write to
  * @event: the handle to the record's event
@@ -5464,9 +5450,9 @@ pevent_find_event_by_record(struct pevent *pevent, struct pevent_record *record)
  *
  * Writes the tasks comm, pid and CPU to @s.
  */
-void pevent_print_event_task(struct pevent *pevent, struct trace_seq *s,
-                            struct event_format *event,
-                            struct pevent_record *record)
+void tep_print_event_task(struct tep_handle *pevent, struct trace_seq *s,
+                         struct event_format *event,
+                         struct tep_record *record)
 {
        void *data = record->data;
        const char *comm;
@@ -5483,7 +5469,7 @@ void pevent_print_event_task(struct pevent *pevent, struct trace_seq *s,
 }
 
 /**
- * pevent_print_event_time - Write the event timestamp
+ * tep_print_event_time - Write the event timestamp
  * @pevent: a handle to the pevent
  * @s: the trace_seq to write to
  * @event: the handle to the record's event
@@ -5492,10 +5478,10 @@ void pevent_print_event_task(struct pevent *pevent, struct trace_seq *s,
  *
  * Writes the timestamp of the record into @s.
  */
-void pevent_print_event_time(struct pevent *pevent, struct trace_seq *s,
-                            struct event_format *event,
-                            struct pevent_record *record,
-                            bool use_trace_clock)
+void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s,
+                         struct event_format *event,
+                         struct tep_record *record,
+                         bool use_trace_clock)
 {
        unsigned long secs;
        unsigned long usecs;
@@ -5511,11 +5497,11 @@ void pevent_print_event_time(struct pevent *pevent, struct trace_seq *s,
        }
 
        if (pevent->latency_format) {
-               pevent_data_lat_fmt(pevent, s, record);
+               tep_data_lat_fmt(pevent, s, record);
        }
 
        if (use_usec_format) {
-               if (pevent->flags & PEVENT_NSEC_OUTPUT) {
+               if (pevent->flags & TEP_NSEC_OUTPUT) {
                        usecs = nsecs;
                        p = 9;
                } else {
@@ -5534,7 +5520,7 @@ void pevent_print_event_time(struct pevent *pevent, struct trace_seq *s,
 }
 
 /**
- * pevent_print_event_data - Write the event data section
+ * tep_print_event_data - Write the event data section
  * @pevent: a handle to the pevent
  * @s: the trace_seq to write to
  * @event: the handle to the record's event
@@ -5542,9 +5528,9 @@ void pevent_print_event_time(struct pevent *pevent, struct trace_seq *s,
  *
  * Writes the parsing of the record's data to @s.
  */
-void pevent_print_event_data(struct pevent *pevent, struct trace_seq *s,
-                            struct event_format *event,
-                            struct pevent_record *record)
+void tep_print_event_data(struct tep_handle *pevent, struct trace_seq *s,
+                         struct event_format *event,
+                         struct tep_record *record)
 {
        static const char *spaces = "                    "; /* 20 spaces */
        int len;
@@ -5556,15 +5542,15 @@ void pevent_print_event_data(struct pevent *pevent, struct trace_seq *s,
        if (len < 20)
                trace_seq_printf(s, "%.*s", 20 - len, spaces);
 
-       pevent_event_info(s, event, record);
+       tep_event_info(s, event, record);
 }
 
-void pevent_print_event(struct pevent *pevent, struct trace_seq *s,
-                       struct pevent_record *record, bool use_trace_clock)
+void tep_print_event(struct tep_handle *pevent, struct trace_seq *s,
+                    struct tep_record *record, bool use_trace_clock)
 {
        struct event_format *event;
 
-       event = pevent_find_event_by_record(pevent, record);
+       event = tep_find_event_by_record(pevent, record);
        if (!event) {
                int i;
                int type = trace_parse_common_type(pevent, record->data);
@@ -5577,9 +5563,9 @@ void pevent_print_event(struct pevent *pevent, struct trace_seq *s,
                return;
        }
 
-       pevent_print_event_task(pevent, s, event, record);
-       pevent_print_event_time(pevent, s, event, record, use_trace_clock);
-       pevent_print_event_data(pevent, s, event, record);
+       tep_print_event_task(pevent, s, event, record);
+       tep_print_event_time(pevent, s, event, record, use_trace_clock);
+       tep_print_event_data(pevent, s, event, record);
 }
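
The task/time/data helpers above are what tep_print_event chains together; a
sketch rendering one record into a trace_seq (trace_seq_do_printf and
trace_seq_destroy are assumed from the library's trace-seq helpers):

static void render_record(struct tep_handle *pevent, struct tep_record *record)
{
        struct trace_seq s;

        trace_seq_init(&s);
        tep_print_event(pevent, &s, record, 1 /* use_trace_clock */);
        trace_seq_terminate(&s);
        trace_seq_do_printf(&s);        /* write the line to stdout */
        trace_seq_destroy(&s);
}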
 
 static int events_id_cmp(const void *a, const void *b)
@@ -5630,7 +5616,7 @@ static int events_system_cmp(const void *a, const void *b)
        return events_id_cmp(a, b);
 }
 
-struct event_format **pevent_list_events(struct pevent *pevent, enum event_sort_type sort_type)
+struct event_format **tep_list_events(struct tep_handle *pevent, enum event_sort_type sort_type)
 {
        struct event_format **events;
        int (*sort)(const void *a, const void *b);
@@ -5709,13 +5695,13 @@ get_event_fields(const char *type, const char *name,
 }
 
 /**
- * pevent_event_common_fields - return a list of common fields for an event
+ * tep_event_common_fields - return a list of common fields for an event
  * @event: the event to return the common fields of.
  *
  * Returns an allocated array of fields. The last item in the array is NULL.
  * The array must be freed with free().
  */
-struct format_field **pevent_event_common_fields(struct event_format *event)
+struct format_field **tep_event_common_fields(struct event_format *event)
 {
        return get_event_fields("common", event->name,
                                event->format.nr_common,
@@ -5723,13 +5709,13 @@ struct format_field **pevent_event_common_fields(struct event_format *event)
 }
 
 /**
- * pevent_event_fields - return a list of event specific fields for an event
+ * tep_event_fields - return a list of event specific fields for an event
  * @event: the event to return the fields of.
  *
  * Returns an allocated array of fields. The last item in the array is NULL.
  * The array must be freed with free().
  */
-struct format_field **pevent_event_fields(struct event_format *event)
+struct format_field **tep_event_fields(struct event_format *event)
 {
        return get_event_fields("event", event->name,
                                event->format.nr_fields,
@@ -5930,7 +5916,7 @@ static void parse_header_field(const char *field,
 }
 
 /**
- * pevent_parse_header_page - parse the data stored in the header page
+ * tep_parse_header_page - parse the data stored in the header page
  * @pevent: the handle to the pevent
  * @buf: the buffer storing the header page format string
  * @size: the size of @buf
@@ -5941,8 +5927,8 @@ static void parse_header_field(const char *field,
  *
  * /sys/kernel/debug/tracing/events/header_page
  */
-int pevent_parse_header_page(struct pevent *pevent, char *buf, unsigned long size,
-                            int long_size)
+int tep_parse_header_page(struct tep_handle *pevent, char *buf, unsigned long size,
+                         int long_size)
 {
        int ignore;
 
@@ -5994,7 +5980,7 @@ static void free_handler(struct event_handler *handle)
        free(handle);
 }
 
-static int find_event_handle(struct pevent *pevent, struct event_format *event)
+static int find_event_handle(struct tep_handle *pevent, struct event_format *event)
 {
        struct event_handler *handle, **next;
 
@@ -6023,7 +6009,7 @@ static int find_event_handle(struct pevent *pevent, struct event_format *event)
 }
 
 /**
- * __pevent_parse_format - parse the event format
+ * __tep_parse_format - parse the event format
  * @buf: the buffer storing the event format string
  * @size: the size of @buf
  * @sys: the system the event belongs to
@@ -6035,9 +6021,9 @@ static int find_event_handle(struct pevent *pevent, struct event_format *event)
  *
  * /sys/kernel/debug/tracing/events/.../.../format
  */
-enum pevent_errno __pevent_parse_format(struct event_format **eventp,
-                                       struct pevent *pevent, const char *buf,
-                                       unsigned long size, const char *sys)
+enum tep_errno __tep_parse_format(struct event_format **eventp,
+                                 struct tep_handle *pevent, const char *buf,
+                                 unsigned long size, const char *sys)
 {
        struct event_format *event;
        int ret;
@@ -6046,12 +6032,12 @@ enum pevent_errno __pevent_parse_format(struct event_format **eventp,
 
        *eventp = event = alloc_event();
        if (!event)
-               return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+               return TEP_ERRNO__MEM_ALLOC_FAILED;
 
        event->name = event_read_name();
        if (!event->name) {
                /* Bad event? */
-               ret = PEVENT_ERRNO__MEM_ALLOC_FAILED;
+               ret = TEP_ERRNO__MEM_ALLOC_FAILED;
                goto event_alloc_failed;
        }
 
@@ -6064,7 +6050,7 @@ enum pevent_errno __pevent_parse_format(struct event_format **eventp,
                
        event->id = event_read_id();
        if (event->id < 0) {
-               ret = PEVENT_ERRNO__READ_ID_FAILED;
+               ret = TEP_ERRNO__READ_ID_FAILED;
                /*
                 * This isn't an allocation error actually.
                 * But as the ID is critical, just bail out.
@@ -6074,7 +6060,7 @@ enum pevent_errno __pevent_parse_format(struct event_format **eventp,
 
        event->system = strdup(sys);
        if (!event->system) {
-               ret = PEVENT_ERRNO__MEM_ALLOC_FAILED;
+               ret = TEP_ERRNO__MEM_ALLOC_FAILED;
                goto event_alloc_failed;
        }
 
@@ -6083,7 +6069,7 @@ enum pevent_errno __pevent_parse_format(struct event_format **eventp,
 
        ret = event_read_format(event);
        if (ret < 0) {
-               ret = PEVENT_ERRNO__READ_FORMAT_FAILED;
+               ret = TEP_ERRNO__READ_FORMAT_FAILED;
                goto event_parse_failed;
        }
 
@@ -6098,7 +6084,7 @@ enum pevent_errno __pevent_parse_format(struct event_format **eventp,
        show_warning = 1;
 
        if (ret < 0) {
-               ret = PEVENT_ERRNO__READ_PRINT_FAILED;
+               ret = TEP_ERRNO__READ_PRINT_FAILED;
                goto event_parse_failed;
        }
 
@@ -6112,14 +6098,14 @@ enum pevent_errno __pevent_parse_format(struct event_format **eventp,
                        arg = alloc_arg();
                        if (!arg) {
                                event->flags |= EVENT_FL_FAILED;
-                               return PEVENT_ERRNO__OLD_FTRACE_ARG_FAILED;
+                               return TEP_ERRNO__OLD_FTRACE_ARG_FAILED;
                        }
                        arg->type = PRINT_FIELD;
                        arg->field.name = strdup(field->name);
                        if (!arg->field.name) {
                                event->flags |= EVENT_FL_FAILED;
                                free_arg(arg);
-                               return PEVENT_ERRNO__OLD_FTRACE_ARG_FAILED;
+                               return TEP_ERRNO__OLD_FTRACE_ARG_FAILED;
                        }
                        arg->field.field = field;
                        *list = arg;
@@ -6142,20 +6128,20 @@ enum pevent_errno __pevent_parse_format(struct event_format **eventp,
        return ret;
 }
 
-static enum pevent_errno
-__pevent_parse_event(struct pevent *pevent,
-                    struct event_format **eventp,
-                    const char *buf, unsigned long size,
-                    const char *sys)
+static enum tep_errno
+__parse_event(struct tep_handle *pevent,
+             struct event_format **eventp,
+             const char *buf, unsigned long size,
+             const char *sys)
 {
-       int ret = __pevent_parse_format(eventp, pevent, buf, size, sys);
+       int ret = __tep_parse_format(eventp, pevent, buf, size, sys);
        struct event_format *event = *eventp;
 
        if (event == NULL)
                return ret;
 
        if (pevent && add_event(pevent, event)) {
-               ret = PEVENT_ERRNO__MEM_ALLOC_FAILED;
+               ret = TEP_ERRNO__MEM_ALLOC_FAILED;
                goto event_add_failed;
        }
 
@@ -6166,12 +6152,12 @@ __pevent_parse_event(struct pevent *pevent,
        return 0;
 
 event_add_failed:
-       pevent_free_format(event);
+       tep_free_format(event);
        return ret;
 }
 
 /**
- * pevent_parse_format - parse the event format
+ * tep_parse_format - parse the event format
  * @pevent: the handle to the pevent
  * @eventp: returned format
  * @buf: the buffer storing the event format string
@@ -6185,16 +6171,16 @@ event_add_failed:
  *
  * /sys/kernel/debug/tracing/events/.../.../format
  */
-enum pevent_errno pevent_parse_format(struct pevent *pevent,
-                                     struct event_format **eventp,
-                                     const char *buf,
-                                     unsigned long size, const char *sys)
+enum tep_errno tep_parse_format(struct tep_handle *pevent,
+                               struct event_format **eventp,
+                               const char *buf,
+                               unsigned long size, const char *sys)
 {
-       return __pevent_parse_event(pevent, eventp, buf, size, sys);
+       return __parse_event(pevent, eventp, buf, size, sys);
 }
 
 /**
- * pevent_parse_event - parse the event format
+ * tep_parse_event - parse the event format
  * @pevent: the handle to the pevent
  * @buf: the buffer storing the event format string
  * @size: the size of @buf
@@ -6207,22 +6193,22 @@ enum pevent_errno pevent_parse_format(struct pevent *pevent,
  *
  * /sys/kernel/debug/tracing/events/.../.../format
  */
-enum pevent_errno pevent_parse_event(struct pevent *pevent, const char *buf,
-                                    unsigned long size, const char *sys)
+enum tep_errno tep_parse_event(struct tep_handle *pevent, const char *buf,
+                              unsigned long size, const char *sys)
 {
        struct event_format *event = NULL;
-       return __pevent_parse_event(pevent, &event, buf, size, sys);
+       return __parse_event(pevent, &event, buf, size, sys);
 }
 
 #undef _PE
 #define _PE(code, str) str
-static const char * const pevent_error_str[] = {
-       PEVENT_ERRORS
+static const char * const tep_error_str[] = {
+       TEP_ERRORS
 };
 #undef _PE
 
-int pevent_strerror(struct pevent *pevent __maybe_unused,
-                   enum pevent_errno errnum, char *buf, size_t buflen)
+int tep_strerror(struct tep_handle *pevent __maybe_unused,
+                enum tep_errno errnum, char *buf, size_t buflen)
 {
        int idx;
        const char *msg;
@@ -6232,19 +6218,19 @@ int pevent_strerror(struct pevent *pevent __maybe_unused,
                return 0;
        }
 
-       if (errnum <= __PEVENT_ERRNO__START ||
-           errnum >= __PEVENT_ERRNO__END)
+       if (errnum <= __TEP_ERRNO__START ||
+           errnum >= __TEP_ERRNO__END)
                return -1;
 
-       idx = errnum - __PEVENT_ERRNO__START - 1;
-       msg = pevent_error_str[idx];
+       idx = errnum - __TEP_ERRNO__START - 1;
+       msg = tep_error_str[idx];
        snprintf(buf, buflen, "%s", msg);
 
        return 0;
 }
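
Taken together, tep_parse_format()/tep_parse_event() and tep_strerror() above form the basic consumer flow for the renamed API. A minimal sketch of that flow follows; the "sched" system name and the way the format buffer is obtained are illustrative assumptions, only the tep_*() signatures come from this patch.

        #include <stdio.h>
        #include "event-parse.h"

        /* Sketch: parse one tracefs format buffer and report any error.
         * 'buf' and 'size' are assumed to hold the contents of a
         * .../events/<sys>/<event>/format file already read by the caller. */
        static int parse_one_format(struct tep_handle *tep,
                                    const char *buf, unsigned long size)
        {
                enum tep_errno err = tep_parse_event(tep, buf, size, "sched");
                char msg[128];

                if (err) {      /* TEP_ERRNO__SUCCESS is 0 */
                        tep_strerror(tep, err, msg, sizeof(msg));
                        fprintf(stderr, "parse failed: %s\n", msg);
                        return -1;
                }
                return 0;
        }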
 
 int get_field_val(struct trace_seq *s, struct format_field *field,
-                 const char *name, struct pevent_record *record,
+                 const char *name, struct tep_record *record,
                  unsigned long long *val, int err)
 {
        if (!field) {
@@ -6253,7 +6239,7 @@ int get_field_val(struct trace_seq *s, struct format_field *field,
                return -1;
        }
 
-       if (pevent_read_number_field(field, record->data, val)) {
+       if (tep_read_number_field(field, record->data, val)) {
                if (err)
                        trace_seq_printf(s, " %s=INVALID", name);
                return -1;
@@ -6263,7 +6249,7 @@ int get_field_val(struct trace_seq *s, struct format_field *field,
 }
 
 /**
- * pevent_get_field_raw - return the raw pointer into the data field
+ * tep_get_field_raw - return the raw pointer into the data field
  * @s: The seq to print to on error
  * @event: the event that the field is for
  * @name: The name of the field
@@ -6276,9 +6262,9 @@ int get_field_val(struct trace_seq *s, struct format_field *field,
  *
  * On failure, it returns NULL.
  */
-void *pevent_get_field_raw(struct trace_seq *s, struct event_format *event,
-                          const char *name, struct pevent_record *record,
-                          int *len, int err)
+void *tep_get_field_raw(struct trace_seq *s, struct event_format *event,
+                       const char *name, struct tep_record *record,
+                       int *len, int err)
 {
        struct format_field *field;
        void *data = record->data;
@@ -6288,7 +6274,7 @@ void *pevent_get_field_raw(struct trace_seq *s, struct event_format *event,
        if (!event)
                return NULL;
 
-       field = pevent_find_field(event, name);
+       field = tep_find_field(event, name);
 
        if (!field) {
                if (err)
@@ -6302,7 +6288,7 @@ void *pevent_get_field_raw(struct trace_seq *s, struct event_format *event,
 
        offset = field->offset;
        if (field->flags & FIELD_IS_DYNAMIC) {
-               offset = pevent_read_number(event->pevent,
+               offset = tep_read_number(event->pevent,
                                            data + offset, field->size);
                *len = offset >> 16;
                offset &= 0xffff;
@@ -6313,7 +6299,7 @@ void *pevent_get_field_raw(struct trace_seq *s, struct event_format *event,
 }
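
The renamed tep_get_field_raw() above also documents the dynamic-field layout: when FIELD_IS_DYNAMIC is set, the record holds a packed word whose high 16 bits are the length and whose low 16 bits are the real data offset. A hedged sketch of using it; the "filename" field name is a made-up example and the event/record are assumed to come from the caller's trace stream.

        #include <stdio.h>
        #include "event-parse.h"

        /* Sketch: print the raw bytes of a (possibly dynamic) string field. */
        static void dump_filename(struct trace_seq *s, struct event_format *event,
                                  struct tep_record *record)
        {
                int len = 0;
                /* err = 1: let the helper report lookup failures into 's' */
                char *name = tep_get_field_raw(s, event, "filename", record, &len, 1);

                if (name)
                        printf("filename (%d bytes): %.*s\n", len, len, name);
        }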
 
 /**
- * pevent_get_field_val - find a field and return its value
+ * tep_get_field_val - find a field and return its value
  * @s: The seq to print to on error
  * @event: the event that the field is for
  * @name: The name of the field
@@ -6323,22 +6309,22 @@ void *pevent_get_field_raw(struct trace_seq *s, struct event_format *event,
  *
  * Returns 0 on success, -1 if the field is not found.
  */
-int pevent_get_field_val(struct trace_seq *s, struct event_format *event,
-                        const char *name, struct pevent_record *record,
-                        unsigned long long *val, int err)
+int tep_get_field_val(struct trace_seq *s, struct event_format *event,
+                     const char *name, struct tep_record *record,
+                     unsigned long long *val, int err)
 {
        struct format_field *field;
 
        if (!event)
                return -1;
 
-       field = pevent_find_field(event, name);
+       field = tep_find_field(event, name);
 
        return get_field_val(s, field, name, record, val, err);
 }
 
 /**
- * pevent_get_common_field_val - find a common field and return its value
+ * tep_get_common_field_val - find a common field and return its value
  * @s: The seq to print to on error
  * @event: the event that the field is for
  * @name: The name of the field
@@ -6348,22 +6334,22 @@ int pevent_get_field_val(struct trace_seq *s, struct event_format *event,
  *
  * Returns 0 on success, -1 if the field is not found.
  */
-int pevent_get_common_field_val(struct trace_seq *s, struct event_format *event,
-                               const char *name, struct pevent_record *record,
-                               unsigned long long *val, int err)
+int tep_get_common_field_val(struct trace_seq *s, struct event_format *event,
+                            const char *name, struct tep_record *record,
+                            unsigned long long *val, int err)
 {
        struct format_field *field;
 
        if (!event)
                return -1;
 
-       field = pevent_find_common_field(event, name);
+       field = tep_find_common_field(event, name);
 
        return get_field_val(s, field, name, record, val, err);
 }
 
 /**
- * pevent_get_any_field_val - find a any field and return its value
+ * tep_get_any_field_val - find any field and return its value
  * @s: The seq to print to on error
  * @event: the event that the field is for
  * @name: The name of the field
@@ -6373,22 +6359,22 @@ int pevent_get_common_field_val(struct trace_seq *s, struct event_format *event,
  *
  * Returns 0 on success, -1 if the field is not found.
  */
-int pevent_get_any_field_val(struct trace_seq *s, struct event_format *event,
-                            const char *name, struct pevent_record *record,
-                            unsigned long long *val, int err)
+int tep_get_any_field_val(struct trace_seq *s, struct event_format *event,
+                         const char *name, struct tep_record *record,
+                         unsigned long long *val, int err)
 {
        struct format_field *field;
 
        if (!event)
                return -1;
 
-       field = pevent_find_any_field(event, name);
+       field = tep_find_any_field(event, name);
 
        return get_field_val(s, field, name, record, val, err);
 }
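
The three *_field_val() helpers above differ only in which field table they search (tep_find_field(), tep_find_common_field(), tep_find_any_field()) before handing off to get_field_val(). A small sketch of the common case; the "prev_pid" field name is illustrative.

        #include "event-parse.h"

        /* Sketch: read one numeric field out of a record into the trace_seq. */
        static void show_prev_pid(struct trace_seq *s, struct event_format *event,
                                  struct tep_record *record)
        {
                unsigned long long pid;

                /* err = 1 asks the helper to report failures into 's' */
                if (tep_get_field_val(s, event, "prev_pid", record, &pid, 1) == 0)
                        trace_seq_printf(s, " prev_pid=%llu", pid);
        }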
 
 /**
- * pevent_print_num_field - print a field and a format
+ * tep_print_num_field - print a field and a format
  * @s: The seq to print to
  * @fmt: The printf format to print the field with.
  * @event: the event that the field is for
@@ -6398,17 +6384,17 @@ int pevent_get_any_field_val(struct trace_seq *s, struct event_format *event,
  *
  * Returns: 0 on success, -1 field not found, or 1 if buffer is full.
  */
-int pevent_print_num_field(struct trace_seq *s, const char *fmt,
-                          struct event_format *event, const char *name,
-                          struct pevent_record *record, int err)
+int tep_print_num_field(struct trace_seq *s, const char *fmt,
+                       struct event_format *event, const char *name,
+                       struct tep_record *record, int err)
 {
-       struct format_field *field = pevent_find_field(event, name);
+       struct format_field *field = tep_find_field(event, name);
        unsigned long long val;
 
        if (!field)
                goto failed;
 
-       if (pevent_read_number_field(field, record->data, &val))
+       if (tep_read_number_field(field, record->data, &val))
                goto failed;
 
        return trace_seq_printf(s, fmt, val);
@@ -6420,7 +6406,7 @@ int pevent_print_num_field(struct trace_seq *s, const char *fmt,
 }
 
 /**
- * pevent_print_func_field - print a field and a format for function pointers
+ * tep_print_func_field - print a field and a format for function pointers
  * @s: The seq to print to
  * @fmt: The printf format to print the field with.
  * @event: the event that the field is for
@@ -6430,12 +6416,12 @@ int pevent_print_num_field(struct trace_seq *s, const char *fmt,
  *
  * Returns: 0 on success, -1 field not found, or 1 if buffer is full.
  */
-int pevent_print_func_field(struct trace_seq *s, const char *fmt,
-                           struct event_format *event, const char *name,
-                           struct pevent_record *record, int err)
+int tep_print_func_field(struct trace_seq *s, const char *fmt,
+                        struct event_format *event, const char *name,
+                        struct tep_record *record, int err)
 {
-       struct format_field *field = pevent_find_field(event, name);
-       struct pevent *pevent = event->pevent;
+       struct format_field *field = tep_find_field(event, name);
+       struct tep_handle *pevent = event->pevent;
        unsigned long long val;
        struct func_map *func;
        char tmp[128];
@@ -6443,7 +6429,7 @@ int pevent_print_func_field(struct trace_seq *s, const char *fmt,
        if (!field)
                goto failed;
 
-       if (pevent_read_number_field(field, record->data, &val))
+       if (tep_read_number_field(field, record->data, &val))
                goto failed;
 
        func = find_func(pevent, val);
@@ -6461,9 +6447,9 @@ int pevent_print_func_field(struct trace_seq *s, const char *fmt,
        return -1;
 }
 
-static void free_func_handle(struct pevent_function_handler *func)
+static void free_func_handle(struct tep_function_handler *func)
 {
-       struct pevent_func_params *params;
+       struct func_params *params;
 
        free(func->name);
 
@@ -6477,29 +6463,29 @@ static void free_func_handle(struct pevent_function_handler *func)
 }
 
 /**
- * pevent_register_print_function - register a helper function
+ * tep_register_print_function - register a helper function
  * @pevent: the handle to the pevent
  * @func: the function to process the helper function
  * @ret_type: the return type of the helper function
  * @name: the name of the helper function
- * @parameters: A list of enum pevent_func_arg_type
+ * @parameters: A list of enum tep_func_arg_type
  *
  * Some events may have helper functions in the print format arguments.
  * This allows a plugin to dynamically create a way to process one
  * of these functions.
  *
- * The @parameters is a variable list of pevent_func_arg_type enums that
- * must end with PEVENT_FUNC_ARG_VOID.
+ * The @parameters is a variable list of tep_func_arg_type enums that
+ * must end with TEP_FUNC_ARG_VOID.
  */
-int pevent_register_print_function(struct pevent *pevent,
-                                  pevent_func_handler func,
-                                  enum pevent_func_arg_type ret_type,
-                                  char *name, ...)
-{
-       struct pevent_function_handler *func_handle;
-       struct pevent_func_params **next_param;
-       struct pevent_func_params *param;
-       enum pevent_func_arg_type type;
+int tep_register_print_function(struct tep_handle *pevent,
+                               tep_func_handler func,
+                               enum tep_func_arg_type ret_type,
+                               char *name, ...)
+{
+       struct tep_function_handler *func_handle;
+       struct func_params **next_param;
+       struct func_params *param;
+       enum tep_func_arg_type type;
        va_list ap;
        int ret;
 
@@ -6517,7 +6503,7 @@ int pevent_register_print_function(struct pevent *pevent,
        func_handle = calloc(1, sizeof(*func_handle));
        if (!func_handle) {
                do_warning("Failed to allocate function handler");
-               return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+               return TEP_ERRNO__MEM_ALLOC_FAILED;
        }
 
        func_handle->ret_type = ret_type;
@@ -6526,26 +6512,26 @@ int pevent_register_print_function(struct pevent *pevent,
        if (!func_handle->name) {
                do_warning("Failed to allocate function name");
                free(func_handle);
-               return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+               return TEP_ERRNO__MEM_ALLOC_FAILED;
        }
 
        next_param = &(func_handle->params);
        va_start(ap, name);
        for (;;) {
-               type = va_arg(ap, enum pevent_func_arg_type);
-               if (type == PEVENT_FUNC_ARG_VOID)
+               type = va_arg(ap, enum tep_func_arg_type);
+               if (type == TEP_FUNC_ARG_VOID)
                        break;
 
-               if (type >= PEVENT_FUNC_ARG_MAX_TYPES) {
+               if (type >= TEP_FUNC_ARG_MAX_TYPES) {
                        do_warning("Invalid argument type %d", type);
-                       ret = PEVENT_ERRNO__INVALID_ARG_TYPE;
+                       ret = TEP_ERRNO__INVALID_ARG_TYPE;
                        goto out_free;
                }
 
                param = malloc(sizeof(*param));
                if (!param) {
                        do_warning("Failed to allocate function param");
-                       ret = PEVENT_ERRNO__MEM_ALLOC_FAILED;
+                       ret = TEP_ERRNO__MEM_ALLOC_FAILED;
                        goto out_free;
                }
                param->type = type;
@@ -6569,7 +6555,7 @@ int pevent_register_print_function(struct pevent *pevent,
 }
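
The kernel-doc above describes the variadic contract of tep_register_print_function() only in prose, so a hedged sketch may help; the __find_symbol helper, its single TEP_FUNC_ARG_PTR argument and the TEP_FUNC_ARG_LONG return type are invented for illustration, and the list must still end with TEP_FUNC_ARG_VOID as documented.

        #include "event-parse.h"

        /* Hypothetical helper for an event whose print fmt calls __find_symbol(REC->addr). */
        static unsigned long long find_symbol_helper(struct trace_seq *s,
                                                     unsigned long long *args)
        {
                trace_seq_printf(s, "sym@0x%llx", args[0]);
                return args[0];
        }

        static int register_helpers(struct tep_handle *tep)
        {
                return tep_register_print_function(tep, find_symbol_helper,
                                                   TEP_FUNC_ARG_LONG,
                                                   "__find_symbol",
                                                   TEP_FUNC_ARG_PTR,
                                                   TEP_FUNC_ARG_VOID);
        }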
 
 /**
- * pevent_unregister_print_function - unregister a helper function
+ * tep_unregister_print_function - unregister a helper function
  * @pevent: the handle to the pevent
  * @func: the function to process the helper function
  * @name: the name of the helper function
@@ -6578,10 +6564,10 @@ int pevent_register_print_function(struct pevent *pevent,
  *
  * Returns 0 if the handler was removed successfully, -1 otherwise.
  */
-int pevent_unregister_print_function(struct pevent *pevent,
-                                    pevent_func_handler func, char *name)
+int tep_unregister_print_function(struct tep_handle *pevent,
+                                 tep_func_handler func, char *name)
 {
-       struct pevent_function_handler *func_handle;
+       struct tep_function_handler *func_handle;
 
        func_handle = find_func_handler(pevent, name);
        if (func_handle && func_handle->func == func) {
@@ -6591,15 +6577,15 @@ int pevent_unregister_print_function(struct pevent *pevent,
        return -1;
 }
 
-static struct event_format *pevent_search_event(struct pevent *pevent, int id,
-                                               const char *sys_name,
-                                               const char *event_name)
+static struct event_format *search_event(struct tep_handle *pevent, int id,
+                                        const char *sys_name,
+                                        const char *event_name)
 {
        struct event_format *event;
 
        if (id >= 0) {
                /* search by id */
-               event = pevent_find_event(pevent, id);
+               event = tep_find_event(pevent, id);
                if (!event)
                        return NULL;
                if (event_name && (strcmp(event_name, event->name) != 0))
@@ -6607,7 +6593,7 @@ static struct event_format *pevent_search_event(struct pevent *pevent, int id,
                if (sys_name && (strcmp(sys_name, event->system) != 0))
                        return NULL;
        } else {
-               event = pevent_find_event_by_name(pevent, sys_name, event_name);
+               event = tep_find_event_by_name(pevent, sys_name, event_name);
                if (!event)
                        return NULL;
        }
@@ -6615,7 +6601,7 @@ static struct event_format *pevent_search_event(struct pevent *pevent, int id,
 }
 
 /**
- * pevent_register_event_handler - register a way to parse an event
+ * tep_register_event_handler - register a way to parse an event
  * @pevent: the handle to the pevent
  * @id: the id of the event to register
  * @sys_name: the system name the event belongs to
@@ -6631,14 +6617,14 @@ static struct event_format *pevent_search_event(struct pevent *pevent, int id,
  * If @id is >= 0, then it is used to find the event.
  * else @sys_name and @event_name are used.
  */
-int pevent_register_event_handler(struct pevent *pevent, int id,
-                                 const char *sys_name, const char *event_name,
-                                 pevent_event_handler_func func, void *context)
+int tep_register_event_handler(struct tep_handle *pevent, int id,
+                              const char *sys_name, const char *event_name,
+                              tep_event_handler_func func, void *context)
 {
        struct event_format *event;
        struct event_handler *handle;
 
-       event = pevent_search_event(pevent, id, sys_name, event_name);
+       event = search_event(pevent, id, sys_name, event_name);
        if (event == NULL)
                goto not_found;
 
@@ -6654,7 +6640,7 @@ int pevent_register_event_handler(struct pevent *pevent, int id,
        handle = calloc(1, sizeof(*handle));
        if (!handle) {
                do_warning("Failed to allocate event handler");
-               return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+               return TEP_ERRNO__MEM_ALLOC_FAILED;
        }
 
        handle->id = id;
@@ -6669,7 +6655,7 @@ int pevent_register_event_handler(struct pevent *pevent, int id,
                free((void *)handle->event_name);
                free((void *)handle->sys_name);
                free(handle);
-               return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+               return TEP_ERRNO__MEM_ALLOC_FAILED;
        }
 
        handle->func = func;
@@ -6682,7 +6668,7 @@ int pevent_register_event_handler(struct pevent *pevent, int id,
 
 static int handle_matches(struct event_handler *handler, int id,
                          const char *sys_name, const char *event_name,
-                         pevent_event_handler_func func, void *context)
+                         tep_event_handler_func func, void *context)
 {
        if (id >= 0 && id != handler->id)
                return 0;
@@ -6700,7 +6686,7 @@ static int handle_matches(struct event_handler *handler, int id,
 }
 
 /**
- * pevent_unregister_event_handler - unregister an existing event handler
+ * tep_unregister_event_handler - unregister an existing event handler
  * @pevent: the handle to the pevent
  * @id: the id of the event to unregister
  * @sys_name: the system name the handler belongs to
@@ -6715,15 +6701,15 @@ static int handle_matches(struct event_handler *handler, int id,
  *
  * Returns 0 if handler was removed successfully, -1 if event was not found.
  */
-int pevent_unregister_event_handler(struct pevent *pevent, int id,
-                                   const char *sys_name, const char *event_name,
-                                   pevent_event_handler_func func, void *context)
+int tep_unregister_event_handler(struct tep_handle *pevent, int id,
+                                const char *sys_name, const char *event_name,
+                                tep_event_handler_func func, void *context)
 {
        struct event_format *event;
        struct event_handler *handle;
        struct event_handler **next;
 
-       event = pevent_search_event(pevent, id, sys_name, event_name);
+       event = search_event(pevent, id, sys_name, event_name);
        if (event == NULL)
                goto not_found;
 
@@ -6754,11 +6740,11 @@ not_found:
 }
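
A matching sketch for the register/unregister pair above; the sched_switch event and its next_pid field are illustrative, and passing id = -1 forces the lookup by system and event name exactly as the kernel-doc describes.

        #include "event-parse.h"

        /* Hypothetical handler that takes over printing of sched:sched_switch. */
        static int sched_switch_handler(struct trace_seq *s, struct tep_record *record,
                                        struct event_format *event, void *context)
        {
                unsigned long long next_pid;

                if (tep_get_field_val(s, event, "next_pid", record, &next_pid, 1))
                        return -1;
                trace_seq_printf(s, "next_pid=%llu", next_pid);
                return 0;
        }

        static int hook_sched_switch(struct tep_handle *tep)
        {
                return tep_register_event_handler(tep, -1, "sched", "sched_switch",
                                                  sched_switch_handler, NULL);
        }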
 
 /**
- * pevent_alloc - create a pevent handle
+ * tep_alloc - create a pevent handle
  */
-struct pevent *pevent_alloc(void)
+struct tep_handle *tep_alloc(void)
 {
-       struct pevent *pevent = calloc(1, sizeof(*pevent));
+       struct tep_handle *pevent = calloc(1, sizeof(*pevent));
 
        if (pevent)
                pevent->ref_count = 1;
@@ -6766,12 +6752,12 @@ struct pevent *pevent_alloc(void)
        return pevent;
 }
 
-void pevent_ref(struct pevent *pevent)
+void tep_ref(struct tep_handle *pevent)
 {
        pevent->ref_count++;
 }
 
-void pevent_free_format_field(struct format_field *field)
+void tep_free_format_field(struct format_field *field)
 {
        free(field->type);
        if (field->alias != field->name)
@@ -6786,7 +6772,7 @@ static void free_format_fields(struct format_field *field)
 
        while (field) {
                next = field->next;
-               pevent_free_format_field(field);
+               tep_free_format_field(field);
                field = next;
        }
 }
@@ -6797,7 +6783,7 @@ static void free_formats(struct format *format)
        free_format_fields(format->fields);
 }
 
-void pevent_free_format(struct event_format *event)
+void tep_free_format(struct event_format *event)
 {
        free(event->name);
        free(event->system);
@@ -6811,15 +6797,15 @@ void pevent_free_format(struct event_format *event)
 }
 
 /**
- * pevent_free - free a pevent handle
+ * tep_free - free a pevent handle
  * @pevent: the pevent handle to free
  */
-void pevent_free(struct pevent *pevent)
+void tep_free(struct tep_handle *pevent)
 {
        struct cmdline_list *cmdlist, *cmdnext;
        struct func_list *funclist, *funcnext;
        struct printk_list *printklist, *printknext;
-       struct pevent_function_handler *func_handler;
+       struct tep_function_handler *func_handler;
        struct event_handler *handle;
        int i;
 
@@ -6883,7 +6869,7 @@ void pevent_free(struct pevent *pevent)
        }
 
        for (i = 0; i < pevent->nr_events; i++)
-               pevent_free_format(pevent->events[i]);
+               tep_free_format(pevent->events[i]);
 
        while (pevent->handlers) {
                handle = pevent->handlers;
@@ -6899,7 +6885,7 @@ void pevent_free(struct pevent *pevent)
        free(pevent);
 }
 
-void pevent_unref(struct pevent *pevent)
+void tep_unref(struct tep_handle *pevent)
 {
-       pevent_free(pevent);
+       tep_free(pevent);
 }
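
tep_alloc() starts the handle with ref_count = 1 and, as the final hunk shows, tep_unref() is simply tep_free(). A short sketch of the intended pairing, assuming tep_free() balances the reference count the way the rest of the library expects.

        #include "event-parse.h"

        static void handle_lifecycle(void)
        {
                struct tep_handle *tep = tep_alloc();   /* ref_count starts at 1 */

                if (!tep)
                        return;

                tep_ref(tep);   /* a second user takes a reference...            */
                tep_unref(tep); /* ...and drops it again (tep_unref == tep_free) */

                tep_free(tep);  /* release the allocating owner's reference      */
        }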
index 0c03538df74c01a1ecedc353e21b6c81083ee1e1..44b7c2d41f9fca7f912008ad743f7f5105f74751 100644 (file)
@@ -41,7 +41,7 @@
 #define DEBUG_RECORD 0
 #endif
 
-struct pevent_record {
+struct tep_record {
        unsigned long long      ts;
        unsigned long long      offset;
        long long               missed_events;  /* buffer dropped events before */
@@ -53,8 +53,8 @@ struct pevent_record {
        int                     locked;         /* Do not free, even if ref_count is zero */
        void                    *priv;
 #if DEBUG_RECORD
-       struct pevent_record    *prev;
-       struct pevent_record    *next;
+       struct tep_record       *prev;
+       struct tep_record       *next;
        long                    alloc_addr;
 #endif
 };
@@ -98,19 +98,19 @@ extern int trace_seq_do_printf(struct trace_seq *s);
 
 /* ----------------------- pevent ----------------------- */
 
-struct pevent;
+struct tep_handle;
 struct event_format;
 
-typedef int (*pevent_event_handler_func)(struct trace_seq *s,
-                                        struct pevent_record *record,
-                                        struct event_format *event,
-                                        void *context);
+typedef int (*tep_event_handler_func)(struct trace_seq *s,
+                                     struct tep_record *record,
+                                     struct event_format *event,
+                                     void *context);
 
-typedef int (*pevent_plugin_load_func)(struct pevent *pevent);
-typedef int (*pevent_plugin_unload_func)(struct pevent *pevent);
+typedef int (*tep_plugin_load_func)(struct tep_handle *pevent);
+typedef int (*tep_plugin_unload_func)(struct tep_handle *pevent);
 
-struct pevent_plugin_option {
-       struct pevent_plugin_option     *next;
+struct tep_plugin_option {
+       struct tep_plugin_option        *next;
        void                            *handle;
        char                            *file;
        char                            *name;
@@ -124,20 +124,20 @@ struct pevent_plugin_option {
 /*
  * Plugin hooks that can be called:
  *
- * PEVENT_PLUGIN_LOADER:  (required)
+ * TEP_PLUGIN_LOADER:  (required)
  *   The function name to initialize the plugin.
  *
- *   int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
+ *   int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
  *
- * PEVENT_PLUGIN_UNLOADER:  (optional)
+ * TEP_PLUGIN_UNLOADER:  (optional)
  *   The function called just before unloading
  *
- *   int PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
+ *   int TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
  *
- * PEVENT_PLUGIN_OPTIONS:  (optional)
+ * TEP_PLUGIN_OPTIONS:  (optional)
  *   Plugin options that can be set before loading
  *
- *   struct pevent_plugin_option PEVENT_PLUGIN_OPTIONS[] = {
+ *   struct tep_plugin_option TEP_PLUGIN_OPTIONS[] = {
  *     {
  *             .name = "option-name",
  *             .plugin_alias = "override-file-name", (optional)
@@ -158,19 +158,19 @@ struct pevent_plugin_option {
  *   .set will be processed. If .value is defined, then it is considered
  *   a string option and .set will be ignored.
  *
- * PEVENT_PLUGIN_ALIAS: (optional)
+ * TEP_PLUGIN_ALIAS: (optional)
  *   The name to use for finding options (uses filename if not defined)
  */
-#define PEVENT_PLUGIN_LOADER pevent_plugin_loader
-#define PEVENT_PLUGIN_UNLOADER pevent_plugin_unloader
-#define PEVENT_PLUGIN_OPTIONS pevent_plugin_options
-#define PEVENT_PLUGIN_ALIAS pevent_plugin_alias
+#define TEP_PLUGIN_LOADER tep_plugin_loader
+#define TEP_PLUGIN_UNLOADER tep_plugin_unloader
+#define TEP_PLUGIN_OPTIONS tep_plugin_options
+#define TEP_PLUGIN_ALIAS tep_plugin_alias
 #define _MAKE_STR(x)   #x
 #define MAKE_STR(x)    _MAKE_STR(x)
-#define PEVENT_PLUGIN_LOADER_NAME MAKE_STR(PEVENT_PLUGIN_LOADER)
-#define PEVENT_PLUGIN_UNLOADER_NAME MAKE_STR(PEVENT_PLUGIN_UNLOADER)
-#define PEVENT_PLUGIN_OPTIONS_NAME MAKE_STR(PEVENT_PLUGIN_OPTIONS)
-#define PEVENT_PLUGIN_ALIAS_NAME MAKE_STR(PEVENT_PLUGIN_ALIAS)
+#define TEP_PLUGIN_LOADER_NAME MAKE_STR(TEP_PLUGIN_LOADER)
+#define TEP_PLUGIN_UNLOADER_NAME MAKE_STR(TEP_PLUGIN_UNLOADER)
+#define TEP_PLUGIN_OPTIONS_NAME MAKE_STR(TEP_PLUGIN_OPTIONS)
+#define TEP_PLUGIN_ALIAS_NAME MAKE_STR(TEP_PLUGIN_ALIAS)
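
The block comment above spells out the plugin hooks in prose; below is a hedged skeleton of a plugin built on the renamed macros. The "example" plugin name, the "verbose" option and the empty handler bodies are invented for illustration.

        #include "event-parse.h"

        /* Hypothetical boolean option, settable as <plugin>:verbose */
        struct tep_plugin_option TEP_PLUGIN_OPTIONS[] = {
                { .name = "verbose" },
                { .name = NULL },       /* conventional end-of-list marker */
        };

        int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
        {
                tep_plugin_add_options("example", TEP_PLUGIN_OPTIONS);
                /* tep_register_event_handler()/tep_register_print_function() calls go here */
                return 0;
        }

        int TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
        {
                tep_plugin_remove_options(TEP_PLUGIN_OPTIONS);
                return 0;
        }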
 
 enum format_flags {
        FIELD_IS_ARRAY          = 1,
@@ -269,10 +269,10 @@ struct print_arg_op {
        struct print_arg        *right;
 };
 
-struct pevent_function_handler;
+struct tep_function_handler;
 
 struct print_arg_func {
-       struct pevent_function_handler  *func;
+       struct tep_function_handler     *func;
        struct print_arg                *args;
 };
 
@@ -320,14 +320,14 @@ struct print_fmt {
 };
 
 struct event_format {
-       struct pevent           *pevent;
+       struct tep_handle       *pevent;
        char                    *name;
        int                     id;
        int                     flags;
        struct format           format;
        struct print_fmt        print_fmt;
        char                    *system;
-       pevent_event_handler_func handler;
+       tep_event_handler_func  handler;
        void                    *context;
 };
 
@@ -361,25 +361,25 @@ enum event_type {
        EVENT_SQUOTE,
 };
 
-typedef unsigned long long (*pevent_func_handler)(struct trace_seq *s,
-                                            unsigned long long *args);
+typedef unsigned long long (*tep_func_handler)(struct trace_seq *s,
+                                              unsigned long long *args);
 
-enum pevent_func_arg_type {
-       PEVENT_FUNC_ARG_VOID,
-       PEVENT_FUNC_ARG_INT,
-       PEVENT_FUNC_ARG_LONG,
-       PEVENT_FUNC_ARG_STRING,
-       PEVENT_FUNC_ARG_PTR,
-       PEVENT_FUNC_ARG_MAX_TYPES
+enum tep_func_arg_type {
+       TEP_FUNC_ARG_VOID,
+       TEP_FUNC_ARG_INT,
+       TEP_FUNC_ARG_LONG,
+       TEP_FUNC_ARG_STRING,
+       TEP_FUNC_ARG_PTR,
+       TEP_FUNC_ARG_MAX_TYPES
 };
 
-enum pevent_flag {
-       PEVENT_NSEC_OUTPUT              = 1,    /* output in NSECS */
-       PEVENT_DISABLE_SYS_PLUGINS      = 1 << 1,
-       PEVENT_DISABLE_PLUGINS          = 1 << 2,
+enum tep_flag {
+       TEP_NSEC_OUTPUT         = 1,    /* output in NSECS */
+       TEP_DISABLE_SYS_PLUGINS = 1 << 1,
+       TEP_DISABLE_PLUGINS     = 1 << 2,
 };
 
-#define PEVENT_ERRORS                                                        \
+#define TEP_ERRORS                                                           \
        _PE(MEM_ALLOC_FAILED,   "failed to allocate memory"),                 \
        _PE(PARSE_EVENT_FAILED, "failed to parse event"),                     \
        _PE(READ_ID_FAILED,     "failed to read event id"),                   \
@@ -411,10 +411,10 @@ enum pevent_flag {
        _PE(FILTER_MISS,        "record does not match to filter")
 
 #undef _PE
-#define _PE(__code, __str) PEVENT_ERRNO__ ## __code
-enum pevent_errno {
-       PEVENT_ERRNO__SUCCESS                   = 0,
-       PEVENT_ERRNO__FILTER_MATCH              = PEVENT_ERRNO__SUCCESS,
+#define _PE(__code, __str) TEP_ERRNO__ ## __code
+enum tep_errno {
+       TEP_ERRNO__SUCCESS                      = 0,
+       TEP_ERRNO__FILTER_MATCH                 = TEP_ERRNO__SUCCESS,
 
        /*
         * Choose an arbitrary negative big number not to clash with standard
@@ -423,11 +423,11 @@ enum pevent_errno {
         *
         * http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html
         */
-       __PEVENT_ERRNO__START                   = -100000,
+       __TEP_ERRNO__START                      = -100000,
 
-       PEVENT_ERRORS,
+       TEP_ERRORS,
 
-       __PEVENT_ERRNO__END,
+       __TEP_ERRNO__END,
 };
 #undef _PE
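
The _PE() X-macro just #undef'd is what keeps the error codes and their messages in step: the same TEP_ERRORS list expands once here into enumerators and once in event-parse.c into tep_error_str[]. Roughly what the preprocessor produces, abridged to the first two entries of the list.

        /* With  #define _PE(__code, __str) TEP_ERRNO__ ## __code  the list becomes: */
        enum tep_errno {
                TEP_ERRNO__SUCCESS              = 0,
                TEP_ERRNO__FILTER_MATCH         = TEP_ERRNO__SUCCESS,

                __TEP_ERRNO__START              = -100000,

                TEP_ERRNO__MEM_ALLOC_FAILED,    /* -99999 */
                TEP_ERRNO__PARSE_EVENT_FAILED,  /* -99998 */
                /* ...one enumerator per _PE() entry... */
                __TEP_ERRNO__END,
        };

        /* With  #define _PE(code, str) str  the very same list becomes the table
         * that tep_strerror() indexes with  errnum - __TEP_ERRNO__START - 1 : */
        static const char * const tep_error_str[] = {
                "failed to allocate memory",
                "failed to parse event",
                /* ... */
        };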
 
@@ -435,17 +435,17 @@ struct plugin_list;
 
 #define INVALID_PLUGIN_LIST_OPTION     ((char **)((unsigned long)-1))
 
-struct plugin_list *traceevent_load_plugins(struct pevent *pevent);
-void traceevent_unload_plugins(struct plugin_list *plugin_list,
-                              struct pevent *pevent);
-char **traceevent_plugin_list_options(void);
-void traceevent_plugin_free_options_list(char **list);
-int traceevent_plugin_add_options(const char *name,
-                                 struct pevent_plugin_option *options);
-void traceevent_plugin_remove_options(struct pevent_plugin_option *options);
-void traceevent_print_plugins(struct trace_seq *s,
-                             const char *prefix, const char *suffix,
-                             const struct plugin_list *list);
+struct plugin_list *tep_load_plugins(struct tep_handle *pevent);
+void tep_unload_plugins(struct plugin_list *plugin_list,
+                       struct tep_handle *pevent);
+char **tep_plugin_list_options(void);
+void tep_plugin_free_options_list(char **list);
+int tep_plugin_add_options(const char *name,
+                          struct tep_plugin_option *options);
+void tep_plugin_remove_options(struct tep_plugin_option *options);
+void tep_print_plugins(struct trace_seq *s,
+                       const char *prefix, const char *suffix,
+                       const struct plugin_list *list);
 
 struct cmdline;
 struct cmdline_list;
@@ -454,10 +454,10 @@ struct func_list;
 struct event_handler;
 struct func_resolver;
 
-typedef char *(pevent_func_resolver_t)(void *priv,
-                                      unsigned long long *addrp, char **modp);
+typedef char *(tep_func_resolver_t)(void *priv,
+                                   unsigned long long *addrp, char **modp);
 
-struct pevent {
+struct tep_handle {
        int ref_count;
 
        int header_page_ts_offset;
@@ -524,7 +524,7 @@ struct pevent {
        struct format_field *bprint_buf_field;
 
        struct event_handler *handlers;
-       struct pevent_function_handler *func_handlers;
+       struct tep_function_handler *func_handlers;
 
        /* cache */
        struct event_format *last_event;
@@ -532,13 +532,13 @@ struct pevent {
        char *trace_clock;
 };
 
-static inline void pevent_set_flag(struct pevent *pevent, int flag)
+static inline void tep_set_flag(struct tep_handle *pevent, int flag)
 {
        pevent->flags |= flag;
 }
 
 static inline unsigned short
-__data2host2(struct pevent *pevent, unsigned short data)
+__data2host2(struct tep_handle *pevent, unsigned short data)
 {
        unsigned short swap;
 
@@ -552,7 +552,7 @@ __data2host2(struct pevent *pevent, unsigned short data)
 }
 
 static inline unsigned int
-__data2host4(struct pevent *pevent, unsigned int data)
+__data2host4(struct tep_handle *pevent, unsigned int data)
 {
        unsigned int swap;
 
@@ -568,7 +568,7 @@ __data2host4(struct pevent *pevent, unsigned int data)
 }
 
 static inline unsigned long long
-__data2host8(struct pevent *pevent, unsigned long long data)
+__data2host8(struct tep_handle *pevent, unsigned long long data)
 {
        unsigned long long swap;
 
@@ -597,7 +597,7 @@ __data2host8(struct pevent *pevent, unsigned long long data)
        __data2host8(pevent, __val);                            \
 })
 
-static inline int traceevent_host_bigendian(void)
+static inline int tep_host_bigendian(void)
 {
        unsigned char str[] = { 0x1, 0x2, 0x3, 0x4 };
        unsigned int val;
@@ -615,198 +615,198 @@ enum trace_flag_type {
        TRACE_FLAG_SOFTIRQ              = 0x10,
 };
 
-int pevent_set_function_resolver(struct pevent *pevent,
-                                pevent_func_resolver_t *func, void *priv);
-void pevent_reset_function_resolver(struct pevent *pevent);
-int pevent_register_comm(struct pevent *pevent, const char *comm, int pid);
-int pevent_register_trace_clock(struct pevent *pevent, const char *trace_clock);
-int pevent_register_function(struct pevent *pevent, char *name,
-                            unsigned long long addr, char *mod);
-int pevent_register_print_string(struct pevent *pevent, const char *fmt,
-                                unsigned long long addr);
-int pevent_pid_is_registered(struct pevent *pevent, int pid);
-
-void pevent_print_event_task(struct pevent *pevent, struct trace_seq *s,
-                            struct event_format *event,
-                            struct pevent_record *record);
-void pevent_print_event_time(struct pevent *pevent, struct trace_seq *s,
-                            struct event_format *event,
-                            struct pevent_record *record,
-                            bool use_trace_clock);
-void pevent_print_event_data(struct pevent *pevent, struct trace_seq *s,
-                            struct event_format *event,
-                            struct pevent_record *record);
-void pevent_print_event(struct pevent *pevent, struct trace_seq *s,
-                       struct pevent_record *record, bool use_trace_clock);
-
-int pevent_parse_header_page(struct pevent *pevent, char *buf, unsigned long size,
-                            int long_size);
-
-enum pevent_errno pevent_parse_event(struct pevent *pevent, const char *buf,
-                                    unsigned long size, const char *sys);
-enum pevent_errno pevent_parse_format(struct pevent *pevent,
-                                     struct event_format **eventp,
-                                     const char *buf,
-                                     unsigned long size, const char *sys);
-void pevent_free_format(struct event_format *event);
-void pevent_free_format_field(struct format_field *field);
-
-void *pevent_get_field_raw(struct trace_seq *s, struct event_format *event,
-                          const char *name, struct pevent_record *record,
-                          int *len, int err);
-
-int pevent_get_field_val(struct trace_seq *s, struct event_format *event,
-                        const char *name, struct pevent_record *record,
-                        unsigned long long *val, int err);
-int pevent_get_common_field_val(struct trace_seq *s, struct event_format *event,
-                               const char *name, struct pevent_record *record,
-                               unsigned long long *val, int err);
-int pevent_get_any_field_val(struct trace_seq *s, struct event_format *event,
-                            const char *name, struct pevent_record *record,
+int tep_set_function_resolver(struct tep_handle *pevent,
+                             tep_func_resolver_t *func, void *priv);
+void tep_reset_function_resolver(struct tep_handle *pevent);
+int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid);
+int tep_register_trace_clock(struct tep_handle *pevent, const char *trace_clock);
+int tep_register_function(struct tep_handle *pevent, char *name,
+                         unsigned long long addr, char *mod);
+int tep_register_print_string(struct tep_handle *pevent, const char *fmt,
+                             unsigned long long addr);
+int tep_pid_is_registered(struct tep_handle *pevent, int pid);
+
+void tep_print_event_task(struct tep_handle *pevent, struct trace_seq *s,
+                         struct event_format *event,
+                         struct tep_record *record);
+void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s,
+                         struct event_format *event,
+                         struct tep_record *record,
+                         bool use_trace_clock);
+void tep_print_event_data(struct tep_handle *pevent, struct trace_seq *s,
+                         struct event_format *event,
+                         struct tep_record *record);
+void tep_print_event(struct tep_handle *pevent, struct trace_seq *s,
+                    struct tep_record *record, bool use_trace_clock);
+
+int tep_parse_header_page(struct tep_handle *pevent, char *buf, unsigned long size,
+                         int long_size);
+
+enum tep_errno tep_parse_event(struct tep_handle *pevent, const char *buf,
+                              unsigned long size, const char *sys);
+enum tep_errno tep_parse_format(struct tep_handle *pevent,
+                               struct event_format **eventp,
+                               const char *buf,
+                               unsigned long size, const char *sys);
+void tep_free_format(struct event_format *event);
+void tep_free_format_field(struct format_field *field);
+
+void *tep_get_field_raw(struct trace_seq *s, struct event_format *event,
+                       const char *name, struct tep_record *record,
+                       int *len, int err);
+
+int tep_get_field_val(struct trace_seq *s, struct event_format *event,
+                     const char *name, struct tep_record *record,
+                     unsigned long long *val, int err);
+int tep_get_common_field_val(struct trace_seq *s, struct event_format *event,
+                            const char *name, struct tep_record *record,
                             unsigned long long *val, int err);
+int tep_get_any_field_val(struct trace_seq *s, struct event_format *event,
+                         const char *name, struct tep_record *record,
+                         unsigned long long *val, int err);
 
-int pevent_print_num_field(struct trace_seq *s, const char *fmt,
+int tep_print_num_field(struct trace_seq *s, const char *fmt,
                           struct event_format *event, const char *name,
-                          struct pevent_record *record, int err);
-
-int pevent_print_func_field(struct trace_seq *s, const char *fmt,
-                          struct event_format *event, const char *name,
-                          struct pevent_record *record, int err);
-
-int pevent_register_event_handler(struct pevent *pevent, int id,
-                                 const char *sys_name, const char *event_name,
-                                 pevent_event_handler_func func, void *context);
-int pevent_unregister_event_handler(struct pevent *pevent, int id,
-                                   const char *sys_name, const char *event_name,
-                                   pevent_event_handler_func func, void *context);
-int pevent_register_print_function(struct pevent *pevent,
-                                  pevent_func_handler func,
-                                  enum pevent_func_arg_type ret_type,
-                                  char *name, ...);
-int pevent_unregister_print_function(struct pevent *pevent,
-                                    pevent_func_handler func, char *name);
-
-struct format_field *pevent_find_common_field(struct event_format *event, const char *name);
-struct format_field *pevent_find_field(struct event_format *event, const char *name);
-struct format_field *pevent_find_any_field(struct event_format *event, const char *name);
-
-const char *pevent_find_function(struct pevent *pevent, unsigned long long addr);
+                          struct tep_record *record, int err);
+
+int tep_print_func_field(struct trace_seq *s, const char *fmt,
+                        struct event_format *event, const char *name,
+                        struct tep_record *record, int err);
+
+int tep_register_event_handler(struct tep_handle *pevent, int id,
+                              const char *sys_name, const char *event_name,
+                              tep_event_handler_func func, void *context);
+int tep_unregister_event_handler(struct tep_handle *pevent, int id,
+                                const char *sys_name, const char *event_name,
+                                tep_event_handler_func func, void *context);
+int tep_register_print_function(struct tep_handle *pevent,
+                               tep_func_handler func,
+                               enum tep_func_arg_type ret_type,
+                               char *name, ...);
+int tep_unregister_print_function(struct tep_handle *pevent,
+                                 tep_func_handler func, char *name);
+
+struct format_field *tep_find_common_field(struct event_format *event, const char *name);
+struct format_field *tep_find_field(struct event_format *event, const char *name);
+struct format_field *tep_find_any_field(struct event_format *event, const char *name);
+
+const char *tep_find_function(struct tep_handle *pevent, unsigned long long addr);
 unsigned long long
-pevent_find_function_address(struct pevent *pevent, unsigned long long addr);
-unsigned long long pevent_read_number(struct pevent *pevent, const void *ptr, int size);
-int pevent_read_number_field(struct format_field *field, const void *data,
-                            unsigned long long *value);
+tep_find_function_address(struct tep_handle *pevent, unsigned long long addr);
+unsigned long long tep_read_number(struct tep_handle *pevent, const void *ptr, int size);
+int tep_read_number_field(struct format_field *field, const void *data,
+                         unsigned long long *value);
 
-struct event_format *pevent_find_event(struct pevent *pevent, int id);
+struct event_format *tep_find_event(struct tep_handle *pevent, int id);
 
 struct event_format *
-pevent_find_event_by_name(struct pevent *pevent, const char *sys, const char *name);
+tep_find_event_by_name(struct tep_handle *pevent, const char *sys, const char *name);
 
 struct event_format *
-pevent_find_event_by_record(struct pevent *pevent, struct pevent_record *record);
-
-void pevent_data_lat_fmt(struct pevent *pevent,
-                        struct trace_seq *s, struct pevent_record *record);
-int pevent_data_type(struct pevent *pevent, struct pevent_record *rec);
-struct event_format *pevent_data_event_from_type(struct pevent *pevent, int type);
-int pevent_data_pid(struct pevent *pevent, struct pevent_record *rec);
-int pevent_data_preempt_count(struct pevent *pevent, struct pevent_record *rec);
-int pevent_data_flags(struct pevent *pevent, struct pevent_record *rec);
-const char *pevent_data_comm_from_pid(struct pevent *pevent, int pid);
+tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record);
+
+void tep_data_lat_fmt(struct tep_handle *pevent,
+                     struct trace_seq *s, struct tep_record *record);
+int tep_data_type(struct tep_handle *pevent, struct tep_record *rec);
+struct event_format *tep_data_event_from_type(struct tep_handle *pevent, int type);
+int tep_data_pid(struct tep_handle *pevent, struct tep_record *rec);
+int tep_data_preempt_count(struct tep_handle *pevent, struct tep_record *rec);
+int tep_data_flags(struct tep_handle *pevent, struct tep_record *rec);
+const char *tep_data_comm_from_pid(struct tep_handle *pevent, int pid);
 struct cmdline;
-struct cmdline *pevent_data_pid_from_comm(struct pevent *pevent, const char *comm,
-                                         struct cmdline *next);
-int pevent_cmdline_pid(struct pevent *pevent, struct cmdline *cmdline);
-
-void pevent_print_field(struct trace_seq *s, void *data,
-                       struct format_field *field);
-void pevent_print_fields(struct trace_seq *s, void *data,
-                        int size __maybe_unused, struct event_format *event);
-void pevent_event_info(struct trace_seq *s, struct event_format *event,
-                      struct pevent_record *record);
-int pevent_strerror(struct pevent *pevent, enum pevent_errno errnum,
+struct cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm,
+                                      struct cmdline *next);
+int tep_cmdline_pid(struct tep_handle *pevent, struct cmdline *cmdline);
+
+void tep_print_field(struct trace_seq *s, void *data,
+                    struct format_field *field);
+void tep_print_fields(struct trace_seq *s, void *data,
+                     int size __maybe_unused, struct event_format *event);
+void tep_event_info(struct trace_seq *s, struct event_format *event,
+                      struct tep_record *record);
+int tep_strerror(struct tep_handle *pevent, enum tep_errno errnum,
                    char *buf, size_t buflen);
 
-struct event_format **pevent_list_events(struct pevent *pevent, enum event_sort_type);
-struct format_field **pevent_event_common_fields(struct event_format *event);
-struct format_field **pevent_event_fields(struct event_format *event);
+struct event_format **tep_list_events(struct tep_handle *pevent, enum event_sort_type);
+struct format_field **tep_event_common_fields(struct event_format *event);
+struct format_field **tep_event_fields(struct event_format *event);
 
-static inline int pevent_get_cpus(struct pevent *pevent)
+static inline int tep_get_cpus(struct tep_handle *pevent)
 {
        return pevent->cpus;
 }
 
-static inline void pevent_set_cpus(struct pevent *pevent, int cpus)
+static inline void tep_set_cpus(struct tep_handle *pevent, int cpus)
 {
        pevent->cpus = cpus;
 }
 
-static inline int pevent_get_long_size(struct pevent *pevent)
+static inline int tep_get_long_size(struct tep_handle *pevent)
 {
        return pevent->long_size;
 }
 
-static inline void pevent_set_long_size(struct pevent *pevent, int long_size)
+static inline void tep_set_long_size(struct tep_handle *pevent, int long_size)
 {
        pevent->long_size = long_size;
 }
 
-static inline int pevent_get_page_size(struct pevent *pevent)
+static inline int tep_get_page_size(struct tep_handle *pevent)
 {
        return pevent->page_size;
 }
 
-static inline void pevent_set_page_size(struct pevent *pevent, int _page_size)
+static inline void tep_set_page_size(struct tep_handle *pevent, int _page_size)
 {
        pevent->page_size = _page_size;
 }
 
-static inline int pevent_is_file_bigendian(struct pevent *pevent)
+static inline int tep_is_file_bigendian(struct tep_handle *pevent)
 {
        return pevent->file_bigendian;
 }
 
-static inline void pevent_set_file_bigendian(struct pevent *pevent, int endian)
+static inline void tep_set_file_bigendian(struct tep_handle *pevent, int endian)
 {
        pevent->file_bigendian = endian;
 }
 
-static inline int pevent_is_host_bigendian(struct pevent *pevent)
+static inline int tep_is_host_bigendian(struct tep_handle *pevent)
 {
        return pevent->host_bigendian;
 }
 
-static inline void pevent_set_host_bigendian(struct pevent *pevent, int endian)
+static inline void tep_set_host_bigendian(struct tep_handle *pevent, int endian)
 {
        pevent->host_bigendian = endian;
 }
 
-static inline int pevent_is_latency_format(struct pevent *pevent)
+static inline int tep_is_latency_format(struct tep_handle *pevent)
 {
        return pevent->latency_format;
 }
 
-static inline void pevent_set_latency_format(struct pevent *pevent, int lat)
+static inline void tep_set_latency_format(struct tep_handle *pevent, int lat)
 {
        pevent->latency_format = lat;
 }
 
-struct pevent *pevent_alloc(void);
-void pevent_free(struct pevent *pevent);
-void pevent_ref(struct pevent *pevent);
-void pevent_unref(struct pevent *pevent);
+struct tep_handle *tep_alloc(void);
+void tep_free(struct tep_handle *pevent);
+void tep_ref(struct tep_handle *pevent);
+void tep_unref(struct tep_handle *pevent);
 
 /* access to the internal parser */
-void pevent_buffer_init(const char *buf, unsigned long long size);
-enum event_type pevent_read_token(char **tok);
-void pevent_free_token(char *token);
-int pevent_peek_char(void);
-const char *pevent_get_input_buf(void);
-unsigned long long pevent_get_input_buf_ptr(void);
+void tep_buffer_init(const char *buf, unsigned long long size);
+enum event_type tep_read_token(char **tok);
+void tep_free_token(char *token);
+int tep_peek_char(void);
+const char *tep_get_input_buf(void);
+unsigned long long tep_get_input_buf_ptr(void);
 
 /* for debugging */
-void pevent_print_funcs(struct pevent *pevent);
-void pevent_print_printk(struct pevent *pevent);
+void tep_print_funcs(struct tep_handle *pevent);
+void tep_print_printk(struct tep_handle *pevent);
 
 /* ----------------------- filtering ----------------------- */
 
@@ -930,22 +930,22 @@ struct filter_type {
        struct filter_arg       *filter;
 };
 
-#define PEVENT_FILTER_ERROR_BUFSZ  1024
+#define TEP_FILTER_ERROR_BUFSZ  1024
 
 struct event_filter {
-       struct pevent           *pevent;
+       struct tep_handle       *pevent;
        int                     filters;
        struct filter_type      *event_filters;
-       char                    error_buffer[PEVENT_FILTER_ERROR_BUFSZ];
+       char                    error_buffer[TEP_FILTER_ERROR_BUFSZ];
 };
 
-struct event_filter *pevent_filter_alloc(struct pevent *pevent);
+struct event_filter *tep_filter_alloc(struct tep_handle *pevent);
 
 /* for backward compatibility */
-#define FILTER_NONE            PEVENT_ERRNO__NO_FILTER
-#define FILTER_NOEXIST         PEVENT_ERRNO__FILTER_NOT_FOUND
-#define FILTER_MISS            PEVENT_ERRNO__FILTER_MISS
-#define FILTER_MATCH           PEVENT_ERRNO__FILTER_MATCH
+#define FILTER_NONE            TEP_ERRNO__NO_FILTER
+#define FILTER_NOEXIST         TEP_ERRNO__FILTER_NOT_FOUND
+#define FILTER_MISS            TEP_ERRNO__FILTER_MISS
+#define FILTER_MATCH           TEP_ERRNO__FILTER_MATCH
 
 enum filter_trivial_type {
        FILTER_TRIVIAL_FALSE,
@@ -953,39 +953,39 @@ enum filter_trivial_type {
        FILTER_TRIVIAL_BOTH,
 };
 
-enum pevent_errno pevent_filter_add_filter_str(struct event_filter *filter,
-                                              const char *filter_str);
+enum tep_errno tep_filter_add_filter_str(struct event_filter *filter,
+                                        const char *filter_str);
 
-enum pevent_errno pevent_filter_match(struct event_filter *filter,
-                                     struct pevent_record *record);
+enum tep_errno tep_filter_match(struct event_filter *filter,
+                               struct tep_record *record);
 
-int pevent_filter_strerror(struct event_filter *filter, enum pevent_errno err,
-                          char *buf, size_t buflen);
+int tep_filter_strerror(struct event_filter *filter, enum tep_errno err,
+                       char *buf, size_t buflen);
 
-int pevent_event_filtered(struct event_filter *filter,
-                         int event_id);
+int tep_event_filtered(struct event_filter *filter,
+                      int event_id);
 
-void pevent_filter_reset(struct event_filter *filter);
+void tep_filter_reset(struct event_filter *filter);
 
-int pevent_filter_clear_trivial(struct event_filter *filter,
-                                enum filter_trivial_type type);
+int tep_filter_clear_trivial(struct event_filter *filter,
+                            enum filter_trivial_type type);
 
-void pevent_filter_free(struct event_filter *filter);
+void tep_filter_free(struct event_filter *filter);
 
-char *pevent_filter_make_string(struct event_filter *filter, int event_id);
+char *tep_filter_make_string(struct event_filter *filter, int event_id);
 
-int pevent_filter_remove_event(struct event_filter *filter,
-                              int event_id);
+int tep_filter_remove_event(struct event_filter *filter,
+                           int event_id);
 
-int pevent_filter_event_has_trivial(struct event_filter *filter,
-                                   int event_id,
-                                   enum filter_trivial_type type);
+int tep_filter_event_has_trivial(struct event_filter *filter,
+                                int event_id,
+                                enum filter_trivial_type type);
 
-int pevent_filter_copy(struct event_filter *dest, struct event_filter *source);
+int tep_filter_copy(struct event_filter *dest, struct event_filter *source);
 
-int pevent_update_trivial(struct event_filter *dest, struct event_filter *source,
-                         enum filter_trivial_type type);
+int tep_update_trivial(struct event_filter *dest, struct event_filter *source,
+                       enum filter_trivial_type type);
 
-int pevent_filter_compare(struct event_filter *filter1, struct event_filter *filter2);
+int tep_filter_compare(struct event_filter *filter1, struct event_filter *filter2);
 
 #endif /* _PARSE_EVENTS_H */
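
A hedged sketch of the filter entry points declared above; the "sys/event:expression" filter string syntax and the record source are assumptions for illustration, only the signatures are taken from this header.

        #include <stdio.h>
        #include "event-parse.h"

        /* Sketch: does this record match sched_switch with next_pid == 0? */
        static int matches_idle_switch(struct tep_handle *tep, struct tep_record *record)
        {
                struct event_filter *filter = tep_filter_alloc(tep);
                enum tep_errno err;
                char msg[128];
                int hit = 0;

                if (!filter)
                        return -1;

                err = tep_filter_add_filter_str(filter, "sched/sched_switch:next_pid==0");
                if (err) {
                        tep_filter_strerror(filter, err, msg, sizeof(msg));
                        fprintf(stderr, "bad filter: %s\n", msg);
                } else {
                        hit = tep_filter_match(filter, record) == TEP_ERRNO__FILTER_MATCH;
                }

                tep_filter_free(filter);
                return hit;
        }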
index d542cb60ca1a94bc54cb54245d219e1159c9e16d..f17e25097e1e2573f218639f601ecb6102f67b8e 100644 (file)
@@ -1,21 +1,7 @@
+// SPDX-License-Identifier: LGPL-2.1
 /*
  * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
  *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not,  see <http://www.gnu.org/licenses>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
 #include <ctype.h>
@@ -34,7 +20,7 @@
 
 static struct registered_plugin_options {
        struct registered_plugin_options        *next;
-       struct pevent_plugin_option             *options;
+       struct tep_plugin_option                *options;
 } *registered_options;
 
 static struct trace_plugin_options {
@@ -58,7 +44,7 @@ static void lower_case(char *str)
                *str = tolower(*str);
 }
 
-static int update_option_value(struct pevent_plugin_option *op, const char *val)
+static int update_option_value(struct tep_plugin_option *op, const char *val)
 {
        char *op_val;
 
@@ -97,7 +83,7 @@ static int update_option_value(struct pevent_plugin_option *op, const char *val)
 }
 
 /**
- * traceevent_plugin_list_options - get list of plugin options
+ * tep_plugin_list_options - get list of plugin options
  *
  * Returns an array of char strings that list the currently registered
  * plugin options in the format of <plugin>:<option>. This list can be
@@ -106,12 +92,12 @@ static int update_option_value(struct pevent_plugin_option *op, const char *val)
  * Returns NULL if there are no options registered. On error it returns
  * INVALID_PLUGIN_LIST_OPTION
  *
- * Must be freed with traceevent_plugin_free_options_list().
+ * Must be freed with tep_plugin_free_options_list().
  */
-char **traceevent_plugin_list_options(void)
+char **tep_plugin_list_options(void)
 {
        struct registered_plugin_options *reg;
-       struct pevent_plugin_option *op;
+       struct tep_plugin_option *op;
        char **list = NULL;
        char *name;
        int count = 0;
@@ -146,7 +132,7 @@ char **traceevent_plugin_list_options(void)
        return INVALID_PLUGIN_LIST_OPTION;
 }
 
-void traceevent_plugin_free_options_list(char **list)
+void tep_plugin_free_options_list(char **list)
 {
        int i;
 
@@ -163,7 +149,7 @@ void traceevent_plugin_free_options_list(char **list)
 }
 
 static int
-update_option(const char *file, struct pevent_plugin_option *option)
+update_option(const char *file, struct tep_plugin_option *option)
 {
        struct trace_plugin_options *op;
        char *plugin;
@@ -215,14 +201,14 @@ update_option(const char *file, struct pevent_plugin_option *option)
 }
 
 /**
- * traceevent_plugin_add_options - Add a set of options by a plugin
+ * tep_plugin_add_options - Add a set of options by a plugin
  * @name: The name of the plugin adding the options
  * @options: The set of options being loaded
  *
  * Sets the options with the values that have been added by the user.
  */
-int traceevent_plugin_add_options(const char *name,
-                                 struct pevent_plugin_option *options)
+int tep_plugin_add_options(const char *name,
+                          struct tep_plugin_option *options)
 {
        struct registered_plugin_options *reg;
 
@@ -241,10 +227,10 @@ int traceevent_plugin_add_options(const char *name,
 }
 
 /**
- * traceevent_plugin_remove_options - remove plugin options that were registered
- * @options: Options to removed that were registered with traceevent_plugin_add_options
+ * tep_plugin_remove_options - remove plugin options that were registered
+ * @options: Options to be removed that were registered with tep_plugin_add_options
  */
-void traceevent_plugin_remove_options(struct pevent_plugin_option *options)
+void tep_plugin_remove_options(struct tep_plugin_option *options)
 {
        struct registered_plugin_options **last;
        struct registered_plugin_options *reg;
@@ -260,19 +246,19 @@ void traceevent_plugin_remove_options(struct pevent_plugin_option *options)
 }
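
From the plugin side, the add/remove pair above is normally driven from the loader and unloader entry points, as in this sketch (the plugin name, option name and description are invented for illustration; a real option table can be seen in the ftrace plugin further down):

    #include "event-parse.h"

    static struct tep_plugin_option my_options[] = {
            {
                    .name = "verbose",                  /* hypothetical option */
                    .plugin_alias = "myplugin",
                    .description = "print extra detail",
            },
            {
                    .name = NULL,                       /* table is terminated by a NULL name */
            }
    };

    int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
    {
            tep_plugin_add_options("myplugin", my_options);
            return 0;
    }

    void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
    {
            tep_plugin_remove_options(my_options);
    }
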
 
 /**
- * traceevent_print_plugins - print out the list of plugins loaded
+ * tep_print_plugins - print out the list of plugins loaded
  * @s: the trace_seq descriptor to write to
  * @prefix: The prefix string to add before listing the option name
  * @suffix: The suffix string to append after the option name
- * @list: The list of plugins (usually returned by traceevent_load_plugins()
+ * @list: The list of plugins (usually returned by tep_load_plugins())
  *
  * Writes to the trace_seq @s the list of plugins (files) that is
- * returned by traceevent_load_plugins(). Use @prefix and @suffix for formating:
+ * returned by tep_load_plugins(). Use @prefix and @suffix for formatting:
  * @prefix = "  ", @suffix = "\n".
  */
-void traceevent_print_plugins(struct trace_seq *s,
-                             const char *prefix, const char *suffix,
-                             const struct plugin_list *list)
+void tep_print_plugins(struct trace_seq *s,
+                      const char *prefix, const char *suffix,
+                      const struct plugin_list *list)
 {
        while (list) {
                trace_seq_printf(s, "%s%s%s", prefix, list->name, suffix);
@@ -281,11 +267,11 @@ void traceevent_print_plugins(struct trace_seq *s,
 }
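
A caller-side sketch of the formatting arguments described above, using the @prefix/@suffix pairing suggested in the comment:

    /* Sketch: write one indented line per loaded plugin file, then dump it. */
    static void report_plugins(struct trace_seq *s, const struct plugin_list *plugins)
    {
            tep_print_plugins(s, "  ", "\n", plugins);
            trace_seq_do_printf(s);         /* flush the seq buffer to stdout */
    }
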
 
 static void
-load_plugin(struct pevent *pevent, const char *path,
+load_plugin(struct tep_handle *pevent, const char *path,
            const char *file, void *data)
 {
        struct plugin_list **plugin_list = data;
-       pevent_plugin_load_func func;
+       tep_plugin_load_func func;
        struct plugin_list *list;
        const char *alias;
        char *plugin;
@@ -305,14 +291,14 @@ load_plugin(struct pevent *pevent, const char *path,
                goto out_free;
        }
 
-       alias = dlsym(handle, PEVENT_PLUGIN_ALIAS_NAME);
+       alias = dlsym(handle, TEP_PLUGIN_ALIAS_NAME);
        if (!alias)
                alias = file;
 
-       func = dlsym(handle, PEVENT_PLUGIN_LOADER_NAME);
+       func = dlsym(handle, TEP_PLUGIN_LOADER_NAME);
        if (!func) {
                warning("could not find func '%s' in plugin '%s'\n%s\n",
-                       PEVENT_PLUGIN_LOADER_NAME, plugin, dlerror());
+                       TEP_PLUGIN_LOADER_NAME, plugin, dlerror());
                goto out_free;
        }
 
@@ -336,9 +322,9 @@ load_plugin(struct pevent *pevent, const char *path,
 }
 
 static void
-load_plugins_dir(struct pevent *pevent, const char *suffix,
+load_plugins_dir(struct tep_handle *pevent, const char *suffix,
                 const char *path,
-                void (*load_plugin)(struct pevent *pevent,
+                void (*load_plugin)(struct tep_handle *pevent,
                                     const char *path,
                                     const char *name,
                                     void *data),
@@ -378,8 +364,8 @@ load_plugins_dir(struct pevent *pevent, const char *suffix,
 }
 
 static void
-load_plugins(struct pevent *pevent, const char *suffix,
-            void (*load_plugin)(struct pevent *pevent,
+load_plugins(struct tep_handle *pevent, const char *suffix,
+            void (*load_plugin)(struct tep_handle *pevent,
                                 const char *path,
                                 const char *name,
                                 void *data),
@@ -390,7 +376,7 @@ load_plugins(struct pevent *pevent, const char *suffix,
        char *envdir;
        int ret;
 
-       if (pevent->flags & PEVENT_DISABLE_PLUGINS)
+       if (pevent->flags & TEP_DISABLE_PLUGINS)
                return;
 
        /*
@@ -398,7 +384,7 @@ load_plugins(struct pevent *pevent, const char *suffix,
         * check that first.
         */
 #ifdef PLUGIN_DIR
-       if (!(pevent->flags & PEVENT_DISABLE_SYS_PLUGINS))
+       if (!(pevent->flags & TEP_DISABLE_SYS_PLUGINS))
                load_plugins_dir(pevent, suffix, PLUGIN_DIR,
                                 load_plugin, data);
 #endif
@@ -431,7 +417,7 @@ load_plugins(struct pevent *pevent, const char *suffix,
 }
 
 struct plugin_list*
-traceevent_load_plugins(struct pevent *pevent)
+tep_load_plugins(struct tep_handle *pevent)
 {
        struct plugin_list *list = NULL;
 
@@ -440,15 +426,15 @@ traceevent_load_plugins(struct pevent *pevent)
 }
 
 void
-traceevent_unload_plugins(struct plugin_list *plugin_list, struct pevent *pevent)
+tep_unload_plugins(struct plugin_list *plugin_list, struct tep_handle *pevent)
 {
-       pevent_plugin_unload_func func;
+       tep_plugin_unload_func func;
        struct plugin_list *list;
 
        while (plugin_list) {
                list = plugin_list;
                plugin_list = list->next;
-               func = dlsym(list->handle, PEVENT_PLUGIN_UNLOADER_NAME);
+               func = dlsym(list->handle, TEP_PLUGIN_UNLOADER_NAME);
                if (func)
                        func(pevent);
                dlclose(list->handle);
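
End to end, the loader and unloader above are typically bracketed around the lifetime of a handle, roughly as below. tep_alloc()/tep_free() are the handle constructor/destructor names used after this rename series; they are an assumption here, not something shown in this hunk, so substitute whatever names your tree uses:

    #include "event-parse.h"

    static void with_plugins(void)
    {
            struct tep_handle *pevent = tep_alloc();        /* assumed name */
            struct plugin_list *plugins;

            if (!pevent)
                    return;
            /* Scans the system plugin directory and the user's local plugin directory. */
            plugins = tep_load_plugins(pevent);

            /* ... parse and print events with this handle ... */

            tep_unload_plugins(plugins, pevent);
            tep_free(pevent);                               /* assumed name */
    }
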
index d1dc2170e4023dbf8cfec33b386bcecd15972035..0560b96a31d1b6e8f972870c7e76dd3dec6e2af5 100644 (file)
@@ -1,21 +1,7 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
 /*
  * Copyright (C) 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
  *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not,  see <http://www.gnu.org/licenses>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 #ifndef __UTIL_H
 #define __UTIL_H
index ca424b157e4653a009a078604a217bf3853a8ed8..af2a1f3b7424141483c514daf62260b3fba08cf7 100644 (file)
@@ -1,22 +1,7 @@
+// SPDX-License-Identifier: LGPL-2.1
 /*
  * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
  *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 #include <stdio.h>
 #include <stdlib.h>
index 431e8b309f6e1bb1d984e81959bf66d449d7645b..e76154c02ee7ac0a2c7c7a72cf2e4e04cf323b1f 100644 (file)
@@ -1,21 +1,7 @@
+// SPDX-License-Identifier: LGPL-2.1
 /*
  * Copyright (C) 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
  *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not,  see <http://www.gnu.org/licenses>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 #include <stdio.h>
 #include <stdlib.h>
@@ -51,8 +37,8 @@ static void show_error(char *error_buf, const char *fmt, ...)
        int len;
        int i;
 
-       input = pevent_get_input_buf();
-       index = pevent_get_input_buf_ptr();
+       input = tep_get_input_buf();
+       index = tep_get_input_buf_ptr();
        len = input ? strlen(input) : 0;
 
        if (len) {
@@ -66,13 +52,13 @@ static void show_error(char *error_buf, const char *fmt, ...)
        }
 
        va_start(ap, fmt);
-       vsnprintf(error_buf + len, PEVENT_FILTER_ERROR_BUFSZ - len, fmt, ap);
+       vsnprintf(error_buf + len, TEP_FILTER_ERROR_BUFSZ - len, fmt, ap);
        va_end(ap);
 }
 
 static void free_token(char *token)
 {
-       pevent_free_token(token);
+       tep_free_token(token);
 }
 
 static enum event_type read_token(char **tok)
@@ -82,13 +68,13 @@ static enum event_type read_token(char **tok)
 
        do {
                free_token(token);
-               type = pevent_read_token(&token);
+               type = tep_read_token(&token);
        } while (type == EVENT_NEWLINE || type == EVENT_SPACE);
 
        /* If token is = or ! check to see if the next char is ~ */
        if (token &&
            (strcmp(token, "=") == 0 || strcmp(token, "!") == 0) &&
-           pevent_peek_char() == '~') {
+           tep_peek_char() == '~') {
                /* append it */
                *tok = malloc(3);
                if (*tok == NULL) {
@@ -98,7 +84,7 @@ static enum event_type read_token(char **tok)
                sprintf(*tok, "%c%c", *token, '~');
                free_token(token);
                /* Now remove the '~' from the buffer */
-               pevent_read_token(&token);
+               tep_read_token(&token);
                free_token(token);
        } else
                *tok = token;
@@ -167,7 +153,7 @@ add_filter_type(struct event_filter *filter, int id)
 
        filter_type = &filter->event_filters[i];
        filter_type->event_id = id;
-       filter_type->event = pevent_find_event(filter->pevent, id);
+       filter_type->event = tep_find_event(filter->pevent, id);
        filter_type->filter = NULL;
 
        filter->filters++;
@@ -176,10 +162,10 @@ add_filter_type(struct event_filter *filter, int id)
 }
 
 /**
- * pevent_filter_alloc - create a new event filter
+ * tep_filter_alloc - create a new event filter
  * @pevent: The pevent that this filter is associated with
  */
-struct event_filter *pevent_filter_alloc(struct pevent *pevent)
+struct event_filter *tep_filter_alloc(struct tep_handle *pevent)
 {
        struct event_filter *filter;
 
@@ -189,7 +175,7 @@ struct event_filter *pevent_filter_alloc(struct pevent *pevent)
 
        memset(filter, 0, sizeof(*filter));
        filter->pevent = pevent;
-       pevent_ref(pevent);
+       tep_ref(pevent);
 
        return filter;
 }
@@ -268,8 +254,8 @@ static int event_match(struct event_format *event,
                !regexec(ereg, event->name, 0, NULL, 0);
 }
 
-static enum pevent_errno
-find_event(struct pevent *pevent, struct event_list **events,
+static enum tep_errno
+find_event(struct tep_handle *pevent, struct event_list **events,
           char *sys_name, char *event_name)
 {
        struct event_format *event;
@@ -289,26 +275,26 @@ find_event(struct pevent *pevent, struct event_list **events,
 
        ret = asprintf(&reg, "^%s$", event_name);
        if (ret < 0)
-               return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+               return TEP_ERRNO__MEM_ALLOC_FAILED;
 
        ret = regcomp(&ereg, reg, REG_ICASE|REG_NOSUB);
        free(reg);
 
        if (ret)
-               return PEVENT_ERRNO__INVALID_EVENT_NAME;
+               return TEP_ERRNO__INVALID_EVENT_NAME;
 
        if (sys_name) {
                ret = asprintf(&reg, "^%s$", sys_name);
                if (ret < 0) {
                        regfree(&ereg);
-                       return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+                       return TEP_ERRNO__MEM_ALLOC_FAILED;
                }
 
                ret = regcomp(&sreg, reg, REG_ICASE|REG_NOSUB);
                free(reg);
                if (ret) {
                        regfree(&ereg);
-                       return PEVENT_ERRNO__INVALID_EVENT_NAME;
+                       return TEP_ERRNO__INVALID_EVENT_NAME;
                }
        }
 
@@ -328,9 +314,9 @@ find_event(struct pevent *pevent, struct event_list **events,
                regfree(&sreg);
 
        if (!match)
-               return PEVENT_ERRNO__EVENT_NOT_FOUND;
+               return TEP_ERRNO__EVENT_NOT_FOUND;
        if (fail)
-               return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+               return TEP_ERRNO__MEM_ALLOC_FAILED;
 
        return 0;
 }
@@ -346,7 +332,7 @@ static void free_events(struct event_list *events)
        }
 }
 
-static enum pevent_errno
+static enum tep_errno
 create_arg_item(struct event_format *event, const char *token,
                enum event_type type, struct filter_arg **parg, char *error_str)
 {
@@ -356,7 +342,7 @@ create_arg_item(struct event_format *event, const char *token,
        arg = allocate_arg();
        if (arg == NULL) {
                show_error(error_str, "failed to allocate filter arg");
-               return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+               return TEP_ERRNO__MEM_ALLOC_FAILED;
        }
 
        switch (type) {
@@ -370,7 +356,7 @@ create_arg_item(struct event_format *event, const char *token,
                if (!arg->value.str) {
                        free_arg(arg);
                        show_error(error_str, "failed to allocate string filter arg");
-                       return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+                       return TEP_ERRNO__MEM_ALLOC_FAILED;
                }
                break;
        case EVENT_ITEM:
@@ -382,7 +368,7 @@ create_arg_item(struct event_format *event, const char *token,
                        break;
                }
                /* Consider this a field */
-               field = pevent_find_any_field(event, token);
+               field = tep_find_any_field(event, token);
                if (!field) {
                        /* If token is 'COMM' or 'CPU' then it is special */
                        if (strcmp(token, COMM) == 0) {
@@ -402,7 +388,7 @@ create_arg_item(struct event_format *event, const char *token,
        default:
                free_arg(arg);
                show_error(error_str, "expected a value but found %s", token);
-               return PEVENT_ERRNO__UNEXPECTED_TYPE;
+               return TEP_ERRNO__UNEXPECTED_TYPE;
        }
        *parg = arg;
        return 0;
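
As the COMM/CPU handling above implies, filter strings may reference these synthesized fields even though they are not part of the event payload. An illustrative (not authoritative) filter string:

    /* Sketch: match sched_switch records from CPU 0 whose task is "bash". */
    tep_filter_add_filter_str(filter, "sched/sched_switch: COMM == \"bash\" && CPU == 0");
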
@@ -454,7 +440,7 @@ create_arg_cmp(enum filter_cmp_type ctype)
        return arg;
 }
 
-static enum pevent_errno
+static enum tep_errno
 add_right(struct filter_arg *op, struct filter_arg *arg, char *error_str)
 {
        struct filter_arg *left;
@@ -487,7 +473,7 @@ add_right(struct filter_arg *op, struct filter_arg *arg, char *error_str)
                        break;
                default:
                        show_error(error_str, "Illegal rvalue");
-                       return PEVENT_ERRNO__ILLEGAL_RVALUE;
+                       return TEP_ERRNO__ILLEGAL_RVALUE;
                }
 
                /*
@@ -534,7 +520,7 @@ add_right(struct filter_arg *op, struct filter_arg *arg, char *error_str)
                        if (left->type != FILTER_ARG_FIELD) {
                                show_error(error_str,
                                           "Illegal lvalue for string comparison");
-                               return PEVENT_ERRNO__ILLEGAL_LVALUE;
+                               return TEP_ERRNO__ILLEGAL_LVALUE;
                        }
 
                        /* Make sure this is a valid string compare */
@@ -553,13 +539,13 @@ add_right(struct filter_arg *op, struct filter_arg *arg, char *error_str)
                                        show_error(error_str,
                                                   "RegEx '%s' did not compute",
                                                   str);
-                                       return PEVENT_ERRNO__INVALID_REGEX;
+                                       return TEP_ERRNO__INVALID_REGEX;
                                }
                                break;
                        default:
                                show_error(error_str,
                                           "Illegal comparison for string");
-                               return PEVENT_ERRNO__ILLEGAL_STRING_CMP;
+                               return TEP_ERRNO__ILLEGAL_STRING_CMP;
                        }
 
                        op->type = FILTER_ARG_STR;
@@ -568,7 +554,7 @@ add_right(struct filter_arg *op, struct filter_arg *arg, char *error_str)
                        op->str.val = strdup(str);
                        if (!op->str.val) {
                                show_error(error_str, "Failed to allocate string filter");
-                               return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+                               return TEP_ERRNO__MEM_ALLOC_FAILED;
                        }
                        /*
                         * Need a buffer to copy data for tests
@@ -576,7 +562,7 @@ add_right(struct filter_arg *op, struct filter_arg *arg, char *error_str)
                        op->str.buffer = malloc(op->str.field->size + 1);
                        if (!op->str.buffer) {
                                show_error(error_str, "Failed to allocate string filter");
-                               return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+                               return TEP_ERRNO__MEM_ALLOC_FAILED;
                        }
                        /* Null terminate this buffer */
                        op->str.buffer[op->str.field->size] = 0;
@@ -595,7 +581,7 @@ add_right(struct filter_arg *op, struct filter_arg *arg, char *error_str)
                        case FILTER_CMP_NOT_REGEX:
                                show_error(error_str,
                                           "Op not allowed with integers");
-                               return PEVENT_ERRNO__ILLEGAL_INTEGER_CMP;
+                               return TEP_ERRNO__ILLEGAL_INTEGER_CMP;
 
                        default:
                                break;
@@ -616,7 +602,7 @@ add_right(struct filter_arg *op, struct filter_arg *arg, char *error_str)
 
  out_fail:
        show_error(error_str, "Syntax error");
-       return PEVENT_ERRNO__SYNTAX_ERROR;
+       return TEP_ERRNO__SYNTAX_ERROR;
 }
 
 static struct filter_arg *
@@ -629,7 +615,7 @@ rotate_op_right(struct filter_arg *a, struct filter_arg *b)
        return arg;
 }
 
-static enum pevent_errno add_left(struct filter_arg *op, struct filter_arg *arg)
+static enum tep_errno add_left(struct filter_arg *op, struct filter_arg *arg)
 {
        switch (op->type) {
        case FILTER_ARG_EXP:
@@ -648,11 +634,11 @@ static enum pevent_errno add_left(struct filter_arg *op, struct filter_arg *arg)
                /* left arg of compares must be a field */
                if (arg->type != FILTER_ARG_FIELD &&
                    arg->type != FILTER_ARG_BOOLEAN)
-                       return PEVENT_ERRNO__INVALID_ARG_TYPE;
+                       return TEP_ERRNO__INVALID_ARG_TYPE;
                op->num.left = arg;
                break;
        default:
-               return PEVENT_ERRNO__INVALID_ARG_TYPE;
+               return TEP_ERRNO__INVALID_ARG_TYPE;
        }
        return 0;
 }
@@ -765,7 +751,7 @@ enum filter_vals {
        FILTER_VAL_TRUE,
 };
 
-static enum pevent_errno
+static enum tep_errno
 reparent_op_arg(struct filter_arg *parent, struct filter_arg *old_child,
                struct filter_arg *arg, char *error_str)
 {
@@ -775,7 +761,7 @@ reparent_op_arg(struct filter_arg *parent, struct filter_arg *old_child,
        if (parent->type != FILTER_ARG_OP &&
            arg->type != FILTER_ARG_OP) {
                show_error(error_str, "can not reparent other than OP");
-               return PEVENT_ERRNO__REPARENT_NOT_OP;
+               return TEP_ERRNO__REPARENT_NOT_OP;
        }
 
        /* Get the sibling */
@@ -787,7 +773,7 @@ reparent_op_arg(struct filter_arg *parent, struct filter_arg *old_child,
                other_child = old_child->op.right;
        } else {
                show_error(error_str, "Error in reparent op, find other child");
-               return PEVENT_ERRNO__REPARENT_FAILED;
+               return TEP_ERRNO__REPARENT_FAILED;
        }
 
        /* Detach arg from old_child */
@@ -808,7 +794,7 @@ reparent_op_arg(struct filter_arg *parent, struct filter_arg *old_child,
                ptr = &parent->op.left;
        else {
                show_error(error_str, "Error in reparent op");
-               return PEVENT_ERRNO__REPARENT_FAILED;
+               return TEP_ERRNO__REPARENT_FAILED;
        }
 
        *ptr = arg;
@@ -817,7 +803,7 @@ reparent_op_arg(struct filter_arg *parent, struct filter_arg *old_child,
        return 0;
 }
 
-/* Returns either filter_vals (success) or pevent_errno (failfure) */
+/* Returns either filter_vals (success) or tep_errno (failure) */
 static int test_arg(struct filter_arg *parent, struct filter_arg *arg,
                    char *error_str)
 {
@@ -912,7 +898,7 @@ static int test_arg(struct filter_arg *parent, struct filter_arg *arg,
                return rval;
        default:
                show_error(error_str, "bad arg in filter tree");
-               return PEVENT_ERRNO__BAD_FILTER_ARG;
+               return TEP_ERRNO__BAD_FILTER_ARG;
        }
        return FILTER_VAL_NORM;
 }
@@ -937,7 +923,7 @@ static int collapse_tree(struct filter_arg *arg,
                        arg->boolean.value = ret == FILTER_VAL_TRUE;
                } else {
                        show_error(error_str, "Failed to allocate filter arg");
-                       ret = PEVENT_ERRNO__MEM_ALLOC_FAILED;
+                       ret = TEP_ERRNO__MEM_ALLOC_FAILED;
                }
                break;
 
@@ -952,7 +938,7 @@ static int collapse_tree(struct filter_arg *arg,
        return ret;
 }
 
-static enum pevent_errno
+static enum tep_errno
 process_filter(struct event_format *event, struct filter_arg **parg,
               char *error_str, int not)
 {
@@ -966,7 +952,7 @@ process_filter(struct event_format *event, struct filter_arg **parg,
        enum filter_op_type btype;
        enum filter_exp_type etype;
        enum filter_cmp_type ctype;
-       enum pevent_errno ret;
+       enum tep_errno ret;
 
        *parg = NULL;
 
@@ -1004,7 +990,7 @@ process_filter(struct event_format *event, struct filter_arg **parg,
                case EVENT_DELIM:
                        if (*token == ',') {
                                show_error(error_str, "Illegal token ','");
-                               ret = PEVENT_ERRNO__ILLEGAL_TOKEN;
+                               ret = TEP_ERRNO__ILLEGAL_TOKEN;
                                goto fail;
                        }
 
@@ -1012,22 +998,22 @@ process_filter(struct event_format *event, struct filter_arg **parg,
                                if (left_item) {
                                        show_error(error_str,
                                                   "Open paren can not come after item");
-                                       ret = PEVENT_ERRNO__INVALID_PAREN;
+                                       ret = TEP_ERRNO__INVALID_PAREN;
                                        goto fail;
                                }
                                if (current_exp) {
                                        show_error(error_str,
                                                   "Open paren can not come after expression");
-                                       ret = PEVENT_ERRNO__INVALID_PAREN;
+                                       ret = TEP_ERRNO__INVALID_PAREN;
                                        goto fail;
                                }
 
                                ret = process_filter(event, &arg, error_str, 0);
-                               if (ret != PEVENT_ERRNO__UNBALANCED_PAREN) {
+                               if (ret != TEP_ERRNO__UNBALANCED_PAREN) {
                                        if (ret == 0) {
                                                show_error(error_str,
                                                           "Unbalanced number of '('");
-                                               ret = PEVENT_ERRNO__UNBALANCED_PAREN;
+                                               ret = TEP_ERRNO__UNBALANCED_PAREN;
                                        }
                                        goto fail;
                                }
@@ -1064,7 +1050,7 @@ process_filter(struct event_format *event, struct filter_arg **parg,
                                else
                                        *parg = current_exp;
                                free(token);
-                               return PEVENT_ERRNO__UNBALANCED_PAREN;
+                               return TEP_ERRNO__UNBALANCED_PAREN;
                        }
                        break;
 
@@ -1091,7 +1077,7 @@ process_filter(struct event_format *event, struct filter_arg **parg,
                        case OP_NONE:
                                show_error(error_str,
                                           "Unknown op token %s", token);
-                               ret = PEVENT_ERRNO__UNKNOWN_TOKEN;
+                               ret = TEP_ERRNO__UNKNOWN_TOKEN;
                                goto fail;
                        }
 
@@ -1179,11 +1165,11 @@ process_filter(struct event_format *event, struct filter_arg **parg,
 
  fail_alloc:
        show_error(error_str, "failed to allocate filter arg");
-       ret = PEVENT_ERRNO__MEM_ALLOC_FAILED;
+       ret = TEP_ERRNO__MEM_ALLOC_FAILED;
        goto fail;
  fail_syntax:
        show_error(error_str, "Syntax error");
-       ret = PEVENT_ERRNO__SYNTAX_ERROR;
+       ret = TEP_ERRNO__SYNTAX_ERROR;
  fail:
        free_arg(current_op);
        free_arg(current_exp);
@@ -1192,13 +1178,13 @@ process_filter(struct event_format *event, struct filter_arg **parg,
        return ret;
 }
 
-static enum pevent_errno
+static enum tep_errno
 process_event(struct event_format *event, const char *filter_str,
              struct filter_arg **parg, char *error_str)
 {
        int ret;
 
-       pevent_buffer_init(filter_str, strlen(filter_str));
+       tep_buffer_init(filter_str, strlen(filter_str));
 
        ret = process_filter(event, parg, error_str, 0);
        if (ret < 0)
@@ -1208,7 +1194,7 @@ process_event(struct event_format *event, const char *filter_str,
        if (!*parg) {
                *parg = allocate_arg();
                if (*parg == NULL)
-                       return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+                       return TEP_ERRNO__MEM_ALLOC_FAILED;
 
                (*parg)->type = FILTER_ARG_BOOLEAN;
                (*parg)->boolean.value = FILTER_FALSE;
@@ -1217,13 +1203,13 @@ process_event(struct event_format *event, const char *filter_str,
        return 0;
 }
 
-static enum pevent_errno
+static enum tep_errno
 filter_event(struct event_filter *filter, struct event_format *event,
             const char *filter_str, char *error_str)
 {
        struct filter_type *filter_type;
        struct filter_arg *arg;
-       enum pevent_errno ret;
+       enum tep_errno ret;
 
        if (filter_str) {
                ret = process_event(event, filter_str, &arg, error_str);
@@ -1234,7 +1220,7 @@ filter_event(struct event_filter *filter, struct event_format *event,
                /* just add a TRUE arg */
                arg = allocate_arg();
                if (arg == NULL)
-                       return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+                       return TEP_ERRNO__MEM_ALLOC_FAILED;
 
                arg->type = FILTER_ARG_BOOLEAN;
                arg->boolean.value = FILTER_TRUE;
@@ -1242,7 +1228,7 @@ filter_event(struct event_filter *filter, struct event_format *event,
 
        filter_type = add_filter_type(filter, event->id);
        if (filter_type == NULL)
-               return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+               return TEP_ERRNO__MEM_ALLOC_FAILED;
 
        if (filter_type->filter)
                free_arg(filter_type->filter);
@@ -1254,23 +1240,23 @@ filter_event(struct event_filter *filter, struct event_format *event,
 static void filter_init_error_buf(struct event_filter *filter)
 {
        /* clear buffer to reset show error */
-       pevent_buffer_init("", 0);
+       tep_buffer_init("", 0);
        filter->error_buffer[0] = '\0';
 }
 
 /**
- * pevent_filter_add_filter_str - add a new filter
+ * tep_filter_add_filter_str - add a new filter
  * @filter: the event filter to add to
  * @filter_str: the filter string that contains the filter
  *
  * Returns 0 if the filter was successfully added or a
- * negative error code.  Use pevent_filter_strerror() to see
+ * negative error code.  Use tep_filter_strerror() to see
  * actual error message in case of error.
  */
-enum pevent_errno pevent_filter_add_filter_str(struct event_filter *filter,
-                                              const char *filter_str)
+enum tep_errno tep_filter_add_filter_str(struct event_filter *filter,
+                                        const char *filter_str)
 {
-       struct pevent *pevent = filter->pevent;
+       struct tep_handle *pevent = filter->pevent;
        struct event_list *event;
        struct event_list *events = NULL;
        const char *filter_start;
@@ -1279,7 +1265,7 @@ enum pevent_errno pevent_filter_add_filter_str(struct event_filter *filter,
        char *event_name = NULL;
        char *sys_name = NULL;
        char *sp;
-       enum pevent_errno rtn = 0; /* PEVENT_ERRNO__SUCCESS */
+       enum tep_errno rtn = 0; /* TEP_ERRNO__SUCCESS */
        int len;
        int ret;
 
@@ -1305,7 +1291,7 @@ enum pevent_errno pevent_filter_add_filter_str(struct event_filter *filter,
                if (this_event == NULL) {
                        /* This can only happen when events is NULL, but still */
                        free_events(events);
-                       return PEVENT_ERRNO__MEM_ALLOC_FAILED;
+                       return TEP_ERRNO__MEM_ALLOC_FAILED;
                }
                memcpy(this_event, filter_str, len);
                this_event[len] = 0;
@@ -1322,7 +1308,7 @@ enum pevent_errno pevent_filter_add_filter_str(struct event_filter *filter,
                        /* This can only happen when events is NULL, but still */
                        free_events(events);
                        free(this_event);
-                       return PEVENT_ERRNO__FILTER_NOT_FOUND;
+                       return TEP_ERRNO__FILTER_NOT_FOUND;
                }
 
                /* Find this event */
@@ -1349,7 +1335,7 @@ enum pevent_errno pevent_filter_add_filter_str(struct event_filter *filter,
 
                if (ret >= 0 && pevent->test_filters) {
                        char *test;
-                       test = pevent_filter_make_string(filter, event->event->id);
+                       test = tep_filter_make_string(filter, event->event->id);
                        if (test) {
                                printf(" '%s: %s'\n", event->event->name, test);
                                free(test);
@@ -1371,7 +1357,7 @@ static void free_filter_type(struct filter_type *filter_type)
 }
 
 /**
- * pevent_filter_strerror - fill error message in a buffer
+ * tep_filter_strerror - fill error message in a buffer
  * @filter: the event filter that contains the error
  * @err: the error code
  * @buf: the buffer to be filled in
@@ -1379,10 +1365,10 @@ static void free_filter_type(struct filter_type *filter_type)
  *
  * Returns 0 if message was filled successfully, -1 if error
  */
-int pevent_filter_strerror(struct event_filter *filter, enum pevent_errno err,
-                          char *buf, size_t buflen)
+int tep_filter_strerror(struct event_filter *filter, enum tep_errno err,
+                       char *buf, size_t buflen)
 {
-       if (err <= __PEVENT_ERRNO__START || err >= __PEVENT_ERRNO__END)
+       if (err <= __TEP_ERRNO__START || err >= __TEP_ERRNO__END)
                return -1;
 
        if (strlen(filter->error_buffer) > 0) {
@@ -1393,11 +1379,11 @@ int pevent_filter_strerror(struct event_filter *filter, enum pevent_errno err,
                return 0;
        }
 
-       return pevent_strerror(filter->pevent, err, buf, buflen);
+       return tep_strerror(filter->pevent, err, buf, buflen);
 }
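
Putting tep_filter_alloc(), tep_filter_add_filter_str() and tep_filter_strerror() together, caller-side error handling looks roughly like this sketch (the filter expression itself is only an example):

    #include <stdio.h>
    #include "event-parse.h"

    static struct event_filter *make_filter(struct tep_handle *pevent)
    {
            struct event_filter *filter = tep_filter_alloc(pevent);
            enum tep_errno ret;
            char err[1024];

            if (!filter)
                    return NULL;

            /* illustrative filter: sched_switch events leaving a prio below 100 */
            ret = tep_filter_add_filter_str(filter, "sched/sched_switch:prev_prio < 100");
            if (ret < 0) {
                    tep_filter_strerror(filter, ret, err, sizeof(err));
                    fprintf(stderr, "filter error: %s\n", err);
                    tep_filter_free(filter);
                    return NULL;
            }
            return filter;
    }
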
 
 /**
- * pevent_filter_remove_event - remove a filter for an event
+ * tep_filter_remove_event - remove a filter for an event
  * @filter: the event filter to remove from
  * @event_id: the event to remove a filter for
  *
@@ -1407,8 +1393,8 @@ int pevent_filter_strerror(struct event_filter *filter, enum pevent_errno err,
  * Returns 1: if an event was removed
  *   0: if the event was not found
  */
-int pevent_filter_remove_event(struct event_filter *filter,
-                              int event_id)
+int tep_filter_remove_event(struct event_filter *filter,
+                           int event_id)
 {
        struct filter_type *filter_type;
        unsigned long len;
@@ -1437,12 +1423,12 @@ int pevent_filter_remove_event(struct event_filter *filter,
 }
 
 /**
- * pevent_filter_reset - clear all filters in a filter
+ * tep_filter_reset - clear all filters in a filter
  * @filter: the event filter to reset
  *
  * Removes all filters from a filter and resets it.
  */
-void pevent_filter_reset(struct event_filter *filter)
+void tep_filter_reset(struct event_filter *filter)
 {
        int i;
 
@@ -1454,11 +1440,11 @@ void pevent_filter_reset(struct event_filter *filter)
        filter->event_filters = NULL;
 }
 
-void pevent_filter_free(struct event_filter *filter)
+void tep_filter_free(struct event_filter *filter)
 {
-       pevent_unref(filter->pevent);
+       tep_unref(filter->pevent);
 
-       pevent_filter_reset(filter);
+       tep_filter_reset(filter);
 
        free(filter);
 }
@@ -1478,7 +1464,7 @@ static int copy_filter_type(struct event_filter *filter,
        /* Can't assume that the pevent's are the same */
        sys = filter_type->event->system;
        name = filter_type->event->name;
-       event = pevent_find_event_by_name(filter->pevent, sys, name);
+       event = tep_find_event_by_name(filter->pevent, sys, name);
        if (!event)
                return -1;
 
@@ -1515,18 +1501,18 @@ static int copy_filter_type(struct event_filter *filter,
 }
 
 /**
- * pevent_filter_copy - copy a filter using another filter
+ * tep_filter_copy - copy a filter using another filter
  * @dest - the filter to copy to
  * @source - the filter to copy from
  *
  * Returns 0 on success and -1 if not all filters were copied
  */
-int pevent_filter_copy(struct event_filter *dest, struct event_filter *source)
+int tep_filter_copy(struct event_filter *dest, struct event_filter *source)
 {
        int ret = 0;
        int i;
 
-       pevent_filter_reset(dest);
+       tep_filter_reset(dest);
 
        for (i = 0; i < source->filters; i++) {
                if (copy_filter_type(dest, source, &source->event_filters[i]))
@@ -1537,7 +1523,7 @@ int pevent_filter_copy(struct event_filter *dest, struct event_filter *source)
 
 
 /**
- * pevent_update_trivial - update the trivial filters with the given filter
+ * tep_update_trivial - update the trivial filters with the given filter
  * @dest - the filter to update
  * @source - the filter as the source of the update
  * @type - the type of trivial filter to update.
@@ -1547,11 +1533,11 @@ int pevent_filter_copy(struct event_filter *dest, struct event_filter *source)
  * Returns 0 on success and -1 if there was a problem updating, but
  *   events may have still been updated on error.
  */
-int pevent_update_trivial(struct event_filter *dest, struct event_filter *source,
-                         enum filter_trivial_type type)
+int tep_update_trivial(struct event_filter *dest, struct event_filter *source,
+                      enum filter_trivial_type type)
 {
-       struct pevent *src_pevent;
-       struct pevent *dest_pevent;
+       struct tep_handle *src_pevent;
+       struct tep_handle *dest_pevent;
        struct event_format *event;
        struct filter_type *filter_type;
        struct filter_arg *arg;
@@ -1578,14 +1564,14 @@ int pevent_update_trivial(struct event_filter *dest, struct event_filter *source
 
                if (src_pevent != dest_pevent) {
                        /* do a look up */
-                       event = pevent_find_event_by_name(src_pevent,
-                                                         event->system,
-                                                         event->name);
+                       event = tep_find_event_by_name(src_pevent,
+                                                      event->system,
+                                                      event->name);
                        if (!event)
                                return -1;
                }
 
-               str = pevent_filter_make_string(source, event->id);
+               str = tep_filter_make_string(source, event->id);
                if (!str)
                        continue;
 
@@ -1598,7 +1584,7 @@ int pevent_update_trivial(struct event_filter *dest, struct event_filter *source
 }
 
 /**
- * pevent_filter_clear_trivial - clear TRUE and FALSE filters
+ * tep_filter_clear_trivial - clear TRUE and FALSE filters
  * @filter: the filter to remove trivial filters from
  * @type: remove only true, false, or both
  *
@@ -1606,8 +1592,8 @@ int pevent_update_trivial(struct event_filter *dest, struct event_filter *source
  *
  * Returns 0 on success and -1 if there was a problem.
  */
-int pevent_filter_clear_trivial(struct event_filter *filter,
-                                enum filter_trivial_type type)
+int tep_filter_clear_trivial(struct event_filter *filter,
+                            enum filter_trivial_type type)
 {
        struct filter_type *filter_type;
        int count = 0;
@@ -1653,14 +1639,14 @@ int pevent_filter_clear_trivial(struct event_filter *filter,
                return 0;
 
        for (i = 0; i < count; i++)
-               pevent_filter_remove_event(filter, ids[i]);
+               tep_filter_remove_event(filter, ids[i]);
 
        free(ids);
        return 0;
 }
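
A one-line usage sketch of the helper above, using the existing FILTER_TRIVIAL_BOTH type to clear both kinds:

    /* Sketch: drop both always-true and always-false per-event filters. */
    if (tep_filter_clear_trivial(filter, FILTER_TRIVIAL_BOTH) < 0)
            fprintf(stderr, "failed to clear trivial filters\n");
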
 
 /**
- * pevent_filter_event_has_trivial - return true event contains trivial filter
+ * tep_filter_event_has_trivial - return true if event contains trivial filter
  * @filter: the filter with the information
  * @event_id: the id of the event to test
  * @type: trivial type to test for (TRUE, FALSE, EITHER)
@@ -1668,9 +1654,9 @@ int pevent_filter_clear_trivial(struct event_filter *filter,
  * Returns 1 if the event contains a matching trivial type
  *  otherwise 0.
  */
-int pevent_filter_event_has_trivial(struct event_filter *filter,
-                                   int event_id,
-                                   enum filter_trivial_type type)
+int tep_filter_event_has_trivial(struct event_filter *filter,
+                                int event_id,
+                                enum filter_trivial_type type)
 {
        struct filter_type *filter_type;
 
@@ -1697,22 +1683,22 @@ int pevent_filter_event_has_trivial(struct event_filter *filter,
 }
 
 static int test_filter(struct event_format *event, struct filter_arg *arg,
-                      struct pevent_record *record, enum pevent_errno *err);
+                      struct tep_record *record, enum tep_errno *err);
 
 static const char *
-get_comm(struct event_format *event, struct pevent_record *record)
+get_comm(struct event_format *event, struct tep_record *record)
 {
        const char *comm;
        int pid;
 
-       pid = pevent_data_pid(event->pevent, record);
-       comm = pevent_data_comm_from_pid(event->pevent, pid);
+       pid = tep_data_pid(event->pevent, record);
+       comm = tep_data_comm_from_pid(event->pevent, pid);
        return comm;
 }
 
 static unsigned long long
 get_value(struct event_format *event,
-         struct format_field *field, struct pevent_record *record)
+         struct format_field *field, struct tep_record *record)
 {
        unsigned long long val;
 
@@ -1728,7 +1714,7 @@ get_value(struct event_format *event,
        if (field == &cpu)
                return record->cpu;
 
-       pevent_read_number_field(field, record->data, &val);
+       tep_read_number_field(field, record->data, &val);
 
        if (!(field->flags & FIELD_IS_SIGNED))
                return val;
@@ -1748,11 +1734,11 @@ get_value(struct event_format *event,
 
 static unsigned long long
 get_arg_value(struct event_format *event, struct filter_arg *arg,
-             struct pevent_record *record, enum pevent_errno *err);
+             struct tep_record *record, enum tep_errno *err);
 
 static unsigned long long
 get_exp_value(struct event_format *event, struct filter_arg *arg,
-             struct pevent_record *record, enum pevent_errno *err)
+             struct tep_record *record, enum tep_errno *err)
 {
        unsigned long long lval, rval;
 
@@ -1800,14 +1786,14 @@ get_exp_value(struct event_format *event, struct filter_arg *arg,
        case FILTER_EXP_NOT:
        default:
                if (!*err)
-                       *err = PEVENT_ERRNO__INVALID_EXP_TYPE;
+                       *err = TEP_ERRNO__INVALID_EXP_TYPE;
        }
        return 0;
 }
 
 static unsigned long long
 get_arg_value(struct event_format *event, struct filter_arg *arg,
-             struct pevent_record *record, enum pevent_errno *err)
+             struct tep_record *record, enum tep_errno *err)
 {
        switch (arg->type) {
        case FILTER_ARG_FIELD:
@@ -1816,7 +1802,7 @@ get_arg_value(struct event_format *event, struct filter_arg *arg,
        case FILTER_ARG_VALUE:
                if (arg->value.type != FILTER_NUMBER) {
                        if (!*err)
-                               *err = PEVENT_ERRNO__NOT_A_NUMBER;
+                               *err = TEP_ERRNO__NOT_A_NUMBER;
                }
                return arg->value.val;
 
@@ -1825,13 +1811,13 @@ get_arg_value(struct event_format *event, struct filter_arg *arg,
 
        default:
                if (!*err)
-                       *err = PEVENT_ERRNO__INVALID_ARG_TYPE;
+                       *err = TEP_ERRNO__INVALID_ARG_TYPE;
        }
        return 0;
 }
 
 static int test_num(struct event_format *event, struct filter_arg *arg,
-                   struct pevent_record *record, enum pevent_errno *err)
+                   struct tep_record *record, enum tep_errno *err)
 {
        unsigned long long lval, rval;
 
@@ -1866,15 +1852,15 @@ static int test_num(struct event_format *event, struct filter_arg *arg,
 
        default:
                if (!*err)
-                       *err = PEVENT_ERRNO__ILLEGAL_INTEGER_CMP;
+                       *err = TEP_ERRNO__ILLEGAL_INTEGER_CMP;
                return 0;
        }
 }
 
-static const char *get_field_str(struct filter_arg *arg, struct pevent_record *record)
+static const char *get_field_str(struct filter_arg *arg, struct tep_record *record)
 {
        struct event_format *event;
-       struct pevent *pevent;
+       struct tep_handle *pevent;
        unsigned long long addr;
        const char *val = NULL;
        unsigned int size;
@@ -1909,7 +1895,7 @@ static const char *get_field_str(struct filter_arg *arg, struct pevent_record *r
 
                if (arg->str.field->flags & (FIELD_IS_POINTER | FIELD_IS_LONG))
                        /* convert to a kernel symbol */
-                       val = pevent_find_function(pevent, addr);
+                       val = tep_find_function(pevent, addr);
 
                if (val == NULL) {
                        /* just use the hex of the string name */
@@ -1922,7 +1908,7 @@ static const char *get_field_str(struct filter_arg *arg, struct pevent_record *r
 }
 
 static int test_str(struct event_format *event, struct filter_arg *arg,
-                   struct pevent_record *record, enum pevent_errno *err)
+                   struct tep_record *record, enum tep_errno *err)
 {
        const char *val;
 
@@ -1947,13 +1933,13 @@ static int test_str(struct event_format *event, struct filter_arg *arg,
 
        default:
                if (!*err)
-                       *err = PEVENT_ERRNO__ILLEGAL_STRING_CMP;
+                       *err = TEP_ERRNO__ILLEGAL_STRING_CMP;
                return 0;
        }
 }
 
 static int test_op(struct event_format *event, struct filter_arg *arg,
-                  struct pevent_record *record, enum pevent_errno *err)
+                  struct tep_record *record, enum tep_errno *err)
 {
        switch (arg->op.type) {
        case FILTER_OP_AND:
@@ -1969,13 +1955,13 @@ static int test_op(struct event_format *event, struct filter_arg *arg,
 
        default:
                if (!*err)
-                       *err = PEVENT_ERRNO__INVALID_OP_TYPE;
+                       *err = TEP_ERRNO__INVALID_OP_TYPE;
                return 0;
        }
 }
 
 static int test_filter(struct event_format *event, struct filter_arg *arg,
-                      struct pevent_record *record, enum pevent_errno *err)
+                      struct tep_record *record, enum tep_errno *err)
 {
        if (*err) {
                /*
@@ -2009,20 +1995,20 @@ static int test_filter(struct event_format *event, struct filter_arg *arg,
 
        default:
                if (!*err)
-                       *err = PEVENT_ERRNO__INVALID_ARG_TYPE;
+                       *err = TEP_ERRNO__INVALID_ARG_TYPE;
                return 0;
        }
 }
 
 /**
- * pevent_event_filtered - return true if event has filter
+ * tep_event_filtered - return true if event has filter
  * @filter: filter struct with filter information
  * @event_id: event id to test if filter exists
  *
  * Returns 1 if filter found for @event_id
  *   otherwise 0;
  */
-int pevent_event_filtered(struct event_filter *filter, int event_id)
+int tep_event_filtered(struct event_filter *filter, int event_id)
 {
        struct filter_type *filter_type;
 
@@ -2035,42 +2021,42 @@ int pevent_event_filtered(struct event_filter *filter, int event_id)
 }
 
 /**
- * pevent_filter_match - test if a record matches a filter
+ * tep_filter_match - test if a record matches a filter
  * @filter: filter struct with filter information
  * @record: the record to test against the filter
  *
- * Returns: match result or error code (prefixed with PEVENT_ERRNO__)
+ * Returns: match result or error code (prefixed with TEP_ERRNO__)
  * FILTER_MATCH - filter found for event and @record matches
  * FILTER_MISS  - filter found for event and @record does not match
  * FILTER_NOT_FOUND - no filter found for @record's event
  * NO_FILTER - if no filters exist
  * otherwise - error occurred during test
  */
-enum pevent_errno pevent_filter_match(struct event_filter *filter,
-                                     struct pevent_record *record)
+enum tep_errno tep_filter_match(struct event_filter *filter,
+                               struct tep_record *record)
 {
-       struct pevent *pevent = filter->pevent;
+       struct tep_handle *pevent = filter->pevent;
        struct filter_type *filter_type;
        int event_id;
        int ret;
-       enum pevent_errno err = 0;
+       enum tep_errno err = 0;
 
        filter_init_error_buf(filter);
 
        if (!filter->filters)
-               return PEVENT_ERRNO__NO_FILTER;
+               return TEP_ERRNO__NO_FILTER;
 
-       event_id = pevent_data_type(pevent, record);
+       event_id = tep_data_type(pevent, record);
 
        filter_type = find_filter_type(filter, event_id);
        if (!filter_type)
-               return PEVENT_ERRNO__FILTER_NOT_FOUND;
+               return TEP_ERRNO__FILTER_NOT_FOUND;
 
        ret = test_filter(filter_type->event, filter_type->filter, record, &err);
        if (err)
                return err;
 
-       return ret ? PEVENT_ERRNO__FILTER_MATCH : PEVENT_ERRNO__FILTER_MISS;
+       return ret ? TEP_ERRNO__FILTER_MATCH : TEP_ERRNO__FILTER_MISS;
 }
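
On the consuming side, the four result values documented above are usually folded into a keep/drop decision, as in this sketch:

    /* Sketch: decide whether a record survives the filter. */
    static int record_passes(struct event_filter *filter, struct tep_record *record)
    {
            switch (tep_filter_match(filter, record)) {
            case TEP_ERRNO__FILTER_MATCH:
            case TEP_ERRNO__NO_FILTER:              /* no filters registered: keep */
                    return 1;
            case TEP_ERRNO__FILTER_MISS:
            case TEP_ERRNO__FILTER_NOT_FOUND:
                    return 0;
            default:                                /* a real tep_errno error */
                    return 0;
            }
    }
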
 
 static char *op_to_str(struct event_filter *filter, struct filter_arg *arg)
@@ -2364,7 +2350,7 @@ static char *arg_to_str(struct event_filter *filter, struct filter_arg *arg)
 }
 
 /**
- * pevent_filter_make_string - return a string showing the filter
+ * tep_filter_make_string - return a string showing the filter
  * @filter: filter struct with filter information
  * @event_id: the event id to return the filter string with
  *
@@ -2373,7 +2359,7 @@ static char *arg_to_str(struct event_filter *filter, struct filter_arg *arg)
  *  NULL is returned if no filter is found or allocation failed.
  */
 char *
-pevent_filter_make_string(struct event_filter *filter, int event_id)
+tep_filter_make_string(struct event_filter *filter, int event_id)
 {
        struct filter_type *filter_type;
 
@@ -2389,7 +2375,7 @@ pevent_filter_make_string(struct event_filter *filter, int event_id)
 }
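
The returned string is heap-allocated, so a caller typically prints and frees it, e.g.:

    /* Sketch: show the filter currently attached to one event id. */
    char *str = tep_filter_make_string(filter, event_id);

    if (str) {
            printf("filter: %s\n", str);
            free(str);
    }
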
 
 /**
- * pevent_filter_compare - compare two filters and return if they are the same
+ * tep_filter_compare - compare two filters and return if they are the same
  * @filter1: Filter to compare with @filter2
  * @filter2: Filter to compare with @filter1
  *
@@ -2397,7 +2383,7 @@ pevent_filter_make_string(struct event_filter *filter, int event_id)
  *  1 if the two filters hold the same content.
  *  0 if they do not.
  */
-int pevent_filter_compare(struct event_filter *filter1, struct event_filter *filter2)
+int tep_filter_compare(struct event_filter *filter1, struct event_filter *filter2)
 {
        struct filter_type *filter_type1;
        struct filter_type *filter_type2;
index eda07fa31dca1058a72989669e1cc99bc5958ab3..77e4ec6402dd3fef5e7832cbe1afe41f83db3906 100644 (file)
@@ -1,21 +1,7 @@
+// SPDX-License-Identifier: LGPL-2.1
 /*
  * Copyright (C) 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
  *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not,  see <http://www.gnu.org/licenses>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 #include <stdio.h>
 #include <stdlib.h>
index 8f8586912da7bab64a0e351b45ee4ea283ac7414..a51b366f47dad91500f540a3f2198013943556ae 100644 (file)
@@ -25,19 +25,19 @@ process___le16_to_cpup(struct trace_seq *s, unsigned long long *args)
        return val ? (long long) le16toh(*val) : 0;
 }
 
-int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
 {
-       pevent_register_print_function(pevent,
-                                      process___le16_to_cpup,
-                                      PEVENT_FUNC_ARG_INT,
-                                      "__le16_to_cpup",
-                                      PEVENT_FUNC_ARG_PTR,
-                                      PEVENT_FUNC_ARG_VOID);
+       tep_register_print_function(pevent,
+                                   process___le16_to_cpup,
+                                   TEP_FUNC_ARG_INT,
+                                   "__le16_to_cpup",
+                                   TEP_FUNC_ARG_PTR,
+                                   TEP_FUNC_ARG_VOID);
        return 0;
 }
 
-void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
 {
-       pevent_unregister_print_function(pevent, process___le16_to_cpup,
-                                        "__le16_to_cpup");
+       tep_unregister_print_function(pevent, process___le16_to_cpup,
+                                     "__le16_to_cpup");
 }
index 42dbf73758f3425f20c3401a5c64f024459ddfaf..424747475d37b727770566a983ae3b1ef57c0ec7 100644 (file)
@@ -33,7 +33,7 @@ static int cpus = -1;
 
 #define STK_BLK 10
 
-struct pevent_plugin_option plugin_options[] =
+struct tep_plugin_option plugin_options[] =
 {
        {
                .name = "parent",
@@ -53,8 +53,8 @@ struct pevent_plugin_option plugin_options[] =
        }
 };
 
-static struct pevent_plugin_option *ftrace_parent = &plugin_options[0];
-static struct pevent_plugin_option *ftrace_indent = &plugin_options[1];
+static struct tep_plugin_option *ftrace_parent = &plugin_options[0];
+static struct tep_plugin_option *ftrace_indent = &plugin_options[1];
 
 static void add_child(struct func_stack *stack, const char *child, int pos)
 {
@@ -122,25 +122,25 @@ static int add_and_get_index(const char *parent, const char *child, int cpu)
        return 0;
 }
 
-static int function_handler(struct trace_seq *s, struct pevent_record *record,
+static int function_handler(struct trace_seq *s, struct tep_record *record,
                            struct event_format *event, void *context)
 {
-       struct pevent *pevent = event->pevent;
+       struct tep_handle *pevent = event->pevent;
        unsigned long long function;
        unsigned long long pfunction;
        const char *func;
        const char *parent;
        int index = 0;
 
-       if (pevent_get_field_val(s, event, "ip", record, &function, 1))
+       if (tep_get_field_val(s, event, "ip", record, &function, 1))
                return trace_seq_putc(s, '!');
 
-       func = pevent_find_function(pevent, function);
+       func = tep_find_function(pevent, function);
 
-       if (pevent_get_field_val(s, event, "parent_ip", record, &pfunction, 1))
+       if (tep_get_field_val(s, event, "parent_ip", record, &pfunction, 1))
                return trace_seq_putc(s, '!');
 
-       parent = pevent_find_function(pevent, pfunction);
+       parent = tep_find_function(pevent, pfunction);
 
        if (parent && ftrace_indent->set)
                index = add_and_get_index(parent, func, record->cpu);
@@ -163,22 +163,22 @@ static int function_handler(struct trace_seq *s, struct pevent_record *record,
        return 0;
 }
 
-int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
 {
-       pevent_register_event_handler(pevent, -1, "ftrace", "function",
-                                     function_handler, NULL);
+       tep_register_event_handler(pevent, -1, "ftrace", "function",
+                                  function_handler, NULL);
 
-       traceevent_plugin_add_options("ftrace", plugin_options);
+       tep_plugin_add_options("ftrace", plugin_options);
 
        return 0;
 }
 
-void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
 {
        int i, x;
 
-       pevent_unregister_event_handler(pevent, -1, "ftrace", "function",
-                                       function_handler, NULL);
+       tep_unregister_event_handler(pevent, -1, "ftrace", "function",
+                                    function_handler, NULL);
 
        for (i = 0; i <= cpus; i++) {
                for (x = 0; x < fstack[i].size && fstack[i].stack[x]; x++)
@@ -186,7 +186,7 @@ void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
                free(fstack[i].stack);
        }
 
-       traceevent_plugin_remove_options(plugin_options);
+       tep_plugin_remove_options(plugin_options);
 
        free(fstack);
        fstack = NULL;
index 12bf14cc11529bbba356ce11b32294f84988c204..b43bfec565d83b34baa28357b33a8148e018a58c 100644 (file)
 #include "event-parse.h"
 
 static int timer_expire_handler(struct trace_seq *s,
-                               struct pevent_record *record,
+                               struct tep_record *record,
                                struct event_format *event, void *context)
 {
        trace_seq_printf(s, "hrtimer=");
 
-       if (pevent_print_num_field(s, "0x%llx", event, "timer",
-                                  record, 0) == -1)
-               pevent_print_num_field(s, "0x%llx", event, "hrtimer",
-                                      record, 1);
+       if (tep_print_num_field(s, "0x%llx", event, "timer",
+                               record, 0) == -1)
+               tep_print_num_field(s, "0x%llx", event, "hrtimer",
+                                   record, 1);
 
        trace_seq_printf(s, " now=");
 
-       pevent_print_num_field(s, "%llu", event, "now", record, 1);
+       tep_print_num_field(s, "%llu", event, "now", record, 1);
 
-       pevent_print_func_field(s, " function=%s", event, "function",
+       tep_print_func_field(s, " function=%s", event, "function",
                                record, 0);
        return 0;
 }
 
 static int timer_start_handler(struct trace_seq *s,
-                              struct pevent_record *record,
+                              struct tep_record *record,
                               struct event_format *event, void *context)
 {
        trace_seq_printf(s, "hrtimer=");
 
-       if (pevent_print_num_field(s, "0x%llx", event, "timer",
-                                  record, 0) == -1)
-               pevent_print_num_field(s, "0x%llx", event, "hrtimer",
-                                      record, 1);
+       if (tep_print_num_field(s, "0x%llx", event, "timer",
+                               record, 0) == -1)
+               tep_print_num_field(s, "0x%llx", event, "hrtimer",
+                                   record, 1);
 
-       pevent_print_func_field(s, " function=%s", event, "function",
-                               record, 0);
+       tep_print_func_field(s, " function=%s", event, "function",
+                            record, 0);
 
        trace_seq_printf(s, " expires=");
-       pevent_print_num_field(s, "%llu", event, "expires", record, 1);
+       tep_print_num_field(s, "%llu", event, "expires", record, 1);
 
        trace_seq_printf(s, " softexpires=");
-       pevent_print_num_field(s, "%llu", event, "softexpires", record, 1);
+       tep_print_num_field(s, "%llu", event, "softexpires", record, 1);
        return 0;
 }
 
-int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
 {
-       pevent_register_event_handler(pevent, -1,
-                                     "timer", "hrtimer_expire_entry",
-                                     timer_expire_handler, NULL);
+       tep_register_event_handler(pevent, -1,
+                                  "timer", "hrtimer_expire_entry",
+                                  timer_expire_handler, NULL);
 
-       pevent_register_event_handler(pevent, -1, "timer", "hrtimer_start",
-                                     timer_start_handler, NULL);
+       tep_register_event_handler(pevent, -1, "timer", "hrtimer_start",
+                                  timer_start_handler, NULL);
        return 0;
 }
 
-void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
 {
-       pevent_unregister_event_handler(pevent, -1,
-                                       "timer", "hrtimer_expire_entry",
-                                       timer_expire_handler, NULL);
+       tep_unregister_event_handler(pevent, -1,
+                                    "timer", "hrtimer_expire_entry",
+                                    timer_expire_handler, NULL);
 
-       pevent_unregister_event_handler(pevent, -1, "timer", "hrtimer_start",
-                                       timer_start_handler, NULL);
+       tep_unregister_event_handler(pevent, -1, "timer", "hrtimer_start",
+                                    timer_start_handler, NULL);
 }
index 5c23d5bd27ce815420fa8df5a7653f3c0f6d865d..45a9acd196409a638b8f53cd6d498556b5833a15 100644 (file)
@@ -47,29 +47,29 @@ process_jiffies_to_msecs(struct trace_seq *s, unsigned long long *args)
        return jiffies;
 }
 
-int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
 {
-       pevent_register_print_function(pevent,
-                                      process_jbd2_dev_to_name,
-                                      PEVENT_FUNC_ARG_STRING,
-                                      "jbd2_dev_to_name",
-                                      PEVENT_FUNC_ARG_INT,
-                                      PEVENT_FUNC_ARG_VOID);
+       tep_register_print_function(pevent,
+                                   process_jbd2_dev_to_name,
+                                   TEP_FUNC_ARG_STRING,
+                                   "jbd2_dev_to_name",
+                                   TEP_FUNC_ARG_INT,
+                                   TEP_FUNC_ARG_VOID);
 
-       pevent_register_print_function(pevent,
-                                      process_jiffies_to_msecs,
-                                      PEVENT_FUNC_ARG_LONG,
-                                      "jiffies_to_msecs",
-                                      PEVENT_FUNC_ARG_LONG,
-                                      PEVENT_FUNC_ARG_VOID);
+       tep_register_print_function(pevent,
+                                   process_jiffies_to_msecs,
+                                   TEP_FUNC_ARG_LONG,
+                                   "jiffies_to_msecs",
+                                   TEP_FUNC_ARG_LONG,
+                                   TEP_FUNC_ARG_VOID);
        return 0;
 }
 
-void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
 {
-       pevent_unregister_print_function(pevent, process_jbd2_dev_to_name,
-                                        "jbd2_dev_to_name");
+       tep_unregister_print_function(pevent, process_jbd2_dev_to_name,
+                                     "jbd2_dev_to_name");
 
-       pevent_unregister_print_function(pevent, process_jiffies_to_msecs,
-                                        "jiffies_to_msecs");
+       tep_unregister_print_function(pevent, process_jiffies_to_msecs,
+                                     "jiffies_to_msecs");
 }
index 70650ff48d78e4d7117396e796599294bbf2fa9c..73966b05abce3a726c104384edfca9d04766c30c 100644 (file)
@@ -23,7 +23,7 @@
 
 #include "event-parse.h"
 
-static int call_site_handler(struct trace_seq *s, struct pevent_record *record,
+static int call_site_handler(struct trace_seq *s, struct tep_record *record,
                             struct event_format *event, void *context)
 {
        struct format_field *field;
@@ -31,64 +31,64 @@ static int call_site_handler(struct trace_seq *s, struct pevent_record *record,
        void *data = record->data;
        const char *func;
 
-       field = pevent_find_field(event, "call_site");
+       field = tep_find_field(event, "call_site");
        if (!field)
                return 1;
 
-       if (pevent_read_number_field(field, data, &val))
+       if (tep_read_number_field(field, data, &val))
                return 1;
 
-       func = pevent_find_function(event->pevent, val);
+       func = tep_find_function(event->pevent, val);
        if (!func)
                return 1;
 
-       addr = pevent_find_function_address(event->pevent, val);
+       addr = tep_find_function_address(event->pevent, val);
 
        trace_seq_printf(s, "(%s+0x%x) ", func, (int)(val - addr));
        return 1;
 }
 
-int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
 {
-       pevent_register_event_handler(pevent, -1, "kmem", "kfree",
-                                     call_site_handler, NULL);
+       tep_register_event_handler(pevent, -1, "kmem", "kfree",
+                                  call_site_handler, NULL);
 
-       pevent_register_event_handler(pevent, -1, "kmem", "kmalloc",
-                                     call_site_handler, NULL);
+       tep_register_event_handler(pevent, -1, "kmem", "kmalloc",
+                                  call_site_handler, NULL);
 
-       pevent_register_event_handler(pevent, -1, "kmem", "kmalloc_node",
-                                     call_site_handler, NULL);
+       tep_register_event_handler(pevent, -1, "kmem", "kmalloc_node",
+                                  call_site_handler, NULL);
 
-       pevent_register_event_handler(pevent, -1, "kmem", "kmem_cache_alloc",
-                                     call_site_handler, NULL);
+       tep_register_event_handler(pevent, -1, "kmem", "kmem_cache_alloc",
+                                  call_site_handler, NULL);
 
-       pevent_register_event_handler(pevent, -1, "kmem",
-                                     "kmem_cache_alloc_node",
-                                     call_site_handler, NULL);
+       tep_register_event_handler(pevent, -1, "kmem",
+                                  "kmem_cache_alloc_node",
+                                  call_site_handler, NULL);
 
-       pevent_register_event_handler(pevent, -1, "kmem", "kmem_cache_free",
-                                     call_site_handler, NULL);
+       tep_register_event_handler(pevent, -1, "kmem", "kmem_cache_free",
+                                  call_site_handler, NULL);
        return 0;
 }
 
-void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
 {
-       pevent_unregister_event_handler(pevent, -1, "kmem", "kfree",
-                                       call_site_handler, NULL);
+       tep_unregister_event_handler(pevent, -1, "kmem", "kfree",
+                                    call_site_handler, NULL);
 
-       pevent_unregister_event_handler(pevent, -1, "kmem", "kmalloc",
-                                       call_site_handler, NULL);
+       tep_unregister_event_handler(pevent, -1, "kmem", "kmalloc",
+                                    call_site_handler, NULL);
 
-       pevent_unregister_event_handler(pevent, -1, "kmem", "kmalloc_node",
-                                       call_site_handler, NULL);
+       tep_unregister_event_handler(pevent, -1, "kmem", "kmalloc_node",
+                                    call_site_handler, NULL);
 
-       pevent_unregister_event_handler(pevent, -1, "kmem", "kmem_cache_alloc",
-                                       call_site_handler, NULL);
+       tep_unregister_event_handler(pevent, -1, "kmem", "kmem_cache_alloc",
+                                    call_site_handler, NULL);
 
-       pevent_unregister_event_handler(pevent, -1, "kmem",
-                                       "kmem_cache_alloc_node",
-                                       call_site_handler, NULL);
+       tep_unregister_event_handler(pevent, -1, "kmem",
+                                    "kmem_cache_alloc_node",
+                                    call_site_handler, NULL);
 
-       pevent_unregister_event_handler(pevent, -1, "kmem", "kmem_cache_free",
-                                       call_site_handler, NULL);
+       tep_unregister_event_handler(pevent, -1, "kmem", "kmem_cache_free",
+                                    call_site_handler, NULL);
 }
index 18536f7565773b84dd29cb7a65b5df851dd4b005..1d0d159062251837f84e0133be46d922e4b4f9d9 100644 (file)
@@ -247,17 +247,17 @@ static const char *find_exit_reason(unsigned isa, int val)
        return strings[i].str;
 }
 
-static int print_exit_reason(struct trace_seq *s, struct pevent_record *record,
+static int print_exit_reason(struct trace_seq *s, struct tep_record *record,
                             struct event_format *event, const char *field)
 {
        unsigned long long isa;
        unsigned long long val;
        const char *reason;
 
-       if (pevent_get_field_val(s, event, field, record, &val, 1) < 0)
+       if (tep_get_field_val(s, event, field, record, &val, 1) < 0)
                return -1;
 
-       if (pevent_get_field_val(s, event, "isa", record, &isa, 0) < 0)
+       if (tep_get_field_val(s, event, "isa", record, &isa, 0) < 0)
                isa = 1;
 
        reason = find_exit_reason(isa, val);
@@ -268,7 +268,7 @@ static int print_exit_reason(struct trace_seq *s, struct pevent_record *record,
        return 0;
 }
 
-static int kvm_exit_handler(struct trace_seq *s, struct pevent_record *record,
+static int kvm_exit_handler(struct trace_seq *s, struct tep_record *record,
                            struct event_format *event, void *context)
 {
        unsigned long long info1 = 0, info2 = 0;
@@ -276,10 +276,10 @@ static int kvm_exit_handler(struct trace_seq *s, struct pevent_record *record,
        if (print_exit_reason(s, record, event, "exit_reason") < 0)
                return -1;
 
-       pevent_print_num_field(s, " rip 0x%lx", event, "guest_rip", record, 1);
+       tep_print_num_field(s, " rip 0x%lx", event, "guest_rip", record, 1);
 
-       if (pevent_get_field_val(s, event, "info1", record, &info1, 0) >= 0
-           && pevent_get_field_val(s, event, "info2", record, &info2, 0) >= 0)
+       if (tep_get_field_val(s, event, "info1", record, &info1, 0) >= 0
+           && tep_get_field_val(s, event, "info2", record, &info2, 0) >= 0)
                trace_seq_printf(s, " info %llx %llx", info1, info2);
 
        return 0;
@@ -291,7 +291,7 @@ static int kvm_exit_handler(struct trace_seq *s, struct pevent_record *record,
 #define KVM_EMUL_INSN_F_CS_L   (1 << 3)
 
 static int kvm_emulate_insn_handler(struct trace_seq *s,
-                                   struct pevent_record *record,
+                                   struct tep_record *record,
                                    struct event_format *event, void *context)
 {
        unsigned long long rip, csbase, len, flags, failed;
@@ -299,22 +299,22 @@ static int kvm_emulate_insn_handler(struct trace_seq *s,
        uint8_t *insn;
        const char *disasm;
 
-       if (pevent_get_field_val(s, event, "rip", record, &rip, 1) < 0)
+       if (tep_get_field_val(s, event, "rip", record, &rip, 1) < 0)
                return -1;
 
-       if (pevent_get_field_val(s, event, "csbase", record, &csbase, 1) < 0)
+       if (tep_get_field_val(s, event, "csbase", record, &csbase, 1) < 0)
                return -1;
 
-       if (pevent_get_field_val(s, event, "len", record, &len, 1) < 0)
+       if (tep_get_field_val(s, event, "len", record, &len, 1) < 0)
                return -1;
 
-       if (pevent_get_field_val(s, event, "flags", record, &flags, 1) < 0)
+       if (tep_get_field_val(s, event, "flags", record, &flags, 1) < 0)
                return -1;
 
-       if (pevent_get_field_val(s, event, "failed", record, &failed, 1) < 0)
+       if (tep_get_field_val(s, event, "failed", record, &failed, 1) < 0)
                return -1;
 
-       insn = pevent_get_field_raw(s, event, "insn", record, &llen, 1);
+       insn = tep_get_field_raw(s, event, "insn", record, &llen, 1);
        if (!insn)
                return -1;
 
@@ -330,24 +330,24 @@ static int kvm_emulate_insn_handler(struct trace_seq *s,
 }
 
 
-static int kvm_nested_vmexit_inject_handler(struct trace_seq *s, struct pevent_record *record,
+static int kvm_nested_vmexit_inject_handler(struct trace_seq *s, struct tep_record *record,
                                            struct event_format *event, void *context)
 {
        if (print_exit_reason(s, record, event, "exit_code") < 0)
                return -1;
 
-       pevent_print_num_field(s, " info1 %llx", event, "exit_info1", record, 1);
-       pevent_print_num_field(s, " info2 %llx", event, "exit_info2", record, 1);
-       pevent_print_num_field(s, " int_info %llx", event, "exit_int_info", record, 1);
-       pevent_print_num_field(s, " int_info_err %llx", event, "exit_int_info_err", record, 1);
+       tep_print_num_field(s, " info1 %llx", event, "exit_info1", record, 1);
+       tep_print_num_field(s, " info2 %llx", event, "exit_info2", record, 1);
+       tep_print_num_field(s, " int_info %llx", event, "exit_int_info", record, 1);
+       tep_print_num_field(s, " int_info_err %llx", event, "exit_int_info_err", record, 1);
 
        return 0;
 }
 
-static int kvm_nested_vmexit_handler(struct trace_seq *s, struct pevent_record *record,
+static int kvm_nested_vmexit_handler(struct trace_seq *s, struct tep_record *record,
                                     struct event_format *event, void *context)
 {
-       pevent_print_num_field(s, "rip %llx ", event, "rip", record, 1);
+       tep_print_num_field(s, "rip %llx ", event, "rip", record, 1);
 
        return kvm_nested_vmexit_inject_handler(s, record, event, context);
 }
@@ -370,7 +370,7 @@ union kvm_mmu_page_role {
        };
 };
 
-static int kvm_mmu_print_role(struct trace_seq *s, struct pevent_record *record,
+static int kvm_mmu_print_role(struct trace_seq *s, struct tep_record *record,
                              struct event_format *event, void *context)
 {
        unsigned long long val;
@@ -379,7 +379,7 @@ static int kvm_mmu_print_role(struct trace_seq *s, struct pevent_record *record,
        };
        union kvm_mmu_page_role role;
 
-       if (pevent_get_field_val(s, event, "role", record, &val, 1) < 0)
+       if (tep_get_field_val(s, event, "role", record, &val, 1) < 0)
                return -1;
 
        role.word = (int)val;
@@ -388,8 +388,8 @@ static int kvm_mmu_print_role(struct trace_seq *s, struct pevent_record *record,
         * We can only use the structure if file is of the same
         * endianess.
         */
-       if (pevent_is_file_bigendian(event->pevent) ==
-           pevent_is_host_bigendian(event->pevent)) {
+       if (tep_is_file_bigendian(event->pevent) ==
+           tep_is_host_bigendian(event->pevent)) {
 
                trace_seq_printf(s, "%u q%u%s %s%s %spae %snxe %swp%s%s%s",
                                 role.level,
@@ -406,10 +406,10 @@ static int kvm_mmu_print_role(struct trace_seq *s, struct pevent_record *record,
        } else
                trace_seq_printf(s, "WORD: %08x", role.word);
 
-       pevent_print_num_field(s, " root %u ",  event,
-                              "root_count", record, 1);
+       tep_print_num_field(s, " root %u ",  event,
+                           "root_count", record, 1);
 
-       if (pevent_get_field_val(s, event, "unsync", record, &val, 1) < 0)
+       if (tep_get_field_val(s, event, "unsync", record, &val, 1) < 0)
                return -1;
 
        trace_seq_printf(s, "%s%c",  val ? "unsync" : "sync", 0);
@@ -417,17 +417,17 @@ static int kvm_mmu_print_role(struct trace_seq *s, struct pevent_record *record,
 }
 
 static int kvm_mmu_get_page_handler(struct trace_seq *s,
-                                   struct pevent_record *record,
+                                   struct tep_record *record,
                                    struct event_format *event, void *context)
 {
        unsigned long long val;
 
-       if (pevent_get_field_val(s, event, "created", record, &val, 1) < 0)
+       if (tep_get_field_val(s, event, "created", record, &val, 1) < 0)
                return -1;
 
        trace_seq_printf(s, "%s ", val ? "new" : "existing");
 
-       if (pevent_get_field_val(s, event, "gfn", record, &val, 1) < 0)
+       if (tep_get_field_val(s, event, "gfn", record, &val, 1) < 0)
                return -1;
 
        trace_seq_printf(s, "sp gfn %llx ", val);
@@ -444,79 +444,79 @@ process_is_writable_pte(struct trace_seq *s, unsigned long long *args)
        return pte & PT_WRITABLE_MASK;
 }
 
-int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
 {
        init_disassembler();
 
-       pevent_register_event_handler(pevent, -1, "kvm", "kvm_exit",
-                                     kvm_exit_handler, NULL);
+       tep_register_event_handler(pevent, -1, "kvm", "kvm_exit",
+                                  kvm_exit_handler, NULL);
 
-       pevent_register_event_handler(pevent, -1, "kvm", "kvm_emulate_insn",
-                                     kvm_emulate_insn_handler, NULL);
+       tep_register_event_handler(pevent, -1, "kvm", "kvm_emulate_insn",
+                                  kvm_emulate_insn_handler, NULL);
 
-       pevent_register_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit",
-                                     kvm_nested_vmexit_handler, NULL);
+       tep_register_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit",
+                                  kvm_nested_vmexit_handler, NULL);
 
-       pevent_register_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit_inject",
-                                     kvm_nested_vmexit_inject_handler, NULL);
+       tep_register_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit_inject",
+                                  kvm_nested_vmexit_inject_handler, NULL);
 
-       pevent_register_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_get_page",
-                                     kvm_mmu_get_page_handler, NULL);
+       tep_register_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_get_page",
+                                  kvm_mmu_get_page_handler, NULL);
 
-       pevent_register_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_sync_page",
-                                     kvm_mmu_print_role, NULL);
+       tep_register_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_sync_page",
+                                  kvm_mmu_print_role, NULL);
 
-       pevent_register_event_handler(pevent, -1,
-                                     "kvmmmu", "kvm_mmu_unsync_page",
-                                     kvm_mmu_print_role, NULL);
+       tep_register_event_handler(pevent, -1,
+                                  "kvmmmu", "kvm_mmu_unsync_page",
+                                  kvm_mmu_print_role, NULL);
 
-       pevent_register_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_zap_page",
-                                     kvm_mmu_print_role, NULL);
+       tep_register_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_zap_page",
+                                  kvm_mmu_print_role, NULL);
 
-       pevent_register_event_handler(pevent, -1, "kvmmmu",
+       tep_register_event_handler(pevent, -1, "kvmmmu",
                        "kvm_mmu_prepare_zap_page", kvm_mmu_print_role,
                        NULL);
 
-       pevent_register_print_function(pevent,
-                                      process_is_writable_pte,
-                                      PEVENT_FUNC_ARG_INT,
-                                      "is_writable_pte",
-                                      PEVENT_FUNC_ARG_LONG,
-                                      PEVENT_FUNC_ARG_VOID);
+       tep_register_print_function(pevent,
+                                   process_is_writable_pte,
+                                   TEP_FUNC_ARG_INT,
+                                   "is_writable_pte",
+                                   TEP_FUNC_ARG_LONG,
+                                   TEP_FUNC_ARG_VOID);
        return 0;
 }
 
-void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
 {
-       pevent_unregister_event_handler(pevent, -1, "kvm", "kvm_exit",
-                                       kvm_exit_handler, NULL);
+       tep_unregister_event_handler(pevent, -1, "kvm", "kvm_exit",
+                                    kvm_exit_handler, NULL);
 
-       pevent_unregister_event_handler(pevent, -1, "kvm", "kvm_emulate_insn",
-                                       kvm_emulate_insn_handler, NULL);
+       tep_unregister_event_handler(pevent, -1, "kvm", "kvm_emulate_insn",
+                                    kvm_emulate_insn_handler, NULL);
 
-       pevent_unregister_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit",
-                                       kvm_nested_vmexit_handler, NULL);
+       tep_unregister_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit",
+                                    kvm_nested_vmexit_handler, NULL);
 
-       pevent_unregister_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit_inject",
-                                       kvm_nested_vmexit_inject_handler, NULL);
+       tep_unregister_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit_inject",
+                                    kvm_nested_vmexit_inject_handler, NULL);
 
-       pevent_unregister_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_get_page",
-                                       kvm_mmu_get_page_handler, NULL);
+       tep_unregister_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_get_page",
+                                    kvm_mmu_get_page_handler, NULL);
 
-       pevent_unregister_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_sync_page",
-                                       kvm_mmu_print_role, NULL);
+       tep_unregister_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_sync_page",
+                                    kvm_mmu_print_role, NULL);
 
-       pevent_unregister_event_handler(pevent, -1,
-                                       "kvmmmu", "kvm_mmu_unsync_page",
-                                       kvm_mmu_print_role, NULL);
+       tep_unregister_event_handler(pevent, -1,
+                                    "kvmmmu", "kvm_mmu_unsync_page",
+                                    kvm_mmu_print_role, NULL);
 
-       pevent_unregister_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_zap_page",
-                                       kvm_mmu_print_role, NULL);
+       tep_unregister_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_zap_page",
+                                    kvm_mmu_print_role, NULL);
 
-       pevent_unregister_event_handler(pevent, -1, "kvmmmu",
+       tep_unregister_event_handler(pevent, -1, "kvmmmu",
                        "kvm_mmu_prepare_zap_page", kvm_mmu_print_role,
                        NULL);
 
-       pevent_unregister_print_function(pevent, process_is_writable_pte,
-                                        "is_writable_pte");
+       tep_unregister_print_function(pevent, process_is_writable_pte,
+                                     "is_writable_pte");
 }
index 7e15a0f1c2fd280fd8f2365d0bbd214cd7415b87..de50a531620306e1f458aa85da65a64013b8dd34 100644 (file)
@@ -28,7 +28,7 @@
 static void print_string(struct trace_seq *s, struct event_format *event,
                         const char *name, const void *data)
 {
-       struct format_field *f = pevent_find_field(event, name);
+       struct format_field *f = tep_find_field(event, name);
        int offset;
        int length;
 
@@ -42,7 +42,7 @@ static void print_string(struct trace_seq *s, struct event_format *event,
 
        if (!strncmp(f->type, "__data_loc", 10)) {
                unsigned long long v;
-               if (pevent_read_number_field(f, data, &v)) {
+               if (tep_read_number_field(f, data, &v)) {
                        trace_seq_printf(s, "invalid_data_loc");
                        return;
                }
@@ -53,12 +53,12 @@ static void print_string(struct trace_seq *s, struct event_format *event,
        trace_seq_printf(s, "%.*s", length, (char *)data + offset);
 }
 
-#define SF(fn) pevent_print_num_field(s, fn ":%d", event, fn, record, 0)
-#define SFX(fn)        pevent_print_num_field(s, fn ":%#x", event, fn, record, 0)
+#define SF(fn) tep_print_num_field(s, fn ":%d", event, fn, record, 0)
+#define SFX(fn)        tep_print_num_field(s, fn ":%#x", event, fn, record, 0)
 #define SP()   trace_seq_putc(s, ' ')
 
 static int drv_bss_info_changed(struct trace_seq *s,
-                               struct pevent_record *record,
+                               struct tep_record *record,
                                struct event_format *event, void *context)
 {
        void *data = record->data;
@@ -66,7 +66,7 @@ static int drv_bss_info_changed(struct trace_seq *s,
        print_string(s, event, "wiphy_name", data);
        trace_seq_printf(s, " vif:");
        print_string(s, event, "vif_name", data);
-       pevent_print_num_field(s, "(%d)", event, "vif_type", record, 1);
+       tep_print_num_field(s, "(%d)", event, "vif_type", record, 1);
 
        trace_seq_printf(s, "\n%*s", INDENT, "");
        SF("assoc"); SP();
@@ -86,17 +86,17 @@ static int drv_bss_info_changed(struct trace_seq *s,
        return 0;
 }
 
-int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
 {
-       pevent_register_event_handler(pevent, -1, "mac80211",
-                                     "drv_bss_info_changed",
-                                     drv_bss_info_changed, NULL);
+       tep_register_event_handler(pevent, -1, "mac80211",
+                                  "drv_bss_info_changed",
+                                  drv_bss_info_changed, NULL);
        return 0;
 }
 
-void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
 {
-       pevent_unregister_event_handler(pevent, -1, "mac80211",
-                                       "drv_bss_info_changed",
-                                       drv_bss_info_changed, NULL);
+       tep_unregister_event_handler(pevent, -1, "mac80211",
+                                    "drv_bss_info_changed",
+                                    drv_bss_info_changed, NULL);
 }
index ec30c2fcbac05c2bef0268ac9494c0c5b4254a23..eecb4bd95c11b6c4807ad879fee21ff3fee35df8 100644 (file)
@@ -45,7 +45,7 @@ static void write_state(struct trace_seq *s, int val)
 }
 
 static void write_and_save_comm(struct format_field *field,
-                               struct pevent_record *record,
+                               struct tep_record *record,
                                struct trace_seq *s, int pid)
 {
        const char *comm;
@@ -61,100 +61,100 @@ static void write_and_save_comm(struct format_field *field,
        comm = &s->buffer[len];
 
        /* Help out the comm to ids. This will handle dups */
-       pevent_register_comm(field->event->pevent, comm, pid);
+       tep_register_comm(field->event->pevent, comm, pid);
 }
 
 static int sched_wakeup_handler(struct trace_seq *s,
-                               struct pevent_record *record,
+                               struct tep_record *record,
                                struct event_format *event, void *context)
 {
        struct format_field *field;
        unsigned long long val;
 
-       if (pevent_get_field_val(s, event, "pid", record, &val, 1))
+       if (tep_get_field_val(s, event, "pid", record, &val, 1))
                return trace_seq_putc(s, '!');
 
-       field = pevent_find_any_field(event, "comm");
+       field = tep_find_any_field(event, "comm");
        if (field) {
                write_and_save_comm(field, record, s, val);
                trace_seq_putc(s, ':');
        }
        trace_seq_printf(s, "%lld", val);
 
-       if (pevent_get_field_val(s, event, "prio", record, &val, 0) == 0)
+       if (tep_get_field_val(s, event, "prio", record, &val, 0) == 0)
                trace_seq_printf(s, " [%lld]", val);
 
-       if (pevent_get_field_val(s, event, "success", record, &val, 1) == 0)
+       if (tep_get_field_val(s, event, "success", record, &val, 1) == 0)
                trace_seq_printf(s, " success=%lld", val);
 
-       if (pevent_get_field_val(s, event, "target_cpu", record, &val, 0) == 0)
+       if (tep_get_field_val(s, event, "target_cpu", record, &val, 0) == 0)
                trace_seq_printf(s, " CPU:%03llu", val);
 
        return 0;
 }
 
 static int sched_switch_handler(struct trace_seq *s,
-                               struct pevent_record *record,
+                               struct tep_record *record,
                                struct event_format *event, void *context)
 {
        struct format_field *field;
        unsigned long long val;
 
-       if (pevent_get_field_val(s, event, "prev_pid", record, &val, 1))
+       if (tep_get_field_val(s, event, "prev_pid", record, &val, 1))
                return trace_seq_putc(s, '!');
 
-       field = pevent_find_any_field(event, "prev_comm");
+       field = tep_find_any_field(event, "prev_comm");
        if (field) {
                write_and_save_comm(field, record, s, val);
                trace_seq_putc(s, ':');
        }
        trace_seq_printf(s, "%lld ", val);
 
-       if (pevent_get_field_val(s, event, "prev_prio", record, &val, 0) == 0)
+       if (tep_get_field_val(s, event, "prev_prio", record, &val, 0) == 0)
                trace_seq_printf(s, "[%d] ", (int) val);
 
-       if (pevent_get_field_val(s,  event, "prev_state", record, &val, 0) == 0)
+       if (tep_get_field_val(s,  event, "prev_state", record, &val, 0) == 0)
                write_state(s, val);
 
        trace_seq_puts(s, " ==> ");
 
-       if (pevent_get_field_val(s, event, "next_pid", record, &val, 1))
+       if (tep_get_field_val(s, event, "next_pid", record, &val, 1))
                return trace_seq_putc(s, '!');
 
-       field = pevent_find_any_field(event, "next_comm");
+       field = tep_find_any_field(event, "next_comm");
        if (field) {
                write_and_save_comm(field, record, s, val);
                trace_seq_putc(s, ':');
        }
        trace_seq_printf(s, "%lld", val);
 
-       if (pevent_get_field_val(s, event, "next_prio", record, &val, 0) == 0)
+       if (tep_get_field_val(s, event, "next_prio", record, &val, 0) == 0)
                trace_seq_printf(s, " [%d]", (int) val);
 
        return 0;
 }
 
-int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
 {
-       pevent_register_event_handler(pevent, -1, "sched", "sched_switch",
-                                     sched_switch_handler, NULL);
+       tep_register_event_handler(pevent, -1, "sched", "sched_switch",
+                                  sched_switch_handler, NULL);
 
-       pevent_register_event_handler(pevent, -1, "sched", "sched_wakeup",
-                                     sched_wakeup_handler, NULL);
+       tep_register_event_handler(pevent, -1, "sched", "sched_wakeup",
+                                  sched_wakeup_handler, NULL);
 
-       pevent_register_event_handler(pevent, -1, "sched", "sched_wakeup_new",
-                                     sched_wakeup_handler, NULL);
+       tep_register_event_handler(pevent, -1, "sched", "sched_wakeup_new",
+                                  sched_wakeup_handler, NULL);
        return 0;
 }
 
-void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
 {
-       pevent_unregister_event_handler(pevent, -1, "sched", "sched_switch",
-                                       sched_switch_handler, NULL);
+       tep_unregister_event_handler(pevent, -1, "sched", "sched_switch",
+                                    sched_switch_handler, NULL);
 
-       pevent_unregister_event_handler(pevent, -1, "sched", "sched_wakeup",
-                                       sched_wakeup_handler, NULL);
+       tep_unregister_event_handler(pevent, -1, "sched", "sched_wakeup",
+                                    sched_wakeup_handler, NULL);
 
-       pevent_unregister_event_handler(pevent, -1, "sched", "sched_wakeup_new",
-                                       sched_wakeup_handler, NULL);
+       tep_unregister_event_handler(pevent, -1, "sched", "sched_wakeup_new",
+                                    sched_wakeup_handler, NULL);
 }
index 5e750af2b461f7e47bbf86ea21be1180a27c94b7..5ec346f6b8425cc33a52b8267680fad7d54d0d74 100644 (file)
@@ -413,21 +413,21 @@ unsigned long long process_scsi_trace_parse_cdb(struct trace_seq *s,
        return 0;
 }
 
-int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
 {
-       pevent_register_print_function(pevent,
-                                      process_scsi_trace_parse_cdb,
-                                      PEVENT_FUNC_ARG_STRING,
-                                      "scsi_trace_parse_cdb",
-                                      PEVENT_FUNC_ARG_PTR,
-                                      PEVENT_FUNC_ARG_PTR,
-                                      PEVENT_FUNC_ARG_INT,
-                                      PEVENT_FUNC_ARG_VOID);
+       tep_register_print_function(pevent,
+                                   process_scsi_trace_parse_cdb,
+                                   TEP_FUNC_ARG_STRING,
+                                   "scsi_trace_parse_cdb",
+                                   TEP_FUNC_ARG_PTR,
+                                   TEP_FUNC_ARG_PTR,
+                                   TEP_FUNC_ARG_INT,
+                                   TEP_FUNC_ARG_VOID);
        return 0;
 }
 
-void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
 {
-       pevent_unregister_print_function(pevent, process_scsi_trace_parse_cdb,
-                                        "scsi_trace_parse_cdb");
+       tep_unregister_print_function(pevent, process_scsi_trace_parse_cdb,
+                                     "scsi_trace_parse_cdb");
 }
index 690173bfa13edb51d82b2e2eb7b4529b190f2958..b2acbd6e9c86c5677fcf39e3fe966c21eb330bc4 100644 (file)
@@ -119,19 +119,19 @@ unsigned long long process_xen_hypercall_name(struct trace_seq *s,
        return 0;
 }
 
-int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
+int TEP_PLUGIN_LOADER(struct tep_handle *pevent)
 {
-       pevent_register_print_function(pevent,
-                                      process_xen_hypercall_name,
-                                      PEVENT_FUNC_ARG_STRING,
-                                      "xen_hypercall_name",
-                                      PEVENT_FUNC_ARG_INT,
-                                      PEVENT_FUNC_ARG_VOID);
+       tep_register_print_function(pevent,
+                                   process_xen_hypercall_name,
+                                   TEP_FUNC_ARG_STRING,
+                                   "xen_hypercall_name",
+                                   TEP_FUNC_ARG_INT,
+                                   TEP_FUNC_ARG_VOID);
        return 0;
 }
 
-void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
+void TEP_PLUGIN_UNLOADER(struct tep_handle *pevent)
 {
-       pevent_unregister_print_function(pevent, process_xen_hypercall_name,
-                                        "xen_hypercall_name");
+       tep_unregister_print_function(pevent, process_xen_hypercall_name,
+                                     "xen_hypercall_name");
 }
index 292dc9f1d2334f2a9cab35d70b1531d74f68fca4..e3bac4543d3b74b3414baa4cb1e3ba5418623e07 100644 (file)
@@ -1,21 +1,7 @@
+// SPDX-License-Identifier: LGPL-2.1
 /*
  * Copyright (C) 2009 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
  *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not,  see <http://www.gnu.org/licenses>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 #include <stdio.h>
 #include <stdlib.h>
index 749cc6055dac32ab1d30879bda29343eed3d887d..e8c972f89357d2dc47bbe57000e503f46d964f37 100644 (file)
@@ -118,6 +118,15 @@ OPTIONS
 --group::
        Show event group information together
 
+--percent-type::
+	Set the annotation percent type from the following choices:
+	  global-period, local-period, global-hits, local-hits
+
+	The local/global keywords set whether the percentage is computed
+	in the scope of the function (local) or over the whole data (global).
+	The period/hits keywords set the base the percentage is computed
+	on: the sample period or the number of samples (hits).
+
 SEE ALSO
 --------
 linkperf:perf-record[1], linkperf:perf-report[1]
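A hedged usage sketch for the option documented above; the workload and symbol name are placeholders, not taken from this patch:

	# record some samples, then annotate one symbol with percentages
	# computed from the number of samples inside that function (local-hits)
	perf record -- ./my_workload
	perf annotate --percent-type local-hits my_hot_function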
index 917e36fde6d8b5e30e2626a68ef4b01b2cae1929..474a4941f65dabdbc29911a7b324a0ff2fcd63e9 100644 (file)
@@ -477,6 +477,15 @@ include::itrace.txt[]
        Display monitored tasks stored in perf data. Displaying pid/tid/ppid
        plus the command string aligned to distinguish parent and child tasks.
 
+--percent-type::
+	Set the annotation percent type from the following choices:
+	  global-period, local-period, global-hits, local-hits
+
+	The local/global keywords set whether the percentage is computed
+	in the scope of the function (local) or over the whole data (global).
+	The period/hits keywords set the base the percentage is computed
+	on: the sample period or the number of samples (hits).
+
 include::callchain-overhead-calculation.txt[]
 
 SEE ALSO
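The same option is wired into the perf report command later in this diff; an illustrative invocation with a placeholder data file, again not part of the patch:

	# report with percentages based on sample periods across the whole
	# data set (global-period)
	perf report -i perf.data --percent-type global-period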
index 225454416ed54b2baea044ce46c794e23c708f71..7902a5681fc89f6eda64234de379fc8122567529 100644 (file)
@@ -84,10 +84,10 @@ endif # has_clean
 endif # MAKECMDGOALS
 
 #
-# The clean target is not really parallel, don't print the jobs info:
+# Explicitly disable parallelism for the clean target.
 #
 clean:
-       $(make)
+       $(make) -j1
 
 #
 # The build-test target is not really parallel, don't print the jobs info,
index b3d1b12a5081ba10a92d19c79b38c6fb4654a597..5224ade3d5afed19b93a811a162a3ae6012c7ee7 100644 (file)
@@ -777,14 +777,12 @@ endif
        $(call QUIET_INSTALL, libexec) \
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
 ifndef NO_LIBBPF
-       $(call QUIET_INSTALL, lib) \
-               $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'
-       $(call QUIET_INSTALL, include/bpf) \
-               $(INSTALL) include/bpf/*.h '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'
-       $(call QUIET_INSTALL, lib) \
-               $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'
-       $(call QUIET_INSTALL, examples/bpf) \
-               $(INSTALL) examples/bpf/*.c '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'
+       $(call QUIET_INSTALL, bpf-headers) \
+               $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'; \
+               $(INSTALL) include/bpf/*.h -t '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'
+       $(call QUIET_INSTALL, bpf-examples) \
+               $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'; \
+               $(INSTALL) examples/bpf/*.c -t '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'
 endif
        $(call QUIET_INSTALL, perf-archive) \
                $(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
index f013b115dc860d001120233df6d912ad35c842ec..dbef716a19135fffaf2afb1adedc418a3effd420 100644 (file)
@@ -11,7 +11,8 @@ PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
 
 out    := $(OUTPUT)arch/arm64/include/generated/asm
 header := $(out)/syscalls.c
-sysdef := $(srctree)/tools/include/uapi/asm-generic/unistd.h
+incpath := $(srctree)/tools
+sysdef := $(srctree)/tools/arch/arm64/include/uapi/asm/unistd.h
 sysprf := $(srctree)/tools/perf/arch/arm64/entry/syscalls/
 systbl := $(sysprf)/mksyscalltbl
 
@@ -19,7 +20,7 @@ systbl := $(sysprf)/mksyscalltbl
 _dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
 
 $(header): $(sysdef) $(systbl)
-       $(Q)$(SHELL) '$(systbl)' '$(CC)' '$(HOSTCC)' $(sysdef) > $@
+       $(Q)$(SHELL) '$(systbl)' '$(CC)' '$(HOSTCC)' $(incpath) $(sysdef) > $@
 
 clean::
        $(call QUIET_CLEAN, arm64) $(RM) $(header)
index 52e197317d3ee22b82ced11a0b2ee4a50dd4b837..2dbb8cade048f76b4b43d88d5d9e27c09e025e0f 100755 (executable)
@@ -11,7 +11,8 @@
 
 gcc=$1
 hostcc=$2
-input=$3
+incpath=$3
+input=$4
 
 if ! test -r $input; then
        echo "Could not read input file" >&2
@@ -28,7 +29,6 @@ create_table_from_c()
 
        cat <<-_EoHEADER
                #include <stdio.h>
-               #define __ARCH_WANT_RENAMEAT
                #include "$input"
                int main(int argc, char *argv[])
                {
@@ -42,7 +42,7 @@ create_table_from_c()
        printf "%s\n" " printf(\"#define SYSCALLTBL_ARM64_MAX_ID %d\\n\", __NR_$last_sc);"
        printf "}\n"
 
-       } | $hostcc -o $create_table_exe -x c -
+       } | $hostcc -I $incpath/include/uapi -o $create_table_exe -x c -
 
        $create_table_exe
 
index 1120e39c1b001bb6f4915aa2c11dd821962b63ab..5ccfce87e6937794fd434f83b8e61bb159826510 100644 (file)
@@ -194,6 +194,7 @@ struct auxtrace_record *arm_spe_recording_init(int *err,
        sper->itr.read_finish = arm_spe_read_finish;
        sper->itr.alignment = 0;
 
+       *err = 0;
        return &sper->itr;
 }
 
index 53d83d7e6a096a4d49a0b6f0cbfafc15a27866af..10a44e946f7734b911ed00f74184f754b09d56ba 100644 (file)
@@ -22,15 +22,16 @@ bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
 
 #endif
 
-#if !defined(_CALL_ELF) || _CALL_ELF != 2
 int arch__choose_best_symbol(struct symbol *syma,
                             struct symbol *symb __maybe_unused)
 {
        char *sym = syma->name;
 
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
        /* Skip over any initial dot */
        if (*sym == '.')
                sym++;
+#endif
 
        /* Avoid "SyS" kernel syscall aliases */
        if (strlen(sym) >= 3 && !strncmp(sym, "SyS", 3))
@@ -41,6 +42,7 @@ int arch__choose_best_symbol(struct symbol *syma,
        return SYMBOL_A;
 }
 
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
 /* Allow matching against dot variants */
 int arch__compare_symbol_names(const char *namea, const char *nameb)
 {
@@ -141,8 +143,10 @@ void arch__post_process_probe_trace_events(struct perf_probe_event *pev,
        for (i = 0; i < ntevs; i++) {
                tev = &pev->tevs[i];
                map__for_each_symbol(map, sym, tmp) {
-                       if (map->unmap_ip(map, sym->start) == tev->point.address)
+                       if (map->unmap_ip(map, sym->start) == tev->point.address) {
                                arch__fix_tev_from_maps(pev, tev, map, sym);
+                               break;
+                       }
                }
        }
 }
index 3afe8256eff275ef94c277dfae9a8c865615d681..44c85738889740b6a81f444510274c64de497785 100644 (file)
@@ -30,6 +30,7 @@ cpumsf_info_fill(struct auxtrace_record *itr __maybe_unused,
                 struct auxtrace_info_event *auxtrace_info __maybe_unused,
                 size_t priv_size __maybe_unused)
 {
+       auxtrace_info->type = PERF_AUXTRACE_S390_CPUMSF;
        return 0;
 }
 
index 1a38e78117ce6f410cc0521e09fedeaa31cf8519..8cc6642fce7a6699e3e3fd816ced37d7ae627f5a 100644 (file)
@@ -19,9 +19,6 @@ systbl := $(sys)/syscalltbl.sh
 _dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
 
 $(header): $(sys)/syscall_64.tbl $(systbl)
-       @(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \
-        (diff -B arch/x86/entry/syscalls/syscall_64.tbl ../../arch/x86/entry/syscalls/syscall_64.tbl >/dev/null) \
-        || echo "Warning: Kernel ABI header at 'tools/perf/arch/x86/entry/syscalls/syscall_64.tbl' differs from latest version at 'arch/x86/entry/syscalls/syscall_64.tbl'" >&2 )) || true
        $(Q)$(SHELL) '$(systbl)' $(sys)/syscall_64.tbl 'x86_64' > $@
 
 clean::
index c1bd979b957be76a2ff55f45d0d4011c056e0de7..613709cfbbd03d45e2c6254c2fb5a95f7f5415b5 100644 (file)
@@ -9,6 +9,7 @@ struct test;
 int test__rdpmc(struct test *test __maybe_unused, int subtest);
 int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest);
 int test__insn_x86(struct test *test __maybe_unused, int subtest);
+int test__bp_modify(struct test *test, int subtest);
 
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
 struct thread;
index 8e2c5a38c3b90c18c2159b8f91e54500734cd9f3..586849ff83a079468abc96294077c28b68a3e0c0 100644 (file)
@@ -5,3 +5,4 @@ libperf-y += arch-tests.o
 libperf-y += rdpmc.o
 libperf-y += perf-time-to-tsc.o
 libperf-$(CONFIG_AUXTRACE) += insn-x86.o
+libperf-$(CONFIG_X86_64) += bp-modify.o
index cc1802ff54109ebccd23130fcc37f023d6e5ae9c..d47d3f8e3c8e076111d47c6ca51252552df31117 100644 (file)
@@ -23,6 +23,12 @@ struct test arch_tests[] = {
                .desc = "x86 instruction decoder - new instructions",
                .func = test__insn_x86,
        },
+#endif
+#if defined(__x86_64__)
+       {
+               .desc = "x86 bp modify",
+               .func = test__bp_modify,
+       },
 #endif
        {
                .func = NULL,
diff --git a/tools/perf/arch/x86/tests/bp-modify.c b/tools/perf/arch/x86/tests/bp-modify.c
new file mode 100644 (file)
index 0000000..f53e440
--- /dev/null
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/compiler.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/user.h>
+#include <syscall.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/ptrace.h>
+#include <asm/ptrace.h>
+#include <errno.h>
+#include "debug.h"
+#include "tests/tests.h"
+#include "arch-tests.h"
+
+static noinline int bp_1(void)
+{
+       pr_debug("in %s\n", __func__);
+       return 0;
+}
+
+static noinline int bp_2(void)
+{
+       pr_debug("in %s\n", __func__);
+       return 0;
+}
+
+static int spawn_child(void)
+{
+       int child = fork();
+
+       if (child == 0) {
+               /*
+                * The child sets itself up as a tracee and raises
+                * a signal so the parent can start tracing it, then
+                * it calls bp_1 and exits.
+                */
+               int err = ptrace(PTRACE_TRACEME, 0, NULL, NULL);
+
+               if (err) {
+                       pr_debug("failed to PTRACE_TRACEME\n");
+                       exit(1);
+               }
+
+               raise(SIGCONT);
+               bp_1();
+               exit(0);
+       }
+
+       return child;
+}
+
+/*
+ * This test creates a HW breakpoint, tries to
+ * change it, and checks that it was properly changed.
+ */
+static int bp_modify1(void)
+{
+       pid_t child;
+       int status;
+       unsigned long rip = 0, dr7 = 1;
+
+       child = spawn_child();
+
+       waitpid(child, &status, 0);
+       if (WIFEXITED(status)) {
+               pr_debug("tracee exited prematurely 1\n");
+               return TEST_FAIL;
+       }
+
+       /*
+        * The parent does the following steps:
+        *  - creates a new breakpoint (id 0) for the bp_2 function
+        *  - changes that breakpoint to the bp_1 function
+        *  - waits for the breakpoint to hit and checks
+        *    that it reports the rip of the bp_1 function
+        *  - detaches the child
+        */
+       if (ptrace(PTRACE_POKEUSER, child,
+                  offsetof(struct user, u_debugreg[0]), bp_2)) {
+               pr_debug("failed to set breakpoint, 1st time: %s\n",
+                        strerror(errno));
+               goto out;
+       }
+
+       if (ptrace(PTRACE_POKEUSER, child,
+                  offsetof(struct user, u_debugreg[0]), bp_1)) {
+               pr_debug("failed to set breakpoint, 2nd time: %s\n",
+                        strerror(errno));
+               goto out;
+       }
+
+       if (ptrace(PTRACE_POKEUSER, child,
+                  offsetof(struct user, u_debugreg[7]), dr7)) {
+               pr_debug("failed to set dr7: %s\n", strerror(errno));
+               goto out;
+       }
+
+       if (ptrace(PTRACE_CONT, child, NULL, NULL)) {
+               pr_debug("failed to PTRACE_CONT: %s\n", strerror(errno));
+               goto out;
+       }
+
+       waitpid(child, &status, 0);
+       if (WIFEXITED(status)) {
+               pr_debug("tracee exited prematurely 2\n");
+               return TEST_FAIL;
+       }
+
+       rip = ptrace(PTRACE_PEEKUSER, child,
+                    offsetof(struct user_regs_struct, rip), NULL);
+       if (rip == (unsigned long) -1) {
+               pr_debug("failed to PTRACE_PEEKUSER: %s\n",
+                        strerror(errno));
+               goto out;
+       }
+
+       pr_debug("rip %lx, bp_1 %p\n", rip, bp_1);
+
+out:
+       if (ptrace(PTRACE_DETACH, child, NULL, NULL)) {
+               pr_debug("failed to PTRACE_DETACH: %s", strerror(errno));
+               return TEST_FAIL;
+       }
+
+       return rip == (unsigned long) bp_1 ? TEST_OK : TEST_FAIL;
+}
+
+/*
+ * This test creates a HW breakpoint, tries to
+ * change it to a bogus value, and checks that the
+ * original breakpoint is still hit.
+ */
+static int bp_modify2(void)
+{
+       pid_t child;
+       int status;
+       unsigned long rip = 0, dr7 = 1;
+
+       child = spawn_child();
+
+       waitpid(child, &status, 0);
+       if (WIFEXITED(status)) {
+               pr_debug("tracee exited prematurely 1\n");
+               return TEST_FAIL;
+       }
+
+       /*
+        * The parent does the following steps:
+        *  - creates a new breakpoint (id 0) for the bp_1 function
+        *  - tries to change that breakpoint to the (-1) address
+        *  - waits for the breakpoint to hit and checks
+        *    that it reports the rip of the bp_1 function
+        *  - detaches the child
+        */
+       if (ptrace(PTRACE_POKEUSER, child,
+                  offsetof(struct user, u_debugreg[0]), bp_1)) {
+               pr_debug("failed to set breakpoint: %s\n",
+                        strerror(errno));
+               goto out;
+       }
+
+       if (ptrace(PTRACE_POKEUSER, child,
+                  offsetof(struct user, u_debugreg[7]), dr7)) {
+               pr_debug("failed to set dr7: %s\n", strerror(errno));
+               goto out;
+       }
+
+       if (!ptrace(PTRACE_POKEUSER, child,
+                  offsetof(struct user, u_debugreg[0]), (unsigned long) (-1))) {
+               pr_debug("failed, breakpoint set to bogus address\n");
+               goto out;
+       }
+
+       if (ptrace(PTRACE_CONT, child, NULL, NULL)) {
+               pr_debug("failed to PTRACE_CONT: %s\n", strerror(errno));
+               goto out;
+       }
+
+       waitpid(child, &status, 0);
+       if (WIFEXITED(status)) {
+               pr_debug("tracee exited prematurely 2\n");
+               return TEST_FAIL;
+       }
+
+       rip = ptrace(PTRACE_PEEKUSER, child,
+                    offsetof(struct user_regs_struct, rip), NULL);
+       if (rip == (unsigned long) -1) {
+               pr_debug("failed to PTRACE_PEEKUSER: %s\n",
+                        strerror(errno));
+               goto out;
+       }
+
+       pr_debug("rip %lx, bp_1 %p\n", rip, bp_1);
+
+out:
+       if (ptrace(PTRACE_DETACH, child, NULL, NULL)) {
+               pr_debug("failed to PTRACE_DETACH: %s", strerror(errno));
+               return TEST_FAIL;
+       }
+
+       return rip == (unsigned long) bp_1 ? TEST_OK : TEST_FAIL;
+}
+
+int test__bp_modify(struct test *test __maybe_unused,
+                   int subtest __maybe_unused)
+{
+       TEST_ASSERT_VAL("modify test 1 failed\n", !bp_modify1());
+       TEST_ASSERT_VAL("modify test 2 failed\n", !bp_modify2());
+
+       return 0;
+}
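The test above is registered in the x86 arch-tests table earlier in this diff; a hedged sketch of how it might be invoked once perf is rebuilt (matching the test by a substring of its "x86 bp modify" description is an assumption about perf test's name matching, not something stated in this patch):

	# run only the breakpoint-modify test, with verbose output
	perf test -v 'bp modify'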
index 8180319285af3377810c30c0298f37c73cb9bb8d..830481b8db26ae379d7e9816c130bdb49be04bd7 100644 (file)
@@ -542,6 +542,10 @@ int cmd_annotate(int argc, const char **argv)
        OPT_CALLBACK_DEFAULT(0, "stdio-color", NULL, "mode",
                             "'always' (default), 'never' or 'auto' only applicable to --stdio mode",
                             stdio__config_color, "always"),
+       OPT_CALLBACK(0, "percent-type", &annotate.opts, "local-period",
+                    "Set percent type local/global-period/hits",
+                    annotate_parse_percent_type),
+
        OPT_END()
        };
        int ret;
index 54d3f21b0e623eced87ba6b57589b05152150f55..b63bca4b0c2a6ec466caac6aa094bc2d41e823ee 100644 (file)
@@ -729,7 +729,7 @@ static char *compact_gfp_string(unsigned long gfp_flags)
 static int parse_gfp_flags(struct perf_evsel *evsel, struct perf_sample *sample,
                           unsigned int gfp_flags)
 {
-       struct pevent_record record = {
+       struct tep_record record = {
                .cpu = sample->cpu,
                .data = sample->raw_data,
                .size = sample->raw_size,
@@ -747,7 +747,7 @@ static int parse_gfp_flags(struct perf_evsel *evsel, struct perf_sample *sample,
        }
 
        trace_seq_init(&seq);
-       pevent_event_info(&seq, evsel->tp_format, &record);
+       tep_event_info(&seq, evsel->tp_format, &record);
 
        str = strtok_r(seq.buffer, " ", &pos);
        while (str) {
@@ -1974,7 +1974,7 @@ int cmd_kmem(int argc, const char **argv)
                        goto out_delete;
                }
 
-               kmem_page_size = pevent_get_page_size(evsel->tp_format->pevent);
+               kmem_page_size = tep_get_page_size(evsel->tp_format->pevent);
                symbol_conf.use_callchain = true;
        }
 
index 02f7a3c27761f717b9e8c5cf39d4788b2014297b..76e12bcd17654a3185fdeeb88ab9a56b7e10c6ee 100644 (file)
@@ -1124,6 +1124,9 @@ int cmd_report(int argc, const char **argv)
                   "Time span of interest (start,stop)"),
        OPT_BOOLEAN(0, "inline", &symbol_conf.inline_name,
                    "Show inline function"),
+       OPT_CALLBACK(0, "percent-type", &report.annotation_opts, "local-period",
+                    "Set percent type local/global-period/hits",
+                    annotate_parse_percent_type),
        OPT_END()
        };
        struct perf_data data = {
@@ -1366,9 +1369,9 @@ repeat:
        }
 
        if (session->tevent.pevent &&
-           pevent_set_function_resolver(session->tevent.pevent,
-                                        machine__resolve_kernel_addr,
-                                        &session->machines.host) < 0) {
+           tep_set_function_resolver(session->tevent.pevent,
+                                     machine__resolve_kernel_addr,
+                                     &session->machines.host) < 0) {
                pr_err("%s: failed to set libtraceevent function resolver\n",
                       __func__);
                return -1;
index 568ddfac3213e084c1f4c6077cd73943bf0644b9..ba481d73f910fbdf2388d02d24afe8747528ab2e 100644 (file)
@@ -3429,9 +3429,9 @@ int cmd_script(int argc, const char **argv)
                symbol_conf.use_callchain = false;
 
        if (session->tevent.pevent &&
-           pevent_set_function_resolver(session->tevent.pevent,
-                                        machine__resolve_kernel_addr,
-                                        &session->machines.host) < 0) {
+           tep_set_function_resolver(session->tevent.pevent,
+                                     machine__resolve_kernel_addr,
+                                     &session->machines.host) < 0) {
                pr_err("%s: failed to set libtraceevent function resolver\n", __func__);
                err = -1;
                goto out_delete;
index 88561eed79505f737570aeacd10cd0ec0b8db0b8..22ab8e67c7600865d7fc7a884feba24f15bbe66b 100644 (file)
@@ -77,7 +77,8 @@ struct trace {
                struct syscall  *table;
                struct {
                        struct perf_evsel *sys_enter,
-                                         *sys_exit;
+                                         *sys_exit,
+                                         *augmented;
                }               events;
        } syscalls;
        struct record_opts      opts;
@@ -121,7 +122,6 @@ struct trace {
        bool                    force;
        bool                    vfs_getname;
        int                     trace_pgfaults;
-       int                     open_id;
 };
 
 struct tp_field {
@@ -157,13 +157,11 @@ TP_UINT_FIELD__SWAPPED(16);
 TP_UINT_FIELD__SWAPPED(32);
 TP_UINT_FIELD__SWAPPED(64);
 
-static int tp_field__init_uint(struct tp_field *field,
-                              struct format_field *format_field,
-                              bool needs_swap)
+static int __tp_field__init_uint(struct tp_field *field, int size, int offset, bool needs_swap)
 {
-       field->offset = format_field->offset;
+       field->offset = offset;
 
-       switch (format_field->size) {
+       switch (size) {
        case 1:
                field->integer = tp_field__u8;
                break;
@@ -183,18 +181,28 @@ static int tp_field__init_uint(struct tp_field *field,
        return 0;
 }
 
+static int tp_field__init_uint(struct tp_field *field, struct format_field *format_field, bool needs_swap)
+{
+       return __tp_field__init_uint(field, format_field->size, format_field->offset, needs_swap);
+}
+
 static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
 {
        return sample->raw_data + field->offset;
 }
 
-static int tp_field__init_ptr(struct tp_field *field, struct format_field *format_field)
+static int __tp_field__init_ptr(struct tp_field *field, int offset)
 {
-       field->offset = format_field->offset;
+       field->offset = offset;
        field->pointer = tp_field__ptr;
        return 0;
 }
 
+static int tp_field__init_ptr(struct tp_field *field, struct format_field *format_field)
+{
+       return __tp_field__init_ptr(field, format_field->offset);
+}
+
 struct syscall_tp {
        struct tp_field id;
        union {
@@ -240,7 +248,47 @@ static void perf_evsel__delete_priv(struct perf_evsel *evsel)
        perf_evsel__delete(evsel);
 }
 
-static int perf_evsel__init_syscall_tp(struct perf_evsel *evsel, void *handler)
+static int perf_evsel__init_syscall_tp(struct perf_evsel *evsel)
+{
+       struct syscall_tp *sc = evsel->priv = malloc(sizeof(struct syscall_tp));
+
+       if (evsel->priv != NULL) {
+               if (perf_evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr"))
+                       goto out_delete;
+               return 0;
+       }
+
+       return -ENOMEM;
+out_delete:
+       zfree(&evsel->priv);
+       return -ENOENT;
+}
+
+static int perf_evsel__init_augmented_syscall_tp(struct perf_evsel *evsel)
+{
+       struct syscall_tp *sc = evsel->priv = malloc(sizeof(struct syscall_tp));
+
+       if (evsel->priv != NULL) {       /* field, sizeof_field, offsetof_field */
+               if (__tp_field__init_uint(&sc->id, sizeof(long), sizeof(long long), evsel->needs_swap))
+                       goto out_delete;
+
+               return 0;
+       }
+
+       return -ENOMEM;
+out_delete:
+       zfree(&evsel->priv);
+       return -EINVAL;
+}
+
+static int perf_evsel__init_augmented_syscall_tp_args(struct perf_evsel *evsel)
+{
+       struct syscall_tp *sc = evsel->priv;
+
+       return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
+}
+
+static int perf_evsel__init_raw_syscall_tp(struct perf_evsel *evsel, void *handler)
 {
        evsel->priv = malloc(sizeof(struct syscall_tp));
        if (evsel->priv != NULL) {
@@ -258,7 +306,7 @@ out_delete:
        return -ENOENT;
 }
 
-static struct perf_evsel *perf_evsel__syscall_newtp(const char *direction, void *handler)
+static struct perf_evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
 {
        struct perf_evsel *evsel = perf_evsel__newtp("raw_syscalls", direction);
 
@@ -269,7 +317,7 @@ static struct perf_evsel *perf_evsel__syscall_newtp(const char *direction, void
        if (IS_ERR(evsel))
                return NULL;
 
-       if (perf_evsel__init_syscall_tp(evsel, handler))
+       if (perf_evsel__init_raw_syscall_tp(evsel, handler))
                goto out_delete;
 
        return evsel;
@@ -805,12 +853,17 @@ static struct syscall_fmt *syscall_fmt__find(const char *name)
        return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
 }
 
+/*
+ * is_exit: is this "exit" or "exit_group"?
+ * is_open: is this "open" or "openat"? Used to associate the fd returned in sys_exit with the pathname in sys_enter.
+ */
 struct syscall {
        struct event_format *tp_format;
        int                 nr_args;
+       bool                is_exit;
+       bool                is_open;
        struct format_field *args;
        const char          *name;
-       bool                is_exit;
        struct syscall_fmt  *fmt;
        struct syscall_arg_fmt *arg_fmt;
 };
@@ -1299,6 +1352,7 @@ static int trace__read_syscall_info(struct trace *trace, int id)
        }
 
        sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
+       sc->is_open = !strcmp(name, "open") || !strcmp(name, "openat");
 
        return syscall__set_arg_fmts(sc);
 }
@@ -1661,6 +1715,37 @@ out_put:
        return err;
 }
 
+static int trace__fprintf_sys_enter(struct trace *trace, struct perf_evsel *evsel,
+                                   struct perf_sample *sample)
+{
+       struct thread_trace *ttrace;
+       struct thread *thread;
+       int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
+       struct syscall *sc = trace__syscall_info(trace, evsel, id);
+       char msg[1024];
+       void *args;
+
+       if (sc == NULL)
+               return -1;
+
+       thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
+       ttrace = thread__trace(thread, trace->output);
+       /*
+        * We need to get ttrace just to make sure it is there when syscall__scnprintf_args()
+        * and the rest of the beautifiers access it via struct syscall_arg.
+        */
+       if (ttrace == NULL)
+               goto out_put;
+
+       args = perf_evsel__sc_tp_ptr(evsel, args, sample);
+       syscall__scnprintf_args(sc, msg, sizeof(msg), args, trace, thread);
+       fprintf(trace->output, "%s", msg);
+       err = 0;
+out_put:
+       thread__put(thread);
+       return err;
+}
+
 static int trace__resolve_callchain(struct trace *trace, struct perf_evsel *evsel,
                                    struct perf_sample *sample,
                                    struct callchain_cursor *cursor)
@@ -1722,7 +1807,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
 
        ret = perf_evsel__sc_tp_uint(evsel, ret, sample);
 
-       if (id == trace->open_id && ret >= 0 && ttrace->filename.pending_open) {
+       if (sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
                trace__set_fd_pathname(thread, ret, ttrace->filename.name);
                ttrace->filename.pending_open = false;
                ++trace->stats.vfs_getname;
@@ -1957,11 +2042,17 @@ static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
        fprintf(trace->output, "%s:", evsel->name);
 
        if (perf_evsel__is_bpf_output(evsel)) {
-               bpf_output__fprintf(trace, sample);
+               if (evsel == trace->syscalls.events.augmented)
+                       trace__fprintf_sys_enter(trace, evsel, sample);
+               else
+                       bpf_output__fprintf(trace, sample);
        } else if (evsel->tp_format) {
-               event_format__fprintf(evsel->tp_format, sample->cpu,
-                                     sample->raw_data, sample->raw_size,
-                                     trace->output);
+               if (strncmp(evsel->tp_format->name, "sys_enter_", 10) ||
+                   trace__fprintf_sys_enter(trace, evsel, sample)) {
+                       event_format__fprintf(evsel->tp_format, sample->cpu,
+                                             sample->raw_data, sample->raw_size,
+                                             trace->output);
+               }
        }
 
        fprintf(trace->output, "\n");
@@ -2242,14 +2333,14 @@ static int trace__add_syscall_newtp(struct trace *trace)
        struct perf_evlist *evlist = trace->evlist;
        struct perf_evsel *sys_enter, *sys_exit;
 
-       sys_enter = perf_evsel__syscall_newtp("sys_enter", trace__sys_enter);
+       sys_enter = perf_evsel__raw_syscall_newtp("sys_enter", trace__sys_enter);
        if (sys_enter == NULL)
                goto out;
 
        if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
                goto out_delete_sys_enter;
 
-       sys_exit = perf_evsel__syscall_newtp("sys_exit", trace__sys_exit);
+       sys_exit = perf_evsel__raw_syscall_newtp("sys_exit", trace__sys_exit);
        if (sys_exit == NULL)
                goto out_delete_sys_enter;
 
@@ -2671,7 +2762,7 @@ static int trace__replay(struct trace *trace)
                                                             "syscalls:sys_enter");
 
        if (evsel &&
-           (perf_evsel__init_syscall_tp(evsel, trace__sys_enter) < 0 ||
+           (perf_evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 ||
            perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
                pr_err("Error during initialize raw_syscalls:sys_enter event\n");
                goto out;
@@ -2683,7 +2774,7 @@ static int trace__replay(struct trace *trace)
                evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
                                                             "syscalls:sys_exit");
        if (evsel &&
-           (perf_evsel__init_syscall_tp(evsel, trace__sys_exit) < 0 ||
+           (perf_evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 ||
            perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
                pr_err("Error during initialize raw_syscalls:sys_exit event\n");
                goto out;
@@ -2923,6 +3014,36 @@ static void evlist__set_evsel_handler(struct perf_evlist *evlist, void *handler)
                evsel->handler = handler;
 }
 
+static int evlist__set_syscall_tp_fields(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel;
+
+       evlist__for_each_entry(evlist, evsel) {
+               if (evsel->priv || !evsel->tp_format)
+                       continue;
+
+               if (strcmp(evsel->tp_format->system, "syscalls"))
+                       continue;
+
+               if (perf_evsel__init_syscall_tp(evsel))
+                       return -1;
+
+               if (!strncmp(evsel->tp_format->name, "sys_enter_", 10)) {
+                       struct syscall_tp *sc = evsel->priv;
+
+                       if (__tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64)))
+                               return -1;
+               } else if (!strncmp(evsel->tp_format->name, "sys_exit_", 9)) {
+                       struct syscall_tp *sc = evsel->priv;
+
+                       if (__tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap))
+                               return -1;
+               }
+       }
+
+       return 0;
+}
+
 /*
  * XXX: Hackish, just splitting the combined -e+--event (syscalls
  * (raw_syscalls:{sys_{enter,exit}} + events (tracepoints, HW, SW, etc) to use
@@ -3123,8 +3244,9 @@ int cmd_trace(int argc, const char **argv)
        };
        bool __maybe_unused max_stack_user_set = true;
        bool mmap_pages_user_set = true;
+       struct perf_evsel *evsel;
        const char * const trace_subcommands[] = { "record", NULL };
-       int err;
+       int err = -1;
        char bf[BUFSIZ];
 
        signal(SIGSEGV, sighandler_dump_stack);
@@ -3147,6 +3269,20 @@ int cmd_trace(int argc, const char **argv)
                                       "cgroup monitoring only available in system-wide mode");
        }
 
+       evsel = bpf__setup_output_event(trace.evlist, "__augmented_syscalls__");
+       if (IS_ERR(evsel)) {
+               bpf__strerror_setup_output_event(trace.evlist, PTR_ERR(evsel), bf, sizeof(bf));
+               pr_err("ERROR: Setup trace syscalls enter failed: %s\n", bf);
+               goto out;
+       }
+
+       if (evsel) {
+               if (perf_evsel__init_augmented_syscall_tp(evsel) ||
+                   perf_evsel__init_augmented_syscall_tp_args(evsel))
+                       goto out;
+               trace.syscalls.events.augmented = evsel;
+       }
+
        err = bpf__setup_stdout(trace.evlist);
        if (err) {
                bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
@@ -3182,8 +3318,13 @@ int cmd_trace(int argc, const char **argv)
                symbol_conf.use_callchain = true;
        }
 
-       if (trace.evlist->nr_entries > 0)
+       if (trace.evlist->nr_entries > 0) {
                evlist__set_evsel_handler(trace.evlist, trace__event_handler);
+               if (evlist__set_syscall_tp_fields(trace.evlist)) {
+                       perror("failed to set syscalls:* tracepoint fields");
+                       goto out;
+               }
+       }
 
        if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
                return trace__record(&trace, argc-1, &argv[1]);
@@ -3205,8 +3346,6 @@ int cmd_trace(int argc, const char **argv)
                }
        }
 
-       trace.open_id = syscalltbl__id(trace.sctbl, "open");
-
        err = target__validate(&trace.opts.target);
        if (err) {
                target__strerror(&trace.opts.target, err, bf, sizeof(bf));
index de28466c0186bf78652c99926c5449eaa5dc6a0b..466540ee8ea79f18743658bd76903531829cfffb 100755 (executable)
@@ -67,8 +67,12 @@ check_2 () {
 
   cmd="diff $* $file1 $file2 > /dev/null"
 
-  test -f $file2 &&
-  eval $cmd || echo "Warning: Kernel ABI header at 'tools/$file' differs from latest version at '$file'" >&2
+  test -f $file2 && {
+    eval $cmd || {
+      echo "Warning: Kernel ABI header at '$file1' differs from latest version at '$file2'" >&2
+      echo diff -u $file1 $file2
+    }
+  }
 }
 
 check () {
@@ -76,7 +80,7 @@ check () {
 
   shift
 
-  check_2 ../$file ../../$file $*
+  check_2 tools/$file $file $*
 }
 
 # Check if we have the kernel headers (tools/perf/../../include), else
@@ -84,6 +88,8 @@ check () {
 # differences.
 test -d ../../include || exit 0
 
+cd ../..
+
 # simple diff check
 for i in $HEADERS; do
   check $i -B
@@ -94,3 +100,8 @@ check arch/x86/lib/memcpy_64.S        '-I "^EXPORT_SYMBOL" -I "^#include <asm/ex
 check arch/x86/lib/memset_64.S        '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"'
 check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common.h>"'
 check include/uapi/linux/mman.h       '-I "^#include <\(uapi/\)*asm/mman.h>"'
+
+# diff non-symmetric files
+check_2 tools/perf/arch/x86/entry/syscalls/syscall_64.tbl arch/x86/entry/syscalls/syscall_64.tbl
+
+cd tools/perf
diff --git a/tools/perf/examples/bpf/augmented_syscalls.c b/tools/perf/examples/bpf/augmented_syscalls.c
new file mode 100644 (file)
index 0000000..69a3138
--- /dev/null
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Augment the openat syscall with the contents of the filename pointer argument.
+ *
+ * Test it with:
+ *
+ * perf trace -e tools/perf/examples/bpf/augmented_syscalls.c cat /etc/passwd > /dev/null
+ *
+ * It'll catch some openat syscalls related to the dynamic linker, and
+ * the last one should be the one for '/etc/passwd'.
+ *
+ * This matches what is marshalled into the raw_syscalls:sys_enter payload
+ * expected by the 'perf trace' beautifiers, so they will be able to consume it
+ * unmodified once that support is wired up in the next csets. For now it will
+ * appear in a dump done by the default tracepoint handler in 'perf trace',
+ * which uses bpf_output__fprintf() to just dump those contents, as done with
+ * the bpf-output event associated with the __bpf_stdout__ map declared in
+ * tools/perf/include/bpf/stdio.h.
+ */
+
+#include <stdio.h>
+
+struct bpf_map SEC("maps") __augmented_syscalls__ = {
+       .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+       .key_size = sizeof(int),
+       .value_size = sizeof(u32),
+       .max_entries = __NR_CPUS__,
+};
+
+struct syscall_enter_openat_args {
+       unsigned long long common_tp_fields;
+       long               syscall_nr;
+       long               dfd;
+       char               *filename_ptr;
+       long               flags;
+       long               mode;
+};
+
+struct augmented_enter_openat_args {
+       struct syscall_enter_openat_args args;
+       char                             filename[64];
+};
+
+int syscall_enter(openat)(struct syscall_enter_openat_args *args)
+{
+       struct augmented_enter_openat_args augmented_args;
+
+       probe_read(&augmented_args.args, sizeof(augmented_args.args), args);
+       probe_read_str(&augmented_args.filename, sizeof(augmented_args.filename), args->filename_ptr);
+       perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU,
+                         &augmented_args, sizeof(augmented_args));
+       return 1;
+}
+
+license(GPL);
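The offsets hardcoded by perf_evsel__init_augmented_syscall_tp() and perf_evsel__init_augmented_syscall_tp_args() in builtin-trace.c above (id at sizeof(long long), args one u64 later) assume exactly the payload layout of this example. A minimal sketch, not part of this series and assuming an LP64 target, that double-checks those offsets against the struct:

#include <stddef.h>
#include <assert.h>

struct syscall_enter_openat_args {
	unsigned long long common_tp_fields;
	long               syscall_nr;
	long               dfd;
	char               *filename_ptr;
	long               flags;
	long               mode;
};

int main(void)
{
	/* the syscall id is read at offset sizeof(long long), with size sizeof(long) */
	assert(offsetof(struct syscall_enter_openat_args, syscall_nr) == sizeof(long long));
	/* the beautified arguments start one u64 slot after the id */
	assert(offsetof(struct syscall_enter_openat_args, dfd) ==
	       offsetof(struct syscall_enter_openat_args, syscall_nr) + sizeof(unsigned long long));
	return 0;
}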
diff --git a/tools/perf/examples/bpf/hello.c b/tools/perf/examples/bpf/hello.c
new file mode 100644 (file)
index 0000000..cf3c2fd
--- /dev/null
@@ -0,0 +1,9 @@
+#include <stdio.h>
+
+int syscall_enter(openat)(void *args)
+{
+       puts("Hello, world\n");
+       return 0;
+}
+
+license(GPL);
diff --git a/tools/perf/examples/bpf/sys_enter_openat.c b/tools/perf/examples/bpf/sys_enter_openat.c
new file mode 100644 (file)
index 0000000..9cd124b
--- /dev/null
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hook into 'openat' syscall entry tracepoint
+ *
+ * Test it with:
+ *
+ * perf trace -e tools/perf/examples/bpf/sys_enter_openat.c cat /etc/passwd > /dev/null
+ *
+ * It'll catch some openat syscalls related to the dynamic linker, and
+ * the last one should be the one for '/etc/passwd'.
+ *
+ * The syscall_enter_openat_args struct can be used to get the syscall
+ * fields and use them for filtering calls, i.e. in expressions that
+ * determine the return value.
+ */
+
+#include <bpf.h>
+
+struct syscall_enter_openat_args {
+       unsigned long long unused;
+       long               syscall_nr;
+       long               dfd;
+       char               *filename_ptr;
+       long               flags;
+       long               mode;
+};
+
+int syscall_enter(openat)(struct syscall_enter_openat_args *args)
+{
+       return 1;
+}
+
+license(GPL);
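The note above about filtering relies on the convention that the return value of a BPF program attached to a tracepoint decides whether the sample is recorded: non-zero keeps it, zero drops it (this example always returns 1). A minimal sketch, not part of this series, that keeps only openat calls passing O_CLOEXEC; the flag value is spelled out because these restricted examples do not include the uapi fcntl headers:

#include <bpf.h>

#define O_CLOEXEC 02000000 /* asm-generic value, defined here only for the sketch */

struct syscall_enter_openat_args {
	unsigned long long unused;
	long               syscall_nr;
	long               dfd;
	char               *filename_ptr;
	long               flags;
	long               mode;
};

int syscall_enter(openat)(struct syscall_enter_openat_args *args)
{
	/* non-zero return records the sample, zero filters it out */
	return (args->flags & O_CLOEXEC) != 0;
}

license(GPL);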
index a63aa6241b7f36dd4f21f46e2cae350e6823f791..47897d65e799b31e812ac3bf02ad0584756c823d 100644 (file)
@@ -4,13 +4,33 @@
 
 #include <uapi/linux/bpf.h>
 
+/*
+ * A helper structure used by eBPF C program to describe map attributes to
+ * elf_bpf loader, taken from tools/testing/selftests/bpf/bpf_helpers.h:
+ */
+struct bpf_map {
+        unsigned int type;
+        unsigned int key_size;
+        unsigned int value_size;
+        unsigned int max_entries;
+        unsigned int map_flags;
+        unsigned int inner_map_idx;
+        unsigned int numa_node;
+};
+
 #define SEC(NAME) __attribute__((section(NAME),  used))
 
 #define probe(function, vars) \
        SEC(#function "=" #function " " #vars) function
 
+#define syscall_enter(name) \
+       SEC("syscalls:sys_enter_" #name) syscall_enter_ ## name
+
 #define license(name) \
 char _license[] SEC("license") = #name; \
 int _version SEC("version") = LINUX_VERSION_CODE;
 
+static int (*probe_read)(void *dst, int size, const void *unsafe_addr) = (void *)BPF_FUNC_probe_read;
+static int (*probe_read_str)(void *dst, int size, const void *unsafe_addr) = (void *)BPF_FUNC_probe_read_str;
+
 #endif /* _PERF_BPF_H */
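The new syscall_enter() macro is what lets the examples above spell a tracepoint hook as a plain function definition: SEC() places the function in an ELF section named after the tracepoint, which perf's BPF loader resolves to syscalls:sys_enter_<name>. Roughly, for the openat hook:

int syscall_enter(openat)(struct syscall_enter_openat_args *args);

/* preprocesses to approximately: */

int __attribute__((section("syscalls:sys_enter_openat"), used))
syscall_enter_openat(struct syscall_enter_openat_args *args);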
diff --git a/tools/perf/include/bpf/stdio.h b/tools/perf/include/bpf/stdio.h
new file mode 100644 (file)
index 0000000..2899cb7
--- /dev/null
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <bpf.h>
+
+struct bpf_map SEC("maps") __bpf_stdout__ = {
+       .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+       .key_size = sizeof(int),
+       .value_size = sizeof(u32),
+       .max_entries = __NR_CPUS__,
+};
+
+static int (*perf_event_output)(void *, struct bpf_map *, int, void *, unsigned long) =
+       (void *)BPF_FUNC_perf_event_output;
+
+#define puts(from) \
+       ({ const int __len = sizeof(from); \
+          char __from[__len] = from; \
+          perf_event_output(args, &__bpf_stdout__, BPF_F_CURRENT_CPU, \
+                         &__from, __len & (sizeof(from) - 1)); })
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/core-imp-def.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/core-imp-def.json
new file mode 100644 (file)
index 0000000..bc03c06
--- /dev/null
@@ -0,0 +1,32 @@
+[
+    {
+        "ArchStdEvent": "L1D_CACHE_RD",
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE_WR",
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE_REFILL_RD",
+    },
+    {
+        "ArchStdEvent": "L1D_CACHE_REFILL_WR",
+    },
+    {
+        "ArchStdEvent": "L1D_TLB_REFILL_RD",
+    },
+    {
+        "ArchStdEvent": "L1D_TLB_REFILL_WR",
+    },
+    {
+        "ArchStdEvent": "L1D_TLB_RD",
+    },
+    {
+        "ArchStdEvent": "L1D_TLB_WR",
+    },
+    {
+        "ArchStdEvent": "BUS_ACCESS_RD",
+    },
+    {
+        "ArchStdEvent": "BUS_ACCESS_WR",
+    }
+]
index f03e26ecb658ef8a4be4db8b6b0bfc09f8b8cfa7..59cd8604b0bd5757167faf5c1d8c82010e953015 100644 (file)
@@ -16,3 +16,4 @@
 0x00000000420f5160,v1,cavium/thunderx2,core
 0x00000000430f0af0,v1,cavium/thunderx2,core
 0x00000000480fd010,v1,hisilicon/hip08,core
+0x00000000500f0000,v1,ampere/emag,core
index 47bedf25ba6960b3c7cf8345396d86ff0304a1ee..96e7fc1ad3f95610dc01364a4f2e94a6d5ffa5bd 100644 (file)
@@ -16,8 +16,6 @@ static unsigned long *get_bitmap(const char *str, int nbits)
        bm = bitmap_alloc(nbits);
 
        if (map && bm) {
-               bitmap_zero(bm, nbits);
-
                for (i = 0; i < map->nr; i++)
                        set_bit(map->map[i], bm);
        }
index 4892bd2dc33e6b9a8a0d94ccad396b57e70f28c9..6b049f3f5cf4e794765b8ac94d8a73814ba7aba9 100644 (file)
@@ -232,6 +232,7 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
        u64 objdump_addr;
        const char *objdump_name;
        char decomp_name[KMOD_DECOMP_LEN];
+       bool decomp = false;
        int ret;
 
        pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);
@@ -305,6 +306,7 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
                        return -1;
                }
 
+               decomp = true;
                objdump_name = decomp_name;
        }
 
@@ -312,7 +314,7 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
        objdump_addr = map__rip_2objdump(al.map, al.addr);
        ret = read_via_objdump(objdump_name, objdump_addr, buf2, len);
 
-       if (dso__needs_decompress(al.map->dso))
+       if (decomp)
                unlink(objdump_name);
 
        if (ret > 0) {
index 148dd31cc2019fcc0d40231d1a9c0465dcb795e1..0579a70bbbff139cfa6a4f2fffd20ac5cafd7dd0 100644 (file)
@@ -5,34 +5,28 @@
 #include "dso.h"
 #include "debug.h"
 
-static int test(const char *path, bool alloc_name, bool alloc_ext,
-               bool kmod, bool comp, const char *name, const char *ext)
+static int test(const char *path, bool alloc_name, bool kmod,
+               int comp, const char *name)
 {
        struct kmod_path m;
 
        memset(&m, 0x0, sizeof(m));
 
        TEST_ASSERT_VAL("kmod_path__parse",
-                       !__kmod_path__parse(&m, path, alloc_name, alloc_ext));
+                       !__kmod_path__parse(&m, path, alloc_name));
 
-       pr_debug("%s - alloc name %d, alloc ext %d, kmod %d, comp %d, name '%s', ext '%s'\n",
-                path, alloc_name, alloc_ext, m.kmod, m.comp, m.name, m.ext);
+       pr_debug("%s - alloc name %d, kmod %d, comp %d, name '%s'\n",
+                path, alloc_name, m.kmod, m.comp, m.name);
 
        TEST_ASSERT_VAL("wrong kmod", m.kmod == kmod);
        TEST_ASSERT_VAL("wrong comp", m.comp == comp);
 
-       if (ext)
-               TEST_ASSERT_VAL("wrong ext", m.ext && !strcmp(ext, m.ext));
-       else
-               TEST_ASSERT_VAL("wrong ext", !m.ext);
-
        if (name)
                TEST_ASSERT_VAL("wrong name", m.name && !strcmp(name, m.name));
        else
                TEST_ASSERT_VAL("wrong name", !m.name);
 
        free(m.name);
-       free(m.ext);
        return 0;
 }
 
@@ -45,118 +39,118 @@ static int test_is_kernel_module(const char *path, int cpumode, bool expect)
        return 0;
 }
 
-#define T(path, an, ae, k, c, n, e) \
-       TEST_ASSERT_VAL("failed", !test(path, an, ae, k, c, n, e))
+#define T(path, an, k, c, n) \
+       TEST_ASSERT_VAL("failed", !test(path, an, k, c, n))
 
 #define M(path, c, e) \
        TEST_ASSERT_VAL("failed", !test_is_kernel_module(path, c, e))
 
 int test__kmod_path__parse(struct test *t __maybe_unused, int subtest __maybe_unused)
 {
-       /* path                alloc_name  alloc_ext   kmod  comp   name     ext */
-       T("/xxxx/xxxx/x-x.ko", true      , true      , true, false, "[x_x]", NULL);
-       T("/xxxx/xxxx/x-x.ko", false     , true      , true, false, NULL   , NULL);
-       T("/xxxx/xxxx/x-x.ko", true      , false     , true, false, "[x_x]", NULL);
-       T("/xxxx/xxxx/x-x.ko", false     , false     , true, false, NULL   , NULL);
+       /* path                alloc_name  kmod  comp   name   */
+       T("/xxxx/xxxx/x-x.ko", true      , true, 0    , "[x_x]");
+       T("/xxxx/xxxx/x-x.ko", false     , true, 0    , NULL   );
+       T("/xxxx/xxxx/x-x.ko", true      , true, 0    , "[x_x]");
+       T("/xxxx/xxxx/x-x.ko", false     , true, 0    , NULL   );
        M("/xxxx/xxxx/x-x.ko", PERF_RECORD_MISC_CPUMODE_UNKNOWN, true);
        M("/xxxx/xxxx/x-x.ko", PERF_RECORD_MISC_KERNEL, true);
        M("/xxxx/xxxx/x-x.ko", PERF_RECORD_MISC_USER, false);
 
 #ifdef HAVE_ZLIB_SUPPORT
-       /* path                alloc_name  alloc_ext   kmod  comp  name   ext */
-       T("/xxxx/xxxx/x.ko.gz", true     , true      , true, true, "[x]", "gz");
-       T("/xxxx/xxxx/x.ko.gz", false    , true      , true, true, NULL , "gz");
-       T("/xxxx/xxxx/x.ko.gz", true     , false     , true, true, "[x]", NULL);
-       T("/xxxx/xxxx/x.ko.gz", false    , false     , true, true, NULL , NULL);
+       /* path                alloc_name   kmod  comp  name  */
+       T("/xxxx/xxxx/x.ko.gz", true     , true, 1   , "[x]");
+       T("/xxxx/xxxx/x.ko.gz", false    , true, 1   , NULL );
+       T("/xxxx/xxxx/x.ko.gz", true     , true, 1   , "[x]");
+       T("/xxxx/xxxx/x.ko.gz", false    , true, 1   , NULL );
        M("/xxxx/xxxx/x.ko.gz", PERF_RECORD_MISC_CPUMODE_UNKNOWN, true);
        M("/xxxx/xxxx/x.ko.gz", PERF_RECORD_MISC_KERNEL, true);
        M("/xxxx/xxxx/x.ko.gz", PERF_RECORD_MISC_USER, false);
 
-       /* path              alloc_name  alloc_ext  kmod   comp  name    ext */
-       T("/xxxx/xxxx/x.gz", true      , true     , false, true, "x.gz" ,"gz");
-       T("/xxxx/xxxx/x.gz", false     , true     , false, true, NULL   ,"gz");
-       T("/xxxx/xxxx/x.gz", true      , false    , false, true, "x.gz" , NULL);
-       T("/xxxx/xxxx/x.gz", false     , false    , false, true, NULL   , NULL);
+       /* path              alloc_name  kmod   comp  name  */
+       T("/xxxx/xxxx/x.gz", true      , false, 1   , "x.gz");
+       T("/xxxx/xxxx/x.gz", false     , false, 1   , NULL  );
+       T("/xxxx/xxxx/x.gz", true      , false, 1   , "x.gz");
+       T("/xxxx/xxxx/x.gz", false     , false, 1   , NULL  );
        M("/xxxx/xxxx/x.gz", PERF_RECORD_MISC_CPUMODE_UNKNOWN, false);
        M("/xxxx/xxxx/x.gz", PERF_RECORD_MISC_KERNEL, false);
        M("/xxxx/xxxx/x.gz", PERF_RECORD_MISC_USER, false);
 
-       /* path   alloc_name  alloc_ext  kmod   comp  name     ext */
-       T("x.gz", true      , true     , false, true, "x.gz", "gz");
-       T("x.gz", false     , true     , false, true, NULL  , "gz");
-       T("x.gz", true      , false    , false, true, "x.gz", NULL);
-       T("x.gz", false     , false    , false, true, NULL  , NULL);
+       /* path   alloc_name  kmod   comp  name   */
+       T("x.gz", true      , false, 1   , "x.gz");
+       T("x.gz", false     , false, 1   , NULL  );
+       T("x.gz", true      , false, 1   , "x.gz");
+       T("x.gz", false     , false, 1   , NULL  );
        M("x.gz", PERF_RECORD_MISC_CPUMODE_UNKNOWN, false);
        M("x.gz", PERF_RECORD_MISC_KERNEL, false);
        M("x.gz", PERF_RECORD_MISC_USER, false);
 
-       /* path      alloc_name  alloc_ext  kmod  comp  name  ext */
-       T("x.ko.gz", true      , true     , true, true, "[x]", "gz");
-       T("x.ko.gz", false     , true     , true, true, NULL , "gz");
-       T("x.ko.gz", true      , false    , true, true, "[x]", NULL);
-       T("x.ko.gz", false     , false    , true, true, NULL , NULL);
+       /* path      alloc_name  kmod  comp  name  */
+       T("x.ko.gz", true      , true, 1   , "[x]");
+       T("x.ko.gz", false     , true, 1   , NULL );
+       T("x.ko.gz", true      , true, 1   , "[x]");
+       T("x.ko.gz", false     , true, 1   , NULL );
        M("x.ko.gz", PERF_RECORD_MISC_CPUMODE_UNKNOWN, true);
        M("x.ko.gz", PERF_RECORD_MISC_KERNEL, true);
        M("x.ko.gz", PERF_RECORD_MISC_USER, false);
 #endif
 
-       /* path            alloc_name  alloc_ext  kmod  comp   name             ext */
-       T("[test_module]", true      , true     , true, false, "[test_module]", NULL);
-       T("[test_module]", false     , true     , true, false, NULL           , NULL);
-       T("[test_module]", true      , false    , true, false, "[test_module]", NULL);
-       T("[test_module]", false     , false    , true, false, NULL           , NULL);
+       /* path            alloc_name  kmod  comp   name           */
+       T("[test_module]", true      , true, false, "[test_module]");
+       T("[test_module]", false     , true, false, NULL           );
+       T("[test_module]", true      , true, false, "[test_module]");
+       T("[test_module]", false     , true, false, NULL           );
        M("[test_module]", PERF_RECORD_MISC_CPUMODE_UNKNOWN, true);
        M("[test_module]", PERF_RECORD_MISC_KERNEL, true);
        M("[test_module]", PERF_RECORD_MISC_USER, false);
 
-       /* path            alloc_name  alloc_ext  kmod  comp   name             ext */
-       T("[test.module]", true      , true     , true, false, "[test.module]", NULL);
-       T("[test.module]", false     , true     , true, false, NULL           , NULL);
-       T("[test.module]", true      , false    , true, false, "[test.module]", NULL);
-       T("[test.module]", false     , false    , true, false, NULL           , NULL);
+       /* path            alloc_name  kmod  comp   name           */
+       T("[test.module]", true      , true, false, "[test.module]");
+       T("[test.module]", false     , true, false, NULL           );
+       T("[test.module]", true      , true, false, "[test.module]");
+       T("[test.module]", false     , true, false, NULL           );
        M("[test.module]", PERF_RECORD_MISC_CPUMODE_UNKNOWN, true);
        M("[test.module]", PERF_RECORD_MISC_KERNEL, true);
        M("[test.module]", PERF_RECORD_MISC_USER, false);
 
-       /* path     alloc_name  alloc_ext  kmod   comp   name      ext */
-       T("[vdso]", true      , true     , false, false, "[vdso]", NULL);
-       T("[vdso]", false     , true     , false, false, NULL    , NULL);
-       T("[vdso]", true      , false    , false, false, "[vdso]", NULL);
-       T("[vdso]", false     , false    , false, false, NULL    , NULL);
+       /* path     alloc_name  kmod   comp   name    */
+       T("[vdso]", true      , false, false, "[vdso]");
+       T("[vdso]", false     , false, false, NULL    );
+       T("[vdso]", true      , false, false, "[vdso]");
+       T("[vdso]", false     , false, false, NULL    );
        M("[vdso]", PERF_RECORD_MISC_CPUMODE_UNKNOWN, false);
        M("[vdso]", PERF_RECORD_MISC_KERNEL, false);
        M("[vdso]", PERF_RECORD_MISC_USER, false);
 
-       T("[vdso32]", true      , true     , false, false, "[vdso32]", NULL);
-       T("[vdso32]", false     , true     , false, false, NULL    , NULL);
-       T("[vdso32]", true      , false    , false, false, "[vdso32]", NULL);
-       T("[vdso32]", false     , false    , false, false, NULL    , NULL);
+       T("[vdso32]", true      , false, false, "[vdso32]");
+       T("[vdso32]", false     , false, false, NULL      );
+       T("[vdso32]", true      , false, false, "[vdso32]");
+       T("[vdso32]", false     , false, false, NULL      );
        M("[vdso32]", PERF_RECORD_MISC_CPUMODE_UNKNOWN, false);
        M("[vdso32]", PERF_RECORD_MISC_KERNEL, false);
        M("[vdso32]", PERF_RECORD_MISC_USER, false);
 
-       T("[vdsox32]", true      , true     , false, false, "[vdsox32]", NULL);
-       T("[vdsox32]", false     , true     , false, false, NULL    , NULL);
-       T("[vdsox32]", true      , false    , false, false, "[vdsox32]", NULL);
-       T("[vdsox32]", false     , false    , false, false, NULL    , NULL);
+       T("[vdsox32]", true      , false, false, "[vdsox32]");
+       T("[vdsox32]", false     , false, false, NULL       );
+       T("[vdsox32]", true      , false, false, "[vdsox32]");
+       T("[vdsox32]", false     , false, false, NULL       );
        M("[vdsox32]", PERF_RECORD_MISC_CPUMODE_UNKNOWN, false);
        M("[vdsox32]", PERF_RECORD_MISC_KERNEL, false);
        M("[vdsox32]", PERF_RECORD_MISC_USER, false);
 
-       /* path         alloc_name  alloc_ext  kmod   comp   name          ext */
-       T("[vsyscall]", true      , true     , false, false, "[vsyscall]", NULL);
-       T("[vsyscall]", false     , true     , false, false, NULL        , NULL);
-       T("[vsyscall]", true      , false    , false, false, "[vsyscall]", NULL);
-       T("[vsyscall]", false     , false    , false, false, NULL        , NULL);
+       /* path         alloc_name  kmod   comp   name        */
+       T("[vsyscall]", true      , false, false, "[vsyscall]");
+       T("[vsyscall]", false     , false, false, NULL        );
+       T("[vsyscall]", true      , false, false, "[vsyscall]");
+       T("[vsyscall]", false     , false, false, NULL        );
        M("[vsyscall]", PERF_RECORD_MISC_CPUMODE_UNKNOWN, false);
        M("[vsyscall]", PERF_RECORD_MISC_KERNEL, false);
        M("[vsyscall]", PERF_RECORD_MISC_USER, false);
 
-       /* path                alloc_name  alloc_ext  kmod   comp   name      ext */
-       T("[kernel.kallsyms]", true      , true     , false, false, "[kernel.kallsyms]", NULL);
-       T("[kernel.kallsyms]", false     , true     , false, false, NULL               , NULL);
-       T("[kernel.kallsyms]", true      , false    , false, false, "[kernel.kallsyms]", NULL);
-       T("[kernel.kallsyms]", false     , false    , false, false, NULL               , NULL);
+       /* path                alloc_name  kmod   comp   name      */
+       T("[kernel.kallsyms]", true      , false, false, "[kernel.kallsyms]");
+       T("[kernel.kallsyms]", false     , false, false, NULL               );
+       T("[kernel.kallsyms]", true      , false, false, "[kernel.kallsyms]");
+       T("[kernel.kallsyms]", false     , false, false, NULL               );
        M("[kernel.kallsyms]", PERF_RECORD_MISC_CPUMODE_UNKNOWN, false);
        M("[kernel.kallsyms]", PERF_RECORD_MISC_KERNEL, false);
        M("[kernel.kallsyms]", PERF_RECORD_MISC_USER, false);
index 0c3c87f86e034b45ecbe65e162457b3ddf47a947..9e9e4d37cc771868c183e22fbba4eaa6c095ba1b 100644 (file)
@@ -24,8 +24,6 @@ static unsigned long *get_bitmap(const char *str, int nbits)
        bm = bitmap_alloc(nbits);
 
        if (map && bm) {
-               bitmap_zero(bm, nbits);
-
                for (i = 0; i < map->nr; i++) {
                        set_bit(map->map[i], bm);
                }
index 3b4f1c10ff57c8ae7b64baaee79ada7aec3b5ba1..1d00e5ec7906ebaf23b57a5d9e1471ed1cbab465 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <sys/ttydefaults.h>
+#include <asm/bug.h>
 
 struct disasm_line_samples {
        double                percent;
@@ -115,7 +116,7 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
        if (!browser->navkeypressed)
                ops.width += 1;
 
-       annotation_line__write(al, notes, &ops);
+       annotation_line__write(al, notes, &ops, ab->opts);
 
        if (ops.current_entry)
                ab->selection = al;
@@ -227,10 +228,10 @@ static int disasm__cmp(struct annotation_line *a, struct annotation_line *b)
 {
        int i;
 
-       for (i = 0; i < a->samples_nr; i++) {
-               if (a->samples[i].percent == b->samples[i].percent)
+       for (i = 0; i < a->data_nr; i++) {
+               if (a->data[i].percent == b->data[i].percent)
                        continue;
-               return a->samples[i].percent < b->samples[i].percent;
+               return a->data[i].percent < b->data[i].percent;
        }
        return 0;
 }
@@ -314,11 +315,14 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser,
                        continue;
                }
 
-               for (i = 0; i < pos->al.samples_nr; i++) {
-                       struct annotation_data *sample = &pos->al.samples[i];
+               for (i = 0; i < pos->al.data_nr; i++) {
+                       double percent;
 
-                       if (max_percent < sample->percent)
-                               max_percent = sample->percent;
+                       percent = annotation_data__percent(&pos->al.data[i],
+                                                          browser->opts->percent_type);
+
+                       if (max_percent < percent)
+                               max_percent = percent;
                }
 
                if (max_percent < 0.01 && pos->al.ipc == 0) {
@@ -380,9 +384,10 @@ static void ui_browser__init_asm_mode(struct ui_browser *browser)
 #define SYM_TITLE_MAX_SIZE (PATH_MAX + 64)
 
 static int sym_title(struct symbol *sym, struct map *map, char *title,
-                    size_t sz)
+                    size_t sz, int percent_type)
 {
-       return snprintf(title, sz, "%s  %s", sym->name, map->dso->long_name);
+       return snprintf(title, sz, "%s  %s [Percent: %s]", sym->name, map->dso->long_name,
+                       percent_type_str(percent_type));
 }
 
 /*
@@ -420,7 +425,7 @@ static bool annotate_browser__callq(struct annotate_browser *browser,
 
        pthread_mutex_unlock(&notes->lock);
        symbol__tui_annotate(dl->ops.target.sym, ms->map, evsel, hbt, browser->opts);
-       sym_title(ms->sym, ms->map, title, sizeof(title));
+       sym_title(ms->sym, ms->map, title, sizeof(title), browser->opts->percent_type);
        ui_browser__show_title(&browser->b, title);
        return true;
 }
@@ -595,6 +600,7 @@ bool annotate_browser__continue_search_reverse(struct annotate_browser *browser,
 
 static int annotate_browser__show(struct ui_browser *browser, char *title, const char *help)
 {
+       struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
        struct map_symbol *ms = browser->priv;
        struct symbol *sym = ms->sym;
        char symbol_dso[SYM_TITLE_MAX_SIZE];
@@ -602,7 +608,7 @@ static int annotate_browser__show(struct ui_browser *browser, char *title, const
        if (ui_browser__show(browser, title, help) < 0)
                return -1;
 
-       sym_title(sym, ms->map, symbol_dso, sizeof(symbol_dso));
+       sym_title(sym, ms->map, symbol_dso, sizeof(symbol_dso), ab->opts->percent_type);
 
        ui_browser__gotorc_title(browser, 0, 0);
        ui_browser__set_color(browser, HE_COLORSET_ROOT);
@@ -610,6 +616,39 @@ static int annotate_browser__show(struct ui_browser *browser, char *title, const
        return 0;
 }
 
+static void
+switch_percent_type(struct annotation_options *opts, bool base)
+{
+       switch (opts->percent_type) {
+       case PERCENT_HITS_LOCAL:
+               if (base)
+                       opts->percent_type = PERCENT_PERIOD_LOCAL;
+               else
+                       opts->percent_type = PERCENT_HITS_GLOBAL;
+               break;
+       case PERCENT_HITS_GLOBAL:
+               if (base)
+                       opts->percent_type = PERCENT_PERIOD_GLOBAL;
+               else
+                       opts->percent_type = PERCENT_HITS_LOCAL;
+               break;
+       case PERCENT_PERIOD_LOCAL:
+               if (base)
+                       opts->percent_type = PERCENT_HITS_LOCAL;
+               else
+                       opts->percent_type = PERCENT_PERIOD_GLOBAL;
+               break;
+       case PERCENT_PERIOD_GLOBAL:
+               if (base)
+                       opts->percent_type = PERCENT_HITS_GLOBAL;
+               else
+                       opts->percent_type = PERCENT_PERIOD_LOCAL;
+               break;
+       default:
+               WARN_ON(1);
+       }
+}
+
 static int annotate_browser__run(struct annotate_browser *browser,
                                 struct perf_evsel *evsel,
                                 struct hist_browser_timer *hbt)
@@ -624,8 +663,7 @@ static int annotate_browser__run(struct annotate_browser *browser,
        char title[256];
        int key;
 
-       annotation__scnprintf_samples_period(notes, title, sizeof(title), evsel);
-
+       hists__scnprintf_title(hists, title, sizeof(title));
        if (annotate_browser__show(&browser->b, title, help) < 0)
                return -1;
 
@@ -701,6 +739,8 @@ static int annotate_browser__run(struct annotate_browser *browser,
                "k             Toggle line numbers\n"
                "P             Print to [symbol_name].annotation file.\n"
                "r             Run available scripts\n"
+               "p             Toggle percent type [local/global]\n"
+               "b             Toggle percent base [period/hits]\n"
                "?             Search string backwards\n");
                        continue;
                case 'r':
@@ -781,7 +821,7 @@ show_sup_ins:
                        continue;
                }
                case 'P':
-                       map_symbol__annotation_dump(ms, evsel);
+                       map_symbol__annotation_dump(ms, evsel, browser->opts);
                        continue;
                case 't':
                        if (notes->options->show_total_period) {
@@ -800,6 +840,12 @@ show_sup_ins:
                                notes->options->show_minmax_cycle = true;
                        annotation__update_column_widths(notes);
                        continue;
+               case 'p':
+               case 'b':
+                       switch_percent_type(browser->opts, key == 'b');
+                       hists__scnprintf_title(hists, title, sizeof(title));
+                       annotate_browser__show(&browser->b, title, help);
+                       continue;
                case K_LEFT:
                case K_ESC:
                case 'q':
index b604ef334dc956f77083908bed572eb5ba7be615..7efe15b9618d05b2cc40d3561a811f357189decb 100644 (file)
@@ -87,6 +87,7 @@ libperf-$(CONFIG_AUXTRACE) += intel-pt.o
 libperf-$(CONFIG_AUXTRACE) += intel-bts.o
 libperf-$(CONFIG_AUXTRACE) += arm-spe.o
 libperf-$(CONFIG_AUXTRACE) += arm-spe-pkt-decoder.o
+libperf-$(CONFIG_AUXTRACE) += s390-cpumsf.o
 
 ifdef CONFIG_LIBOPENCSD
 libperf-$(CONFIG_AUXTRACE) += cs-etm.o
index f91775b4bc3cb3d8eaf0aa9e936cd048dde58918..28cd6a17491b2077815ce0d9bb86f741f7a2be6e 100644 (file)
@@ -49,6 +49,7 @@ struct annotation_options annotation__default_options = {
        .jump_arrows    = true,
        .annotate_src   = true,
        .offset_level   = ANNOTATION__OFFSET_JUMP_TARGETS,
+       .percent_type   = PERCENT_PERIOD_LOCAL,
 };
 
 static regex_t  file_lineno;
@@ -245,8 +246,14 @@ find_target:
 
 indirect_call:
        tok = strchr(endptr, '*');
-       if (tok != NULL)
-               ops->target.addr = strtoull(tok + 1, NULL, 16);
+       if (tok != NULL) {
+               endptr++;
+
+               /* Indirect call can use a non-rip register and offset: callq  *0x8(%rbx).
+                * Do not parse such an instruction.  */
+               if (strstr(endptr, "(%r") == NULL)
+                       ops->target.addr = strtoull(endptr, NULL, 16);
+       }
        goto find_target;
 }
 
@@ -275,7 +282,19 @@ bool ins__is_call(const struct ins *ins)
        return ins->ops == &call_ops || ins->ops == &s390_call_ops;
 }
 
-static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map_symbol *ms)
+/*
+ * Prevents matching commas inside the comment section, e.g.:
+ * ffff200008446e70:       b.cs    ffff2000084470f4 <generic_exec_single+0x314>  // b.hs, b.nlast
+ */
+static inline const char *validate_comma(const char *c, struct ins_operands *ops)
+{
+       if (ops->raw_comment && c > ops->raw_comment)
+               return NULL;
+
+       return c;
+}
+
+static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
 {
        struct map *map = ms->map;
        struct symbol *sym = ms->sym;
@@ -284,6 +303,10 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op
        };
        const char *c = strchr(ops->raw, ',');
        u64 start, end;
+
+       ops->raw_comment = strchr(ops->raw, arch->objdump.comment_char);
+       c = validate_comma(c, ops);
+
        /*
         * Examples of lines to parse for the _cpp_lex_token@@Base
         * function:
@@ -303,6 +326,7 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op
                ops->target.addr = strtoull(c, NULL, 16);
                if (!ops->target.addr) {
                        c = strchr(c, ',');
+                       c = validate_comma(c, ops);
                        if (c++ != NULL)
                                ops->target.addr = strtoull(c, NULL, 16);
                }
@@ -360,9 +384,12 @@ static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
                return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.sym->name);
 
        c = strchr(ops->raw, ',');
+       c = validate_comma(c, ops);
+
        if (c != NULL) {
                const char *c2 = strchr(c + 1, ',');
 
+               c2 = validate_comma(c2, ops);
                /* check for 3-op insn */
                if (c2 != NULL)
                        c = c2;
@@ -1108,7 +1135,7 @@ annotation_line__new(struct annotate_args *args, size_t privsize)
        if (perf_evsel__is_group_event(evsel))
                nr = evsel->nr_members;
 
-       size += sizeof(al->samples[0]) * nr;
+       size += sizeof(al->data[0]) * nr;
 
        al = zalloc(size);
        if (al) {
@@ -1117,7 +1144,7 @@ annotation_line__new(struct annotate_args *args, size_t privsize)
                al->offset     = args->offset;
                al->line       = strdup(args->line);
                al->line_nr    = args->line_nr;
-               al->samples_nr = nr;
+               al->data_nr    = nr;
        }
 
        return al;
@@ -1297,7 +1324,8 @@ static int disasm_line__print(struct disasm_line *dl, u64 start, int addr_fmt_wi
 static int
 annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start,
                       struct perf_evsel *evsel, u64 len, int min_pcnt, int printed,
-                      int max_lines, struct annotation_line *queue, int addr_fmt_width)
+                      int max_lines, struct annotation_line *queue, int addr_fmt_width,
+                      int percent_type)
 {
        struct disasm_line *dl = container_of(al, struct disasm_line, al);
        static const char *prev_line;
@@ -1309,15 +1337,18 @@ annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start
                const char *color;
                struct annotation *notes = symbol__annotation(sym);
 
-               for (i = 0; i < al->samples_nr; i++) {
-                       struct annotation_data *sample = &al->samples[i];
+               for (i = 0; i < al->data_nr; i++) {
+                       double percent;
 
-                       if (sample->percent > max_percent)
-                               max_percent = sample->percent;
+                       percent = annotation_data__percent(&al->data[i],
+                                                          percent_type);
+
+                       if (percent > max_percent)
+                               max_percent = percent;
                }
 
-               if (al->samples_nr > nr_percent)
-                       nr_percent = al->samples_nr;
+               if (al->data_nr > nr_percent)
+                       nr_percent = al->data_nr;
 
                if (max_percent < min_pcnt)
                        return -1;
@@ -1330,7 +1361,8 @@ annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start
                                if (queue == al)
                                        break;
                                annotation_line__print(queue, sym, start, evsel, len,
-                                                      0, 0, 1, NULL, addr_fmt_width);
+                                                      0, 0, 1, NULL, addr_fmt_width,
+                                                      percent_type);
                        }
                }
 
@@ -1351,18 +1383,20 @@ annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start
                }
 
                for (i = 0; i < nr_percent; i++) {
-                       struct annotation_data *sample = &al->samples[i];
+                       struct annotation_data *data = &al->data[i];
+                       double percent;
 
-                       color = get_percent_color(sample->percent);
+                       percent = annotation_data__percent(data, percent_type);
+                       color = get_percent_color(percent);
 
                        if (symbol_conf.show_total_period)
                                color_fprintf(stdout, color, " %11" PRIu64,
-                                             sample->he.period);
+                                             data->he.period);
                        else if (symbol_conf.show_nr_samples)
                                color_fprintf(stdout, color, " %7" PRIu64,
-                                             sample->he.nr_samples);
+                                             data->he.nr_samples);
                        else
-                               color_fprintf(stdout, color, " %7.2f", sample->percent);
+                               color_fprintf(stdout, color, " %7.2f", percent);
                }
 
                printf(" : ");
@@ -1621,6 +1655,7 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
        char symfs_filename[PATH_MAX];
        struct kcore_extract kce;
        bool delete_extract = false;
+       bool decomp = false;
        int stdout_fd[2];
        int lineno = 0;
        int nline;
@@ -1654,6 +1689,7 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
                                                 tmp, sizeof(tmp)) < 0)
                        goto out;
 
+               decomp = true;
                strcpy(symfs_filename, tmp);
        }
 
@@ -1740,7 +1776,7 @@ out_free_command:
 out_remove_tmp:
        close(stdout_fd[0]);
 
-       if (dso__needs_decompress(dso))
+       if (decomp)
                unlink(symfs_filename);
 
        if (delete_extract)
@@ -1753,34 +1789,45 @@ out_close_stdout:
        goto out_free_command;
 }
 
-static void calc_percent(struct sym_hist *hist,
-                        struct annotation_data *sample,
+static void calc_percent(struct sym_hist *sym_hist,
+                        struct hists *hists,
+                        struct annotation_data *data,
                         s64 offset, s64 end)
 {
        unsigned int hits = 0;
        u64 period = 0;
 
        while (offset < end) {
-               hits   += hist->addr[offset].nr_samples;
-               period += hist->addr[offset].period;
+               hits   += sym_hist->addr[offset].nr_samples;
+               period += sym_hist->addr[offset].period;
                ++offset;
        }
 
-       if (hist->nr_samples) {
-               sample->he.period     = period;
-               sample->he.nr_samples = hits;
-               sample->percent = 100.0 * hits / hist->nr_samples;
+       if (sym_hist->nr_samples) {
+               data->he.period     = period;
+               data->he.nr_samples = hits;
+               data->percent[PERCENT_HITS_LOCAL] = 100.0 * hits / sym_hist->nr_samples;
        }
+
+       if (hists->stats.nr_non_filtered_samples)
+               data->percent[PERCENT_HITS_GLOBAL] = 100.0 * hits / hists->stats.nr_non_filtered_samples;
+
+       if (sym_hist->period)
+               data->percent[PERCENT_PERIOD_LOCAL] = 100.0 * period / sym_hist->period;
+
+       if (hists->stats.total_period)
+               data->percent[PERCENT_PERIOD_GLOBAL] = 100.0 * period / hists->stats.total_period;
 }
 
 static void annotation__calc_percent(struct annotation *notes,
-                                    struct perf_evsel *evsel, s64 len)
+                                    struct perf_evsel *leader, s64 len)
 {
        struct annotation_line *al, *next;
+       struct perf_evsel *evsel;
 
        list_for_each_entry(al, &notes->src->source, node) {
                s64 end;
-               int i;
+               int i = 0;
 
                if (al->offset == -1)
                        continue;
@@ -1788,14 +1835,17 @@ static void annotation__calc_percent(struct annotation *notes,
                next = annotation_line__next(al, &notes->src->source);
                end  = next ? next->offset : len;
 
-               for (i = 0; i < al->samples_nr; i++) {
-                       struct annotation_data *sample;
-                       struct sym_hist *hist;
+               for_each_group_evsel(evsel, leader) {
+                       struct hists *hists = evsel__hists(evsel);
+                       struct annotation_data *data;
+                       struct sym_hist *sym_hist;
 
-                       hist   = annotation__histogram(notes, evsel->idx + i);
-                       sample = &al->samples[i];
+                       BUG_ON(i >= al->data_nr);
 
-                       calc_percent(hist, sample, al->offset, end);
+                       sym_hist = annotation__histogram(notes, evsel->idx);
+                       data = &al->data[i++];
+
+                       calc_percent(sym_hist, hists, data, al->offset, end);
                }
        }
 }
@@ -1846,7 +1896,8 @@ int symbol__annotate(struct symbol *sym, struct map *map,
        return symbol__disassemble(sym, &args);
 }
 
-static void insert_source_line(struct rb_root *root, struct annotation_line *al)
+static void insert_source_line(struct rb_root *root, struct annotation_line *al,
+                              struct annotation_options *opts)
 {
        struct annotation_line *iter;
        struct rb_node **p = &root->rb_node;
@@ -1859,8 +1910,10 @@ static void insert_source_line(struct rb_root *root, struct annotation_line *al)
 
                ret = strcmp(iter->path, al->path);
                if (ret == 0) {
-                       for (i = 0; i < al->samples_nr; i++)
-                               iter->samples[i].percent_sum += al->samples[i].percent;
+                       for (i = 0; i < al->data_nr; i++) {
+                               iter->data[i].percent_sum += annotation_data__percent(&al->data[i],
+                                                                                     opts->percent_type);
+                       }
                        return;
                }
 
@@ -1870,8 +1923,10 @@ static void insert_source_line(struct rb_root *root, struct annotation_line *al)
                        p = &(*p)->rb_right;
        }
 
-       for (i = 0; i < al->samples_nr; i++)
-               al->samples[i].percent_sum = al->samples[i].percent;
+       for (i = 0; i < al->data_nr; i++) {
+               al->data[i].percent_sum = annotation_data__percent(&al->data[i],
+                                                                  opts->percent_type);
+       }
 
        rb_link_node(&al->rb_node, parent, p);
        rb_insert_color(&al->rb_node, root);
@@ -1881,10 +1936,10 @@ static int cmp_source_line(struct annotation_line *a, struct annotation_line *b)
 {
        int i;
 
-       for (i = 0; i < a->samples_nr; i++) {
-               if (a->samples[i].percent_sum == b->samples[i].percent_sum)
+       for (i = 0; i < a->data_nr; i++) {
+               if (a->data[i].percent_sum == b->data[i].percent_sum)
                        continue;
-               return a->samples[i].percent_sum > b->samples[i].percent_sum;
+               return a->data[i].percent_sum > b->data[i].percent_sum;
        }
 
        return 0;
@@ -1949,8 +2004,8 @@ static void print_summary(struct rb_root *root, const char *filename)
                int i;
 
                al = rb_entry(node, struct annotation_line, rb_node);
-               for (i = 0; i < al->samples_nr; i++) {
-                       percent = al->samples[i].percent_sum;
+               for (i = 0; i < al->data_nr; i++) {
+                       percent = al->data[i].percent_sum;
                        color = get_percent_color(percent);
                        color_fprintf(stdout, color, " %7.2f", percent);
 
@@ -2029,10 +2084,12 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map,
                evsel_name = buf;
        }
 
-       graph_dotted_len = printf(" %-*.*s|     Source code & Disassembly of %s for %s (%" PRIu64 " samples)\n",
+       graph_dotted_len = printf(" %-*.*s|     Source code & Disassembly of %s for %s (%" PRIu64 " samples, "
+                                 "percent: %s)\n",
                                  width, width, symbol_conf.show_total_period ? "Period" :
                                  symbol_conf.show_nr_samples ? "Samples" : "Percent",
-                                 d_filename, evsel_name, h->nr_samples);
+                                 d_filename, evsel_name, h->nr_samples,
+                                 percent_type_str(opts->percent_type));
 
        printf("%-*.*s----\n",
               graph_dotted_len, graph_dotted_len, graph_dotted_line);
@@ -2052,7 +2109,7 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map,
 
                err = annotation_line__print(pos, sym, start, evsel, len,
                                             opts->min_pcnt, printed, opts->max_lines,
-                                            queue, addr_fmt_width);
+                                            queue, addr_fmt_width, opts->percent_type);
 
                switch (err) {
                case 0:
@@ -2129,10 +2186,11 @@ static void FILE__write_graph(void *fp, int graph)
        fputs(s, fp);
 }
 
-int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp)
+static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp,
+                                    struct annotation_options *opts)
 {
        struct annotation *notes = symbol__annotation(sym);
-       struct annotation_write_ops ops = {
+       struct annotation_write_ops wops = {
                .first_line              = true,
                .obj                     = fp,
                .set_color               = FILE__set_color,
@@ -2146,15 +2204,16 @@ int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp)
        list_for_each_entry(al, &notes->src->source, node) {
                if (annotation_line__filter(al, notes))
                        continue;
-               annotation_line__write(al, notes, &ops);
+               annotation_line__write(al, notes, &wops, opts);
                fputc('\n', fp);
-               ops.first_line = false;
+               wops.first_line = false;
        }
 
        return 0;
 }
 
-int map_symbol__annotation_dump(struct map_symbol *ms, struct perf_evsel *evsel)
+int map_symbol__annotation_dump(struct map_symbol *ms, struct perf_evsel *evsel,
+                               struct annotation_options *opts)
 {
        const char *ev_name = perf_evsel__name(evsel);
        char buf[1024];
@@ -2176,7 +2235,7 @@ int map_symbol__annotation_dump(struct map_symbol *ms, struct perf_evsel *evsel)
 
        fprintf(fp, "%s() %s\nEvent: %s\n\n",
                ms->sym->name, ms->map->dso->long_name, ev_name);
-       symbol__annotate_fprintf2(ms->sym, fp);
+       symbol__annotate_fprintf2(ms->sym, fp, opts);
 
        fclose(fp);
        err = 0;
@@ -2346,7 +2405,8 @@ void annotation__update_column_widths(struct annotation *notes)
 }
 
 static void annotation__calc_lines(struct annotation *notes, struct map *map,
-                                 struct rb_root *root)
+                                  struct rb_root *root,
+                                  struct annotation_options *opts)
 {
        struct annotation_line *al;
        struct rb_root tmp_root = RB_ROOT;
@@ -2355,13 +2415,14 @@ static void annotation__calc_lines(struct annotation *notes, struct map *map,
                double percent_max = 0.0;
                int i;
 
-               for (i = 0; i < al->samples_nr; i++) {
-                       struct annotation_data *sample;
+               for (i = 0; i < al->data_nr; i++) {
+                       double percent;
 
-                       sample = &al->samples[i];
+                       percent = annotation_data__percent(&al->data[i],
+                                                          opts->percent_type);
 
-                       if (sample->percent > percent_max)
-                               percent_max = sample->percent;
+                       if (percent > percent_max)
+                               percent_max = percent;
                }
 
                if (percent_max <= 0.5)
@@ -2369,18 +2430,19 @@ static void annotation__calc_lines(struct annotation *notes, struct map *map,
 
                al->path = get_srcline(map->dso, notes->start + al->offset, NULL,
                                       false, true, notes->start + al->offset);
-               insert_source_line(&tmp_root, al);
+               insert_source_line(&tmp_root, al, opts);
        }
 
        resort_source_line(root, &tmp_root);
 }
 
 static void symbol__calc_lines(struct symbol *sym, struct map *map,
-                             struct rb_root *root)
+                              struct rb_root *root,
+                              struct annotation_options *opts)
 {
        struct annotation *notes = symbol__annotation(sym);
 
-       annotation__calc_lines(notes, map, root);
+       annotation__calc_lines(notes, map, root, opts);
 }
 
 int symbol__tty_annotate2(struct symbol *sym, struct map *map,
@@ -2389,7 +2451,7 @@ int symbol__tty_annotate2(struct symbol *sym, struct map *map,
 {
        struct dso *dso = map->dso;
        struct rb_root source_line = RB_ROOT;
-       struct annotation *notes = symbol__annotation(sym);
+       struct hists *hists = evsel__hists(evsel);
        char buf[1024];
 
        if (symbol__annotate2(sym, map, evsel, opts, NULL) < 0)
@@ -2397,13 +2459,14 @@ int symbol__tty_annotate2(struct symbol *sym, struct map *map,
 
        if (opts->print_lines) {
                srcline_full_filename = opts->full_path;
-               symbol__calc_lines(sym, map, &source_line);
+               symbol__calc_lines(sym, map, &source_line, opts);
                print_summary(&source_line, dso->long_name);
        }
 
-       annotation__scnprintf_samples_period(notes, buf, sizeof(buf), evsel);
-       fprintf(stdout, "%s\n%s() %s\n", buf, sym->name, dso->long_name);
-       symbol__annotate_fprintf2(sym, stdout);
+       hists__scnprintf_title(hists, buf, sizeof(buf));
+       fprintf(stdout, "%s, [percent: %s]\n%s() %s\n",
+               buf, percent_type_str(opts->percent_type), sym->name, dso->long_name);
+       symbol__annotate_fprintf2(sym, stdout, opts);
 
        annotated_source__purge(symbol__annotation(sym)->src);
 
@@ -2424,7 +2487,7 @@ int symbol__tty_annotate(struct symbol *sym, struct map *map,
 
        if (opts->print_lines) {
                srcline_full_filename = opts->full_path;
-               symbol__calc_lines(sym, map, &source_line);
+               symbol__calc_lines(sym, map, &source_line, opts);
                print_summary(&source_line, dso->long_name);
        }
 
@@ -2441,14 +2504,21 @@ bool ui__has_annotation(void)
 }
 
 
-double annotation_line__max_percent(struct annotation_line *al, struct annotation *notes)
+static double annotation_line__max_percent(struct annotation_line *al,
+                                          struct annotation *notes,
+                                          unsigned int percent_type)
 {
        double percent_max = 0.0;
        int i;
 
        for (i = 0; i < notes->nr_events; i++) {
-               if (al->samples[i].percent > percent_max)
-                       percent_max = al->samples[i].percent;
+               double percent;
+
+               percent = annotation_data__percent(&al->data[i],
+                                                  percent_type);
+
+               if (percent > percent_max)
+                       percent_max = percent;
        }
 
        return percent_max;
@@ -2487,7 +2557,7 @@ call_like:
 
 static void __annotation_line__write(struct annotation_line *al, struct annotation *notes,
                                     bool first_line, bool current_entry, bool change_color, int width,
-                                    void *obj,
+                                    void *obj, unsigned int percent_type,
                                     int  (*obj__set_color)(void *obj, int color),
                                     void (*obj__set_percent_color)(void *obj, double percent, bool current),
                                     int  (*obj__set_jumps_percent_color)(void *obj, int nr, bool current),
@@ -2495,7 +2565,7 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
                                     void (*obj__write_graph)(void *obj, int graph))
 
 {
-       double percent_max = annotation_line__max_percent(al, notes);
+       double percent_max = annotation_line__max_percent(al, notes, percent_type);
        int pcnt_width = annotation__pcnt_width(notes),
            cycles_width = annotation__cycles_width(notes);
        bool show_title = false;
@@ -2514,15 +2584,18 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
                int i;
 
                for (i = 0; i < notes->nr_events; i++) {
-                       obj__set_percent_color(obj, al->samples[i].percent, current_entry);
+                       double percent;
+
+                       percent = annotation_data__percent(&al->data[i], percent_type);
+
+                       obj__set_percent_color(obj, percent, current_entry);
                        if (notes->options->show_total_period) {
-                               obj__printf(obj, "%11" PRIu64 " ", al->samples[i].he.period);
+                               obj__printf(obj, "%11" PRIu64 " ", al->data[i].he.period);
                        } else if (notes->options->show_nr_samples) {
                                obj__printf(obj, "%6" PRIu64 " ",
-                                                  al->samples[i].he.nr_samples);
+                                                  al->data[i].he.nr_samples);
                        } else {
-                               obj__printf(obj, "%6.2f ",
-                                                  al->samples[i].percent);
+                               obj__printf(obj, "%6.2f ", percent);
                        }
                }
        } else {
@@ -2640,13 +2713,15 @@ print_addr:
 }
 
 void annotation_line__write(struct annotation_line *al, struct annotation *notes,
-                           struct annotation_write_ops *ops)
+                           struct annotation_write_ops *wops,
+                           struct annotation_options *opts)
 {
-       __annotation_line__write(al, notes, ops->first_line, ops->current_entry,
-                                ops->change_color, ops->width, ops->obj,
-                                ops->set_color, ops->set_percent_color,
-                                ops->set_jumps_percent_color, ops->printf,
-                                ops->write_graph);
+       __annotation_line__write(al, notes, wops->first_line, wops->current_entry,
+                                wops->change_color, wops->width, wops->obj,
+                                opts->percent_type,
+                                wops->set_color, wops->set_percent_color,
+                                wops->set_jumps_percent_color, wops->printf,
+                                wops->write_graph);
 }
 
 int symbol__annotate2(struct symbol *sym, struct map *map, struct perf_evsel *evsel,
@@ -2688,46 +2763,6 @@ out_free_offsets:
        return -1;
 }
 
-int __annotation__scnprintf_samples_period(struct annotation *notes,
-                                          char *bf, size_t size,
-                                          struct perf_evsel *evsel,
-                                          bool show_freq)
-{
-       const char *ev_name = perf_evsel__name(evsel);
-       char buf[1024], ref[30] = " show reference callgraph, ";
-       char sample_freq_str[64] = "";
-       unsigned long nr_samples = 0;
-       int nr_members = 1;
-       bool enable_ref = false;
-       u64 nr_events = 0;
-       char unit;
-       int i;
-
-       if (perf_evsel__is_group_event(evsel)) {
-               perf_evsel__group_desc(evsel, buf, sizeof(buf));
-               ev_name = buf;
-                nr_members = evsel->nr_members;
-       }
-
-       for (i = 0; i < nr_members; i++) {
-               struct sym_hist *ah = annotation__histogram(notes, evsel->idx + i);
-
-               nr_samples += ah->nr_samples;
-               nr_events  += ah->period;
-       }
-
-       if (symbol_conf.show_ref_callgraph && strstr(ev_name, "call-graph=no"))
-               enable_ref = true;
-
-       if (show_freq)
-               scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->attr.sample_freq);
-
-       nr_samples = convert_unit(nr_samples, &unit);
-       return scnprintf(bf, size, "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
-                        nr_samples, unit, evsel->nr_members > 1 ? "s" : "",
-                        ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);
-}
-
 #define ANNOTATION__CFG(n) \
        { .name = #n, .value = &annotation__default_options.n, }
 
@@ -2792,3 +2827,55 @@ void annotation_config__init(void)
        annotation__default_options.show_total_period = symbol_conf.show_total_period;
        annotation__default_options.show_nr_samples   = symbol_conf.show_nr_samples;
 }
+
+static unsigned int parse_percent_type(char *str1, char *str2)
+{
+       unsigned int type = (unsigned int) -1;
+
+       if (!strcmp("period", str1)) {
+               if (!strcmp("local", str2))
+                       type = PERCENT_PERIOD_LOCAL;
+               else if (!strcmp("global", str2))
+                       type = PERCENT_PERIOD_GLOBAL;
+       }
+
+       if (!strcmp("hits", str1)) {
+               if (!strcmp("local", str2))
+                       type = PERCENT_HITS_LOCAL;
+               else if (!strcmp("global", str2))
+                       type = PERCENT_HITS_GLOBAL;
+       }
+
+       return type;
+}
+
+int annotate_parse_percent_type(const struct option *opt, const char *_str,
+                               int unset __maybe_unused)
+{
+       struct annotation_options *opts = opt->value;
+       unsigned int type;
+       char *str1, *str2;
+       int err = -1;
+
+       str1 = strdup(_str);
+       if (!str1)
+               return -ENOMEM;
+
+       str2 = strchr(str1, '-');
+       if (!str2)
+               goto out;
+
+       *str2++ = 0;
+
+       type = parse_percent_type(str1, str2);
+       if (type == (unsigned int) -1)
+               type = parse_percent_type(str2, str1);
+       if (type != (unsigned int) -1) {
+               opts->percent_type = type;
+               err = 0;
+       }
+
+out:
+       free(str1);
+       return err;
+}
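
The percent-type string is accepted in either order ("hits-global" or "global-hits"): annotate_parse_percent_type() splits on the first '-' and retries parse_percent_type() with the halves swapped. A minimal standalone sketch of that round-trip, with the enum values and matching logic copied from the hunks above (the main() driver and the parse() wrapper are only illustrative):

	#include <stdio.h>
	#include <string.h>
	#include <stdlib.h>

	enum {
		PERCENT_HITS_LOCAL,
		PERCENT_HITS_GLOBAL,
		PERCENT_PERIOD_LOCAL,
		PERCENT_PERIOD_GLOBAL,
		PERCENT_MAX,
	};

	/* same two-level match as parse_percent_type() in the hunk above */
	static unsigned int parse_percent_type(const char *str1, const char *str2)
	{
		unsigned int type = (unsigned int) -1;

		if (!strcmp("period", str1)) {
			if (!strcmp("local", str2))
				type = PERCENT_PERIOD_LOCAL;
			else if (!strcmp("global", str2))
				type = PERCENT_PERIOD_GLOBAL;
		}

		if (!strcmp("hits", str1)) {
			if (!strcmp("local", str2))
				type = PERCENT_HITS_LOCAL;
			else if (!strcmp("global", str2))
				type = PERCENT_HITS_GLOBAL;
		}

		return type;
	}

	/* split "a-b" and try both orders, as annotate_parse_percent_type() does */
	static int parse(const char *arg, unsigned int *res)
	{
		char *str1 = strdup(arg), *str2;
		int err = -1;

		if (!str1)
			return -1;

		str2 = strchr(str1, '-');
		if (str2) {
			*str2++ = 0;
			*res = parse_percent_type(str1, str2);
			if (*res == (unsigned int) -1)
				*res = parse_percent_type(str2, str1);
			if (*res != (unsigned int) -1)
				err = 0;
		}
		free(str1);
		return err;
	}

	int main(void)
	{
		unsigned int a, b;

		/* both spellings resolve to PERCENT_HITS_GLOBAL */
		if (!parse("hits-global", &a) && !parse("global-hits", &b))
			printf("%u %u\n", a, b);	/* prints "1 1" */
		return 0;
	}
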
index a4c0d91907e662853aac249db5ce69dd3da9019b..5399ba2321bbb2c348e89fac2db9fed7c8d35384 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/list.h>
 #include <linux/rbtree.h>
 #include <pthread.h>
+#include <asm/bug.h>
 
 struct ins_ops;
 
@@ -21,6 +22,7 @@ struct ins {
 
 struct ins_operands {
        char    *raw;
+       char    *raw_comment;
        struct {
                char    *raw;
                char    *name;
@@ -82,6 +84,7 @@ struct annotation_options {
        int  context;
        const char *objdump_path;
        const char *disassembler_style;
+       unsigned int percent_type;
 };
 
 enum {
@@ -101,8 +104,16 @@ struct sym_hist_entry {
        u64             period;
 };
 
+enum {
+       PERCENT_HITS_LOCAL,
+       PERCENT_HITS_GLOBAL,
+       PERCENT_PERIOD_LOCAL,
+       PERCENT_PERIOD_GLOBAL,
+       PERCENT_MAX,
+};
+
 struct annotation_data {
-       double                   percent;
+       double                   percent[PERCENT_MAX];
        double                   percent_sum;
        struct sym_hist_entry    he;
 };
@@ -122,8 +133,8 @@ struct annotation_line {
        char                    *path;
        u32                      idx;
        int                      idx_asm;
-       int                      samples_nr;
-       struct annotation_data   samples[0];
+       int                      data_nr;
+       struct annotation_data   data[0];
 };
 
 struct disasm_line {
@@ -134,6 +145,27 @@ struct disasm_line {
        struct annotation_line   al;
 };
 
+static inline double annotation_data__percent(struct annotation_data *data,
+                                             unsigned int which)
+{
+       return which < PERCENT_MAX ? data->percent[which] : -1;
+}
+
+static inline const char *percent_type_str(unsigned int type)
+{
+       static const char *str[PERCENT_MAX] = {
+               "local hits",
+               "global hits",
+               "local period",
+               "global period",
+       };
+
+       if (WARN_ON(type >= PERCENT_MAX))
+               return "N/A";
+
+       return str[type];
+}
+
 static inline struct disasm_line *disasm_line(struct annotation_line *al)
 {
        return al ? container_of(al, struct disasm_line, al) : NULL;
@@ -169,22 +201,15 @@ struct annotation_write_ops {
        void (*write_graph)(void *obj, int graph);
 };
 
-double annotation_line__max_percent(struct annotation_line *al, struct annotation *notes);
 void annotation_line__write(struct annotation_line *al, struct annotation *notes,
-                           struct annotation_write_ops *ops);
+                           struct annotation_write_ops *ops,
+                           struct annotation_options *opts);
 
 int __annotation__scnprintf_samples_period(struct annotation *notes,
                                           char *bf, size_t size,
                                           struct perf_evsel *evsel,
                                           bool show_freq);
 
-static inline int annotation__scnprintf_samples_period(struct annotation *notes,
-                                                      char *bf, size_t size,
-                                                      struct perf_evsel *evsel)
-{
-       return __annotation__scnprintf_samples_period(notes, bf, size, evsel, true);
-}
-
 int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw);
 size_t disasm__fprintf(struct list_head *head, FILE *fp);
 void symbol__calc_percent(struct symbol *sym, struct perf_evsel *evsel);
@@ -340,12 +365,12 @@ int symbol__strerror_disassemble(struct symbol *sym, struct map *map,
 int symbol__annotate_printf(struct symbol *sym, struct map *map,
                            struct perf_evsel *evsel,
                            struct annotation_options *options);
-int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp);
 void symbol__annotate_zero_histogram(struct symbol *sym, int evidx);
 void symbol__annotate_decay_histogram(struct symbol *sym, int evidx);
 void annotated_source__purge(struct annotated_source *as);
 
-int map_symbol__annotation_dump(struct map_symbol *ms, struct perf_evsel *evsel);
+int map_symbol__annotation_dump(struct map_symbol *ms, struct perf_evsel *evsel,
+                               struct annotation_options *opts);
 
 bool ui__has_annotation(void);
 
@@ -373,4 +398,6 @@ static inline int symbol__tui_annotate(struct symbol *sym __maybe_unused,
 
 void annotation_config__init(void);
 
+int annotate_parse_percent_type(const struct option *opt, const char *_str,
+                               int unset);
 #endif /* __PERF_ANNOTATE_H */
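
With percent now an array indexed by the PERCENT_* enum, annotation_data__percent() is just a bounds-checked lookup and percent_type_str() maps the same index to a label. A self-contained sketch mirroring those two helpers; the declarations are copied locally since the full header is not reproduced here, and the sample values are made up:

	#include <stdio.h>

	enum {
		PERCENT_HITS_LOCAL,
		PERCENT_HITS_GLOBAL,
		PERCENT_PERIOD_LOCAL,
		PERCENT_PERIOD_GLOBAL,
		PERCENT_MAX,
	};

	struct annotation_data {
		double percent[PERCENT_MAX];	/* one value per percent flavour */
	};

	/* mirrors annotation_data__percent(): out-of-range index yields -1 */
	static double annotation_data__percent(struct annotation_data *data,
					       unsigned int which)
	{
		return which < PERCENT_MAX ? data->percent[which] : -1;
	}

	static const char *percent_type_str(unsigned int type)
	{
		static const char *str[PERCENT_MAX] = {
			"local hits", "global hits", "local period", "global period",
		};

		return type < PERCENT_MAX ? str[type] : "N/A";
	}

	int main(void)
	{
		struct annotation_data d = {
			.percent = { 12.5, 3.1, 11.9, 2.8 },	/* illustrative numbers */
		};

		for (unsigned int t = 0; t < PERCENT_MAX; t++)
			printf("%-13s %6.2f%%\n",
			       percent_type_str(t),
			       annotation_data__percent(&d, t));
		return 0;
	}
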
index d056447520a234cf759ac643fc2dd55b8eec47d9..db1511359c5e66b1040e42499cfc25df6677b026 100644 (file)
@@ -56,6 +56,7 @@
 #include "intel-pt.h"
 #include "intel-bts.h"
 #include "arm-spe.h"
+#include "s390-cpumsf.h"
 
 #include "sane_ctype.h"
 #include "symbol/kallsyms.h"
@@ -202,6 +203,9 @@ static int auxtrace_queues__grow(struct auxtrace_queues *queues,
        for (i = 0; i < queues->nr_queues; i++) {
                list_splice_tail(&queues->queue_array[i].head,
                                 &queue_array[i].head);
+               queue_array[i].tid = queues->queue_array[i].tid;
+               queue_array[i].cpu = queues->queue_array[i].cpu;
+               queue_array[i].set = queues->queue_array[i].set;
                queue_array[i].priv = queues->queue_array[i].priv;
        }
 
@@ -920,6 +924,8 @@ int perf_event__process_auxtrace_info(struct perf_tool *tool __maybe_unused,
                return arm_spe_process_auxtrace_info(event, session);
        case PERF_AUXTRACE_CS_ETM:
                return cs_etm__process_auxtrace_info(event, session);
+       case PERF_AUXTRACE_S390_CPUMSF:
+               return s390_cpumsf_process_auxtrace_info(event, session);
        case PERF_AUXTRACE_UNKNOWN:
        default:
                return -EINVAL;
index e731f55da072f604be21a42f7db7e94faf0ffa3e..71fc3bd742997b4bcbab07f72d88da869d8e089b 100644 (file)
@@ -44,6 +44,7 @@ enum auxtrace_type {
        PERF_AUXTRACE_INTEL_BTS,
        PERF_AUXTRACE_CS_ETM,
        PERF_AUXTRACE_ARM_SPE,
+       PERF_AUXTRACE_S390_CPUMSF,
 };
 
 enum itrace_period_type {
index 3d02ae38ec561279bfd7fb07a2314058ad9345ce..47aac41349a25764c39565343a051e79d7929247 100644 (file)
@@ -1529,13 +1529,13 @@ int bpf__apply_obj_config(void)
        bpf_object__for_each_safe(obj, objtmp)  \
                bpf_map__for_each(pos, obj)
 
-#define bpf__for_each_stdout_map(pos, obj, objtmp)     \
+#define bpf__for_each_map_named(pos, obj, objtmp, name)        \
        bpf__for_each_map(pos, obj, objtmp)             \
                if (bpf_map__name(pos) &&               \
-                       (strcmp("__bpf_stdout__",       \
+                       (strcmp(name,                   \
                                bpf_map__name(pos)) == 0))
 
-int bpf__setup_stdout(struct perf_evlist *evlist)
+struct perf_evsel *bpf__setup_output_event(struct perf_evlist *evlist, const char *name)
 {
        struct bpf_map_priv *tmpl_priv = NULL;
        struct bpf_object *obj, *tmp;
@@ -1544,11 +1544,11 @@ int bpf__setup_stdout(struct perf_evlist *evlist)
        int err;
        bool need_init = false;
 
-       bpf__for_each_stdout_map(map, obj, tmp) {
+       bpf__for_each_map_named(map, obj, tmp, name) {
                struct bpf_map_priv *priv = bpf_map__priv(map);
 
                if (IS_ERR(priv))
-                       return -BPF_LOADER_ERRNO__INTERNAL;
+                       return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
 
                /*
                 * No need to check map type: type should have been
@@ -1561,49 +1561,61 @@ int bpf__setup_stdout(struct perf_evlist *evlist)
        }
 
        if (!need_init)
-               return 0;
+               return NULL;
 
        if (!tmpl_priv) {
-               err = parse_events(evlist, "bpf-output/no-inherit=1,name=__bpf_stdout__/",
-                                  NULL);
+               char *event_definition = NULL;
+
+               if (asprintf(&event_definition, "bpf-output/no-inherit=1,name=%s/", name) < 0)
+                       return ERR_PTR(-ENOMEM);
+
+               err = parse_events(evlist, event_definition, NULL);
+               free(event_definition);
+
                if (err) {
-                       pr_debug("ERROR: failed to create bpf-output event\n");
-                       return -err;
+                       pr_debug("ERROR: failed to create the \"%s\" bpf-output event\n", name);
+                       return ERR_PTR(-err);
                }
 
                evsel = perf_evlist__last(evlist);
        }
 
-       bpf__for_each_stdout_map(map, obj, tmp) {
+       bpf__for_each_map_named(map, obj, tmp, name) {
                struct bpf_map_priv *priv = bpf_map__priv(map);
 
                if (IS_ERR(priv))
-                       return -BPF_LOADER_ERRNO__INTERNAL;
+                       return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
                if (priv)
                        continue;
 
                if (tmpl_priv) {
                        priv = bpf_map_priv__clone(tmpl_priv);
                        if (!priv)
-                               return -ENOMEM;
+                               return ERR_PTR(-ENOMEM);
 
                        err = bpf_map__set_priv(map, priv, bpf_map_priv__clear);
                        if (err) {
                                bpf_map_priv__clear(map, priv);
-                               return err;
+                               return ERR_PTR(err);
                        }
                } else if (evsel) {
                        struct bpf_map_op *op;
 
                        op = bpf_map__add_newop(map, NULL);
                        if (IS_ERR(op))
-                               return PTR_ERR(op);
+                               return ERR_PTR(PTR_ERR(op));
                        op->op_type = BPF_MAP_OP_SET_EVSEL;
                        op->v.evsel = evsel;
                }
        }
 
-       return 0;
+       return evsel;
+}
+
+int bpf__setup_stdout(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel = bpf__setup_output_event(evlist, "__bpf_stdout__");
+       return IS_ERR(evsel) ? PTR_ERR(evsel) : 0;
 }
 
 #define ERRNO_OFFSET(e)                ((e) - __BPF_LOADER_ERRNO__START)
@@ -1780,8 +1792,8 @@ int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
        return 0;
 }
 
-int bpf__strerror_setup_stdout(struct perf_evlist *evlist __maybe_unused,
-                              int err, char *buf, size_t size)
+int bpf__strerror_setup_output_event(struct perf_evlist *evlist __maybe_unused,
+                                    int err, char *buf, size_t size)
 {
        bpf__strerror_head(err, buf, size);
        bpf__strerror_end(buf, size);
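
bpf__setup_output_event() now returns either a struct perf_evsel pointer or an errno encoded with ERR_PTR(), so callers can distinguish "event created", "nothing to do" (NULL) and failure without an extra out-parameter; bpf__setup_stdout() in the hunk above is the canonical caller. A hedged sketch of that return convention, with stand-in definitions for the linux/err.h helpers and a dummy evsel type, since neither is reproduced in this diff:

	#include <stdio.h>
	#include <errno.h>

	/* userspace stand-ins for the linux/err.h helpers used by the patch */
	#define MAX_ERRNO	4095
	#define ERR_PTR(err)	((void *)(long)(err))
	#define PTR_ERR(ptr)	((long)(ptr))
	#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

	struct evsel { const char *name; };

	/* shape of bpf__setup_output_event(): evsel, NULL (nothing to do), or error */
	static struct evsel *setup_output_event(const char *name)
	{
		static struct evsel ev;

		if (!name)
			return ERR_PTR(-EINVAL);	/* failure, encoded in the pointer */
		if (!*name)
			return NULL;			/* no matching map: nothing to set up */
		ev.name = name;
		return &ev;				/* success */
	}

	/* same pattern as bpf__setup_stdout() in the hunk above */
	static int setup_stdout(void)
	{
		struct evsel *ev = setup_output_event("__bpf_stdout__");
		return IS_ERR(ev) ? (int)PTR_ERR(ev) : 0;
	}

	int main(void)
	{
		printf("setup_stdout -> %d\n", setup_stdout());	/* 0 on success */
		return 0;
	}
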
index 5d3aefd6fae7c6704c490599221d19e5fe357b28..62d245a90e1d96bf6f0888c9ffa78c7b3f43e72f 100644 (file)
@@ -43,6 +43,7 @@ enum bpf_loader_errno {
        __BPF_LOADER_ERRNO__END,
 };
 
+struct perf_evsel;
 struct bpf_object;
 struct parse_events_term;
 #define PERF_BPF_PROBE_GROUP "perf_bpf_probe"
@@ -82,9 +83,8 @@ int bpf__apply_obj_config(void);
 int bpf__strerror_apply_obj_config(int err, char *buf, size_t size);
 
 int bpf__setup_stdout(struct perf_evlist *evlist);
-int bpf__strerror_setup_stdout(struct perf_evlist *evlist, int err,
-                              char *buf, size_t size);
-
+struct perf_evsel *bpf__setup_output_event(struct perf_evlist *evlist, const char *name);
+int bpf__strerror_setup_output_event(struct perf_evlist *evlist, int err, char *buf, size_t size);
 #else
 #include <errno.h>
 
@@ -138,6 +138,12 @@ bpf__setup_stdout(struct perf_evlist *evlist __maybe_unused)
        return 0;
 }
 
+static inline struct perf_evsel *
+bpf__setup_output_event(struct perf_evlist *evlist __maybe_unused, const char *name __maybe_unused)
+{
+       return NULL;
+}
+
 static inline int
 __bpf_strerror(char *buf, size_t size)
 {
@@ -193,11 +199,16 @@ bpf__strerror_apply_obj_config(int err __maybe_unused,
 }
 
 static inline int
-bpf__strerror_setup_stdout(struct perf_evlist *evlist __maybe_unused,
-                          int err __maybe_unused, char *buf,
-                          size_t size)
+bpf__strerror_setup_output_event(struct perf_evlist *evlist __maybe_unused,
+                                int err __maybe_unused, char *buf, size_t size)
 {
        return __bpf_strerror(buf, size);
 }
+
 #endif
+
+static inline int bpf__strerror_setup_stdout(struct perf_evlist *evlist, int err, char *buf, size_t size)
+{
+       return bpf__strerror_setup_output_event(evlist, err, buf, size);
+}
 #endif
index ecca688a25fb75977c542f6107e8b11f20345c70..892e92e7e7fc8e507baf198947eef5f6326afdf0 100644 (file)
@@ -4,10 +4,12 @@
 
 #ifdef HAVE_ZLIB_SUPPORT
 int gzip_decompress_to_file(const char *input, int output_fd);
+bool gzip_is_compressed(const char *input);
 #endif
 
 #ifdef HAVE_LZMA_SUPPORT
 int lzma_decompress_to_file(const char *input, int output_fd);
+bool lzma_is_compressed(const char *input);
 #endif
 
 #endif /* PERF_COMPRESS_H */
index 5744c12641a53d341ef46d548a3498272841cf54..abd38abf1d918ab81b360ed9691831eaac162161 100644 (file)
@@ -310,8 +310,8 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
        if (flags & FIELD_IS_DYNAMIC) {
                unsigned long long tmp_val;
 
-               tmp_val = pevent_read_number(fmtf->event->pevent,
-                               data + offset, len);
+               tmp_val = tep_read_number(fmtf->event->pevent,
+                                         data + offset, len);
                offset = tmp_val;
                len = offset >> 16;
                offset &= 0xffff;
@@ -353,7 +353,7 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
                else {
                        unsigned long long value_int;
 
-                       value_int = pevent_read_number(
+                       value_int = tep_read_number(
                                        fmtf->event->pevent,
                                        data + offset + i * len, len);
 
index 51cf82cf18822fc66920f5d5850b4430ad8614ee..bbed90e5d9bb841782ed878a0c2ce1ae752418f9 100644 (file)
@@ -189,28 +189,34 @@ int dso__read_binary_type_filename(const struct dso *dso,
        return ret;
 }
 
+enum {
+       COMP_ID__NONE = 0,
+};
+
 static const struct {
        const char *fmt;
        int (*decompress)(const char *input, int output);
+       bool (*is_compressed)(const char *input);
 } compressions[] = {
+       [COMP_ID__NONE] = { .fmt = NULL, },
 #ifdef HAVE_ZLIB_SUPPORT
-       { "gz", gzip_decompress_to_file },
+       { "gz", gzip_decompress_to_file, gzip_is_compressed },
 #endif
 #ifdef HAVE_LZMA_SUPPORT
-       { "xz", lzma_decompress_to_file },
+       { "xz", lzma_decompress_to_file, lzma_is_compressed },
 #endif
-       { NULL, NULL },
+       { NULL, NULL, NULL },
 };
 
-bool is_supported_compression(const char *ext)
+static int is_supported_compression(const char *ext)
 {
        unsigned i;
 
-       for (i = 0; compressions[i].fmt; i++) {
+       for (i = 1; compressions[i].fmt; i++) {
                if (!strcmp(ext, compressions[i].fmt))
-                       return true;
+                       return i;
        }
-       return false;
+       return COMP_ID__NONE;
 }
 
 bool is_kernel_module(const char *pathname, int cpumode)
@@ -239,80 +245,73 @@ bool is_kernel_module(const char *pathname, int cpumode)
        return m.kmod;
 }
 
-bool decompress_to_file(const char *ext, const char *filename, int output_fd)
-{
-       unsigned i;
-
-       for (i = 0; compressions[i].fmt; i++) {
-               if (!strcmp(ext, compressions[i].fmt))
-                       return !compressions[i].decompress(filename,
-                                                          output_fd);
-       }
-       return false;
-}
-
 bool dso__needs_decompress(struct dso *dso)
 {
        return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
                dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
 }
 
-static int decompress_kmodule(struct dso *dso, const char *name, char *tmpbuf)
+static int decompress_kmodule(struct dso *dso, const char *name,
+                             char *pathname, size_t len)
 {
+       char tmpbuf[] = KMOD_DECOMP_NAME;
        int fd = -1;
-       struct kmod_path m;
 
        if (!dso__needs_decompress(dso))
                return -1;
 
-       if (kmod_path__parse_ext(&m, dso->long_name))
+       if (dso->comp == COMP_ID__NONE)
                return -1;
 
-       if (!m.comp)
-               goto out;
+       /*
+        * We have a proper compression id for the DSO and yet the file
+        * behind the 'name' can still be a plain uncompressed object.
+        *
+        * The reason lies in how we open DSO object files: we try all
+        * possible 'debug' objects until we find the data. So even if
+        * the DSO is represented by the 'krava.xz' module, we can end
+        * up here opening the '~/.debug/....23432432/debug' file,
+        * which is not compressed.
+        *
+        * To keep this transparent, we detect this and return the file
+        * descriptor to the uncompressed file.
+        */
+       if (!compressions[dso->comp].is_compressed(name))
+               return open(name, O_RDONLY);
 
        fd = mkstemp(tmpbuf);
        if (fd < 0) {
                dso->load_errno = errno;
-               goto out;
+               return -1;
        }
 
-       if (!decompress_to_file(m.ext, name, fd)) {
+       if (compressions[dso->comp].decompress(name, fd)) {
                dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
                close(fd);
                fd = -1;
        }
 
-out:
-       free(m.ext);
+       if (!pathname || (fd < 0))
+               unlink(tmpbuf);
+
+       if (pathname && (fd >= 0))
+               strncpy(pathname, tmpbuf, len);
+
        return fd;
 }
 
 int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
 {
-       char tmpbuf[] = KMOD_DECOMP_NAME;
-       int fd;
-
-       fd = decompress_kmodule(dso, name, tmpbuf);
-       unlink(tmpbuf);
-       return fd;
+       return decompress_kmodule(dso, name, NULL, 0);
 }
 
 int dso__decompress_kmodule_path(struct dso *dso, const char *name,
                                 char *pathname, size_t len)
 {
-       char tmpbuf[] = KMOD_DECOMP_NAME;
-       int fd;
+       int fd = decompress_kmodule(dso, name, pathname, len);
 
-       fd = decompress_kmodule(dso, name, tmpbuf);
-       if (fd < 0) {
-               unlink(tmpbuf);
-               return -1;
-       }
-
-       strncpy(pathname, tmpbuf, len);
        close(fd);
-       return 0;
+       return fd >= 0 ? 0 : -1;
 }
 
 /*
@@ -332,7 +331,7 @@ int dso__decompress_kmodule_path(struct dso *dso, const char *name,
  * Returns 0 if there's no strdup error, -ENOMEM otherwise.
  */
 int __kmod_path__parse(struct kmod_path *m, const char *path,
-                      bool alloc_name, bool alloc_ext)
+                      bool alloc_name)
 {
        const char *name = strrchr(path, '/');
        const char *ext  = strrchr(path, '.');
@@ -372,10 +371,9 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
                return 0;
        }
 
-       if (is_supported_compression(ext + 1)) {
-               m->comp = true;
+       m->comp = is_supported_compression(ext + 1);
+       if (m->comp > COMP_ID__NONE)
                ext -= 3;
-       }
 
        /* Check .ko extension only if there's enough name left. */
        if (ext > name)
@@ -393,14 +391,6 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
                strxfrchar(m->name, '-', '_');
        }
 
-       if (alloc_ext && m->comp) {
-               m->ext = strdup(ext + 4);
-               if (!m->ext) {
-                       free((void *) m->name);
-                       return -ENOMEM;
-               }
-       }
-
        return 0;
 }
 
@@ -413,8 +403,10 @@ void dso__set_module_info(struct dso *dso, struct kmod_path *m,
                dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
 
        /* _KMODULE_COMP should be next to _KMODULE */
-       if (m->kmod && m->comp)
+       if (m->kmod && m->comp) {
                dso->symtab_type++;
+               dso->comp = m->comp;
+       }
 
        dso__set_short_name(dso, strdup(m->name), true);
 }
@@ -468,6 +460,7 @@ static int __open_dso(struct dso *dso, struct machine *machine)
        int fd = -EINVAL;
        char *root_dir = (char *)"";
        char *name = malloc(PATH_MAX);
+       bool decomp = false;
 
        if (!name)
                return -ENOMEM;
@@ -491,12 +484,13 @@ static int __open_dso(struct dso *dso, struct machine *machine)
                        goto out;
                }
 
+               decomp = true;
                strcpy(name, newpath);
        }
 
        fd = do_open(name);
 
-       if (dso__needs_decompress(dso))
+       if (decomp)
                unlink(name);
 
 out:
@@ -1218,6 +1212,7 @@ struct dso *dso__new(const char *name)
                dso->a2l_fails = 1;
                dso->kernel = DSO_TYPE_USER;
                dso->needs_swap = DSO_SWAP__UNSET;
+               dso->comp = COMP_ID__NONE;
                RB_CLEAR_NODE(&dso->rb_node);
                dso->root = NULL;
                INIT_LIST_HEAD(&dso->node);
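
The reworked compressions[] table keeps slot 0 as COMP_ID__NONE, so is_supported_compression() starts scanning at index 1 and returns the slot number; that id is cached in dso->comp and later selects both the decompress() and is_compressed() handlers. A standalone sketch of this table-driven dispatch, using dummy handlers in place of the real gzip/xz ones:

	#include <stdio.h>
	#include <string.h>
	#include <stdbool.h>

	enum { COMP_ID__NONE = 0 };

	/* dummy handlers standing in for gzip_/lzma_decompress_to_file() etc. */
	static int  dummy_decompress(const char *input, int output_fd)
	{ (void)input; (void)output_fd; return 0; }
	static bool dummy_is_compressed(const char *input)
	{ (void)input; return true; }

	static const struct {
		const char *fmt;
		int  (*decompress)(const char *input, int output);
		bool (*is_compressed)(const char *input);
	} compressions[] = {
		[COMP_ID__NONE] = { .fmt = NULL, },
		{ "gz", dummy_decompress, dummy_is_compressed },
		{ "xz", dummy_decompress, dummy_is_compressed },
		{ NULL, NULL, NULL },
	};

	/* mirrors is_supported_compression(): index into the table, 0 means none */
	static int is_supported_compression(const char *ext)
	{
		for (unsigned i = 1; compressions[i].fmt; i++) {
			if (!strcmp(ext, compressions[i].fmt))
				return i;
		}
		return COMP_ID__NONE;
	}

	int main(void)
	{
		int comp = is_supported_compression("xz");	/* would be cached in dso->comp */

		if (comp != COMP_ID__NONE)
			printf("handler for .%s found at slot %d\n",
			       compressions[comp].fmt, comp);
		return 0;
	}
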
index ef69de2e69ea74bd1e20ae590cc19ea9d4d817ed..c5380500bed40b6afd03aa2721559bad8192b440 100644 (file)
@@ -175,6 +175,7 @@ struct dso {
        u16              short_name_len;
        void            *dwfl;                  /* DWARF debug info */
        struct auxtrace_cache *auxtrace_cache;
+       int              comp;
 
        /* dso data file */
        struct {
@@ -250,9 +251,7 @@ int dso__kernel_module_get_build_id(struct dso *dso, const char *root_dir);
 char dso__symtab_origin(const struct dso *dso);
 int dso__read_binary_type_filename(const struct dso *dso, enum dso_binary_type type,
                                   char *root_dir, char *filename, size_t size);
-bool is_supported_compression(const char *ext);
 bool is_kernel_module(const char *pathname, int cpumode);
-bool decompress_to_file(const char *ext, const char *filename, int output_fd);
 bool dso__needs_decompress(struct dso *dso);
 int dso__decompress_kmodule_fd(struct dso *dso, const char *name);
 int dso__decompress_kmodule_path(struct dso *dso, const char *name,
@@ -263,17 +262,15 @@ int dso__decompress_kmodule_path(struct dso *dso, const char *name,
 
 struct kmod_path {
        char *name;
-       char *ext;
-       bool  comp;
+       int   comp;
        bool  kmod;
 };
 
 int __kmod_path__parse(struct kmod_path *m, const char *path,
-                    bool alloc_name, bool alloc_ext);
+                    bool alloc_name);
 
-#define kmod_path__parse(__m, __p)      __kmod_path__parse(__m, __p, false, false)
-#define kmod_path__parse_name(__m, __p) __kmod_path__parse(__m, __p, true , false)
-#define kmod_path__parse_ext(__m, __p)  __kmod_path__parse(__m, __p, false, true)
+#define kmod_path__parse(__m, __p)      __kmod_path__parse(__m, __p, false)
+#define kmod_path__parse_name(__m, __p) __kmod_path__parse(__m, __p, true)
 
 void dso__set_module_info(struct dso *dso, struct kmod_path *m,
                          struct machine *machine);
index 0c8ecf0c78a40ac1088d40742d371cae7fa3d6e7..0cd42150f712e88b89614952c65293a9451dffe9 100644 (file)
@@ -541,10 +541,17 @@ static int __event__synthesize_thread(union perf_event *comm_event,
                                                      tgid, process, machine) < 0)
                        return -1;
 
+               /*
+                * send mmap only for thread group leader
+                * see thread__init_map_groups
+                */
+               if (pid == tgid &&
+                   perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
+                                                      process, machine, mmap_data,
+                                                      proc_map_timeout))
+                       return -1;
 
-               return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
-                                                         process, machine, mmap_data,
-                                                         proc_map_timeout);
+               return 0;
        }
 
        if (machine__is_default_guest(machine))
index e7a4b31a84fb5f2316549d7ab31378c46589c73a..be440df296150450b4e99e2ab2c5ab83175d29aa 100644 (file)
@@ -803,7 +803,7 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
                if (*output == -1) {
                        *output = fd;
 
-                       if (perf_mmap__mmap(&maps[idx], mp, *output) < 0)
+                       if (perf_mmap__mmap(&maps[idx], mp, *output, evlist_cpu) < 0)
                                return -1;
                } else {
                        if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
index ddf84b941abf894de7299ee5f63d4f3992d6a006..1a61628a1c1262c86adff2d76c548985470d9e11 100644 (file)
@@ -251,8 +251,9 @@ struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
 {
        struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
 
-       if (evsel != NULL)
-               perf_evsel__init(evsel, attr, idx);
+       if (!evsel)
+               return NULL;
+       perf_evsel__init(evsel, attr, idx);
 
        if (perf_evsel__is_bpf_output(evsel)) {
                evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
@@ -2683,7 +2684,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
 
 struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
 {
-       return pevent_find_field(evsel->tp_format, name);
+       return tep_find_field(evsel->tp_format, name);
 }
 
 void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
index 973c03167947579ff424d8e181cdbf42eaedec2d..163c960614d336a62f943e2ccd2e4663e973a0ab 100644 (file)
@@ -452,11 +452,18 @@ static inline int perf_evsel__group_idx(struct perf_evsel *evsel)
        return evsel->idx - evsel->leader->idx;
 }
 
+/* Iterates group WITHOUT the leader. */
 #define for_each_group_member(_evsel, _leader)                                         \
 for ((_evsel) = list_entry((_leader)->node.next, struct perf_evsel, node);     \
      (_evsel) && (_evsel)->leader == (_leader);                                        \
      (_evsel) = list_entry((_evsel)->node.next, struct perf_evsel, node))
 
+/* Iterates group WITH the leader. */
+#define for_each_group_evsel(_evsel, _leader)                                  \
+for ((_evsel) = _leader;                                                       \
+     (_evsel) && (_evsel)->leader == (_leader);                                        \
+     (_evsel) = list_entry((_evsel)->node.next, struct perf_evsel, node))
+
 static inline bool perf_evsel__has_branch_callstack(const struct perf_evsel *evsel)
 {
        return evsel->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
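
for_each_group_evsel() starts at the leader itself and, like for_each_group_member(), stops once a list entry's ->leader no longer points back at the leader. A list.h-free sketch of the two iteration shapes using a simplified next pointer instead of the real evlist linkage (the struct below is a stand-in, not the real perf_evsel):

	#include <stdio.h>

	struct evsel {
		const char	*name;
		struct evsel	*leader;	/* group members point at the leader */
		struct evsel	*next;		/* simplified stand-in for the evlist link */
	};

	int main(void)
	{
		struct evsel cycles, instructions, branches;

		cycles       = (struct evsel){ "cycles",       &cycles, &instructions };
		instructions = (struct evsel){ "instructions", &cycles, &branches };
		branches     = (struct evsel){ "branches",     &cycles, NULL };

		struct evsel *leader = &cycles, *evsel;

		/* analogue of for_each_group_member(): members only, leader skipped */
		for (evsel = leader->next; evsel && evsel->leader == leader; evsel = evsel->next)
			printf("member: %s\n", evsel->name);

		/* analogue of for_each_group_evsel(): leader included */
		for (evsel = leader; evsel && evsel->leader == leader; evsel = evsel->next)
			printf("group:  %s\n", evsel->name);

		return 0;
	}
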
index 5af58aac91ad2b186079914809934acecdf34578..3cadc252dd8977f4117419db240ac87183e54bea 100644 (file)
@@ -279,8 +279,6 @@ static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
        if (!set)
                return -ENOMEM;
 
-       bitmap_zero(set, size);
-
        p = (u64 *) set;
 
        for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
@@ -1285,7 +1283,6 @@ static int memory_node__read(struct memory_node *n, unsigned long idx)
                return -ENOMEM;
        }
 
-       bitmap_zero(n->set, size);
        n->node = idx;
        n->size = size;
 
@@ -3207,7 +3204,7 @@ static int read_attr(int fd, struct perf_header *ph,
 }
 
 static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
-                                               struct pevent *pevent)
+                                               struct tep_handle *pevent)
 {
        struct event_format *event;
        char bf[128];
@@ -3221,7 +3218,7 @@ static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
                return -1;
        }
 
-       event = pevent_find_event(pevent, evsel->attr.config);
+       event = tep_find_event(pevent, evsel->attr.config);
        if (event == NULL) {
                pr_debug("cannot find event format for %d\n", (int)evsel->attr.config);
                return -1;
@@ -3239,7 +3236,7 @@ static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
 }
 
 static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
-                                                 struct pevent *pevent)
+                                                 struct tep_handle *pevent)
 {
        struct perf_evsel *pos;
 
index 5e94857dfca2c8c47ae289b79dd2a717ae4a82b5..19262f98cd4e1252c09ca77e6319a02d72db50c0 100644 (file)
                "$CLANG_OPTIONS $KERNEL_INC_OPTIONS $PERF_BPF_INC_OPTIONS " \
                "-Wno-unused-value -Wno-pointer-sign "          \
                "-working-directory $WORKING_DIR "              \
-               "-c \"$CLANG_SOURCE\" -target bpf -O2 -o -"
+               "-c \"$CLANG_SOURCE\" -target bpf $CLANG_EMIT_LLVM -O2 -o - $LLVM_OPTIONS_PIPE"
 
 struct llvm_param llvm_param = {
        .clang_path = "clang",
+       .llc_path = "llc",
        .clang_bpf_cmd_template = CLANG_BPF_CMD_DEFAULT_TEMPLATE,
        .clang_opt = NULL,
+       .opts = NULL,
        .kbuild_dir = NULL,
        .kbuild_opts = NULL,
        .user_set_param = false,
@@ -51,6 +53,8 @@ int perf_llvm_config(const char *var, const char *value)
                llvm_param.kbuild_opts = strdup(value);
        else if (!strcmp(var, "dump-obj"))
                llvm_param.dump_obj = !!perf_config_bool(var, value);
+       else if (!strcmp(var, "opts"))
+               llvm_param.opts = strdup(value);
        else {
                pr_debug("Invalid LLVM config option: %s\n", value);
                return -1;
@@ -430,11 +434,13 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
        unsigned int kernel_version;
        char linux_version_code_str[64];
        const char *clang_opt = llvm_param.clang_opt;
-       char clang_path[PATH_MAX], abspath[PATH_MAX], nr_cpus_avail_str[64];
+       char clang_path[PATH_MAX], llc_path[PATH_MAX], abspath[PATH_MAX], nr_cpus_avail_str[64];
        char serr[STRERR_BUFSIZE];
        char *kbuild_dir = NULL, *kbuild_include_opts = NULL,
             *perf_bpf_include_opts = NULL;
        const char *template = llvm_param.clang_bpf_cmd_template;
+       char *pipe_template = NULL;
+       const char *opts = llvm_param.opts;
        char *command_echo = NULL, *command_out;
        char *perf_include_dir = system_path(PERF_INCLUDE_DIR);
 
@@ -484,6 +490,26 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
        force_set_env("PERF_BPF_INC_OPTIONS", perf_bpf_include_opts);
        force_set_env("WORKING_DIR", kbuild_dir ? : ".");
 
+       if (opts) {
+               err = search_program(llvm_param.llc_path, "llc", llc_path);
+               if (err) {
+                       pr_err("ERROR:\tunable to find llc.\n"
+                              "Hint:\tTry to install latest clang/llvm to support BPF. Check your $PATH\n"
+                              "     \tand 'llc-path' option in [llvm] section of ~/.perfconfig.\n");
+                       version_notice();
+                       goto errout;
+               }
+
+               if (asprintf(&pipe_template, "%s -emit-llvm | %s -march=bpf %s -filetype=obj -o -",
+                             template, llc_path, opts) < 0) {
+                       pr_err("ERROR:\tnot enough memory to setup command line\n");
+                       goto errout;
+               }
+
+               template = pipe_template;
+
+       }
+
        /*
         * Since we may reset clang's working dir, path of source file
         * should be transferred into absolute path, except we want
@@ -535,6 +561,7 @@ errout:
        free(obj_buf);
        free(perf_bpf_include_opts);
        free(perf_include_dir);
+       free(pipe_template);
        if (p_obj_buf)
                *p_obj_buf = NULL;
        if (p_obj_buf_sz)
index d3ad8deb5db4d359e28d8cec4794fe78ee34bf15..bf3f3f4c4fe20539ca1ae0158da982c5103fc1c4 100644 (file)
@@ -11,6 +11,8 @@
 struct llvm_param {
        /* Path of clang executable */
        const char *clang_path;
+       /* Path of llc executable */
+       const char *llc_path;
        /*
         * Template of clang bpf compiling. 5 env variables
         * can be used:
@@ -23,6 +25,13 @@ struct llvm_param {
        const char *clang_bpf_cmd_template;
        /* Will be filled in $CLANG_OPTIONS */
        const char *clang_opt;
+       /*
+        * If present it'll add -emit-llvm to $CLANG_OPTIONS to pipe
+        * the clang output to llc, useful for new llvm options not
+        * yet selectable via 'clang -mllvm option', such as -mattr=dwarfris
+        * in clang 6.0/llvm 7
+        */
+       const char *opts;
        /* Where to find kbuild system */
        const char *kbuild_dir;
        /*
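
The new 'opts' knob is only acted on when set: llvm__compile_bpf() then locates llc and appends "-emit-llvm | llc -march=bpf <opts> -filetype=obj -o -" to the clang command template, so options clang cannot forward (the comment above cites -mattr=dwarfris) go straight to llc. A minimal configuration sketch, under the assumption that the value is read from the [llvm] section of ~/.perfconfig via perf_llvm_config():

	[llvm]
		# forwarded to llc through the $LLVM_OPTIONS_PIPE part of the template
		opts = -mattr=dwarfris
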
index 07498eaddc0836c7339155eb4e0439e3044047ea..b1dd29a9d915efac1953d0d04ad9b99a5f3425de 100644 (file)
@@ -3,9 +3,13 @@
 #include <lzma.h>
 #include <stdio.h>
 #include <linux/compiler.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
 #include "compress.h"
 #include "util.h"
 #include "debug.h"
+#include <unistd.h>
 
 #define BUFSIZE 8192
 
@@ -99,3 +103,19 @@ err_fclose:
        fclose(infile);
        return err;
 }
+
+bool lzma_is_compressed(const char *input)
+{
+       int fd = open(input, O_RDONLY);
+       const uint8_t magic[6] = { 0xFD, '7', 'z', 'X', 'Z', 0x00 };
+       char buf[6] = { 0 };
+       ssize_t rc;
+
+       if (fd < 0)
+               return -1;
+
+       rc = read(fd, buf, sizeof(buf));
+       close(fd);
+       return rc == sizeof(buf) ?
+              memcmp(buf, magic, sizeof(buf)) == 0 : false;
+}
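
lzma_is_compressed() decides by comparing the first six bytes against the xz magic (0xFD '7' 'z' 'X' 'Z' 0x00). The gzip counterpart declared in compress.h is not part of this excerpt; a plausible sketch of it, assuming the same magic-byte probe with the standard two-byte gzip signature 0x1F 0x8B (the real zlib.c implementation may differ in detail):

	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>
	#include <fcntl.h>

	/* assumption: same probe shape as lzma_is_compressed(), for gzip */
	bool gzip_is_compressed(const char *input)
	{
		int fd = open(input, O_RDONLY);
		const uint8_t magic[2] = { 0x1f, 0x8b };
		char buf[2] = { 0 };
		ssize_t rc;

		if (fd < 0)
			return false;

		rc = read(fd, buf, sizeof(buf));
		close(fd);
		return rc == sizeof(buf) ?
		       memcmp(buf, magic, sizeof(buf)) == 0 : false;
	}
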
index b300a3973448a91fd4286af12605fd653fc2a74a..c4acd2001db0d054fd3c39f328f7fbf57aa0e8ea 100644 (file)
@@ -1212,8 +1212,10 @@ static int map_groups__set_module_path(struct map_groups *mg, const char *path,
         * Full name could reveal us kmod compression, so
         * we need to update the symtab_type if needed.
         */
-       if (m->comp && is_kmod_dso(map->dso))
+       if (m->comp && is_kmod_dso(map->dso)) {
                map->dso->symtab_type++;
+               map->dso->comp = m->comp;
+       }
 
        return 0;
 }
index 1de7660d93e97430382c00318dab288dd025ec39..d856b85862e23e9cd3026979ed29c0b75094d9de 100644 (file)
@@ -265,7 +265,7 @@ pid_t machine__get_current_tid(struct machine *machine, int cpu);
 int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
                             pid_t tid);
 /*
- * For use with libtraceevent's pevent_set_function_resolver()
+ * For use with libtraceevent's tep_set_function_resolver()
  */
 char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp);
 
index 89ac5b5dc218e874024f41cc2d730cd40152f9f6..6a6929f208b4da042eeece3006f9c53740002289 100644 (file)
@@ -381,20 +381,6 @@ struct map *map__clone(struct map *from)
        return map;
 }
 
-int map__overlap(struct map *l, struct map *r)
-{
-       if (l->start > r->start) {
-               struct map *t = l;
-               l = r;
-               r = t;
-       }
-
-       if (l->end > r->start)
-               return 1;
-
-       return 0;
-}
-
 size_t map__fprintf(struct map *map, FILE *fp)
 {
        return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
@@ -590,6 +576,13 @@ struct symbol *map_groups__find_symbol(struct map_groups *mg,
        return NULL;
 }
 
+static bool map__contains_symbol(struct map *map, struct symbol *sym)
+{
+       u64 ip = map->unmap_ip(map, sym->start);
+
+       return ip >= map->start && ip < map->end;
+}
+
 struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
                                         struct map **mapp)
 {
@@ -605,6 +598,10 @@ struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
 
                if (sym == NULL)
                        continue;
+               if (!map__contains_symbol(pos, sym)) {
+                       sym = NULL;
+                       continue;
+               }
                if (mapp != NULL)
                        *mapp = pos;
                goto out;
@@ -675,20 +672,42 @@ static void __map_groups__insert(struct map_groups *mg, struct map *map)
 static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
 {
        struct rb_root *root;
-       struct rb_node *next;
+       struct rb_node *next, *first;
        int err = 0;
 
        down_write(&maps->lock);
 
        root = &maps->entries;
-       next = rb_first(root);
 
+       /*
+        * Find first map where end > map->start.
+        * Same as find_vma() in kernel.
+        */
+       next = root->rb_node;
+       first = NULL;
+       while (next) {
+               struct map *pos = rb_entry(next, struct map, rb_node);
+
+               if (pos->end > map->start) {
+                       first = next;
+                       if (pos->start <= map->start)
+                               break;
+                       next = next->rb_left;
+               } else
+                       next = next->rb_right;
+       }
+
+       next = first;
        while (next) {
                struct map *pos = rb_entry(next, struct map, rb_node);
                next = rb_next(&pos->rb_node);
 
-               if (!map__overlap(pos, map))
-                       continue;
+               /*
+                * Stop if current map starts after map->end.
+                * Maps are ordered by start: next will not overlap for sure.
+                */
+               if (pos->start >= map->end)
+                       break;
 
                if (verbose >= 2) {
 
index 4cb90f242bed882e35821b5e6a12c4f2abe90ba8..e0f327b51e6605305e148843a4796c7b61f2376b 100644 (file)
@@ -166,7 +166,6 @@ static inline void __map__zput(struct map **map)
 
 #define map__zput(map) __map__zput(&map)
 
-int map__overlap(struct map *l, struct map *r);
 size_t map__fprintf(struct map *map, FILE *fp);
 size_t map__fprintf_dsoname(struct map *map, FILE *fp);
 char *map__srcline(struct map *map, u64 addr, struct symbol *sym);
index fc832676a7985f13c4ba7f419b380b7fa2425798..215f69f41672dcc44b6e57948ee3a9fbcdec2724 100644 (file)
@@ -164,7 +164,7 @@ void perf_mmap__munmap(struct perf_mmap *map)
        auxtrace_mmap__munmap(&map->auxtrace_mmap);
 }
 
-int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
+int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu)
 {
        /*
         * The last one will be done at perf_mmap__consume(), so that we
@@ -191,6 +191,7 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
                return -1;
        }
        map->fd = fd;
+       map->cpu = cpu;
 
        if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
                                &mp->auxtrace_mp, map->base, fd))
index d82294db1295333096eb1111349f1411ff7f8120..05a6d47c79561d5a62c04ae9d600a0ecd16f39b4 100644 (file)
@@ -18,6 +18,7 @@ struct perf_mmap {
        void             *base;
        int              mask;
        int              fd;
+       int              cpu;
        refcount_t       refcnt;
        u64              prev;
        u64              start;
@@ -60,7 +61,7 @@ struct mmap_params {
        struct auxtrace_mmap_params auxtrace_mp;
 };
 
-int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd);
+int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu);
 void perf_mmap__munmap(struct perf_mmap *map);
 
 void perf_mmap__get(struct perf_mmap *map);
index 5be021701f34e758aae5eda1d95bb7a9ce49ebad..cf8bd123cf73fb017cde3d60c7b912743847b965 100644 (file)
@@ -139,6 +139,9 @@ struct nsinfo *nsinfo__copy(struct nsinfo *nsi)
 {
        struct nsinfo *nnsi;
 
+       if (nsi == NULL)
+               return NULL;
+
        nnsi = calloc(1, sizeof(*nnsi));
        if (nnsi != NULL) {
                nnsi->pid = nsi->pid;
index 15eec49e71a12ad89596f928e39874522c4a65df..f8cd3e7c918668cc1f593b539c6648d16b091726 100644 (file)
@@ -1991,8 +1991,11 @@ static int set_filter(struct perf_evsel *evsel, const void *arg)
        int nr_addr_filters = 0;
        struct perf_pmu *pmu = NULL;
 
-       if (evsel == NULL)
-               goto err;
+       if (evsel == NULL) {
+               fprintf(stderr,
+                       "--filter option should follow a -e tracepoint or HW tracer option\n");
+               return -1;
+       }
 
        if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
                if (perf_evsel__append_tp_filter(evsel, str) < 0) {
@@ -2014,8 +2017,11 @@ static int set_filter(struct perf_evsel *evsel, const void *arg)
                perf_pmu__scan_file(pmu, "nr_addr_filters",
                                    "%d", &nr_addr_filters);
 
-       if (!nr_addr_filters)
-               goto err;
+       if (!nr_addr_filters) {
+               fprintf(stderr,
+                       "This CPU does not support address filtering\n");
+               return -1;
+       }
 
        if (perf_evsel__append_addr_filter(evsel, str) < 0) {
                fprintf(stderr,
@@ -2024,12 +2030,6 @@ static int set_filter(struct perf_evsel *evsel, const void *arg)
        }
 
        return 0;
-
-err:
-       fprintf(stderr,
-               "--filter option should follow a -e tracepoint or HW tracer option\n");
-
-       return -1;
 }
 
 int parse_filter(const struct option *opt, const char *str,
index 863b61478edd6d21533b925dde01abef35354a80..ce501ba14b088b112ed75ae4402562b4bcbd8eb1 100644 (file)
@@ -11,6 +11,7 @@
 #include "cpumap.h"
 #include "print_binary.h"
 #include "thread_map.h"
+#include "mmap.h"
 
 #if PY_MAJOR_VERSION < 3
 #define _PyUnicode_FromString(arg) \
@@ -341,7 +342,7 @@ static bool is_tracepoint(struct pyrf_event *pevent)
 static PyObject*
 tracepoint_field(struct pyrf_event *pe, struct format_field *field)
 {
-       struct pevent *pevent = field->event->pevent;
+       struct tep_handle *pevent = field->event->pevent;
        void *data = pe->sample.raw_data;
        PyObject *ret = NULL;
        unsigned long long val;
@@ -351,7 +352,7 @@ tracepoint_field(struct pyrf_event *pe, struct format_field *field)
                offset = field->offset;
                len    = field->size;
                if (field->flags & FIELD_IS_DYNAMIC) {
-                       val     = pevent_read_number(pevent, data + offset, len);
+                       val     = tep_read_number(pevent, data + offset, len);
                        offset  = val;
                        len     = offset >> 16;
                        offset &= 0xffff;
@@ -364,8 +365,8 @@ tracepoint_field(struct pyrf_event *pe, struct format_field *field)
                        field->flags &= ~FIELD_IS_STRING;
                }
        } else {
-               val = pevent_read_number(pevent, data + field->offset,
-                                        field->size);
+               val = tep_read_number(pevent, data + field->offset,
+                                     field->size);
                if (field->flags & FIELD_IS_POINTER)
                        ret = PyLong_FromUnsignedLong((unsigned long) val);
                else if (field->flags & FIELD_IS_SIGNED)
@@ -394,7 +395,7 @@ get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
                evsel->tp_format = tp_format;
        }
 
-       field = pevent_find_any_field(evsel->tp_format, str);
+       field = tep_find_any_field(evsel->tp_format, str);
        if (!field)
                return NULL;
 
@@ -976,6 +977,20 @@ static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
        return Py_BuildValue("i", evlist->nr_entries);
 }
 
+static struct perf_mmap *get_md(struct perf_evlist *evlist, int cpu)
+{
+       int i;
+
+       for (i = 0; i < evlist->nr_mmaps; i++) {
+               struct perf_mmap *md = &evlist->mmap[i];
+
+               if (md->cpu == cpu)
+                       return md;
+       }
+
+       return NULL;
+}
+
 static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
                                          PyObject *args, PyObject *kwargs)
 {
@@ -990,7 +1005,10 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
                                         &cpu, &sample_id_all))
                return NULL;
 
-       md = &evlist->mmap[cpu];
+       md = get_md(evlist, cpu);
+       if (!md)
+               return NULL;
+
        if (perf_mmap__read_init(md) < 0)
                goto end;
 
diff --git a/tools/perf/util/s390-cpumsf-kernel.h b/tools/perf/util/s390-cpumsf-kernel.h
new file mode 100644 (file)
index 0000000..de8c7ad
--- /dev/null
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Auxtrace support for s390 CPU measurement sampling facility
+ *
+ *  Copyright IBM Corp. 2018
+ *  Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
+ *            Thomas Richter <tmricht@linux.ibm.com>
+ */
+#ifndef S390_CPUMSF_KERNEL_H
+#define S390_CPUMSF_KERNEL_H
+
+#define        S390_CPUMSF_PAGESZ      4096    /* Size of sample block units */
+#define        S390_CPUMSF_DIAG_DEF_FIRST      0x8001  /* Diagnostic entry lowest id */
+
+struct hws_basic_entry {
+       unsigned int def:16;        /* 0-15  Data Entry Format           */
+       unsigned int R:4;           /* 16-19 reserved                    */
+       unsigned int U:4;           /* 20-23 Number of unique instruct.  */
+       unsigned int z:2;           /* zeros                             */
+       unsigned int T:1;           /* 26 PSW DAT mode                   */
+       unsigned int W:1;           /* 27 PSW wait state                 */
+       unsigned int P:1;           /* 28 PSW Problem state              */
+       unsigned int AS:2;          /* 29-30 PSW address-space control   */
+       unsigned int I:1;           /* 31 entry valid or invalid         */
+       unsigned int CL:2;          /* 32-33 Configuration Level         */
+       unsigned int:14;
+       unsigned int prim_asn:16;   /* primary ASN                       */
+       unsigned long long ia;      /* Instruction Address               */
+       unsigned long long gpp;     /* Guest Program Parameter           */
+       unsigned long long hpp;     /* Host Program Parameter            */
+};
+
+struct hws_diag_entry {
+       unsigned int def:16;        /* 0-15  Data Entry Format           */
+       unsigned int R:15;          /* 16-19 and 20-30 reserved          */
+       unsigned int I:1;           /* 31 entry valid or invalid         */
+       u8           data[];        /* Machine-dependent sample data     */
+};
+
+struct hws_combined_entry {
+       struct hws_basic_entry  basic;  /* Basic-sampling data entry */
+       struct hws_diag_entry   diag;   /* Diagnostic-sampling data entry */
+};
+
+struct hws_trailer_entry {
+       union {
+               struct {
+                       unsigned int f:1;       /* 0 - Block Full Indicator   */
+                       unsigned int a:1;       /* 1 - Alert request control  */
+                       unsigned int t:1;       /* 2 - Timestamp format       */
+                       unsigned int:29;        /* 3 - 31: Reserved           */
+                       unsigned int bsdes:16;  /* 32-47: size of basic SDE   */
+                       unsigned int dsdes:16;  /* 48-63: size of diagnostic SDE */
+               };
+               unsigned long long flags;       /* 0 - 64: All indicators     */
+       };
+       unsigned long long overflow;     /* 64 - sample Overflow count        */
+       unsigned char timestamp[16];     /* 16 - 31 timestamp                 */
+       unsigned long long reserved1;    /* 32 -Reserved                      */
+       unsigned long long reserved2;    /*                                   */
+       union {                          /* 48 - reserved for programming use */
+               struct {
+                       unsigned long long clock_base:1; /* in progusage2 */
+                       unsigned long long progusage1:63;
+                       unsigned long long progusage2;
+               };
+               unsigned long long progusage[2];
+       };
+};
+
+#endif
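
The trailer entry defined above always occupies the last bytes of each 4KB sample-data block, so its location and the number of basic/diagnostic entry pairs in front of it follow directly from S390_CPUMSF_PAGESZ and the bsdes/dsdes sizes reported in the trailer. A minimal sketch using the header above (assumed available as s390-cpumsf-kernel.h, with u8 normally supplied by the tools build); the helper names are illustrative and not part of this patch:

#include <stddef.h>

typedef unsigned char u8;               /* normally provided by linux/types.h */
#include "s390-cpumsf-kernel.h"

/* Locate the trailer entry at the end of one 4KB sample-data block. */
static inline struct hws_trailer_entry *sdb_trailer(unsigned char *page)
{
        return (struct hws_trailer_entry *)
               (page + S390_CPUMSF_PAGESZ - sizeof(struct hws_trailer_entry));
}

/* Basic+diagnostic entry pairs that fit in front of the trailer. */
static inline size_t sdb_entries_per_page(unsigned short bsdes,
                                          unsigned short dsdes)
{
        size_t payload = S390_CPUMSF_PAGESZ - sizeof(struct hws_trailer_entry);

        return (bsdes + dsdes) ? payload / (bsdes + dsdes) : 0;
}
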
diff --git a/tools/perf/util/s390-cpumsf.c b/tools/perf/util/s390-cpumsf.c
new file mode 100644 (file)
index 0000000..d2c78ff
--- /dev/null
@@ -0,0 +1,945 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2018
+ * Auxtrace support for s390 CPU-Measurement Sampling Facility
+ *
+ * Author(s):  Thomas Richter <tmricht@linux.ibm.com>
+ *
+ * Auxiliary traces are collected during 'perf record' using rbd000 event.
+ * Several PERF_RECORD_XXX are generated during recording:
+ *
+ * PERF_RECORD_AUX:
+ *     Records that new data landed in the AUX buffer part.
+ * PERF_RECORD_AUXTRACE:
+ *     Defines auxtrace data. Followed by the actual data. The contents of
+ *     the auxtrace data is dependent on the event and the CPU.
+ *     This record is generated by perf record command. For details
+ *     see Documentation/perf.data-file-format.txt.
+ * PERF_RECORD_AUXTRACE_INFO:
+ *     Defines a table of contents for PERF_RECORD_AUXTRACE records. This
+ *     record is generated during 'perf record' command. Each record contains up
+ *     to 256 entries describing offset and size of the AUXTRACE data in the
+ *     perf.data file.
+ * PERF_RECORD_AUXTRACE_ERROR:
+ *     Indicates an error during AUXTRACE collection such as buffer overflow.
+ * PERF_RECORD_FINISHED_ROUND:
+ *     Perf events are not necessarily in time stamp order, as they can be
+ *     collected in parallel on different CPUs. If the events should be
+ *     processed in time order they need to be sorted first.
+ *     Perf report guarantees that there is no reordering over a
+ *     PERF_RECORD_FINISHED_ROUND boundary event. All perf records with a
+ *     time stamp lower than this record are processed (and displayed) before
+ *     any succeeding perf records are processed.
+ *
+ * These records are evaluated during perf report command.
+ *
+ * 1. PERF_RECORD_AUXTRACE_INFO is used to set up the infrastructure for
+ * auxiliary trace data processing. See s390_cpumsf_process_auxtrace_info()
+ * below.
+ * Auxiliary trace data is collected per CPU. To merge the data into the report
+ * an auxtrace_queue is created for each CPU. It is assumed that the auxtrace
+ * data is in ascending order.
+ *
+ * Each queue has a doubly linked list of auxtrace_buffers. This list contains
+ * the offset and size of a CPU's auxtrace data. During auxtrace processing
+ * the data portion is mmap()'ed.
+ *
+ * To sort the queues in chronological order, all queue access is controlled
+ * by the auxtrace_heap. This is basically a stack; each stack element has two
+ * entries, the queue number and a time stamp. However, the stack is sorted by
+ * the time stamps. The highest time stamp is at the bottom, the lowest
+ * (nearest) time stamp is at the top. That sort order is maintained at all
+ * times!
+ *
+ * After the auxtrace infrastructure has been set up, the auxtrace queues are
+ * filled with data (offset/size pairs) and the auxtrace_heap is populated.
+ *
+ * 2. PERF_RECORD_XXX processing triggers access to the auxtrace_queues.
+ * Each record is handled by s390_cpumsf_process_event(). The time stamp of
+ * the perf record is compared with the time stamp located on the auxtrace_heap
+ * top element. If that time stamp is lower than the time stamp from the
+ * record sample, the auxtrace queues will be processed. As auxtrace queues
+ * control many auxtrace_buffers and each buffer can be quite large, the
+ * auxtrace buffer might be processed only partially. In this case the
+ * position in the auxtrace_buffer of that queue is remembered and the time
+ * stamp of the last processed entry of the auxtrace_buffer replaces the
+ * current auxtrace_heap top.
+ *
+ * 3. Auxtrace_queues might run out of data and are fed by the
+ * PERF_RECORD_AUXTRACE handling, see s390_cpumsf_process_auxtrace_event().
+ *
+ * Event Generation
+ * Each sampling-data entry in the auxiliary trace data generates a perf
+ * sample. This sample is filled with data from the auxtrace such as PID/TID,
+ * instruction address, CPU state,
+ * etc. This sample is processed with perf_session__deliver_synth_event() to
+ * be included into the GUI.
+ *
+ * 4. PERF_RECORD_FINISHED_ROUND event is used to process all the remaining
+ * auxiliary trace entries until the time stamp of this record is reached on
+ * the auxtrace_heap top. This is triggered by ordered_event->deliver().
+ *
+ *
+ * Perf event processing.
+ * Event processing of PERF_RECORD_XXX entries relies on time stamp entries.
+ * This is the function call sequence:
+ *
+ * __cmd_report()
+ * |
+ * perf_session__process_events()
+ * |
+ * __perf_session__process_events()
+ * |
+ * perf_session__process_event()
+ * |  This function splits the PERF_RECORD_XXX records.
+ * |  - Those generated by perf record command (type number equal or higher
+ * |    than PERF_RECORD_USER_TYPE_START) are handled by
+ * |    perf_session__process_user_event(see below)
+ * |  - Those generated by the kernel are handled by
+ * |    perf_evlist__parse_sample_timestamp()
+ * |
+ * perf_evlist__parse_sample_timestamp()
+ * |  Extract time stamp from sample data.
+ * |
+ * perf_session__queue_event()
+ * |  If timestamp is positive the sample is entered into an ordered_event
+ * |  list, sorted by timestamp. The event processing is deferred until
+ * |  later (see perf_session__process_user_event()).
+ * |  Other timestamps (0 or -1) are handled immediately by
+ * |  perf_session__deliver_event(). These are events generated at start up
+ * |  of command perf record. They create PERF_RECORD_COMM and PERF_RECORD_MMAP*
+ * |  records. They are needed to create a list of running processes and their
+ * |  memory mappings and layout. They are needed at the beginning to enable
+ * |  command perf report to create process trees and memory mappings.
+ * |
+ * perf_session__deliver_event()
+ * |  Delivers a PERF_RECORD_XXX entry for handling.
+ * |
+ * auxtrace__process_event()
+ * |  The timestamp of the PERF_RECORD_XXX entry is taken to correlate with
+ * |  time stamps from the auxiliary trace buffers. This enables
+ * |  synchronization between auxiliary trace data and the events on the
+ * |  perf.data file.
+ * |
+ * machine__deliver_event()
+ * |  Handles the PERF_RECORD_XXX event. This depends on the record type.
+ *    It might update the process tree, update a process memory map or enter
+ *    a sample with IP and call chain data into the GUI data pool.
+ *
+ *
+ * Processing deferred by perf_session__process_user_event() finally happens
+ * when a PERF_RECORD_FINISHED_ROUND is encountered. These
+ * are generated during command perf record.
+ * The timestamp of PERF_RECORD_FINISHED_ROUND event is taken to process all
+ * PERF_RECORD_XXX entries stored in the ordered_event list. This list was
+ * built up while reading the perf.data file.
+ * Each event is now processed by calling perf_session__deliver_event().
+ * This enables time synchronization between the data in the perf.data file and
+ * the data in the auxiliary trace buffers.
+ */
+
+#include <endian.h>
+#include <errno.h>
+#include <byteswap.h>
+#include <inttypes.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/log2.h>
+
+#include "cpumap.h"
+#include "color.h"
+#include "evsel.h"
+#include "evlist.h"
+#include "machine.h"
+#include "session.h"
+#include "util.h"
+#include "thread.h"
+#include "debug.h"
+#include "auxtrace.h"
+#include "s390-cpumsf.h"
+#include "s390-cpumsf-kernel.h"
+
+struct s390_cpumsf {
+       struct auxtrace         auxtrace;
+       struct auxtrace_queues  queues;
+       struct auxtrace_heap    heap;
+       struct perf_session     *session;
+       struct machine          *machine;
+       u32                     auxtrace_type;
+       u32                     pmu_type;
+       u16                     machine_type;
+       bool                    data_queued;
+};
+
+struct s390_cpumsf_queue {
+       struct s390_cpumsf      *sf;
+       unsigned int            queue_nr;
+       struct auxtrace_buffer  *buffer;
+       int                     cpu;
+};
+
+/* Display s390 CPU measurement facility basic-sampling data entry */
+static bool s390_cpumsf_basic_show(const char *color, size_t pos,
+                                  struct hws_basic_entry *basic)
+{
+       if (basic->def != 1) {
+               pr_err("Invalid AUX trace basic entry [%#08zx]\n", pos);
+               return false;
+       }
+       color_fprintf(stdout, color, "    [%#08zx] Basic   Def:%04x Inst:%#04x"
+                     " %c%c%c%c AS:%d ASN:%#04x IA:%#018llx\n"
+                     "\t\tCL:%d HPP:%#018llx GPP:%#018llx\n",
+                     pos, basic->def, basic->U,
+                     basic->T ? 'T' : ' ',
+                     basic->W ? 'W' : ' ',
+                     basic->P ? 'P' : ' ',
+                     basic->I ? 'I' : ' ',
+                     basic->AS, basic->prim_asn, basic->ia, basic->CL,
+                     basic->hpp, basic->gpp);
+       return true;
+}
+
+/* Display s390 CPU measurement facility diagnostic-sampling data entry */
+static bool s390_cpumsf_diag_show(const char *color, size_t pos,
+                                 struct hws_diag_entry *diag)
+{
+       if (diag->def < S390_CPUMSF_DIAG_DEF_FIRST) {
+               pr_err("Invalid AUX trace diagnostic entry [%#08zx]\n", pos);
+               return false;
+       }
+       color_fprintf(stdout, color, "    [%#08zx] Diag    Def:%04x %c\n",
+                     pos, diag->def, diag->I ? 'I' : ' ');
+       return true;
+}
+
+/* Return TOD timestamp contained in a trailer entry */
+static unsigned long long trailer_timestamp(struct hws_trailer_entry *te)
+{
+       /* te->t set: TOD in STCKE format, bytes 8-15
+        * te->t not set: TOD in STCK format, bytes 0-7
+        */
+       unsigned long long ts;
+
+       memcpy(&ts, &te->timestamp[te->t], sizeof(ts));
+       return ts;
+}
+
+/* Display s390 CPU measurement facility trailer entry */
+static bool s390_cpumsf_trailer_show(const char *color, size_t pos,
+                                    struct hws_trailer_entry *te)
+{
+       if (te->bsdes != sizeof(struct hws_basic_entry)) {
+               pr_err("Invalid AUX trace trailer entry [%#08zx]\n", pos);
+               return false;
+       }
+       color_fprintf(stdout, color, "    [%#08zx] Trailer %c%c%c bsdes:%d"
+                     " dsdes:%d Overflow:%lld Time:%#llx\n"
+                     "\t\tC:%d TOD:%#lx 1:%#llx 2:%#llx\n",
+                     pos,
+                     te->f ? 'F' : ' ',
+                     te->a ? 'A' : ' ',
+                     te->t ? 'T' : ' ',
+                     te->bsdes, te->dsdes, te->overflow,
+                     trailer_timestamp(te), te->clock_base, te->progusage2,
+                     te->progusage[0], te->progusage[1]);
+       return true;
+}
+
+/* Test a sample data block. It must be 4KB or a multiple thereof in size and
+ * 4KB page aligned. Each sample data page has a trailer entry at the
+ * end which contains the sample entry data sizes.
+ *
+ * Return true if the sample data block passes the checks and set the
+ * basic set entry size and diagnostic set entry size.
+ *
+ * Return false on failure.
+ *
+ * Note: Old hardware does not set the basic or diagnostic entry sizes
+ * in the trailer entry. Use the type number instead.
+ */
+static bool s390_cpumsf_validate(int machine_type,
+                                unsigned char *buf, size_t len,
+                                unsigned short *bsdes,
+                                unsigned short *dsdes)
+{
+       struct hws_basic_entry *basic = (struct hws_basic_entry *)buf;
+       struct hws_trailer_entry *te;
+
+       *dsdes = *bsdes = 0;
+       if (len & (S390_CPUMSF_PAGESZ - 1))     /* Illegal size */
+               return false;
+       if (basic->def != 1)            /* No basic set entry, must be first */
+               return false;
+       /* Check for trailer entry at end of SDB */
+       te = (struct hws_trailer_entry *)(buf + S390_CPUMSF_PAGESZ
+                                             - sizeof(*te));
+       *bsdes = te->bsdes;
+       *dsdes = te->dsdes;
+       if (!te->bsdes && !te->dsdes) {
+               /* Very old hardware, use CPUID */
+               switch (machine_type) {
+               case 2097:
+               case 2098:
+                       *dsdes = 64;
+                       *bsdes = 32;
+                       break;
+               case 2817:
+               case 2818:
+                       *dsdes = 74;
+                       *bsdes = 32;
+                       break;
+               case 2827:
+               case 2828:
+                       *dsdes = 85;
+                       *bsdes = 32;
+                       break;
+               default:
+                       /* Illegal trailer entry */
+                       return false;
+               }
+       }
+       return true;
+}
+
+/* Return true if there is room for another entry */
+static bool s390_cpumsf_reached_trailer(size_t entry_sz, size_t pos)
+{
+       size_t payload = S390_CPUMSF_PAGESZ - sizeof(struct hws_trailer_entry);
+
+       if (payload - (pos & (S390_CPUMSF_PAGESZ - 1)) < entry_sz)
+               return false;
+       return true;
+}
+
+/* Dump an auxiliary buffer. These buffers are multiples of
+ * 4KB SDB pages.
+ */
+static void s390_cpumsf_dump(struct s390_cpumsf *sf,
+                            unsigned char *buf, size_t len)
+{
+       const char *color = PERF_COLOR_BLUE;
+       struct hws_basic_entry *basic;
+       struct hws_diag_entry *diag;
+       unsigned short bsdes, dsdes;
+       size_t pos = 0;
+
+       color_fprintf(stdout, color,
+                     ". ... s390 AUX data: size %zu bytes\n",
+                     len);
+
+       if (!s390_cpumsf_validate(sf->machine_type, buf, len, &bsdes,
+                                 &dsdes)) {
+               pr_err("Invalid AUX trace data block size:%zu"
+                      " (type:%d bsdes:%hd dsdes:%hd)\n",
+                      len, sf->machine_type, bsdes, dsdes);
+               return;
+       }
+
+       /* s390 kernel always returns 4KB blocks fully occupied,
+        * no partially filled SDBs.
+        */
+       while (pos < len) {
+               /* Handle Basic entry */
+               basic = (struct hws_basic_entry *)(buf + pos);
+               if (s390_cpumsf_basic_show(color, pos, basic))
+                       pos += bsdes;
+               else
+                       return;
+
+               /* Handle Diagnostic entry */
+               diag = (struct hws_diag_entry *)(buf + pos);
+               if (s390_cpumsf_diag_show(color, pos, diag))
+                       pos += dsdes;
+               else
+                       return;
+
+               /* Check for trailer entry */
+               if (!s390_cpumsf_reached_trailer(bsdes + dsdes, pos)) {
+                       /* Show trailer entry */
+                       struct hws_trailer_entry te;
+
+                       pos = (pos + S390_CPUMSF_PAGESZ)
+                              & ~(S390_CPUMSF_PAGESZ - 1);
+                       pos -= sizeof(te);
+                       memcpy(&te, buf + pos, sizeof(te));
+                       /* Set descriptor sizes in case of old hardware
+                        * where these values are not set.
+                        */
+                       te.bsdes = bsdes;
+                       te.dsdes = dsdes;
+                       if (s390_cpumsf_trailer_show(color, pos, &te))
+                               pos += sizeof(te);
+                       else
+                               return;
+               }
+       }
+}
+
+static void s390_cpumsf_dump_event(struct s390_cpumsf *sf, unsigned char *buf,
+                                  size_t len)
+{
+       printf(".\n");
+       s390_cpumsf_dump(sf, buf, len);
+}
+
+#define        S390_LPP_PID_MASK       0xffffffff
+
+static bool s390_cpumsf_make_event(size_t pos,
+                                  struct hws_basic_entry *basic,
+                                  struct s390_cpumsf_queue *sfq)
+{
+       struct perf_sample sample = {
+                               .ip = basic->ia,
+                               .pid = basic->hpp & S390_LPP_PID_MASK,
+                               .tid = basic->hpp & S390_LPP_PID_MASK,
+                               .cpumode = PERF_RECORD_MISC_CPUMODE_UNKNOWN,
+                               .cpu = sfq->cpu,
+                               .period = 1
+                           };
+       union perf_event event;
+
+       memset(&event, 0, sizeof(event));
+       if (basic->CL == 1)     /* Native LPAR mode */
+               sample.cpumode = basic->P ? PERF_RECORD_MISC_USER
+                                         : PERF_RECORD_MISC_KERNEL;
+       else if (basic->CL == 2)        /* Guest kernel/user space */
+               sample.cpumode = basic->P ? PERF_RECORD_MISC_GUEST_USER
+                                         : PERF_RECORD_MISC_GUEST_KERNEL;
+       else if (basic->gpp || basic->prim_asn != 0xffff)
+               /* Use heuristics on old hardware */
+               sample.cpumode = basic->P ? PERF_RECORD_MISC_GUEST_USER
+                                         : PERF_RECORD_MISC_GUEST_KERNEL;
+       else
+               sample.cpumode = basic->P ? PERF_RECORD_MISC_USER
+                                         : PERF_RECORD_MISC_KERNEL;
+
+       event.sample.header.type = PERF_RECORD_SAMPLE;
+       event.sample.header.misc = sample.cpumode;
+       event.sample.header.size = sizeof(struct perf_event_header);
+
+       pr_debug4("%s pos:%#zx ip:%#" PRIx64 " P:%d CL:%d pid:%d.%d cpumode:%d cpu:%d\n",
+                __func__, pos, sample.ip, basic->P, basic->CL, sample.pid,
+                sample.tid, sample.cpumode, sample.cpu);
+       if (perf_session__deliver_synth_event(sfq->sf->session, &event,
+                                             &sample)) {
+               pr_err("s390 Auxiliary Trace: failed to deliver event\n");
+               return false;
+       }
+       return true;
+}
+
+static unsigned long long get_trailer_time(const unsigned char *buf)
+{
+       struct hws_trailer_entry *te;
+       unsigned long long aux_time;
+
+       te = (struct hws_trailer_entry *)(buf + S390_CPUMSF_PAGESZ
+                                             - sizeof(*te));
+
+       if (!te->clock_base)    /* TOD_CLOCK_BASE value missing */
+               return 0;
+
+       /* Correct calculation to convert time stamp in trailer entry to
+        * nanoseconds (taken from the arch/s390 function tod_to_ns()).
+        * TOD_CLOCK_BASE is stored in trailer entry member progusage2.
+        */
+       aux_time = trailer_timestamp(te) - te->progusage2;
+       aux_time = (aux_time >> 9) * 125 + (((aux_time & 0x1ff) * 125) >> 9);
+       return aux_time;
+}
+
+/* Process the data samples of a single queue. The first parameter is a
+ * pointer to the queue, the second parameter is the time stamp. This
+ * is the time stamp:
+ * - of the event that triggered this processing.
+ * - or the time stamp when the last processing of this queue stopped.
+ *   In this case it stopped at a 4KB page boundary and recorded the
+ *   position where to continue processing on the next invocation
+ *   (see buffer->use_data and buffer->use_size).
+ *
+ * When this function returns the second parameter is updated to
+ * reflect the time stamp of the last processed auxiliary data entry
+ * (taken from the trailer entry of that page). The caller uses this
+ * returned time stamp to record the last processed entry in this
+ * queue.
+ *
+ * The function returns:
+ * 0:  Processing successful. The second parameter returns the
+ *     time stamp from the trailer entry until which position
+ *     processing took place. Subsequent calls resume from this
+ *     position.
+ * <0: An error occurred during processing. The second parameter
+ *     returns the maximum time stamp.
+ * >0: Done on this queue. The second parameter returns the
+ *     maximum time stamp.
+ */
+static int s390_cpumsf_samples(struct s390_cpumsf_queue *sfq, u64 *ts)
+{
+       struct s390_cpumsf *sf = sfq->sf;
+       unsigned char *buf = sfq->buffer->use_data;
+       size_t len = sfq->buffer->use_size;
+       struct hws_basic_entry *basic;
+       unsigned short bsdes, dsdes;
+       size_t pos = 0;
+       int err = 1;
+       u64 aux_ts;
+
+       if (!s390_cpumsf_validate(sf->machine_type, buf, len, &bsdes,
+                                 &dsdes)) {
+               *ts = ~0ULL;
+               return -1;
+       }
+
+       /* Get trailer entry time stamp and check if entries in
+        * this auxiliary page are ready for processing. If the
+        * time stamp of the first entry is too high, the whole buffer
+        * can be skipped. In this case return the time stamp.
+        */
+       aux_ts = get_trailer_time(buf);
+       if (!aux_ts) {
+               pr_err("[%#08" PRIx64 "] Invalid AUX trailer entry TOD clock base\n",
+                      sfq->buffer->data_offset);
+               aux_ts = ~0ULL;
+               goto out;
+       }
+       if (aux_ts > *ts) {
+               *ts = aux_ts;
+               return 0;
+       }
+
+       while (pos < len) {
+               /* Handle Basic entry */
+               basic = (struct hws_basic_entry *)(buf + pos);
+               if (s390_cpumsf_make_event(pos, basic, sfq))
+                       pos += bsdes;
+               else {
+                       err = -EBADF;
+                       goto out;
+               }
+
+               pos += dsdes;   /* Skip diagnostic entry */
+
+               /* Check for trailer entry */
+               if (!s390_cpumsf_reached_trailer(bsdes + dsdes, pos)) {
+                       pos = (pos + S390_CPUMSF_PAGESZ)
+                              & ~(S390_CPUMSF_PAGESZ - 1);
+                       /* Check existence of next page */
+                       if (pos >= len)
+                               break;
+                       aux_ts = get_trailer_time(buf + pos);
+                       if (!aux_ts) {
+                               aux_ts = ~0ULL;
+                               goto out;
+                       }
+                       if (aux_ts > *ts) {
+                               *ts = aux_ts;
+                               sfq->buffer->use_data += pos;
+                               sfq->buffer->use_size -= pos;
+                               return 0;
+                       }
+               }
+       }
+out:
+       *ts = aux_ts;
+       sfq->buffer->use_size = 0;
+       sfq->buffer->use_data = NULL;
+       return err;     /* Buffer completely scanned or error */
+}
+
+/* Run the s390 auxiliary trace decoder.
+ * Select the queue buffer to operate on; the caller has already selected
+ * the proper queue, depending on the second parameter 'ts'.
+ * This is the time stamp until which the auxiliary entries should
+ * be processed. This value is updated by called functions and
+ * returned to the caller.
+ *
+ * Resume processing in the current buffer. If there is no buffer
+ * get a new buffer from the queue and setup start position for
+ * processing.
+ * When a buffer is completely processed remove it from the queue
+ * before returning.
+ *
+ * This function returns
+ * 1: When the queue is empty. Second parameter will be set to
+ *    maximum time stamp.
+ * 0: Normal processing done.
+ * <0: Error during queue buffer setup. This causes the caller
+ *     to stop processing completely.
+ */
+static int s390_cpumsf_run_decoder(struct s390_cpumsf_queue *sfq,
+                                  u64 *ts)
+{
+
+       struct auxtrace_buffer *buffer;
+       struct auxtrace_queue *queue;
+       int err;
+
+       queue = &sfq->sf->queues.queue_array[sfq->queue_nr];
+
+       /* Get buffer and last position in buffer to resume
+        * decoding the auxiliary entries. One buffer might be large
+        * and decoding might stop in between. This depends on the time
+        * stamp of the trailer entry in each page of the auxiliary
+        * data and the time stamp of the event triggering the decoding.
+        */
+       if (sfq->buffer == NULL) {
+               sfq->buffer = buffer = auxtrace_buffer__next(queue,
+                                                            sfq->buffer);
+               if (!buffer) {
+                       *ts = ~0ULL;
+                       return 1;       /* Processing done on this queue */
+               }
+               /* Start with a new buffer on this queue */
+               if (buffer->data) {
+                       buffer->use_size = buffer->size;
+                       buffer->use_data = buffer->data;
+               }
+       } else
+               buffer = sfq->buffer;
+
+       if (!buffer->data) {
+               int fd = perf_data__fd(sfq->sf->session->data);
+
+               buffer->data = auxtrace_buffer__get_data(buffer, fd);
+               if (!buffer->data)
+                       return -ENOMEM;
+               buffer->use_size = buffer->size;
+               buffer->use_data = buffer->data;
+       }
+       pr_debug4("%s queue_nr:%d buffer:%" PRId64 " offset:%#" PRIx64 " size:%#zx rest:%#zx\n",
+                 __func__, sfq->queue_nr, buffer->buffer_nr, buffer->offset,
+                 buffer->size, buffer->use_size);
+       err = s390_cpumsf_samples(sfq, ts);
+
+       /* If non-zero, there is either an error (err < 0) or the buffer is
+        * completely done (err > 0). The error is unrecoverable, usually
+        * some descriptors could not be read successfully, so continue with
+        * the next buffer.
+        * In both cases the parameter 'ts' has been updated.
+        */
+       if (err) {
+               sfq->buffer = NULL;
+               list_del(&buffer->list);
+               auxtrace_buffer__free(buffer);
+               if (err > 0)            /* Buffer done, no error */
+                       err = 0;
+       }
+       return err;
+}
+
+static struct s390_cpumsf_queue *
+s390_cpumsf_alloc_queue(struct s390_cpumsf *sf, unsigned int queue_nr)
+{
+       struct s390_cpumsf_queue *sfq;
+
+       sfq = zalloc(sizeof(struct s390_cpumsf_queue));
+       if (sfq == NULL)
+               return NULL;
+
+       sfq->sf = sf;
+       sfq->queue_nr = queue_nr;
+       sfq->cpu = -1;
+       return sfq;
+}
+
+static int s390_cpumsf_setup_queue(struct s390_cpumsf *sf,
+                                  struct auxtrace_queue *queue,
+                                  unsigned int queue_nr, u64 ts)
+{
+       struct s390_cpumsf_queue *sfq = queue->priv;
+
+       if (list_empty(&queue->head))
+               return 0;
+
+       if (sfq == NULL) {
+               sfq = s390_cpumsf_alloc_queue(sf, queue_nr);
+               if (!sfq)
+                       return -ENOMEM;
+               queue->priv = sfq;
+
+               if (queue->cpu != -1)
+                       sfq->cpu = queue->cpu;
+       }
+       return auxtrace_heap__add(&sf->heap, queue_nr, ts);
+}
+
+static int s390_cpumsf_setup_queues(struct s390_cpumsf *sf, u64 ts)
+{
+       unsigned int i;
+       int ret = 0;
+
+       for (i = 0; i < sf->queues.nr_queues; i++) {
+               ret = s390_cpumsf_setup_queue(sf, &sf->queues.queue_array[i],
+                                             i, ts);
+               if (ret)
+                       break;
+       }
+       return ret;
+}
+
+static int s390_cpumsf_update_queues(struct s390_cpumsf *sf, u64 ts)
+{
+       if (!sf->queues.new_data)
+               return 0;
+
+       sf->queues.new_data = false;
+       return s390_cpumsf_setup_queues(sf, ts);
+}
+
+static int s390_cpumsf_process_queues(struct s390_cpumsf *sf, u64 timestamp)
+{
+       unsigned int queue_nr;
+       u64 ts;
+       int ret;
+
+       while (1) {
+               struct auxtrace_queue *queue;
+               struct s390_cpumsf_queue *sfq;
+
+               if (!sf->heap.heap_cnt)
+                       return 0;
+
+               if (sf->heap.heap_array[0].ordinal >= timestamp)
+                       return 0;
+
+               queue_nr = sf->heap.heap_array[0].queue_nr;
+               queue = &sf->queues.queue_array[queue_nr];
+               sfq = queue->priv;
+
+               auxtrace_heap__pop(&sf->heap);
+               if (sf->heap.heap_cnt) {
+                       ts = sf->heap.heap_array[0].ordinal + 1;
+                       if (ts > timestamp)
+                               ts = timestamp;
+               } else {
+                       ts = timestamp;
+               }
+
+               ret = s390_cpumsf_run_decoder(sfq, &ts);
+               if (ret < 0) {
+                       auxtrace_heap__add(&sf->heap, queue_nr, ts);
+                       return ret;
+               }
+               if (!ret) {
+                       ret = auxtrace_heap__add(&sf->heap, queue_nr, ts);
+                       if (ret < 0)
+                               return ret;
+               }
+       }
+       return 0;
+}
+
+static int s390_cpumsf_synth_error(struct s390_cpumsf *sf, int code, int cpu,
+                                  pid_t pid, pid_t tid, u64 ip)
+{
+       char msg[MAX_AUXTRACE_ERROR_MSG];
+       union perf_event event;
+       int err;
+
+       strncpy(msg, "Lost Auxiliary Trace Buffer", sizeof(msg) - 1);
+       auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
+                            code, cpu, pid, tid, ip, msg);
+
+       err = perf_session__deliver_synth_event(sf->session, &event, NULL);
+       if (err)
+               pr_err("s390 Auxiliary Trace: failed to deliver error event,"
+                       "error %d\n", err);
+       return err;
+}
+
+static int s390_cpumsf_lost(struct s390_cpumsf *sf, struct perf_sample *sample)
+{
+       return s390_cpumsf_synth_error(sf, 1, sample->cpu,
+                                      sample->pid, sample->tid, 0);
+}
+
+static int
+s390_cpumsf_process_event(struct perf_session *session __maybe_unused,
+                         union perf_event *event,
+                         struct perf_sample *sample,
+                         struct perf_tool *tool)
+{
+       struct s390_cpumsf *sf = container_of(session->auxtrace,
+                                             struct s390_cpumsf,
+                                             auxtrace);
+       u64 timestamp = sample->time;
+       int err = 0;
+
+       if (dump_trace)
+               return 0;
+
+       if (!tool->ordered_events) {
+               pr_err("s390 Auxiliary Trace requires ordered events\n");
+               return -EINVAL;
+       }
+
+       if (event->header.type == PERF_RECORD_AUX &&
+           event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
+               return s390_cpumsf_lost(sf, sample);
+
+       if (timestamp) {
+               err = s390_cpumsf_update_queues(sf, timestamp);
+               if (!err)
+                       err = s390_cpumsf_process_queues(sf, timestamp);
+       }
+       return err;
+}
+
+struct s390_cpumsf_synth {
+       struct perf_tool cpumsf_tool;
+       struct perf_session *session;
+};
+
+static int
+s390_cpumsf_process_auxtrace_event(struct perf_session *session,
+                                  union perf_event *event __maybe_unused,
+                                  struct perf_tool *tool __maybe_unused)
+{
+       struct s390_cpumsf *sf = container_of(session->auxtrace,
+                                             struct s390_cpumsf,
+                                             auxtrace);
+
+       int fd = perf_data__fd(session->data);
+       struct auxtrace_buffer *buffer;
+       off_t data_offset;
+       int err;
+
+       if (sf->data_queued)
+               return 0;
+
+       if (perf_data__is_pipe(session->data)) {
+               data_offset = 0;
+       } else {
+               data_offset = lseek(fd, 0, SEEK_CUR);
+               if (data_offset == -1)
+                       return -errno;
+       }
+
+       err = auxtrace_queues__add_event(&sf->queues, session, event,
+                                        data_offset, &buffer);
+       if (err)
+               return err;
+
+       /* Dump here after copying piped trace out of the pipe */
+       if (dump_trace) {
+               if (auxtrace_buffer__get_data(buffer, fd)) {
+                       s390_cpumsf_dump_event(sf, buffer->data,
+                                              buffer->size);
+                       auxtrace_buffer__put_data(buffer);
+               }
+       }
+       return 0;
+}
+
+static void s390_cpumsf_free_events(struct perf_session *session __maybe_unused)
+{
+}
+
+static int s390_cpumsf_flush(struct perf_session *session __maybe_unused,
+                            struct perf_tool *tool __maybe_unused)
+{
+       return 0;
+}
+
+static void s390_cpumsf_free_queues(struct perf_session *session)
+{
+       struct s390_cpumsf *sf = container_of(session->auxtrace,
+                                             struct s390_cpumsf,
+                                             auxtrace);
+       struct auxtrace_queues *queues = &sf->queues;
+       unsigned int i;
+
+       for (i = 0; i < queues->nr_queues; i++)
+               zfree(&queues->queue_array[i].priv);
+       auxtrace_queues__free(queues);
+}
+
+static void s390_cpumsf_free(struct perf_session *session)
+{
+       struct s390_cpumsf *sf = container_of(session->auxtrace,
+                                             struct s390_cpumsf,
+                                             auxtrace);
+
+       auxtrace_heap__free(&sf->heap);
+       s390_cpumsf_free_queues(session);
+       session->auxtrace = NULL;
+       free(sf);
+}
+
+static int s390_cpumsf_get_type(const char *cpuid)
+{
+       int ret, family = 0;
+
+       ret = sscanf(cpuid, "%*[^,],%u", &family);
+       return (ret == 1) ? family : 0;
+}
+
+/* Check itrace options set on perf report command.
+ * Return true, if none are set or all options specified can be
+ * handled on s390.
+ * Return false otherwise.
+ */
+static bool check_auxtrace_itrace(struct itrace_synth_opts *itops)
+{
+       if (!itops || !itops->set)
+               return true;
+       pr_err("No --itrace options supported\n");
+       return false;
+}
+
+int s390_cpumsf_process_auxtrace_info(union perf_event *event,
+                                     struct perf_session *session)
+{
+       struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
+       struct s390_cpumsf *sf;
+       int err;
+
+       if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event))
+               return -EINVAL;
+
+       sf = zalloc(sizeof(struct s390_cpumsf));
+       if (sf == NULL)
+               return -ENOMEM;
+
+       if (!check_auxtrace_itrace(session->itrace_synth_opts)) {
+               err = -EINVAL;
+               goto err_free;
+       }
+
+       err = auxtrace_queues__init(&sf->queues);
+       if (err)
+               goto err_free;
+
+       sf->session = session;
+       sf->machine = &session->machines.host; /* No kvm support */
+       sf->auxtrace_type = auxtrace_info->type;
+       sf->pmu_type = PERF_TYPE_RAW;
+       sf->machine_type = s390_cpumsf_get_type(session->evlist->env->cpuid);
+
+       sf->auxtrace.process_event = s390_cpumsf_process_event;
+       sf->auxtrace.process_auxtrace_event = s390_cpumsf_process_auxtrace_event;
+       sf->auxtrace.flush_events = s390_cpumsf_flush;
+       sf->auxtrace.free_events = s390_cpumsf_free_events;
+       sf->auxtrace.free = s390_cpumsf_free;
+       session->auxtrace = &sf->auxtrace;
+
+       if (dump_trace)
+               return 0;
+
+       err = auxtrace_queues__process_index(&sf->queues, session);
+       if (err)
+               goto err_free_queues;
+
+       if (sf->queues.populated)
+               sf->data_queued = true;
+
+       return 0;
+
+err_free_queues:
+       auxtrace_queues__free(&sf->queues);
+       session->auxtrace = NULL;
+err_free:
+       free(sf);
+       return err;
+}
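
get_trailer_time() above converts the TOD-clock delta into nanoseconds with the same split as the kernel's tod_to_ns(): ns = tod * 1000 / 4096, i.e. tod * 125 / 512, computed in two parts so the multiplication by 125 cannot overflow 64 bits. A self-contained sketch of just that conversion with a quick sanity check (4096 TOD units, i.e. one microsecond, map to 1000 ns); the function name is illustrative only:

#include <assert.h>

/* Same arithmetic as in get_trailer_time(): tod * 125 / 512, split so the
 * intermediate product stays within 64 bits.
 */
static unsigned long long tod_delta_to_ns(unsigned long long tod)
{
        return (tod >> 9) * 125 + (((tod & 0x1ff) * 125) >> 9);
}

int main(void)
{
        assert(tod_delta_to_ns(0) == 0);
        assert(tod_delta_to_ns(4096) == 1000);  /* one microsecond */
        return 0;
}
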
diff --git a/tools/perf/util/s390-cpumsf.h b/tools/perf/util/s390-cpumsf.h
new file mode 100644 (file)
index 0000000..fb64d10
--- /dev/null
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 2018
+ * Auxtrace support for s390 CPU-Measurement Sampling Facility
+ *
+ * Author(s):  Thomas Richter <tmricht@linux.ibm.com>
+ */
+
+#ifndef INCLUDE__PERF_S390_CPUMSF_H
+#define INCLUDE__PERF_S390_CPUMSF_H
+
+union perf_event;
+struct perf_session;
+struct perf_pmu;
+
+struct auxtrace_record *
+s390_cpumsf_recording_init(int *err, struct perf_pmu *s390_cpumsf_pmu);
+
+int s390_cpumsf_process_auxtrace_info(union perf_event *event,
+                                     struct perf_session *session);
+#endif
index 7b79c413486b3a6ef8b0f6c32e330911c5ce7638..45484f0f7292d983b3a741b67b3893b62ff2eeca 100644 (file)
@@ -535,7 +535,7 @@ static int perl_stop_script(void)
        return 0;
 }
 
-static int perl_generate_script(struct pevent *pevent, const char *outfile)
+static int perl_generate_script(struct tep_handle *pevent, const char *outfile)
 {
        struct event_format *event = NULL;
        struct format_field *f;
index bc32e57d17be76bddbc561bcbafe3b06e5295461..dfc6093f118c9787ad0a1dd8e530103188f66676 100644 (file)
@@ -871,8 +871,8 @@ static void python_process_tracepoint(struct perf_sample *sample,
                        offset = field->offset;
                        len    = field->size;
                        if (field->flags & FIELD_IS_DYNAMIC) {
-                               val     = pevent_read_number(scripting_context->pevent,
-                                                            data + offset, len);
+                               val     = tep_read_number(scripting_context->pevent,
+                                                         data + offset, len);
                                offset  = val;
                                len     = offset >> 16;
                                offset &= 0xffff;
@@ -1588,7 +1588,7 @@ static int python_stop_script(void)
        return 0;
 }
 
-static int python_generate_script(struct pevent *pevent, const char *outfile)
+static int python_generate_script(struct tep_handle *pevent, const char *outfile)
 {
        struct event_format *event = NULL;
        struct format_field *f;
index 001be4f9d3b9fc1bf032c74f2447b88b6c6cea26..97efbcad076e02249e75b5aa60f6f6804953a9a1 100644 (file)
@@ -1,12 +1,20 @@
 #!/usr/bin/python
 
 from os import getenv
+from subprocess import Popen, PIPE
+from re import sub
+
+def clang_has_option(option):
+    return [o for o in Popen(['clang', option], stderr=PIPE).stderr.readlines() if "unknown argument" in o] == [ ]
 
 cc = getenv("CC")
 if cc == "clang":
     from _sysconfigdata import build_time_vars
-    from re import sub
     build_time_vars["CFLAGS"] = sub("-specs=[^ ]+", "", build_time_vars["CFLAGS"])
+    if not clang_has_option("-mcet"):
+        build_time_vars["CFLAGS"] = sub("-mcet", "", build_time_vars["CFLAGS"])
+    if not clang_has_option("-fcf-protection"):
+        build_time_vars["CFLAGS"] = sub("-fcf-protection", "", build_time_vars["CFLAGS"])
 
 from distutils.core import setup, Extension
 
index fed2952ab45a7bc7658ec5c568698f5c148abfef..b284276ec963548e3633ac7e21ecded8666b56e1 100644 (file)
@@ -601,7 +601,7 @@ static char *get_trace_output(struct hist_entry *he)
 {
        struct trace_seq seq;
        struct perf_evsel *evsel;
-       struct pevent_record rec = {
+       struct tep_record rec = {
                .data = he->raw_data,
                .size = he->raw_size,
        };
@@ -610,10 +610,10 @@ static char *get_trace_output(struct hist_entry *he)
 
        trace_seq_init(&seq);
        if (symbol_conf.raw_trace) {
-               pevent_print_fields(&seq, he->raw_data, he->raw_size,
-                                   evsel->tp_format);
+               tep_print_fields(&seq, he->raw_data, he->raw_size,
+                                evsel->tp_format);
        } else {
-               pevent_event_info(&seq, evsel->tp_format, &rec);
+               tep_event_info(&seq, evsel->tp_format, &rec);
        }
        /*
         * Trim the buffer, it starts at 4KB and we're not going to
@@ -2047,7 +2047,7 @@ static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
                struct trace_seq seq;
 raw_field:
                trace_seq_init(&seq);
-               pevent_print_field(&seq, he->raw_data, hde->field);
+               tep_print_field(&seq, he->raw_data, hde->field);
                str = seq.buffer;
        }
 
@@ -2074,7 +2074,7 @@ static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
        if (field->flags & FIELD_IS_DYNAMIC) {
                unsigned long long dyn;
 
-               pevent_read_number_field(field, a->raw_data, &dyn);
+               tep_read_number_field(field, a->raw_data, &dyn);
                offset = dyn & 0xffff;
                size = (dyn >> 16) & 0xffff;
 
@@ -2311,7 +2311,7 @@ static int add_all_matching_fields(struct perf_evlist *evlist,
                if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
                        continue;
 
-               field = pevent_find_any_field(evsel->tp_format, field_name);
+               field = tep_find_any_field(evsel->tp_format, field_name);
                if (field == NULL)
                        continue;
 
@@ -2378,7 +2378,7 @@ static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
        if (!strcmp(field_name, "*")) {
                ret = add_evsel_fields(evsel, raw_trace, level);
        } else {
-               field = pevent_find_any_field(evsel->tp_format, field_name);
+               field = tep_find_any_field(evsel->tp_format, field_name);
                if (field == NULL) {
                        pr_debug("Cannot find event field for %s.%s\n",
                                 event_name, field_name);
index 8bf302cafcecd6b285d68e2b2c56130019dea101..a97cf8e6be8656b1dadd531462310f48110233e9 100644 (file)
@@ -276,7 +276,7 @@ extern struct sort_entry sort_thread;
 extern struct list_head hist_entry__sort_list;
 
 struct perf_evlist;
-struct pevent;
+struct tep_handle;
 int setup_sorting(struct perf_evlist *evlist);
 int setup_output_field(void);
 void reset_output_field(void);
index c85d0d1a65ed72ffbdf2195004f0cd106602dde7..7b0ca7cbb7de852433a2dbd7494789096b555edd 100644 (file)
@@ -377,7 +377,7 @@ out:
 
 static int record_saved_cmdline(void)
 {
-       unsigned int size;
+       unsigned long long size;
        char *path;
        struct stat st;
        int ret, err = 0;
index e0a6e9a6a05355bc43e99585ec3a3f134e65b1e8..e76214f8d596bc97c71390f1c82c5f75ab2fd607 100644 (file)
@@ -32,7 +32,7 @@
 static int get_common_field(struct scripting_context *context,
                            int *offset, int *size, const char *type)
 {
-       struct pevent *pevent = context->pevent;
+       struct tep_handle *pevent = context->pevent;
        struct event_format *event;
        struct format_field *field;
 
@@ -41,14 +41,14 @@ static int get_common_field(struct scripting_context *context,
                        return 0;
 
                event = pevent->events[0];
-               field = pevent_find_common_field(event, type);
+               field = tep_find_common_field(event, type);
                if (!field)
                        return 0;
                *offset = field->offset;
                *size = field->size;
        }
 
-       return pevent_read_number(pevent, context->event_data + *offset, *size);
+       return tep_read_number(pevent, context->event_data + *offset, *size);
 }
 
 int common_lock_depth(struct scripting_context *context)
@@ -99,24 +99,24 @@ raw_field_value(struct event_format *event, const char *name, void *data)
        struct format_field *field;
        unsigned long long val;
 
-       field = pevent_find_any_field(event, name);
+       field = tep_find_any_field(event, name);
        if (!field)
                return 0ULL;
 
-       pevent_read_number_field(field, data, &val);
+       tep_read_number_field(field, data, &val);
 
        return val;
 }
 
 unsigned long long read_size(struct event_format *event, void *ptr, int size)
 {
-       return pevent_read_number(event->pevent, ptr, size);
+       return tep_read_number(event->pevent, ptr, size);
 }
 
 void event_format__fprintf(struct event_format *event,
                           int cpu, void *data, int size, FILE *fp)
 {
-       struct pevent_record record;
+       struct tep_record record;
        struct trace_seq s;
 
        memset(&record, 0, sizeof(record));
@@ -125,7 +125,7 @@ void event_format__fprintf(struct event_format *event,
        record.data = data;
 
        trace_seq_init(&s);
-       pevent_event_info(&s, event, &record);
+       tep_event_info(&s, event, &record);
        trace_seq_do_fprintf(&s, fp);
        trace_seq_destroy(&s);
 }
@@ -136,7 +136,7 @@ void event_format__print(struct event_format *event,
        return event_format__fprintf(event, cpu, data, size, stdout);
 }
 
-void parse_ftrace_printk(struct pevent *pevent,
+void parse_ftrace_printk(struct tep_handle *pevent,
                         char *file, unsigned int size __maybe_unused)
 {
        unsigned long long addr;
@@ -157,39 +157,38 @@ void parse_ftrace_printk(struct pevent *pevent,
                /* fmt still has a space, skip it */
                printk = strdup(fmt+1);
                line = strtok_r(NULL, "\n", &next);
-               pevent_register_print_string(pevent, printk, addr);
+               tep_register_print_string(pevent, printk, addr);
        }
 }
 
-void parse_saved_cmdline(struct pevent *pevent,
+void parse_saved_cmdline(struct tep_handle *pevent,
                         char *file, unsigned int size __maybe_unused)
 {
-       char *comm;
+       char comm[17]; /* Max comm length in the kernel is 16. */
        char *line;
        char *next = NULL;
        int pid;
 
        line = strtok_r(file, "\n", &next);
        while (line) {
-               sscanf(line, "%d %ms", &pid, &comm);
-               pevent_register_comm(pevent, comm, pid);
-               free(comm);
+               if (sscanf(line, "%d %16s", &pid, comm) == 2)
+                       tep_register_comm(pevent, comm, pid);
                line = strtok_r(NULL, "\n", &next);
        }
 }
 
-int parse_ftrace_file(struct pevent *pevent, char *buf, unsigned long size)
+int parse_ftrace_file(struct tep_handle *pevent, char *buf, unsigned long size)
 {
-       return pevent_parse_event(pevent, buf, size, "ftrace");
+       return tep_parse_event(pevent, buf, size, "ftrace");
 }
 
-int parse_event_file(struct pevent *pevent,
+int parse_event_file(struct tep_handle *pevent,
                     char *buf, unsigned long size, char *sys)
 {
-       return pevent_parse_event(pevent, buf, size, sys);
+       return tep_parse_event(pevent, buf, size, sys);
 }
 
-struct event_format *trace_find_next_event(struct pevent *pevent,
+struct event_format *trace_find_next_event(struct tep_handle *pevent,
                                           struct event_format *event)
 {
        static int idx;
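
The parse_saved_cmdline() hunk above replaces the unbounded "%ms" conversion (which allocated a string that then had to be freed) with a fixed 17-byte buffer and a width-limited "%16s", and only registers the comm when both fields parse. A minimal standalone sketch of that bounded-sscanf pattern; parse_cmdline_line() is a hypothetical helper, not perf code:

  #include <stdio.h>

  /* "<pid> <comm>" per line, as in the saved_cmdlines data parsed above.
   * comm is at most 16 bytes in the kernel, so a 17-byte buffer plus a
   * %16s width limit cannot overflow.
   */
  static int parse_cmdline_line(const char *line, int *pid, char comm[17])
  {
          comm[0] = '\0';
          if (sscanf(line, "%d %16s", pid, comm) != 2)
                  return -1;      /* malformed line: skip it */
          return 0;
  }

  int main(void)
  {
          char comm[17];
          int pid;

          if (!parse_cmdline_line("1234 kworker/0:1", &pid, comm))
                  printf("pid %d -> %s\n", pid, comm);
          return 0;
  }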
index 40b425949aa31d7b4b334fa5923436f57dd2c842..3dfc1db6b25b62c192971d1ccc7dc4e83d03f662 100644 (file)
@@ -96,7 +96,7 @@ static void skip(int size)
        };
 }
 
-static unsigned int read4(struct pevent *pevent)
+static unsigned int read4(struct tep_handle *pevent)
 {
        unsigned int data;
 
@@ -105,7 +105,7 @@ static unsigned int read4(struct pevent *pevent)
        return __data2host4(pevent, data);
 }
 
-static unsigned long long read8(struct pevent *pevent)
+static unsigned long long read8(struct tep_handle *pevent)
 {
        unsigned long long data;
 
@@ -158,7 +158,7 @@ out:
        return str;
 }
 
-static int read_proc_kallsyms(struct pevent *pevent)
+static int read_proc_kallsyms(struct tep_handle *pevent)
 {
        unsigned int size;
 
@@ -181,7 +181,7 @@ static int read_proc_kallsyms(struct pevent *pevent)
        return 0;
 }
 
-static int read_ftrace_printk(struct pevent *pevent)
+static int read_ftrace_printk(struct tep_handle *pevent)
 {
        unsigned int size;
        char *buf;
@@ -208,7 +208,7 @@ static int read_ftrace_printk(struct pevent *pevent)
        return 0;
 }
 
-static int read_header_files(struct pevent *pevent)
+static int read_header_files(struct tep_handle *pevent)
 {
        unsigned long long size;
        char *header_page;
@@ -235,13 +235,13 @@ static int read_header_files(struct pevent *pevent)
                return -1;
        }
 
-       if (!pevent_parse_header_page(pevent, header_page, size,
-                                     pevent_get_long_size(pevent))) {
+       if (!tep_parse_header_page(pevent, header_page, size,
+                                  tep_get_long_size(pevent))) {
                /*
                 * The commit field in the page is of type long,
                 * use that instead, since it represents the kernel.
                 */
-               pevent_set_long_size(pevent, pevent->header_page_size_size);
+               tep_set_long_size(pevent, pevent->header_page_size_size);
        }
        free(header_page);
 
@@ -259,7 +259,7 @@ static int read_header_files(struct pevent *pevent)
        return ret;
 }
 
-static int read_ftrace_file(struct pevent *pevent, unsigned long long size)
+static int read_ftrace_file(struct tep_handle *pevent, unsigned long long size)
 {
        int ret;
        char *buf;
@@ -284,8 +284,8 @@ out:
        return ret;
 }
 
-static int read_event_file(struct pevent *pevent, char *sys,
-                           unsigned long long size)
+static int read_event_file(struct tep_handle *pevent, char *sys,
+                          unsigned long long size)
 {
        int ret;
        char *buf;
@@ -310,7 +310,7 @@ out:
        return ret;
 }
 
-static int read_ftrace_files(struct pevent *pevent)
+static int read_ftrace_files(struct tep_handle *pevent)
 {
        unsigned long long size;
        int count;
@@ -328,7 +328,7 @@ static int read_ftrace_files(struct pevent *pevent)
        return 0;
 }
 
-static int read_event_files(struct pevent *pevent)
+static int read_event_files(struct tep_handle *pevent)
 {
        unsigned long long size;
        char *sys;
@@ -356,7 +356,7 @@ static int read_event_files(struct pevent *pevent)
        return 0;
 }
 
-static int read_saved_cmdline(struct pevent *pevent)
+static int read_saved_cmdline(struct tep_handle *pevent)
 {
        unsigned long long size;
        char *buf;
@@ -399,7 +399,7 @@ ssize_t trace_report(int fd, struct trace_event *tevent, bool __repipe)
        int host_bigendian;
        int file_long_size;
        int file_page_size;
-       struct pevent *pevent = NULL;
+       struct tep_handle *pevent = NULL;
        int err;
 
        repipe = __repipe;
@@ -439,9 +439,9 @@ ssize_t trace_report(int fd, struct trace_event *tevent, bool __repipe)
 
        pevent = tevent->pevent;
 
-       pevent_set_flag(pevent, PEVENT_NSEC_OUTPUT);
-       pevent_set_file_bigendian(pevent, file_bigendian);
-       pevent_set_host_bigendian(pevent, host_bigendian);
+       tep_set_flag(pevent, TEP_NSEC_OUTPUT);
+       tep_set_file_bigendian(pevent, file_bigendian);
+       tep_set_host_bigendian(pevent, host_bigendian);
 
        if (do_read(buf, 1) < 0)
                goto out;
@@ -451,8 +451,8 @@ ssize_t trace_report(int fd, struct trace_event *tevent, bool __repipe)
        if (!file_page_size)
                goto out;
 
-       pevent_set_long_size(pevent, file_long_size);
-       pevent_set_page_size(pevent, file_page_size);
+       tep_set_long_size(pevent, file_long_size);
+       tep_set_page_size(pevent, file_page_size);
 
        err = read_header_files(pevent);
        if (err)
@@ -479,9 +479,9 @@ ssize_t trace_report(int fd, struct trace_event *tevent, bool __repipe)
        repipe = false;
 
        if (show_funcs) {
-               pevent_print_funcs(pevent);
+               tep_print_funcs(pevent);
        } else if (show_printk) {
-               pevent_print_printk(pevent);
+               tep_print_printk(pevent);
        }
 
        pevent = NULL;
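
trace_report() above records the trace file's byte order, long size and page size on the tep handle before any event parsing, so helpers such as read4()/read8() can convert file data to host order. A short sketch of that conversion idea; data2host4() is a hypothetical stand-in for the tree's __data2host4() helper:

  #include <stdint.h>
  #include <byteswap.h>

  /* Swap only when the trace file's endianness differs from the host's. */
  static uint32_t data2host4(int file_bigendian, int host_bigendian,
                             uint32_t data)
  {
          return file_bigendian == host_bigendian ? data : bswap_32(data);
  }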
index b1e5c3a2b8e36cada0370ec30db338cb0fc45081..b749f812ac70fa2b9bf7d0447c5524fc7a8a142b 100644 (file)
@@ -66,7 +66,7 @@ static int python_start_script_unsupported(const char *script __maybe_unused,
        return -1;
 }
 
-static int python_generate_script_unsupported(struct pevent *pevent
+static int python_generate_script_unsupported(struct tep_handle *pevent
                                              __maybe_unused,
                                              const char *outfile
                                              __maybe_unused)
@@ -130,7 +130,7 @@ static int perl_start_script_unsupported(const char *script __maybe_unused,
        return -1;
 }
 
-static int perl_generate_script_unsupported(struct pevent *pevent
+static int perl_generate_script_unsupported(struct tep_handle *pevent
                                            __maybe_unused,
                                            const char *outfile __maybe_unused)
 {
index 1aa3686032688f95609274918aab97bd7850e14b..58bb72f266f3758c2f8da29a65b572afbe26be3f 100644 (file)
@@ -28,10 +28,10 @@ static bool tevent_initialized;
 
 int trace_event__init(struct trace_event *t)
 {
-       struct pevent *pevent = pevent_alloc();
+       struct tep_handle *pevent = tep_alloc();
 
        if (pevent) {
-               t->plugin_list = traceevent_load_plugins(pevent);
+               t->plugin_list = tep_load_plugins(pevent);
                t->pevent  = pevent;
        }
 
@@ -40,33 +40,33 @@ int trace_event__init(struct trace_event *t)
 
 static int trace_event__init2(void)
 {
-       int be = traceevent_host_bigendian();
-       struct pevent *pevent;
+       int be = tep_host_bigendian();
+       struct tep_handle *pevent;
 
        if (trace_event__init(&tevent))
                return -1;
 
        pevent = tevent.pevent;
-       pevent_set_flag(pevent, PEVENT_NSEC_OUTPUT);
-       pevent_set_file_bigendian(pevent, be);
-       pevent_set_host_bigendian(pevent, be);
+       tep_set_flag(pevent, TEP_NSEC_OUTPUT);
+       tep_set_file_bigendian(pevent, be);
+       tep_set_host_bigendian(pevent, be);
        tevent_initialized = true;
        return 0;
 }
 
 int trace_event__register_resolver(struct machine *machine,
-                                  pevent_func_resolver_t *func)
+                                  tep_func_resolver_t *func)
 {
        if (!tevent_initialized && trace_event__init2())
                return -1;
 
-       return pevent_set_function_resolver(tevent.pevent, func, machine);
+       return tep_set_function_resolver(tevent.pevent, func, machine);
 }
 
 void trace_event__cleanup(struct trace_event *t)
 {
-       traceevent_unload_plugins(t->plugin_list, t->pevent);
-       pevent_free(t->pevent);
+       tep_unload_plugins(t->plugin_list, t->pevent);
+       tep_free(t->pevent);
 }
 
 /*
@@ -76,7 +76,7 @@ static struct event_format*
 tp_format(const char *sys, const char *name)
 {
        char *tp_dir = get_events_file(sys);
-       struct pevent *pevent = tevent.pevent;
+       struct tep_handle *pevent = tevent.pevent;
        struct event_format *event = NULL;
        char path[PATH_MAX];
        size_t size;
@@ -93,7 +93,7 @@ tp_format(const char *sys, const char *name)
        if (err)
                return ERR_PTR(err);
 
-       pevent_parse_format(pevent, &event, data, size, sys);
+       tep_parse_format(pevent, &event, data, size, sys);
 
        free(data);
        return event;
@@ -116,5 +116,5 @@ struct event_format *trace_event__tp_format_id(int id)
        if (!tevent_initialized && trace_event__init2())
                return ERR_PTR(-ENOMEM);
 
-       return pevent_find_event(tevent.pevent, id);
+       return tep_find_event(tevent.pevent, id);
 }
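
trace_event__init() and trace_event__cleanup() above show the renamed libtraceevent lifecycle: tep_alloc() creates the handle, tep_load_plugins() attaches format plugins, and tep_unload_plugins()/tep_free() tear everything down. A hedged sketch of that lifecycle outside perf; the include path is an assumption (the library lives under tools/lib/traceevent):

  #include "event-parse.h"        /* assumed header for the tep_* API */

  static int example_tep_lifecycle(void)
  {
          struct tep_handle *pevent = tep_alloc();
          struct plugin_list *plugins;

          if (!pevent)
                  return -1;
          plugins = tep_load_plugins(pevent);
          /* ... tep_parse_event(), tep_find_event(), tep_event_info() ... */
          tep_unload_plugins(plugins, pevent);
          tep_free(pevent);
          return 0;
  }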
index dcbdb53dc702a664a8e70da5f7856e4e4243ec17..40204ec3a7a284d238b3b929bf6c5c5b15876ac5 100644 (file)
@@ -13,14 +13,14 @@ struct thread;
 struct plugin_list;
 
 struct trace_event {
-       struct pevent           *pevent;
+       struct tep_handle       *pevent;
        struct plugin_list      *plugin_list;
 };
 
 int trace_event__init(struct trace_event *t);
 void trace_event__cleanup(struct trace_event *t);
 int trace_event__register_resolver(struct machine *machine,
-                                  pevent_func_resolver_t *func);
+                                  tep_func_resolver_t *func);
 struct event_format*
 trace_event__tp_format(const char *sys, const char *name);
 
@@ -34,20 +34,20 @@ void event_format__fprintf(struct event_format *event,
 void event_format__print(struct event_format *event,
                         int cpu, void *data, int size);
 
-int parse_ftrace_file(struct pevent *pevent, char *buf, unsigned long size);
-int parse_event_file(struct pevent *pevent,
+int parse_ftrace_file(struct tep_handle *pevent, char *buf, unsigned long size);
+int parse_event_file(struct tep_handle *pevent,
                     char *buf, unsigned long size, char *sys);
 
 unsigned long long
 raw_field_value(struct event_format *event, const char *name, void *data);
 
-void parse_proc_kallsyms(struct pevent *pevent, char *file, unsigned int size);
-void parse_ftrace_printk(struct pevent *pevent, char *file, unsigned int size);
-void parse_saved_cmdline(struct pevent *pevent, char *file, unsigned int size);
+void parse_proc_kallsyms(struct tep_handle *pevent, char *file, unsigned int size);
+void parse_ftrace_printk(struct tep_handle *pevent, char *file, unsigned int size);
+void parse_saved_cmdline(struct tep_handle *pevent, char *file, unsigned int size);
 
 ssize_t trace_report(int fd, struct trace_event *tevent, bool repipe);
 
-struct event_format *trace_find_next_event(struct pevent *pevent,
+struct event_format *trace_find_next_event(struct tep_handle *pevent,
                                           struct event_format *event);
 unsigned long long read_size(struct event_format *event, void *ptr, int size);
 unsigned long long eval_flag(const char *flag);
@@ -83,7 +83,7 @@ struct scripting_ops {
        void (*process_stat)(struct perf_stat_config *config,
                             struct perf_evsel *evsel, u64 tstamp);
        void (*process_stat_interval)(u64 tstamp);
-       int (*generate_script) (struct pevent *pevent, const char *outfile);
+       int (*generate_script) (struct tep_handle *pevent, const char *outfile);
 };
 
 extern unsigned int scripting_max_stack;
@@ -94,7 +94,7 @@ void setup_perl_scripting(void);
 void setup_python_scripting(void);
 
 struct scripting_context {
-       struct pevent *pevent;
+       struct tep_handle *pevent;
        void *event_data;
 };
 
index a725b958cf31bb67f04267d75b951035cb767a8c..902ce6384f5748bd69818c5f269fdd2c0f6db572 100644 (file)
@@ -5,6 +5,8 @@
 #include <sys/stat.h>
 #include <sys/mman.h>
 #include <zlib.h>
+#include <linux/compiler.h>
+#include <unistd.h>
 
 #include "util/compress.h"
 #include "util/util.h"
@@ -79,3 +81,19 @@ out_close:
 
        return ret == Z_STREAM_END ? 0 : -1;
 }
+
+bool gzip_is_compressed(const char *input)
+{
+       int fd = open(input, O_RDONLY);
+       const uint8_t magic[2] = { 0x1f, 0x8b };
+       char buf[2] = { 0 };
+       ssize_t rc;
+
+       if (fd < 0)
+               return -1;
+
+       rc = read(fd, buf, sizeof(buf));
+       close(fd);
+       return rc == sizeof(buf) ?
+              memcmp(buf, magic, sizeof(buf)) == 0 : false;
+}
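
gzip_is_compressed() above identifies a gzip stream by its two magic bytes, 0x1f 0x8b. Because the function is declared bool, the -1 returned when open() fails reads as true to callers; the sketch below is a hypothetical variant (not perf code) that keeps the three outcomes distinct:

  #include <fcntl.h>
  #include <stdint.h>
  #include <string.h>
  #include <unistd.h>

  /* 1: gzip magic found, 0: some other content, -1: file not readable. */
  static int is_gzip_file(const char *path)
  {
          static const uint8_t magic[2] = { 0x1f, 0x8b };
          uint8_t buf[2];
          ssize_t rc;
          int fd;

          fd = open(path, O_RDONLY);
          if (fd < 0)
                  return -1;
          rc = read(fd, buf, sizeof(buf));
          close(fd);
          if (rc != (ssize_t)sizeof(buf))
                  return 0;
          return memcmp(buf, magic, sizeof(buf)) == 0;
  }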
index b53596ad601bb4964231c58151308f61221868f5..2e7fd822796911c2875a1dad87dca67fe185d589 100644 (file)
@@ -31,17 +31,21 @@ long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
        if (get_nfit_res(pmem->phys_addr + offset)) {
                struct page *page;
 
-               *kaddr = pmem->virt_addr + offset;
+               if (kaddr)
+                       *kaddr = pmem->virt_addr + offset;
                page = vmalloc_to_page(pmem->virt_addr + offset);
-               *pfn = page_to_pfn_t(page);
+               if (pfn)
+                       *pfn = page_to_pfn_t(page);
                pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
                                __func__, pmem, pgoff, page_to_pfn(page));
 
                return 1;
        }
 
-       *kaddr = pmem->virt_addr + offset;
-       *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
+       if (kaddr)
+               *kaddr = pmem->virt_addr + offset;
+       if (pfn)
+               *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
 
        /*
         * If badblocks are present, limit known good range to the
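
The nfit test's __pmem_direct_access() now treats kaddr and pfn as optional outputs, so callers interested in only one of them can pass NULL for the other. A generic sketch of that optional-out-parameter pattern (nothing here is kernel API):

  #include <stddef.h>

  /* Fill only the outputs the caller asked for. */
  static int lookup(unsigned long offset, void *base,
                    void **out_addr, unsigned long *out_pfn)
  {
          if (out_addr)
                  *out_addr = (char *)base + offset;
          if (out_pfn)
                  *out_pfn = offset >> 12;        /* illustrative page number */
          return 0;
  }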
index e2926f72a821471214817f7ddb1c253a93b1ee02..cffc2c5a778db4e6a236e4491f29ff92aa3c3b75 100644 (file)
@@ -142,6 +142,28 @@ static u32 handle[] = {
 static unsigned long dimm_fail_cmd_flags[NUM_DCR];
 static int dimm_fail_cmd_code[NUM_DCR];
 
+static const struct nd_intel_smart smart_def = {
+       .flags = ND_INTEL_SMART_HEALTH_VALID
+               | ND_INTEL_SMART_SPARES_VALID
+               | ND_INTEL_SMART_ALARM_VALID
+               | ND_INTEL_SMART_USED_VALID
+               | ND_INTEL_SMART_SHUTDOWN_VALID
+               | ND_INTEL_SMART_MTEMP_VALID
+               | ND_INTEL_SMART_CTEMP_VALID,
+       .health = ND_INTEL_SMART_NON_CRITICAL_HEALTH,
+       .media_temperature = 23 * 16,
+       .ctrl_temperature = 25 * 16,
+       .pmic_temperature = 40 * 16,
+       .spares = 75,
+       .alarm_flags = ND_INTEL_SMART_SPARE_TRIP
+               | ND_INTEL_SMART_TEMP_TRIP,
+       .ait_status = 1,
+       .life_used = 5,
+       .shutdown_state = 0,
+       .vendor_size = 0,
+       .shutdown_count = 100,
+};
+
 struct nfit_test_fw {
        enum intel_fw_update_state state;
        u32 context;
@@ -752,15 +774,30 @@ static int nfit_test_cmd_smart_inject(
        if (buf_len != sizeof(*inj))
                return -EINVAL;
 
-       if (inj->mtemp_enable)
-               smart->media_temperature = inj->media_temperature;
-       if (inj->spare_enable)
-               smart->spares = inj->spares;
-       if (inj->fatal_enable)
-               smart->health = ND_INTEL_SMART_FATAL_HEALTH;
-       if (inj->unsafe_shutdown_enable) {
-               smart->shutdown_state = 1;
-               smart->shutdown_count++;
+       if (inj->flags & ND_INTEL_SMART_INJECT_MTEMP) {
+               if (inj->mtemp_enable)
+                       smart->media_temperature = inj->media_temperature;
+               else
+                       smart->media_temperature = smart_def.media_temperature;
+       }
+       if (inj->flags & ND_INTEL_SMART_INJECT_SPARE) {
+               if (inj->spare_enable)
+                       smart->spares = inj->spares;
+               else
+                       smart->spares = smart_def.spares;
+       }
+       if (inj->flags & ND_INTEL_SMART_INJECT_FATAL) {
+               if (inj->fatal_enable)
+                       smart->health = ND_INTEL_SMART_FATAL_HEALTH;
+               else
+                       smart->health = ND_INTEL_SMART_NON_CRITICAL_HEALTH;
+       }
+       if (inj->flags & ND_INTEL_SMART_INJECT_SHUTDOWN) {
+               if (inj->unsafe_shutdown_enable) {
+                       smart->shutdown_state = 1;
+                       smart->shutdown_count++;
+               } else
+                       smart->shutdown_state = 0;
        }
        inj->status = 0;
        smart_notify(bus_dev, dimm_dev, smart, thresh);
@@ -884,6 +921,16 @@ static int nd_intel_test_cmd_set_lss_status(struct nfit_test *t,
        return 0;
 }
 
+static int override_return_code(int dimm, unsigned int func, int rc)
+{
+       if ((1 << func) & dimm_fail_cmd_flags[dimm]) {
+               if (dimm_fail_cmd_code[dimm])
+                       return dimm_fail_cmd_code[dimm];
+               return -EIO;
+       }
+       return rc;
+}
+
 static int get_dimm(struct nfit_mem *nfit_mem, unsigned int func)
 {
        int i;
@@ -894,13 +941,6 @@ static int get_dimm(struct nfit_mem *nfit_mem, unsigned int func)
                        break;
        if (i >= ARRAY_SIZE(handle))
                return -ENXIO;
-
-       if ((1 << func) & dimm_fail_cmd_flags[i]) {
-               if (dimm_fail_cmd_code[i])
-                       return dimm_fail_cmd_code[i];
-               return -EIO;
-       }
-
        return i;
 }
 
@@ -939,48 +979,59 @@ static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
 
                        switch (func) {
                        case ND_INTEL_ENABLE_LSS_STATUS:
-                               return nd_intel_test_cmd_set_lss_status(t,
+                               rc = nd_intel_test_cmd_set_lss_status(t,
                                                buf, buf_len);
+                               break;
                        case ND_INTEL_FW_GET_INFO:
-                               return nd_intel_test_get_fw_info(t, buf,
+                               rc = nd_intel_test_get_fw_info(t, buf,
                                                buf_len, i - t->dcr_idx);
+                               break;
                        case ND_INTEL_FW_START_UPDATE:
-                               return nd_intel_test_start_update(t, buf,
+                               rc = nd_intel_test_start_update(t, buf,
                                                buf_len, i - t->dcr_idx);
+                               break;
                        case ND_INTEL_FW_SEND_DATA:
-                               return nd_intel_test_send_data(t, buf,
+                               rc = nd_intel_test_send_data(t, buf,
                                                buf_len, i - t->dcr_idx);
+                               break;
                        case ND_INTEL_FW_FINISH_UPDATE:
-                               return nd_intel_test_finish_fw(t, buf,
+                               rc = nd_intel_test_finish_fw(t, buf,
                                                buf_len, i - t->dcr_idx);
+                               break;
                        case ND_INTEL_FW_FINISH_QUERY:
-                               return nd_intel_test_finish_query(t, buf,
+                               rc = nd_intel_test_finish_query(t, buf,
                                                buf_len, i - t->dcr_idx);
+                               break;
                        case ND_INTEL_SMART:
-                               return nfit_test_cmd_smart(buf, buf_len,
+                               rc = nfit_test_cmd_smart(buf, buf_len,
                                                &t->smart[i - t->dcr_idx]);
+                               break;
                        case ND_INTEL_SMART_THRESHOLD:
-                               return nfit_test_cmd_smart_threshold(buf,
+                               rc = nfit_test_cmd_smart_threshold(buf,
                                                buf_len,
                                                &t->smart_threshold[i -
                                                        t->dcr_idx]);
+                               break;
                        case ND_INTEL_SMART_SET_THRESHOLD:
-                               return nfit_test_cmd_smart_set_threshold(buf,
+                               rc = nfit_test_cmd_smart_set_threshold(buf,
                                                buf_len,
                                                &t->smart_threshold[i -
                                                        t->dcr_idx],
                                                &t->smart[i - t->dcr_idx],
                                                &t->pdev.dev, t->dimm_dev[i]);
+                               break;
                        case ND_INTEL_SMART_INJECT:
-                               return nfit_test_cmd_smart_inject(buf,
+                               rc = nfit_test_cmd_smart_inject(buf,
                                                buf_len,
                                                &t->smart_threshold[i -
                                                        t->dcr_idx],
                                                &t->smart[i - t->dcr_idx],
                                                &t->pdev.dev, t->dimm_dev[i]);
+                               break;
                        default:
                                return -ENOTTY;
                        }
+                       return override_return_code(i, func, rc);
                }
 
                if (!test_bit(cmd, &cmd_mask)
@@ -1006,6 +1057,7 @@ static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
                default:
                        return -ENOTTY;
                }
+               return override_return_code(i, func, rc);
        } else {
                struct ars_state *ars_state = &t->ars_state;
                struct nd_cmd_pkg *call_pkg = buf;
@@ -1302,29 +1354,9 @@ static void smart_init(struct nfit_test *t)
                .ctrl_temperature = 30 * 16,
                .spares = 5,
        };
-       const struct nd_intel_smart smart_data = {
-               .flags = ND_INTEL_SMART_HEALTH_VALID
-                       | ND_INTEL_SMART_SPARES_VALID
-                       | ND_INTEL_SMART_ALARM_VALID
-                       | ND_INTEL_SMART_USED_VALID
-                       | ND_INTEL_SMART_SHUTDOWN_VALID
-                       | ND_INTEL_SMART_MTEMP_VALID,
-               .health = ND_INTEL_SMART_NON_CRITICAL_HEALTH,
-               .media_temperature = 23 * 16,
-               .ctrl_temperature = 25 * 16,
-               .pmic_temperature = 40 * 16,
-               .spares = 75,
-               .alarm_flags = ND_INTEL_SMART_SPARE_TRIP
-                       | ND_INTEL_SMART_TEMP_TRIP,
-               .ait_status = 1,
-               .life_used = 5,
-               .shutdown_state = 0,
-               .vendor_size = 0,
-               .shutdown_count = 100,
-       };
 
        for (i = 0; i < t->num_dcr; i++) {
-               memcpy(&t->smart[i], &smart_data, sizeof(smart_data));
+               memcpy(&t->smart[i], &smart_def, sizeof(smart_def));
                memcpy(&t->smart_threshold[i], &smart_t_data,
                                sizeof(smart_t_data));
        }
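
Two related changes run through the nfit.c hunks above: SMART injection is now gated per field by inj->flags and can restore the smart_def defaults, and the per-DIMM failure injection moved out of get_dimm() into override_return_code(), so every command handler stores its result in rc and funnels through a single override point. A generic sketch of that dispatch-then-override pattern (all names hypothetical):

  /* Each handler fills rc; one hook may replace it with a forced error. */
  static int forced_error[4];             /* per-device injected error codes */

  static int override_rc(int dev, int rc)
  {
          return forced_error[dev] ? forced_error[dev] : rc;
  }

  static int dispatch(int dev, int cmd)
  {
          int rc;

          switch (cmd) {
          case 0:
                  rc = 0;                 /* handle_cmd0(dev); */
                  break;
          case 1:
                  rc = 100;               /* handle_cmd1(dev); */
                  break;
          default:
                  return -1;              /* unknown commands are never overridden */
          }
          return override_rc(dev, rc);
  }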
index db66f8a0d4bed71aa581cdbf82c9acf422daef05..37baecc3766f61064caedee4efadf151631d8de1 100644 (file)
@@ -1,7 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 
-CFLAGS += -I. -I../../include -g -O2 -Wall -D_LGPL_SOURCE -fsanitize=address
-LDFLAGS += -fsanitize=address
+CFLAGS += -I. -I../../include -g -Og -Wall -D_LGPL_SOURCE -fsanitize=address \
+         -fsanitize=undefined
+LDFLAGS += -fsanitize=address -fsanitize=undefined
 LDLIBS+= -lpthread -lurcu
 TARGETS = main idr-test multiorder
 CORE_OFILES := radix-tree.o idr.o linux.o test.o find_bit.o
@@ -21,6 +22,7 @@ targets: generated/map-shift.h $(TARGETS)
 
 main:  $(OFILES)
 
+idr-test.o: ../../../lib/test_ida.c
 idr-test: idr-test.o $(CORE_OFILES)
 
 multiorder: multiorder.o $(CORE_OFILES)
index ee820fcc29b0d3c201cbd63e56daa2392e89bcbf..321ba92c70d2acb56ef5e9573c0a4b23b304062f 100644 (file)
@@ -309,141 +309,61 @@ void idr_checks(void)
        idr_u32_test(0);
 }
 
+#define module_init(x)
+#define module_exit(x)
+#define MODULE_AUTHOR(x)
+#define MODULE_LICENSE(x)
+#define dump_stack()    assert(0)
+void ida_dump(struct ida *);
+
+#include "../../../lib/test_ida.c"
+
 /*
  * Check that we get the correct error when we run out of memory doing
- * allocations.  To ensure we run out of memory, just "forget" to preload.
+ * allocations.  In userspace, GFP_NOWAIT will always fail an allocation.
  * The first test is for not having a bitmap available, and the second test
  * is for not being able to allocate a level of the radix tree.
  */
 void ida_check_nomem(void)
-{
-       DEFINE_IDA(ida);
-       int id, err;
-
-       err = ida_get_new_above(&ida, 256, &id);
-       assert(err == -EAGAIN);
-       err = ida_get_new_above(&ida, 1UL << 30, &id);
-       assert(err == -EAGAIN);
-}
-
-/*
- * Check what happens when we fill a leaf and then delete it.  This may
- * discover mishandling of IDR_FREE.
- */
-void ida_check_leaf(void)
 {
        DEFINE_IDA(ida);
        int id;
-       unsigned long i;
 
-       for (i = 0; i < IDA_BITMAP_BITS; i++) {
-               assert(ida_pre_get(&ida, GFP_KERNEL));
-               assert(!ida_get_new(&ida, &id));
-               assert(id == i);
-       }
-
-       ida_destroy(&ida);
-       assert(ida_is_empty(&ida));
-
-       assert(ida_pre_get(&ida, GFP_KERNEL));
-       assert(!ida_get_new(&ida, &id));
-       assert(id == 0);
-       ida_destroy(&ida);
-       assert(ida_is_empty(&ida));
+       id = ida_alloc_min(&ida, 256, GFP_NOWAIT);
+       IDA_BUG_ON(&ida, id != -ENOMEM);
+       id = ida_alloc_min(&ida, 1UL << 30, GFP_NOWAIT);
+       IDA_BUG_ON(&ida, id != -ENOMEM);
+       IDA_BUG_ON(&ida, !ida_is_empty(&ida));
 }
 
 /*
  * Check handling of conversions between exceptional entries and full bitmaps.
  */
-void ida_check_conv(void)
+void ida_check_conv_user(void)
 {
        DEFINE_IDA(ida);
-       int id;
        unsigned long i;
 
-       for (i = 0; i < IDA_BITMAP_BITS * 2; i += IDA_BITMAP_BITS) {
-               assert(ida_pre_get(&ida, GFP_KERNEL));
-               assert(!ida_get_new_above(&ida, i + 1, &id));
-               assert(id == i + 1);
-               assert(!ida_get_new_above(&ida, i + BITS_PER_LONG, &id));
-               assert(id == i + BITS_PER_LONG);
-               ida_remove(&ida, i + 1);
-               ida_remove(&ida, i + BITS_PER_LONG);
-               assert(ida_is_empty(&ida));
-       }
-
-       assert(ida_pre_get(&ida, GFP_KERNEL));
-
-       for (i = 0; i < IDA_BITMAP_BITS * 2; i++) {
-               assert(ida_pre_get(&ida, GFP_KERNEL));
-               assert(!ida_get_new(&ida, &id));
-               assert(id == i);
-       }
-
-       for (i = IDA_BITMAP_BITS * 2; i > 0; i--) {
-               ida_remove(&ida, i - 1);
-       }
-       assert(ida_is_empty(&ida));
-
-       for (i = 0; i < IDA_BITMAP_BITS + BITS_PER_LONG - 4; i++) {
-               assert(ida_pre_get(&ida, GFP_KERNEL));
-               assert(!ida_get_new(&ida, &id));
-               assert(id == i);
-       }
-
-       for (i = IDA_BITMAP_BITS + BITS_PER_LONG - 4; i > 0; i--) {
-               ida_remove(&ida, i - 1);
-       }
-       assert(ida_is_empty(&ida));
-
        radix_tree_cpu_dead(1);
        for (i = 0; i < 1000000; i++) {
-               int err = ida_get_new(&ida, &id);
-               if (err == -EAGAIN) {
-                       assert((i % IDA_BITMAP_BITS) == (BITS_PER_LONG - 2));
-                       assert(ida_pre_get(&ida, GFP_KERNEL));
-                       err = ida_get_new(&ida, &id);
+               int id = ida_alloc(&ida, GFP_NOWAIT);
+               if (id == -ENOMEM) {
+                       IDA_BUG_ON(&ida, (i % IDA_BITMAP_BITS) !=
+                                       BITS_PER_LONG - 2);
+                       id = ida_alloc(&ida, GFP_KERNEL);
                } else {
-                       assert((i % IDA_BITMAP_BITS) != (BITS_PER_LONG - 2));
+                       IDA_BUG_ON(&ida, (i % IDA_BITMAP_BITS) ==
+                                       BITS_PER_LONG - 2);
                }
-               assert(!err);
-               assert(id == i);
+               IDA_BUG_ON(&ida, id != i);
        }
        ida_destroy(&ida);
 }
 
-/*
- * Check allocations up to and slightly above the maximum allowed (2^31-1) ID.
- * Allocating up to 2^31-1 should succeed, and then allocating the next one
- * should fail.
- */
-void ida_check_max(void)
-{
-       DEFINE_IDA(ida);
-       int id, err;
-       unsigned long i, j;
-
-       for (j = 1; j < 65537; j *= 2) {
-               unsigned long base = (1UL << 31) - j;
-               for (i = 0; i < j; i++) {
-                       assert(ida_pre_get(&ida, GFP_KERNEL));
-                       assert(!ida_get_new_above(&ida, base, &id));
-                       assert(id == base + i);
-               }
-               assert(ida_pre_get(&ida, GFP_KERNEL));
-               err = ida_get_new_above(&ida, base, &id);
-               assert(err == -ENOSPC);
-               ida_destroy(&ida);
-               assert(ida_is_empty(&ida));
-               rcu_barrier();
-       }
-}
-
 void ida_check_random(void)
 {
        DEFINE_IDA(ida);
        DECLARE_BITMAP(bitmap, 2048);
-       int id, err;
        unsigned int i;
        time_t s = time(NULL);
 
@@ -454,15 +374,11 @@ void ida_check_random(void)
                int bit = i & 2047;
                if (test_bit(bit, bitmap)) {
                        __clear_bit(bit, bitmap);
-                       ida_remove(&ida, bit);
+                       ida_free(&ida, bit);
                } else {
                        __set_bit(bit, bitmap);
-                       do {
-                               ida_pre_get(&ida, GFP_KERNEL);
-                               err = ida_get_new_above(&ida, bit, &id);
-                       } while (err == -EAGAIN);
-                       assert(!err);
-                       assert(id == bit);
+                       IDA_BUG_ON(&ida, ida_alloc_min(&ida, bit, GFP_KERNEL)
+                                       != bit);
                }
        }
        ida_destroy(&ida);
@@ -488,71 +404,12 @@ void ida_simple_get_remove_test(void)
        ida_destroy(&ida);
 }
 
-void ida_checks(void)
+void user_ida_checks(void)
 {
-       DEFINE_IDA(ida);
-       int id;
-       unsigned long i;
-
        radix_tree_cpu_dead(1);
-       ida_check_nomem();
-
-       for (i = 0; i < 10000; i++) {
-               assert(ida_pre_get(&ida, GFP_KERNEL));
-               assert(!ida_get_new(&ida, &id));
-               assert(id == i);
-       }
-
-       ida_remove(&ida, 20);
-       ida_remove(&ida, 21);
-       for (i = 0; i < 3; i++) {
-               assert(ida_pre_get(&ida, GFP_KERNEL));
-               assert(!ida_get_new(&ida, &id));
-               if (i == 2)
-                       assert(id == 10000);
-       }
-
-       for (i = 0; i < 5000; i++)
-               ida_remove(&ida, i);
-
-       assert(ida_pre_get(&ida, GFP_KERNEL));
-       assert(!ida_get_new_above(&ida, 5000, &id));
-       assert(id == 10001);
-
-       ida_destroy(&ida);
-
-       assert(ida_is_empty(&ida));
 
-       assert(ida_pre_get(&ida, GFP_KERNEL));
-       assert(!ida_get_new_above(&ida, 1, &id));
-       assert(id == 1);
-
-       ida_remove(&ida, id);
-       assert(ida_is_empty(&ida));
-       ida_destroy(&ida);
-       assert(ida_is_empty(&ida));
-
-       assert(ida_pre_get(&ida, GFP_KERNEL));
-       assert(!ida_get_new_above(&ida, 1, &id));
-       ida_destroy(&ida);
-       assert(ida_is_empty(&ida));
-
-       assert(ida_pre_get(&ida, GFP_KERNEL));
-       assert(!ida_get_new_above(&ida, 1, &id));
-       assert(id == 1);
-       assert(ida_pre_get(&ida, GFP_KERNEL));
-       assert(!ida_get_new_above(&ida, 1025, &id));
-       assert(id == 1025);
-       assert(ida_pre_get(&ida, GFP_KERNEL));
-       assert(!ida_get_new_above(&ida, 10000, &id));
-       assert(id == 10000);
-       ida_remove(&ida, 1025);
-       ida_destroy(&ida);
-       assert(ida_is_empty(&ida));
-
-       ida_check_leaf();
-       ida_check_max();
-       ida_check_conv();
+       ida_check_nomem();
+       ida_check_conv_user();
        ida_check_random();
        ida_simple_get_remove_test();
 
@@ -582,12 +439,19 @@ void ida_thread_tests(void)
                pthread_join(threads[i], NULL);
 }
 
+void ida_tests(void)
+{
+       user_ida_checks();
+       ida_checks();
+       ida_exit();
+       ida_thread_tests();
+}
+
 int __weak main(void)
 {
        radix_tree_init();
        idr_checks();
-       ida_checks();
-       ida_thread_tests();
+       ida_tests();
        radix_tree_cpu_dead(1);
        rcu_barrier();
        if (nr_allocated)
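
The idr-test.c rewrite above tracks the kernel's move from the ida_pre_get()/ida_get_new_above()/ida_remove() trio to the simpler ida_alloc()/ida_alloc_min()/ida_free() API, and pulls lib/test_ida.c into the userspace build. A kernel-style sketch of the new calls; my_ida and the wrappers are hypothetical:

  #include <linux/idr.h>

  static DEFINE_IDA(my_ida);

  static int my_get_id(void)
  {
          /* Returns an id >= 0, or a negative errno such as -ENOMEM. */
          return ida_alloc(&my_ida, GFP_KERNEL);
  }

  static void my_put_id(int id)
  {
          ida_free(&my_ida, id);
  }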
diff --git a/tools/testing/radix-tree/linux/xarray.h b/tools/testing/radix-tree/linux/xarray.h
new file mode 100644 (file)
index 0000000..df3812c
--- /dev/null
@@ -0,0 +1,2 @@
+#include "generated/map-shift.h"
+#include "../../../../include/linux/xarray.h"
index 257f3f8aacaa52dad996447e7e8f603a4637240c..b741686e53d63ad66375d9748f0e78d68bbced49 100644 (file)
@@ -27,20 +27,22 @@ void __gang_check(unsigned long middle, long down, long up, int chunk, int hop)
                item_check_present(&tree, middle + idx);
        item_check_absent(&tree, middle + up);
 
-       item_gang_check_present(&tree, middle - down,
-                       up + down, chunk, hop);
-       item_full_scan(&tree, middle - down, down + up, chunk);
+       if (chunk > 0) {
+               item_gang_check_present(&tree, middle - down, up + down,
+                               chunk, hop);
+               item_full_scan(&tree, middle - down, down + up, chunk);
+       }
        item_kill_tree(&tree);
 }
 
 void gang_check(void)
 {
-       __gang_check(1 << 30, 128, 128, 35, 2);
-       __gang_check(1 << 31, 128, 128, 32, 32);
-       __gang_check(1 << 31, 128, 128, 32, 100);
-       __gang_check(1 << 31, 128, 128, 17, 7);
-       __gang_check(0xffff0000, 0, 65536, 17, 7);
-       __gang_check(0xfffffffe, 1, 1, 17, 7);
+       __gang_check(1UL << 30, 128, 128, 35, 2);
+       __gang_check(1UL << 31, 128, 128, 32, 32);
+       __gang_check(1UL << 31, 128, 128, 32, 100);
+       __gang_check(1UL << 31, 128, 128, 17, 7);
+       __gang_check(0xffff0000UL, 0, 65536, 17, 7);
+       __gang_check(0xfffffffeUL, 1, 1, 17, 7);
 }
 
 void __big_gang_check(void)
@@ -322,7 +324,7 @@ static void single_thread_tests(bool long_run)
        printv(2, "after dynamic_height_check: %d allocated, preempt %d\n",
                nr_allocated, preempt_count);
        idr_checks();
-       ida_checks();
+       ida_tests();
        rcu_barrier();
        printv(2, "after idr_checks: %d allocated, preempt %d\n",
                nr_allocated, preempt_count);
@@ -369,7 +371,6 @@ int main(int argc, char **argv)
        iteration_test(0, 10 + 90 * long_run);
        iteration_test(7, 10 + 90 * long_run);
        single_thread_tests(long_run);
-       ida_thread_tests();
 
        /* Free any remaining preallocated nodes */
        radix_tree_cpu_dead(0);
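
The UL suffixes added to gang_check() above matter because 1 << 31 shifts into the sign bit of a 32-bit int, which is undefined behaviour and exactly what the newly enabled UBSan build would flag. A two-line illustration:

  static unsigned long gang_base_ok(void)
  {
          return 1UL << 31;               /* well defined: 2147483648 */
          /* return 1 << 31; */           /* UB: signed overflow on 32-bit int */
  }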
index 31f1d9b6f506ef884c978b568aa9dc5d89fd8312..92d901eacf49c92f2855706f349d032220caa0f0 100644 (file)
@@ -39,8 +39,7 @@ void multiorder_checks(void);
 void iteration_test(unsigned order, unsigned duration);
 void benchmark(void);
 void idr_checks(void);
-void ida_checks(void);
-void ida_thread_tests(void);
+void ida_tests(void);
 
 struct item *
 item_tag_set(struct radix_tree_root *root, unsigned long index, int tag);
index 72c25a3cb65892c4d2ae0cd13add3c3cf2423b50..d9a72547837543cb62f9754dd587afa01510d516 100644 (file)
@@ -6,7 +6,7 @@ TEST_PROGS := run.sh
 
 include ../lib.mk
 
-all:
+all: khdr
        @for DIR in $(SUBDIRS); do              \
                BUILD_TARGET=$(OUTPUT)/$$DIR;   \
                mkdir $$BUILD_TARGET  -p;       \
index e03695287f763e809bfb78637f6ff166082efc1c..88cfe88e466fb0e642b1a95e20c9f82a7e689587 100644 (file)
@@ -10,6 +10,8 @@ $(TEST_GEN_FILES): ipcsocket.c ionutils.c
 
 TEST_PROGS := ion_test.sh
 
+KSFT_KHDR_INSTALL := 1
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(OUTPUT)/ionapp_export: ionapp_export.c ipcsocket.c ionutils.c
index 95eb3a53c3812c61bf6d15fae51c031a75b5eb1c..adacda50a4b211e64bb40489487e9727402806a8 100644 (file)
@@ -1 +1,2 @@
 test_memcontrol
+test_core
index 1c5d2b2a583b3348b13f47b89c75d5ce504ac621..14c9fe2848062f0c2a8c37004f087d07b1d976f6 100644 (file)
@@ -89,17 +89,28 @@ int cg_read(const char *cgroup, const char *control, char *buf, size_t len)
 int cg_read_strcmp(const char *cgroup, const char *control,
                   const char *expected)
 {
-       size_t size = strlen(expected) + 1;
+       size_t size;
        char *buf;
+       int ret;
+
+       /* Handle the case of comparing against empty string */
+       if (!expected)
+               size = 32;
+       else
+               size = strlen(expected) + 1;
 
        buf = malloc(size);
        if (!buf)
                return -1;
 
-       if (cg_read(cgroup, control, buf, size))
+       if (cg_read(cgroup, control, buf, size)) {
+               free(buf);
                return -1;
+       }
 
-       return strcmp(expected, buf);
+       ret = strcmp(expected, buf);
+       free(buf);
+       return ret;
 }
 
 int cg_read_strstr(const char *cgroup, const char *control, const char *needle)
@@ -337,3 +348,24 @@ int is_swap_enabled(void)
 
        return cnt > 1;
 }
+
+int set_oom_adj_score(int pid, int score)
+{
+       char path[PATH_MAX];
+       int fd, len;
+
+       sprintf(path, "/proc/%d/oom_score_adj", pid);
+
+       fd = open(path, O_WRONLY | O_APPEND);
+       if (fd < 0)
+               return fd;
+
+       len = dprintf(fd, "%d", score);
+       if (len < 0) {
+               close(fd);
+               return len;
+       }
+
+       close(fd);
+       return 0;
+}
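
set_oom_adj_score() above writes a raw score into /proc/<pid>/oom_score_adj. A hypothetical caller, mirroring how test_memcg_oom_group_score_events() later shields one child: writing OOM_SCORE_ADJ_MIN (-1000, from <linux/oom.h>) makes a process effectively unkillable by the OOM killer:

  #include <linux/oom.h>
  #include <unistd.h>

  extern int set_oom_adj_score(int pid, int score);   /* helper added above */

  static int protect_self_from_oom(void)
  {
          return set_oom_adj_score(getpid(), OOM_SCORE_ADJ_MIN);
  }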
index 1ff6f9f1abdc07f96b4dc5b18cdb9f490ee85abb..9ac8b7958f83b26a268f54f19acb6aeeda391b75 100644 (file)
@@ -40,3 +40,4 @@ extern int get_temp_fd(void);
 extern int alloc_pagecache(int fd, size_t size);
 extern int alloc_anon(const char *cgroup, void *arg);
 extern int is_swap_enabled(void);
+extern int set_oom_adj_score(int pid, int score);
index cf0bddc9d271e369eb0a8f8afa4664c571151a18..28d321ba311b48d6f207cfdae5fc43699e2f1b81 100644 (file)
@@ -2,6 +2,7 @@
 #define _GNU_SOURCE
 
 #include <linux/limits.h>
+#include <linux/oom.h>
 #include <fcntl.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -202,6 +203,36 @@ static int alloc_pagecache_50M_noexit(const char *cgroup, void *arg)
        return 0;
 }
 
+static int alloc_anon_noexit(const char *cgroup, void *arg)
+{
+       int ppid = getppid();
+
+       if (alloc_anon(cgroup, arg))
+               return -1;
+
+       while (getppid() == ppid)
+               sleep(1);
+
+       return 0;
+}
+
+/*
+ * Wait until processes are killed asynchronously by the OOM killer.
+ * If we exceed a timeout, fail.
+ */
+static int cg_test_proc_killed(const char *cgroup)
+{
+       int limit;
+
+       for (limit = 10; limit > 0; limit--) {
+               if (cg_read_strcmp(cgroup, "cgroup.procs", "") == 0)
+                       return 0;
+
+               usleep(100000);
+       }
+       return -1;
+}
+
 /*
  * First, this test creates the following hierarchy:
  * A       memory.min = 50M,  memory.max = 200M
@@ -964,6 +995,177 @@ cleanup:
        return ret;
 }
 
+/*
+ * This test disables swapping and tries to allocate anonymous memory
+ * up to OOM with memory.oom.group set. Then it checks that all
+ * processes in the leaf (but not the parent) were killed.
+ */
+static int test_memcg_oom_group_leaf_events(const char *root)
+{
+       int ret = KSFT_FAIL;
+       char *parent, *child;
+
+       parent = cg_name(root, "memcg_test_0");
+       child = cg_name(root, "memcg_test_0/memcg_test_1");
+
+       if (!parent || !child)
+               goto cleanup;
+
+       if (cg_create(parent))
+               goto cleanup;
+
+       if (cg_create(child))
+               goto cleanup;
+
+       if (cg_write(parent, "cgroup.subtree_control", "+memory"))
+               goto cleanup;
+
+       if (cg_write(child, "memory.max", "50M"))
+               goto cleanup;
+
+       if (cg_write(child, "memory.swap.max", "0"))
+               goto cleanup;
+
+       if (cg_write(child, "memory.oom.group", "1"))
+               goto cleanup;
+
+       cg_run_nowait(parent, alloc_anon_noexit, (void *) MB(60));
+       cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1));
+       cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1));
+       if (!cg_run(child, alloc_anon, (void *)MB(100)))
+               goto cleanup;
+
+       if (cg_test_proc_killed(child))
+               goto cleanup;
+
+       if (cg_read_key_long(child, "memory.events", "oom_kill ") <= 0)
+               goto cleanup;
+
+       if (cg_read_key_long(parent, "memory.events", "oom_kill ") != 0)
+               goto cleanup;
+
+       ret = KSFT_PASS;
+
+cleanup:
+       if (child)
+               cg_destroy(child);
+       if (parent)
+               cg_destroy(parent);
+       free(child);
+       free(parent);
+
+       return ret;
+}
+
+/*
+ * This test disables swapping and tries to allocate anonymous memory
+ * up to OOM with memory.oom.group set. Then it checks that all
+ * processes in the parent and leaf were killed.
+ */
+static int test_memcg_oom_group_parent_events(const char *root)
+{
+       int ret = KSFT_FAIL;
+       char *parent, *child;
+
+       parent = cg_name(root, "memcg_test_0");
+       child = cg_name(root, "memcg_test_0/memcg_test_1");
+
+       if (!parent || !child)
+               goto cleanup;
+
+       if (cg_create(parent))
+               goto cleanup;
+
+       if (cg_create(child))
+               goto cleanup;
+
+       if (cg_write(parent, "memory.max", "80M"))
+               goto cleanup;
+
+       if (cg_write(parent, "memory.swap.max", "0"))
+               goto cleanup;
+
+       if (cg_write(parent, "memory.oom.group", "1"))
+               goto cleanup;
+
+       cg_run_nowait(parent, alloc_anon_noexit, (void *) MB(60));
+       cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1));
+       cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1));
+
+       if (!cg_run(child, alloc_anon, (void *)MB(100)))
+               goto cleanup;
+
+       if (cg_test_proc_killed(child))
+               goto cleanup;
+       if (cg_test_proc_killed(parent))
+               goto cleanup;
+
+       ret = KSFT_PASS;
+
+cleanup:
+       if (child)
+               cg_destroy(child);
+       if (parent)
+               cg_destroy(parent);
+       free(child);
+       free(parent);
+
+       return ret;
+}
+
+/*
+ * This test disables swapping and tries to allocate anonymous memory
+ * up to OOM with memory.oom.group set. Then it checks that all
+ * processes were killed except those set with OOM_SCORE_ADJ_MIN.
+ */
+static int test_memcg_oom_group_score_events(const char *root)
+{
+       int ret = KSFT_FAIL;
+       char *memcg;
+       int safe_pid;
+
+       memcg = cg_name(root, "memcg_test_0");
+
+       if (!memcg)
+               goto cleanup;
+
+       if (cg_create(memcg))
+               goto cleanup;
+
+       if (cg_write(memcg, "memory.max", "50M"))
+               goto cleanup;
+
+       if (cg_write(memcg, "memory.swap.max", "0"))
+               goto cleanup;
+
+       if (cg_write(memcg, "memory.oom.group", "1"))
+               goto cleanup;
+
+       safe_pid = cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(1));
+       if (set_oom_adj_score(safe_pid, OOM_SCORE_ADJ_MIN))
+               goto cleanup;
+
+       cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(1));
+       if (!cg_run(memcg, alloc_anon, (void *)MB(100)))
+               goto cleanup;
+
+       if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 3)
+               goto cleanup;
+
+       if (kill(safe_pid, SIGKILL))
+               goto cleanup;
+
+       ret = KSFT_PASS;
+
+cleanup:
+       if (memcg)
+               cg_destroy(memcg);
+       free(memcg);
+
+       return ret;
+}
+
+
 #define T(x) { x, #x }
 struct memcg_test {
        int (*fn)(const char *root);
@@ -978,6 +1180,9 @@ struct memcg_test {
        T(test_memcg_oom_events),
        T(test_memcg_swap_max),
        T(test_memcg_sock),
+       T(test_memcg_oom_group_leaf_events),
+       T(test_memcg_oom_group_parent_events),
+       T(test_memcg_oom_group_score_events),
 };
 #undef T
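
The three new memcg tests exercise memory.oom.group: with the knob set to 1, an OOM kill triggered by memory.max takes down every task in the group except those pinned at OOM_SCORE_ADJ_MIN. A minimal sketch of the configuration the tests automate through cg_write(); the cgroup path is an assumption:

  #include <stdio.h>

  static int cg_set(const char *cg, const char *file, const char *val)
  {
          char path[256];
          FILE *f;

          snprintf(path, sizeof(path), "%s/%s", cg, file);
          f = fopen(path, "w");
          if (!f)
                  return -1;
          fputs(val, f);
          return fclose(f);
  }

  int main(void)
  {
          const char *cg = "/sys/fs/cgroup/memcg_test_0";     /* assumed path */

          cg_set(cg, "memory.max", "50M");
          cg_set(cg, "memory.swap.max", "0");
          cg_set(cg, "memory.oom.group", "1");
          return 0;
  }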
 
diff --git a/tools/testing/selftests/efivarfs/config b/tools/testing/selftests/efivarfs/config
new file mode 100644 (file)
index 0000000..4e151f1
--- /dev/null
@@ -0,0 +1 @@
+CONFIG_EFIVAR_FS=y
index ff8feca49746074d71b28455ff58a01a7d1b491c..ad1eeb14fda7ebf5717e7183ea83b31f6013600c 100644 (file)
@@ -18,6 +18,7 @@ TEST_GEN_FILES := \
 
 TEST_PROGS := run.sh
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_FILES): $(HEADERS)
index 1bbb47565c554cf06a95fe2a205231de75f7797e..4665cdbf1a8d4b165abb5081c408508dc56ebd78 100644 (file)
@@ -21,11 +21,8 @@ endef
 CFLAGS += -O2 -g -std=gnu99 -Wall -I../../../../usr/include/
 LDLIBS += -lmount -I/usr/include/libmount
 
-$(BINARIES): ../../../gpio/gpio-utils.o ../../../../usr/include/linux/gpio.h
+$(BINARIES):| khdr
+$(BINARIES): ../../../gpio/gpio-utils.o
 
 ../../../gpio/gpio-utils.o:
        make ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) -C ../../../gpio
-
-../../../../usr/include/linux/gpio.h:
-       make -C ../../../.. headers_install INSTALL_HDR_PATH=$(shell pwd)/../../../../usr/
-
index 15e6b75fc3a5e499fb7bcc981b6f19059406c2f6..a3edb2c8e43d0a83950d8b4535756ea0ec82eb8a 100644 (file)
@@ -19,7 +19,6 @@
 #define KSFT_FAIL  1
 #define KSFT_XFAIL 2
 #define KSFT_XPASS 3
-/* Treat skip as pass */
 #define KSFT_SKIP  4
 
 /* counters */
index 03b0f551bedffc7eecb6fd4452e99de1a8a4b828..87d1a8488af802597fdd1b06e164a04974509c45 100644 (file)
@@ -37,9 +37,6 @@ $(LIBKVM_OBJ): $(OUTPUT)/%.o: %.c
 $(OUTPUT)/libkvm.a: $(LIBKVM_OBJ)
        $(AR) crs $@ $^
 
-$(LINUX_HDR_PATH):
-       make -C $(top_srcdir) headers_install
-
-all: $(STATIC_LIBS) $(LINUX_HDR_PATH)
+all: $(STATIC_LIBS)
 $(TEST_GEN_PROGS): $(STATIC_LIBS)
-$(TEST_GEN_PROGS) $(LIBKVM_OBJ): | $(LINUX_HDR_PATH)
+$(STATIC_LIBS):| khdr
index 17ab36605a8e8933b77624843572a2d23fe846ec..0a8e75886224b371f6da30f7a01d10f652313b66 100644 (file)
@@ -16,8 +16,20 @@ TEST_GEN_PROGS := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS))
 TEST_GEN_PROGS_EXTENDED := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS_EXTENDED))
 TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES))
 
+top_srcdir ?= ../../../..
+include $(top_srcdir)/scripts/subarch.include
+ARCH           ?= $(SUBARCH)
+
 all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
 
+.PHONY: khdr
+khdr:
+       make ARCH=$(ARCH) -C $(top_srcdir) headers_install
+
+ifdef KSFT_KHDR_INSTALL
+$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES):| khdr
+endif
+
 .ONESHELL:
 define RUN_TEST_PRINT_RESULT
        TEST_HDR_MSG="selftests: "`basename $$PWD`:" $$BASENAME_TEST";  \
index 2fde30191a47e6251c4fc6477548d70a394269e1..a7e8cd5bb265d86cca85e91adc69043e18a3c13a 100644 (file)
@@ -2,3 +2,4 @@ CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTPLUG_SPARSE=y
 CONFIG_NOTIFIER_ERROR_INJECTION=y
 CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
+CONFIG_MEMORY_HOTREMOVE=y
index 9cca68e440a0b7107c10b1859e6fc8fb2ed2fe4f..919aa2ac00af7e601be743fac20f9e474873818e 100644 (file)
@@ -15,6 +15,7 @@ TEST_GEN_FILES += udpgso udpgso_bench_tx udpgso_bench_rx
 TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
 TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls
 
+KSFT_KHDR_INSTALL := 1
 include ../lib.mk
 
 $(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma
index f8cc38afffa2e6a35dd316a40e2493356c16063c..32a194e3e07a5a60d4c00dfdcfd25ad31ef38b6e 100755 (executable)
@@ -46,6 +46,9 @@
 # Kselftest framework requirement - SKIP code is 4.
 ksft_skip=4
 
+# Some systems don't have a ping6 binary anymore
+which ping6 > /dev/null 2>&1 && ping6=$(which ping6) || ping6=$(which ping)
+
 tests="
        pmtu_vti6_exception             vti6: PMTU exceptions
        pmtu_vti4_exception             vti4: PMTU exceptions
@@ -274,7 +277,7 @@ test_pmtu_vti6_exception() {
        mtu "${ns_b}" veth_b 4000
        mtu "${ns_a}" vti6_a 5000
        mtu "${ns_b}" vti6_b 5000
-       ${ns_a} ping6 -q -i 0.1 -w 2 -s 60000 ${vti6_b_addr} > /dev/null
+       ${ns_a} ${ping6} -q -i 0.1 -w 2 -s 60000 ${vti6_b_addr} > /dev/null
 
        # Check that exception was created
        if [ "$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})" = "" ]; then
@@ -334,7 +337,7 @@ test_pmtu_vti4_link_add_mtu() {
        fail=0
 
        min=68
-       max=$((65528 - 20))
+       max=$((65535 - 20))
        # Check invalid values first
        for v in $((min - 1)) $((max + 1)); do
                ${ns_a} ip link add vti4_a mtu ${v} type vti local ${veth4_a_addr} remote ${veth4_b_addr} key 10 2>/dev/null
index b3ebf2646e52f3e15e7d69c648369f4c510a54b4..8fdfeafaf8c00b4b327c33e7eeac02a42979d3dc 100644 (file)
@@ -502,6 +502,55 @@ TEST_F(tls, recv_peek_multiple)
        EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
 }
 
+TEST_F(tls, recv_peek_multiple_records)
+{
+       char const *test_str = "test_read_peek_mult_recs";
+       char const *test_str_first = "test_read_peek";
+       char const *test_str_second = "_mult_recs";
+       int len;
+       char buf[64];
+
+       len = strlen(test_str_first);
+       EXPECT_EQ(send(self->fd, test_str_first, len, 0), len);
+
+       len = strlen(test_str_second) + 1;
+       EXPECT_EQ(send(self->fd, test_str_second, len, 0), len);
+
+       len = sizeof(buf);
+       memset(buf, 0, len);
+       EXPECT_NE(recv(self->cfd, buf, len, MSG_PEEK), -1);
+
+       /* MSG_PEEK can only peek into the current record. */
+       len = strlen(test_str_first) + 1;
+       EXPECT_EQ(memcmp(test_str_first, buf, len), 0);
+
+       len = sizeof(buf);
+       memset(buf, 0, len);
+       EXPECT_NE(recv(self->cfd, buf, len, 0), -1);
+
+       /* Non-MSG_PEEK will advance strparser (and therefore record)
+        * however.
+        */
+       len = strlen(test_str) + 1;
+       EXPECT_EQ(memcmp(test_str, buf, len), 0);
+
+       /* MSG_MORE will hold current record open, so later MSG_PEEK
+        * will see everything.
+        */
+       len = strlen(test_str_first);
+       EXPECT_EQ(send(self->fd, test_str_first, len, MSG_MORE), len);
+
+       len = strlen(test_str_second) + 1;
+       EXPECT_EQ(send(self->fd, test_str_second, len, 0), len);
+
+       len = sizeof(buf);
+       memset(buf, 0, len);
+       EXPECT_NE(recv(self->cfd, buf, len, MSG_PEEK), -1);
+
+       len = strlen(test_str) + 1;
+       EXPECT_EQ(memcmp(test_str, buf, len), 0);
+}
+
 TEST_F(tls, pollin)
 {
        char const *test_str = "test_poll";
index a728040edbe18e14061eda8302043f340392e27f..14cfcf006936da9dd465ae7fbd8e18b1ebb0a1bf 100644 (file)
@@ -5,6 +5,7 @@ TEST_PROGS := hwtstamp_config rxtimestamp timestamping txtimestamp
 
 all: $(TEST_PROGS)
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 clean:
index f03763d816172e9a825ca896f0f3733e2109d503..30f9b54bd66689094d2556c354373cfc186a1932 100644 (file)
             "$TC actions flush action police"
         ]
     },
+    {
+        "id": "6aaf",
+        "name": "Add police actions with conform-exceed control pass/pipe [with numeric values]",
+        "category": [
+            "actions",
+            "police"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action police",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action police rate 3mbit burst 250k conform-exceed 0/3 index 1",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions get action police index 1",
+        "matchPattern": "action order [0-9]*:  police 0x1 rate 3Mbit burst 250Kb mtu 2Kb action pass/pipe",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action police"
+        ]
+    },
+    {
+        "id": "29b1",
+        "name": "Add police actions with conform-exceed control <invalid>/drop",
+        "category": [
+            "actions",
+            "police"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action police",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action police rate 3mbit burst 250k conform-exceed 10/drop index 1",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions ls action police",
+        "matchPattern": "action order [0-9]*:  police 0x1 rate 3Mbit burst 250Kb mtu 2Kb action ",
+        "matchCount": "0",
+        "teardown": [
+            "$TC actions flush action police"
+        ]
+    },
     {
         "id": "c26f",
         "name": "Add police action with invalid peakrate value",
index 9881876d2aa0b065f0367a87ab184d41139360a3..e94b7b14bcb2608a7e5bb2694527a760cd2a93c6 100644 (file)
@@ -26,10 +26,6 @@ TEST_PROGS := run_vmtests
 
 include ../lib.mk
 
-$(OUTPUT)/userfaultfd: ../../../../usr/include/linux/kernel.h
 $(OUTPUT)/userfaultfd: LDLIBS += -lpthread
 
 $(OUTPUT)/mlock-random-test: LDLIBS += -lcap
-
-../../../../usr/include/linux/kernel.h:
-       make -C ../../../.. headers_install
index 30cb0a0713fff9c63a158129cc5e59c115cb4e88..37908a83ddc27eac33d40f76ea869edf8bdd957d 100644 (file)
@@ -159,12 +159,6 @@ static const char * const page_flag_names[] = {
 };
 
 
-static const char * const debugfs_known_mountpoints[] = {
-       "/sys/kernel/debug",
-       "/debug",
-       0,
-};
-
 /*
  * data structures
  */
index f82c2eaa859d11fa755e8809dd9fce52af95fdec..334b16db0ebbe99b82855c80e641986c4f93edc8 100644 (file)
@@ -30,8 +30,8 @@ struct slabinfo {
        int alias;
        int refs;
        int aliases, align, cache_dma, cpu_slabs, destroy_by_rcu;
-       int hwcache_align, object_size, objs_per_slab;
-       int sanity_checks, slab_size, store_user, trace;
+       unsigned int hwcache_align, object_size, objs_per_slab;
+       unsigned int sanity_checks, slab_size, store_user, trace;
        int order, poison, reclaim_account, red_zone;
        unsigned long partial, objects, slabs, objects_partial, objects_total;
        unsigned long alloc_fastpath, alloc_slowpath;
index 237a028693ce9b497deabb95aa8f922485bee5fd..748f6a60bb1e00070dbc61a895ba19e4370b476f 100644 (file)
@@ -24,7 +24,7 @@ $(obj)/initramfs_data.o: $(obj)/$(datafile_y) FORCE
 # Generate the initramfs cpio archive
 
 hostprogs-y := gen_init_cpio
-initramfs   := $(CONFIG_SHELL) $(srctree)/scripts/gen_initramfs_list.sh
+initramfs   := $(CONFIG_SHELL) $(srctree)/$(src)/gen_initramfs_list.sh
 ramfs-input := $(if $(filter-out "",$(CONFIG_INITRAMFS_SOURCE)), \
                        $(shell echo $(CONFIG_INITRAMFS_SOURCE)),-d)
 ramfs-args  := \
similarity index 99%
rename from scripts/gen_initramfs_list.sh
rename to usr/gen_initramfs_list.sh
index 10e528b3a08fc4b4f4e2bde66ced6fa4d7684787..0aad760fcd8c3704a6445e1a5218082d3d069063 100755 (executable)
@@ -174,7 +174,7 @@ dir_filelist() {
        ${dep_list}header "$1"
 
        srcdir=$(echo "$1" | sed -e 's://*:/:g')
-       dirlist=$(find "${srcdir}" -printf "%p %m %U %G\n" | sort)
+       dirlist=$(find "${srcdir}" -printf "%p %m %U %G\n" | LANG=C sort)
 
        # If $dirlist is only one line, then the directory is empty
        if [  "$(echo "${dirlist}" | wc -l)" -gt 1 ]; then
index b28da799f6a6288c4e8c9b094ac10d3cb394cf04..d07648f05bbf8068adcb65fcf68991fbfb8480e9 100644 (file)
@@ -30,8 +30,8 @@ __irf_start:
 .incbin __stringify(INITRAMFS_IMAGE)
 __irf_end:
 .section .init.ramfs.info,"a"
-.globl VMLINUX_SYMBOL(__initramfs_size)
-VMLINUX_SYMBOL(__initramfs_size):
+.globl __initramfs_size
+__initramfs_size:
 #ifdef CONFIG_64BIT
        .quad __irf_end - __irf_start
 #else
index 91aaf73b00df8a385a28c2a221505b6ba82062b1..ed162a6c57c597d89a7f08600b84a4425feb0b13 100644 (file)
@@ -1817,18 +1817,6 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *dat
        return 0;
 }
 
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
-       unsigned long end = hva + PAGE_SIZE;
-
-       if (!kvm->arch.pgd)
-               return 0;
-
-       trace_kvm_unmap_hva(hva);
-       handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
-       return 0;
-}
-
 int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end)
 {
@@ -1860,13 +1848,20 @@ static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
        unsigned long end = hva + PAGE_SIZE;
+       kvm_pfn_t pfn = pte_pfn(pte);
        pte_t stage2_pte;
 
        if (!kvm->arch.pgd)
                return;
 
        trace_kvm_set_spte_hva(hva);
-       stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
+
+       /*
+        * We've moved a page around, probably through CoW, so let's treat it
+        * just like a translation fault and clean the cache to the PoC.
+        */
+       clean_dcache_guest_page(pfn, PAGE_SIZE);
+       stage2_pte = pfn_pte(pfn, PAGE_S2);
        handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
 }
 
index e53b596f483b99f0a7f9d897938b30f47a079316..57b3edebbb4043e7cbeef0f834ccc2a1848caf78 100644 (file)
@@ -134,21 +134,6 @@ TRACE_EVENT(kvm_mmio_emulate,
                  __entry->vcpu_pc, __entry->instr, __entry->cpsr)
 );
 
-TRACE_EVENT(kvm_unmap_hva,
-       TP_PROTO(unsigned long hva),
-       TP_ARGS(hva),
-
-       TP_STRUCT__entry(
-               __field(        unsigned long,  hva             )
-       ),
-
-       TP_fast_assign(
-               __entry->hva            = hva;
-       ),
-
-       TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva)
-);
-
 TRACE_EVENT(kvm_unmap_hva_range,
        TP_PROTO(unsigned long start, unsigned long end),
        TP_ARGS(start, end),