Merge branches 'arm/msm', 'arm/allwinner', 'arm/smmu', 'x86/vt-d', 'hyper-v', 'core...
author Joerg Roedel <jroedel@suse.de>
Tue, 2 Jun 2020 08:32:04 +0000 (10:32 +0200)
committer Joerg Roedel <jroedel@suse.de>
Tue, 2 Jun 2020 08:32:04 +0000 (10:32 +0200)
1154 files changed:
.mailmap
Documentation/admin-guide/device-mapper/dm-integrity.rst
Documentation/core-api/printk-formats.rst
Documentation/devicetree/bindings/dma/fsl-edma.txt
Documentation/devicetree/bindings/dma/socionext,uniphier-xdmac.yaml
Documentation/devicetree/bindings/iommu/allwinner,sun50i-h6-iommu.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/net/dsa/b53.txt
Documentation/networking/devlink/ice.rst
Documentation/process/coding-style.rst
Documentation/usb/raw-gadget.rst
Documentation/virt/kvm/index.rst
Documentation/virt/kvm/running-nested-guests.rst [new file with mode: 0644]
MAINTAINERS
Makefile
arch/arc/configs/hsdk_defconfig
arch/arc/include/asm/dsp-impl.h
arch/arc/include/asm/entry-arcv2.h
arch/arc/kernel/Makefile
arch/arc/kernel/ptrace.c
arch/arc/kernel/setup.c
arch/arc/kernel/troubleshoot.c
arch/arc/kernel/unwind.c
arch/arc/plat-eznps/Kconfig
arch/arm/Kconfig
arch/arm/boot/compressed/vmlinux.lds.S
arch/arm/boot/dts/am437x-gp-evm.dts
arch/arm/boot/dts/am437x-idk-evm.dts
arch/arm/boot/dts/am437x-sk-evm.dts
arch/arm/boot/dts/am571x-idk.dts
arch/arm/boot/dts/am574x-idk.dts
arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
arch/arm/boot/dts/am57xx-idk-common.dtsi
arch/arm/boot/dts/bcm-hr2.dtsi
arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
arch/arm/boot/dts/dm814x.dtsi
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/imx27-phytec-phycard-s-rdk.dts
arch/arm/boot/dts/imx6dl-yapp4-ursa.dts
arch/arm/boot/dts/imx6q-b450v3.dts
arch/arm/boot/dts/imx6q-b650v3.dts
arch/arm/boot/dts/imx6q-b850v3.dts
arch/arm/boot/dts/imx6q-bx50v3.dtsi
arch/arm/boot/dts/iwg20d-q7-dbcm-ca.dtsi
arch/arm/boot/dts/mmp3-dell-ariel.dts
arch/arm/boot/dts/mmp3.dtsi
arch/arm/boot/dts/motorola-mapphone-common.dtsi
arch/arm/boot/dts/r7s9210.dtsi
arch/arm/boot/dts/r8a73a4.dtsi
arch/arm/boot/dts/r8a7740.dtsi
arch/arm/boot/dts/r8a7745-iwg22d-sodimm-dbhd-ca.dts
arch/arm/boot/dts/r8a7790-lager.dts
arch/arm/boot/dts/r8a7790-stout.dts
arch/arm/boot/dts/r8a7791-koelsch.dts
arch/arm/boot/dts/r8a7791-porter.dts
arch/arm/boot/dts/r8a7792-blanche.dts
arch/arm/boot/dts/r8a7792-wheat.dts
arch/arm/boot/dts/r8a7793-gose.dts
arch/arm/boot/dts/r8a7794-silk.dts
arch/arm/boot/dts/rk3036.dtsi
arch/arm/boot/dts/rk3228-evb.dts
arch/arm/boot/dts/rk3229-xms6.dts
arch/arm/boot/dts/rk322x.dtsi
arch/arm/boot/dts/rk3xxx.dtsi
arch/arm/configs/keystone_defconfig
arch/arm/configs/omap2plus_defconfig
arch/arm/crypto/chacha-glue.c
arch/arm/crypto/nhpoly1305-neon-glue.c
arch/arm/crypto/poly1305-glue.c
arch/arm/include/asm/assembler.h
arch/arm/include/asm/futex.h
arch/arm/include/asm/uaccess-asm.h [new file with mode: 0644]
arch/arm/kernel/atags_proc.c
arch/arm/kernel/entry-armv.S
arch/arm/kernel/entry-header.S
arch/arm/kernel/ptrace.c
arch/arm/mach-oxnas/platsmp.c
arch/arm64/Kconfig
arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts
arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
arch/arm64/boot/dts/amlogic/meson-g12.dtsi
arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi
arch/arm64/boot/dts/amlogic/meson-g12b-ugoos-am6.dts
arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
arch/arm64/boot/dts/freescale/imx8mm.dtsi
arch/arm64/boot/dts/freescale/imx8mn.dtsi
arch/arm64/boot/dts/freescale/imx8mp-pinfunc.h
arch/arm64/boot/dts/freescale/imx8mp.dtsi
arch/arm64/boot/dts/freescale/imx8mq.dtsi
arch/arm64/boot/dts/mediatek/mt8173.dtsi
arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
arch/arm64/boot/dts/qcom/msm8996.dtsi
arch/arm64/boot/dts/qcom/sdm845-db845c.dts
arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
arch/arm64/boot/dts/renesas/r8a77970-eagle.dts
arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts
arch/arm64/boot/dts/renesas/r8a77980-condor.dts
arch/arm64/boot/dts/renesas/r8a77980-v3hsk.dts
arch/arm64/boot/dts/renesas/r8a77980.dtsi
arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
arch/arm64/boot/dts/renesas/r8a77995-draak.dts
arch/arm64/boot/dts/rockchip/px30.dtsi
arch/arm64/boot/dts/rockchip/rk3308.dtsi
arch/arm64/boot/dts/rockchip/rk3328-evb.dts
arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
arch/arm64/boot/dts/rockchip/rk3328.dtsi
arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
arch/arm64/boot/dts/rockchip/rk3399.dtsi
arch/arm64/configs/defconfig
arch/arm64/crypto/chacha-neon-glue.c
arch/arm64/crypto/nhpoly1305-neon-glue.c
arch/arm64/crypto/poly1305-glue.c
arch/arm64/include/asm/uaccess.h
arch/arm64/kernel/machine_kexec.c
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/smp.c
arch/arm64/kernel/vdso/Makefile
arch/arm64/kvm/guest.c
arch/arm64/kvm/hyp/entry.S
arch/arm64/kvm/hyp/hyp-entry.S
arch/arm64/kvm/hyp/sysreg-sr.c
arch/arm64/mm/hugetlbpage.c
arch/csky/Kconfig
arch/csky/Makefile
arch/csky/abiv1/inc/abi/entry.h
arch/csky/abiv2/inc/abi/entry.h
arch/csky/abiv2/mcount.S
arch/csky/include/asm/processor.h
arch/csky/include/asm/ptrace.h
arch/csky/include/asm/thread_info.h
arch/csky/include/asm/uaccess.h
arch/csky/kernel/Makefile
arch/csky/kernel/asm-offsets.c
arch/csky/kernel/dumpstack.c [deleted file]
arch/csky/kernel/entry.S
arch/csky/kernel/ftrace.c
arch/csky/kernel/perf_callchain.c
arch/csky/kernel/probes/uprobes.c
arch/csky/kernel/process.c
arch/csky/kernel/ptrace.c
arch/csky/kernel/stacktrace.c
arch/csky/lib/usercopy.c
arch/ia64/include/asm/device.h
arch/parisc/mm/init.c
arch/powerpc/Kconfig
arch/powerpc/include/asm/book3s/32/hash.h
arch/powerpc/include/asm/book3s/32/kup.h
arch/powerpc/include/asm/hw_irq.h
arch/powerpc/include/asm/uaccess.h
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/head_32.S
arch/powerpc/kernel/head_40x.S
arch/powerpc/kernel/ima_arch.c
arch/powerpc/kernel/syscall_64.c
arch/powerpc/kernel/vdso32/gettimeofday.S
arch/powerpc/kvm/powerpc.c
arch/powerpc/mm/book3s32/hash_low.S
arch/riscv/Kconfig
arch/riscv/Kconfig.socs
arch/riscv/include/asm/csr.h
arch/riscv/include/asm/hwcap.h
arch/riscv/include/asm/mmio.h
arch/riscv/include/asm/mmiowb.h
arch/riscv/include/asm/perf_event.h
arch/riscv/include/asm/pgtable.h
arch/riscv/include/asm/set_memory.h
arch/riscv/kernel/Makefile
arch/riscv/kernel/cpu_ops.c
arch/riscv/kernel/cpufeature.c
arch/riscv/kernel/perf_event.c
arch/riscv/kernel/process.c
arch/riscv/kernel/sbi.c
arch/riscv/kernel/smp.c
arch/riscv/kernel/stacktrace.c
arch/riscv/kernel/vdso/Makefile
arch/riscv/kernel/vdso/note.S [new file with mode: 0644]
arch/riscv/mm/init.c
arch/s390/include/asm/pci_io.h
arch/s390/kernel/machine_kexec_file.c
arch/s390/kernel/machine_kexec_reloc.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/priv.c
arch/s390/lib/uaccess.c
arch/s390/mm/hugetlbpage.c
arch/s390/mm/pgalloc.c
arch/s390/pci/pci_mmio.c
arch/sh/include/uapi/asm/sockios.h
arch/sparc/mm/srmmu.c
arch/um/drivers/vector_user.h
arch/um/include/asm/xor.h
arch/um/kernel/skas/syscall.c
arch/x86/Kconfig
arch/x86/boot/tools/build.c
arch/x86/crypto/blake2s-glue.c
arch/x86/crypto/chacha_glue.c
arch/x86/crypto/nhpoly1305-avx2-glue.c
arch/x86/crypto/nhpoly1305-sse2-glue.c
arch/x86/crypto/poly1305_glue.c
arch/x86/entry/calling.h
arch/x86/entry/entry_64.S
arch/x86/hyperv/hv_init.c
arch/x86/include/asm/bitops.h
arch/x86/include/asm/device.h
arch/x86/include/asm/dma.h
arch/x86/include/asm/ftrace.h
arch/x86/include/asm/io_bitmap.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/mshyperv.h
arch/x86/include/asm/stackprotector.h
arch/x86/include/asm/unwind.h
arch/x86/include/uapi/asm/unistd.h
arch/x86/kernel/apic/apic.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/fpu/xstate.c
arch/x86/kernel/ftrace.c
arch/x86/kernel/ioport.c
arch/x86/kernel/process.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/unwind_frame.c
arch/x86/kernel/unwind_orc.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/ioapic.c
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmenter.S
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/init_64.c
arch/x86/mm/mmio-mod.c
arch/x86/mm/pat/set_memory.c
arch/x86/xen/smp_pv.c
block/bfq-iosched.c
block/blk-cgroup.c
block/blk-core.c
block/blk-iocost.c
block/partitions/core.c
crypto/lrw.c
crypto/xts.c
drivers/acpi/device_pm.c
drivers/acpi/ec.c
drivers/acpi/internal.h
drivers/acpi/sleep.c
drivers/amba/bus.c
drivers/base/component.c
drivers/base/core.c
drivers/base/dd.c
drivers/base/platform.c
drivers/block/null_blk_main.c
drivers/block/null_blk_zoned.c
drivers/block/virtio_blk.c
drivers/bus/mhi/core/init.c
drivers/bus/mhi/core/internal.h
drivers/bus/mhi/core/main.c
drivers/bus/mhi/core/pm.c
drivers/char/ipmi/ipmi_ssif.c
drivers/clk/clk.c
drivers/clk/qcom/Kconfig
drivers/clk/qcom/gcc-sm8150.c
drivers/clk/rockchip/clk-rk3228.c
drivers/clk/tegra/clk-tegra124.c
drivers/clk/ti/clk-33xx.c
drivers/clk/ti/clkctrl.c
drivers/clk/versatile/clk-impd1.c
drivers/cpufreq/intel_pstate.c
drivers/crypto/caam/caamalg.c
drivers/crypto/caam/caamhash.c
drivers/crypto/caam/caampkc.c
drivers/crypto/chelsio/chcr_ktls.c
drivers/crypto/chelsio/chtls/chtls_io.c
drivers/dax/kmem.c
drivers/dma-buf/dma-buf.c
drivers/dma/Kconfig
drivers/dma/dmaengine.c
drivers/dma/dmatest.c
drivers/dma/idxd/device.c
drivers/dma/idxd/irq.c
drivers/dma/mmp_tdma.c
drivers/dma/owl-dma.c
drivers/dma/pch_dma.c
drivers/dma/tegra20-apb-dma.c
drivers/dma/tegra210-adma.c
drivers/dma/ti/k3-psil.c
drivers/dma/ti/k3-udma.c
drivers/dma/xilinx/xilinx_dma.c
drivers/dma/xilinx/zynqmp_dma.c
drivers/firmware/efi/cper.c
drivers/firmware/efi/earlycon.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/libstub/arm-stub.c
drivers/firmware/efi/libstub/efistub.h
drivers/firmware/efi/libstub/mem.c
drivers/firmware/efi/libstub/tpm.c
drivers/firmware/efi/libstub/x86-stub.c
drivers/firmware/efi/tpm.c
drivers/gpio/gpio-bcm-kona.c
drivers/gpio/gpio-exar.c
drivers/gpio/gpio-mlxbf2.c
drivers/gpio/gpio-mvebu.c
drivers/gpio/gpio-pca953x.c
drivers/gpio/gpio-pxa.c
drivers/gpio/gpio-tegra.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
drivers/gpu/drm/amd/display/dc/dml/Makefile
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h
drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h
drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h
drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c [deleted file]
drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h [deleted file]
drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
drivers/gpu/drm/amd/display/dc/os_types.h
drivers/gpu/drm/amd/powerplay/amd_powerplay.c
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_hdcp.c
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
drivers/gpu/drm/i915/display/intel_fbc.c
drivers/gpu/drm/i915/gem/i915_gem_domain.c
drivers/gpu/drm/i915/gem/i915_gem_tiling.c
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
drivers/gpu/drm/i915/gt/intel_context_types.h
drivers/gpu/drm/i915/gt/intel_engine.h
drivers/gpu/drm/i915/gt/intel_engine_cs.c
drivers/gpu/drm/i915/gt/intel_engine_types.h
drivers/gpu/drm/i915/gt/intel_gt_irq.c
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/gt/intel_timeline.c
drivers/gpu/drm/i915/gt/selftest_lrc.c
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
drivers/gpu/drm/i915/gvt/display.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/i915_scheduler.c
drivers/gpu/drm/i915/i915_scheduler.h
drivers/gpu/drm/i915/i915_scheduler_types.h
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/selftests/i915_vma.c
drivers/gpu/drm/ingenic/ingenic-drm.c
drivers/gpu/drm/meson/meson_drv.c
drivers/gpu/drm/qxl/qxl_cmd.c
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/qxl/qxl_draw.c
drivers/gpu/drm/qxl/qxl_image.c
drivers/gpu/drm/qxl/qxl_ioctl.c
drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/virtio/virtgpu_drv.h
drivers/gpu/drm/virtio/virtgpu_gem.c
drivers/gpu/drm/virtio/virtgpu_ioctl.c
drivers/gpu/drm/virtio/virtgpu_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
drivers/gpu/host1x/dev.c
drivers/hid/Kconfig
drivers/hid/hid-alps.c
drivers/hid/hid-ids.h
drivers/hid/hid-lg-g15.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-quirks.c
drivers/hid/i2c-hid/i2c-hid-core.c
drivers/hid/usbhid/hid-core.c
drivers/hid/usbhid/usbhid.h
drivers/hid/wacom_sys.c
drivers/hid/wacom_wac.c
drivers/hv/hv.c
drivers/hv/hv_trace.h
drivers/hv/vmbus_drv.c
drivers/hwmon/da9052-hwmon.c
drivers/hwmon/drivetemp.c
drivers/hwmon/nct7904.c
drivers/hwtracing/coresight/coresight-cti-platform.c
drivers/i2c/algos/i2c-algo-pca.c
drivers/i2c/busses/i2c-altera.c
drivers/i2c/busses/i2c-amd-mp2-pci.c
drivers/i2c/busses/i2c-aspeed.c
drivers/i2c/busses/i2c-at91-master.c
drivers/i2c/busses/i2c-bcm-iproc.c
drivers/i2c/busses/i2c-tegra.c
drivers/i2c/i2c-core-base.c
drivers/i2c/i2c-core-of.c
drivers/i2c/muxes/i2c-demux-pinctrl.c
drivers/iio/accel/sca3000.c
drivers/iio/adc/stm32-adc.c
drivers/iio/adc/stm32-dfsdm-adc.c
drivers/iio/adc/ti-ads8344.c
drivers/iio/chemical/atlas-sensor.c
drivers/iio/dac/vf610_dac.c
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
drivers/infiniband/core/cache.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/nldev.c
drivers/infiniband/core/rdma_core.c
drivers/infiniband/core/uverbs.h
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/core/uverbs_std_types_async_fd.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/hfi1/user_sdma.c
drivers/infiniband/hw/i40iw/i40iw_cm.c
drivers/infiniband/hw/i40iw/i40iw_ctrl.c
drivers/infiniband/hw/i40iw/i40iw_hw.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/qib/qib_sysfs.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
drivers/infiniband/sw/rdmavt/cq.c
drivers/infiniband/sw/rdmavt/mmap.c
drivers/infiniband/sw/rdmavt/qp.c
drivers/infiniband/sw/rdmavt/srq.c
drivers/infiniband/sw/rxe/rxe_mmap.c
drivers/infiniband/sw/rxe/rxe_queue.c
drivers/infiniband/sw/siw/siw_qp_tx.c
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/input/evdev.c
drivers/input/joystick/xpad.c
drivers/input/keyboard/applespi.c
drivers/input/keyboard/cros_ec_keyb.c
drivers/input/keyboard/dlink-dir685-touchkeys.c
drivers/input/misc/axp20x-pek.c
drivers/input/mouse/synaptics.c
drivers/input/rmi4/rmi_driver.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/touchscreen/elants_i2c.c
drivers/input/touchscreen/mms114.c
drivers/input/touchscreen/usbtouchscreen.c
drivers/interconnect/qcom/osm-l3.c
drivers/interconnect/qcom/sdm845.c
drivers/iommu/Kconfig
drivers/iommu/Makefile
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu.h
drivers/iommu/amd_iommu_debugfs.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/amd_iommu_proto.h [deleted file]
drivers/iommu/amd_iommu_types.h
drivers/iommu/amd_iommu_v2.c
drivers/iommu/arm-smmu-v3.c
drivers/iommu/dmar.c
drivers/iommu/hyperv-iommu.c
drivers/iommu/intel-iommu-debugfs.c
drivers/iommu/intel-iommu.c
drivers/iommu/intel-pasid.c
drivers/iommu/intel-pasid.h
drivers/iommu/intel-svm.c
drivers/iommu/intel_irq_remapping.c
drivers/iommu/iommu.c
drivers/iommu/iova.c
drivers/iommu/ipmmu-vmsa.c
drivers/iommu/msm_iommu.c
drivers/iommu/mtk_iommu_v1.c
drivers/iommu/omap-iommu.c
drivers/iommu/qcom_iommu.c
drivers/iommu/sun50i-iommu.c [new file with mode: 0644]
drivers/iommu/virtio-iommu.c
drivers/ipack/carriers/tpci200.c
drivers/md/dm-mpath.c
drivers/md/dm-verity-fec.c
drivers/md/dm-writecache.c
drivers/misc/cardreader/rtsx_pcr.c
drivers/misc/mei/client.c
drivers/misc/mei/hw-me.c
drivers/misc/mei/hw-me.h
drivers/misc/mei/pci-me.c
drivers/misc/uacce/uacce.c
drivers/mmc/core/block.c
drivers/mmc/core/mmc_ops.c
drivers/mmc/core/queue.c
drivers/mmc/host/alcor.c
drivers/mmc/host/cqhci.c
drivers/mmc/host/meson-mx-sdio.c
drivers/mmc/host/sdhci-acpi.c
drivers/mmc/host/sdhci-msm.c
drivers/mmc/host/sdhci-pci-core.c
drivers/mmc/host/sdhci-pci-gli.c
drivers/mmc/host/sdhci-xenon.c
drivers/mmc/host/sdhci.c
drivers/most/core.c
drivers/mtd/mtdcore.c
drivers/mtd/nand/raw/brcmnand/brcmnand.c
drivers/mtd/nand/spi/core.c
drivers/mtd/ubi/debug.c
drivers/net/bareudp.c
drivers/net/bonding/bond_sysfs_slave.c
drivers/net/can/ifi_canfd/ifi_canfd.c
drivers/net/can/sun4i_can.c
drivers/net/dsa/b53/b53_srab.c
drivers/net/dsa/dsa_loop.c
drivers/net/dsa/mt7530.c
drivers/net/dsa/mt7530.h
drivers/net/dsa/mv88e6xxx/Kconfig
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/ocelot/felix.c
drivers/net/dsa/ocelot/felix.h
drivers/net/dsa/ocelot/felix_vsc9959.c
drivers/net/dsa/sja1105/Kconfig
drivers/net/dsa/sja1105/sja1105_ptp.c
drivers/net/ethernet/amazon/ena/ena_netdev.h
drivers/net/ethernet/apple/bmac.c
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/bgmac-platform.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
drivers/net/ethernet/cadence/Kconfig
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/cavium/Kconfig
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/freescale/Kconfig
drivers/net/ethernet/freescale/dpaa/Kconfig
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/hisilicon/Kconfig
drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
drivers/net/ethernet/huawei/hinic/hinic_main.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
drivers/net/ethernet/marvell/pxa168_eth.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx5/core/Kconfig
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/port.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/events.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
drivers/net/ethernet/mellanox/mlxsw/switchx2.c
drivers/net/ethernet/microchip/encx24j600.c
drivers/net/ethernet/moxa/moxart_ether.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/mscc/ocelot_regs.c
drivers/net/ethernet/natsemi/jazzsonic.c
drivers/net/ethernet/netronome/nfp/abm/main.c
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/ethernet/pensando/ionic/ionic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/sgi/ioc3-eth.c
drivers/net/ethernet/smsc/smsc911x.c
drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
drivers/net/ethernet/stmicro/stmmac/dwmac5.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/sun/cassini.c
drivers/net/ethernet/ti/Kconfig
drivers/net/ethernet/ti/Makefile
drivers/net/ethernet/ti/am65-cpsw-nuss.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpsw_ale.c
drivers/net/ethernet/ti/cpsw_priv.c
drivers/net/ethernet/ti/netcp_ethss.c
drivers/net/ethernet/toshiba/tc35815.c
drivers/net/gtp.c
drivers/net/hamradio/bpqether.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/ipa/gsi.c
drivers/net/ipa/gsi_reg.h
drivers/net/ipa/gsi_trans.c
drivers/net/ipa/ipa_cmd.c
drivers/net/ipa/ipa_endpoint.c
drivers/net/ipa/ipa_smp2p.c
drivers/net/macsec.c
drivers/net/netdevsim/dev.c
drivers/net/phy/broadcom.c
drivers/net/phy/dp83640.c
drivers/net/phy/dp83822.c
drivers/net/phy/dp83tc811.c
drivers/net/phy/marvell10g.c
drivers/net/phy/mscc/mscc.h
drivers/net/phy/mscc/mscc_mac.h
drivers/net/phy/mscc/mscc_macsec.c
drivers/net/phy/mscc/mscc_macsec.h
drivers/net/phy/mscc/mscc_main.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/ppp/pppoe.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/hso.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/virtio_net.c
drivers/net/wireguard/messages.h
drivers/net/wireguard/noise.c
drivers/net/wireguard/noise.h
drivers/net/wireguard/queueing.c
drivers/net/wireguard/queueing.h
drivers/net/wireguard/receive.c
drivers/net/wireguard/selftest/counter.c
drivers/net/wireguard/selftest/ratelimiter.c
drivers/net/wireguard/send.c
drivers/net/wireguard/socket.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/nfc/st21nfca/dep.c
drivers/nvme/host/core.c
drivers/nvme/host/pci.c
drivers/pci/ats.c
drivers/phy/qualcomm/phy-qcom-qusb2.c
drivers/phy/qualcomm/phy-qcom-usb-hs-28nm.c
drivers/pinctrl/actions/pinctrl-s700.c
drivers/pinctrl/intel/pinctrl-baytrail.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/intel/pinctrl-sunrisepoint.c
drivers/pinctrl/mediatek/pinctrl-paris.c
drivers/pinctrl/qcom/pinctrl-msm.c
drivers/platform/chrome/cros_ec_sensorhub.c
drivers/platform/chrome/cros_ec_sensorhub_ring.c
drivers/platform/x86/asus-nb-wmi.c
drivers/platform/x86/intel-uncore-frequency.c
drivers/platform/x86/intel_pmc_core.c
drivers/platform/x86/intel_pmc_core.h
drivers/platform/x86/surface3_power.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/xiaomi-wmi.c
drivers/rapidio/devices/rio_mport_cdev.c
drivers/regulator/core.c
drivers/s390/net/ism_drv.c
drivers/s390/net/qeth_core_main.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/ibmvscsi/ibmvscsi.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_pm.c
drivers/soc/mediatek/mtk-cmdq-helper.c
drivers/staging/gasket/gasket_core.c
drivers/staging/greybus/uart.c
drivers/staging/iio/resolver/ad2s1210.c
drivers/staging/kpc2000/kpc2000/core.c
drivers/staging/ks7010/TODO
drivers/staging/wfx/scan.c
drivers/target/target_core_iblock.c
drivers/target/target_core_transport.c
drivers/thunderbolt/usb4.c
drivers/tty/hvc/Kconfig
drivers/tty/serial/Kconfig
drivers/tty/serial/bcm63xx_uart.c
drivers/tty/serial/sifive.c
drivers/tty/serial/xilinx_uartps.c
drivers/tty/vt/vt.c
drivers/usb/cdns3/gadget.c
drivers/usb/chipidea/ci_hdrc_msm.c
drivers/usb/core/devio.c
drivers/usb/core/hub.c
drivers/usb/core/message.c
drivers/usb/dwc3/Kconfig
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/configfs.c
drivers/usb/gadget/legacy/audio.c
drivers/usb/gadget/legacy/cdc2.c
drivers/usb/gadget/legacy/inode.c
drivers/usb/gadget/legacy/ncm.c
drivers/usb/gadget/legacy/raw_gadget.c
drivers/usb/gadget/udc/atmel_usba_udc.c
drivers/usb/gadget/udc/net2272.c
drivers/usb/gadget/udc/tegra-xudc.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci-ring.c
drivers/usb/mtu3/mtu3_debugfs.c
drivers/usb/phy/phy-twl6030-usb.c
drivers/usb/serial/garmin_gps.c
drivers/usb/serial/qcserial.c
drivers/usb/storage/unusual_uas.h
drivers/usb/typec/mux/intel_pmc_mux.c
drivers/vdpa/vdpa_sim/vdpa_sim.c
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/vhost.c
drivers/vhost/vsock.c
fs/afs/fs_probe.c
fs/afs/fsclient.c
fs/afs/vl_probe.c
fs/afs/yfsclient.c
fs/binfmt_elf.c
fs/btrfs/backref.c
fs/btrfs/block-group.c
fs/btrfs/discard.h
fs/btrfs/disk-io.c
fs/btrfs/relocation.c
fs/btrfs/transaction.c
fs/btrfs/tree-log.c
fs/cachefiles/rdwr.c
fs/ceph/caps.c
fs/ceph/debugfs.c
fs/ceph/mds_client.c
fs/ceph/quota.c
fs/cifs/cifssmb.c
fs/cifs/file.c
fs/cifs/inode.c
fs/configfs/dir.c
fs/coredump.c
fs/eventpoll.c
fs/exec.c
fs/exfat/file.c
fs/exfat/namei.c
fs/exfat/super.c
fs/ext4/ext4.h
fs/ext4/extents.c
fs/ext4/ioctl.c
fs/file.c
fs/gfs2/bmap.c
fs/gfs2/glock.c
fs/gfs2/inode.c
fs/gfs2/log.c
fs/gfs2/lops.c
fs/gfs2/meta_io.c
fs/gfs2/quota.c
fs/gfs2/quota.h
fs/gfs2/super.c
fs/gfs2/util.c
fs/io_uring.c
fs/ioctl.c
fs/iomap/fiemap.c
fs/nfs/fscache.c
fs/nfs/mount_clnt.c
fs/nfs/nfs3acl.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/pagelist.c
fs/nfs/pnfs.c
fs/nfs/pnfs_nfs.c
fs/nfs/super.c
fs/nfs/write.c
fs/notify/fanotify/fanotify.c
fs/ocfs2/dlmfs/dlmfs.c
fs/overlayfs/export.c
fs/overlayfs/inode.c
fs/pnode.c
fs/splice.c
fs/super.c
fs/ubifs/auth.c
fs/ubifs/file.c
fs/ubifs/replay.c
fs/vboxsf/super.c
fs/xattr.c
include/asm-generic/topology.h
include/drm/drm_modes.h
include/linux/amba/bus.h
include/linux/backing-dev-defs.h
include/linux/backing-dev.h
include/linux/brcmphy.h
include/linux/compiler.h
include/linux/cper.h
include/linux/device_cgroup.h
include/linux/dma-buf.h
include/linux/dmaengine.h
include/linux/efi.h
include/linux/fanotify.h
include/linux/fs.h
include/linux/ftrace.h
include/linux/host1x.h
include/linux/i2c-mux.h
include/linux/i2c.h
include/linux/ieee80211.h
include/linux/input/lm8333.h
include/linux/intel-iommu.h
include/linux/intel-svm.h
include/linux/iommu.h
include/linux/kvm_host.h
include/linux/lsm_hook_defs.h
include/linux/memcontrol.h
include/linux/mhi.h
include/linux/mlx5/driver.h
include/linux/mm.h
include/linux/netfilter/nf_conntrack_pptp.h
include/linux/nfs_xdr.h
include/linux/pci-ats.h
include/linux/platform_data/cros_ec_sensorhub.h
include/linux/platform_device.h
include/linux/ptp_clock_kernel.h
include/linux/skmsg.h
include/linux/sunrpc/clnt.h
include/linux/sunrpc/gss_api.h
include/linux/sunrpc/gss_krb5.h
include/linux/sunrpc/xdr.h
include/linux/tcp.h
include/linux/tty.h
include/linux/uacce.h
include/linux/virtio_net.h
include/linux/virtio_vsock.h
include/net/act_api.h
include/net/af_rxrpc.h
include/net/espintcp.h
include/net/flow_offload.h
include/net/inet_ecn.h
include/net/ip6_fib.h
include/net/ip_fib.h
include/net/mptcp.h
include/net/net_namespace.h
include/net/netfilter/nf_conntrack.h
include/net/netfilter/nf_flow_table.h
include/net/nexthop.h
include/net/sch_generic.h
include/net/tcp.h
include/net/tls.h
include/net/udp_tunnel.h
include/rdma/uverbs_std_types.h
include/soc/mscc/ocelot.h
include/sound/rawmidi.h
include/trace/events/gpu_mem.h
include/trace/events/rpcrdma.h
include/trace/events/rxrpc.h
include/trace/events/wbt.h
include/uapi/drm/amdgpu_drm.h
include/uapi/linux/bpf.h
include/uapi/linux/dlm_device.h
include/uapi/linux/dma-buf.h
include/uapi/linux/fiemap.h
include/uapi/linux/hyperv.h
include/uapi/linux/if_arcnet.h
include/uapi/linux/iommu.h
include/uapi/linux/mmc/ioctl.h
include/uapi/linux/net_dropmon.h
include/uapi/linux/netfilter_bridge/ebt_among.h
include/uapi/linux/usb/raw_gadget.h
include/uapi/linux/xfrm.h
include/uapi/scsi/scsi_bsg_fc.h
init/Kconfig
init/initramfs.c
init/main.c
ipc/mqueue.c
ipc/util.c
kernel/bpf/arraymap.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/cgroup/rstat.c
kernel/fork.c
kernel/kcov.c
kernel/power/hibernate.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/trace/Kconfig
kernel/trace/bpf_trace.c
kernel/trace/ftrace_internal.h
kernel/trace/preemptirq_delay_test.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace_boot.c
kernel/trace/trace_kprobe.c
kernel/umh.c
lib/Kconfig.ubsan
lib/kunit/test.c
lib/test_printf.c
lib/vsprintf.c
mm/backing-dev.c
mm/gup.c
mm/kasan/Makefile
mm/kasan/generic.c
mm/kasan/kasan.h
mm/kasan/tags.c
mm/khugepaged.c
mm/memcontrol.c
mm/mremap.c
mm/page_alloc.c
mm/percpu.c
mm/slub.c
mm/vmscan.c
mm/z3fold.c
net/atm/common.c
net/atm/lec.c
net/ax25/af_ax25.c
net/batman-adv/bat_v_ogm.c
net/batman-adv/network-coding.c
net/batman-adv/sysfs.c
net/bridge/br_multicast.c
net/bridge/br_netlink.c
net/bridge/netfilter/nft_reject_bridge.c
net/ceph/osd_client.c
net/core/dev.c
net/core/devlink.c
net/core/drop_monitor.c
net/core/filter.c
net/core/flow_dissector.c
net/core/neighbour.c
net/core/netprio_cgroup.c
net/core/sock.c
net/dsa/dsa2.c
net/dsa/master.c
net/dsa/slave.c
net/dsa/tag_mtk.c
net/ethtool/netlink.c
net/ethtool/strset.c
net/hsr/hsr_slave.c
net/ipv4/cipso_ipv4.c
net/ipv4/devinet.c
net/ipv4/esp4_offload.c
net/ipv4/fib_frontend.c
net/ipv4/fib_trie.c
net/ipv4/inet_connection_sock.c
net/ipv4/ip_vti.c
net/ipv4/ipip.c
net/ipv4/ipmr.c
net/ipv4/netfilter/nf_nat_pptp.c
net/ipv4/nexthop.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_bpf.c
net/ipv4/tcp_input.c
net/ipv6/calipso.c
net/ipv6/esp6_offload.c
net/ipv6/ip6_fib.c
net/ipv6/ip6mr.c
net/ipv6/route.c
net/ipv6/seg6.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/mac80211/mesh_hwmp.c
net/mptcp/crypto.c
net/mptcp/options.c
net/mptcp/protocol.c
net/mptcp/protocol.h
net/mptcp/subflow.c
net/netfilter/ipset/ip_set_list_set.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_pptp.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nf_flow_table_offload.c
net/netfilter/nf_nat_proto.c
net/netfilter/nfnetlink_cthelper.c
net/netfilter/nfnetlink_osf.c
net/netfilter/nft_set_rbtree.c
net/netlabel/netlabel_kapi.c
net/qrtr/ns.c
net/qrtr/qrtr.c
net/rxrpc/Makefile
net/rxrpc/ar-internal.h
net/rxrpc/call_accept.c
net/rxrpc/call_event.c
net/rxrpc/input.c
net/rxrpc/misc.c
net/rxrpc/output.c
net/rxrpc/peer_event.c
net/rxrpc/peer_object.c
net/rxrpc/proc.c
net/rxrpc/rtt.c [new file with mode: 0644]
net/rxrpc/rxkad.c
net/rxrpc/sendmsg.c
net/rxrpc/sysctl.c
net/sched/act_ct.c
net/sched/cls_api.c
net/sched/sch_choke.c
net/sched/sch_fq_codel.c
net/sched/sch_fq_pie.c
net/sched/sch_sfq.c
net/sched/sch_skbprio.c
net/sctp/Kconfig
net/sctp/sm_sideeffect.c
net/sctp/sm_statefuns.c
net/sctp/ulpevent.c
net/sunrpc/auth_gss/auth_gss.c
net/sunrpc/auth_gss/gss_krb5_crypto.c
net/sunrpc/auth_gss/gss_krb5_wrap.c
net/sunrpc/auth_gss/gss_mech_switch.c
net/sunrpc/auth_gss/svcauth_gss.c
net/sunrpc/clnt.c
net/sunrpc/xdr.c
net/sunrpc/xprtrdma/rpc_rdma.c
net/sunrpc/xprtrdma/verbs.c
net/tipc/socket.c
net/tipc/subscr.h
net/tipc/topsrv.c
net/tipc/udp_media.c
net/tls/tls_sw.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/virtio_transport_common.c
net/wireless/core.c
net/x25/x25_subr.c
net/xdp/xdp_umem.c
net/xfrm/espintcp.c
net/xfrm/xfrm_device.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_interface.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_policy.c
samples/bpf/lwt_len_hist_user.c
samples/trace_events/trace-events-sample.h
scripts/checkpatch.pl
scripts/decodecode
scripts/gcc-plugins/Makefile
scripts/gcc-plugins/gcc-common.h
scripts/gcc-plugins/stackleak_plugin.c
scripts/gdb/linux/rbtree.py
scripts/kallsyms.c
security/Makefile
security/apparmor/apparmorfs.c
security/apparmor/audit.c
security/apparmor/domain.c
security/commoncap.c
security/device_cgroup.c
security/integrity/evm/evm_crypto.c
security/integrity/evm/evm_main.c
security/integrity/evm/evm_secfs.c
security/integrity/ima/ima_crypto.c
security/integrity/ima/ima_fs.c
security/security.c
security/selinux/hooks.c
security/selinux/ss/conditional.c
sound/core/hwdep.c
sound/core/oss/pcm_plugin.c
sound/core/pcm_lib.c
sound/core/rawmidi.c
sound/firewire/amdtp-stream-trace.h
sound/isa/opti9xx/miro.c
sound/isa/opti9xx/opti92x-ad1848.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/ice1712/ice1712.c
sound/usb/line6/podhd.c
sound/usb/mixer.c
sound/usb/mixer_maps.c
sound/usb/quirks-table.h
sound/usb/quirks.c
tools/arch/x86/include/uapi/asm/unistd.h
tools/bootconfig/main.c
tools/cgroup/iocost_monitor.py
tools/lib/bpf/bpf_tracing.h
tools/objtool/check.c
tools/objtool/elf.h
tools/testing/selftests/bpf/prog_tests/mmap.c
tools/testing/selftests/bpf/progs/test_mmap.c
tools/testing/selftests/bpf/progs/test_overhead.c
tools/testing/selftests/bpf/verifier/bounds.c
tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
tools/testing/selftests/drivers/net/netdevsim/devlink_trap.sh
tools/testing/selftests/filesystems/epoll/epoll_wakeup_test.c
tools/testing/selftests/ftrace/ftracetest
tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter-stack.tc
tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter.tc
tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
tools/testing/selftests/ftrace/test.d/ftrace/func-filter-notrace-pid.tc
tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc
tools/testing/selftests/ftrace/test.d/ftrace/func-filter-stacktrace.tc
tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
tools/testing/selftests/ftrace/test.d/ftrace/func_mod_trace.tc
tools/testing/selftests/ftrace/test.d/ftrace/func_profiler.tc
tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc
tools/testing/selftests/ftrace/test.d/ftrace/func_stack_tracer.tc
tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc
tools/testing/selftests/ftrace/test.d/functions
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_ftrace.tc
tools/testing/selftests/ftrace/test.d/preemptirq/irqsoff_tracer.tc
tools/testing/selftests/gpio/Makefile
tools/testing/selftests/intel_pstate/Makefile
tools/testing/selftests/kselftest_deps.sh [new file with mode: 0755]
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/include/evmcs.h
tools/testing/selftests/kvm/include/kvm_util.h
tools/testing/selftests/kvm/lib/kvm_util.c
tools/testing/selftests/kvm/lib/x86_64/vmx.c
tools/testing/selftests/kvm/x86_64/debug_regs.c [new file with mode: 0644]
tools/testing/selftests/lkdtm/run.sh
tools/testing/selftests/memfd/Makefile
tools/testing/selftests/net/mptcp/pm_netlink.sh
tools/testing/selftests/net/tcp_mmap.c
tools/testing/selftests/nsfs/pidns.c
tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json [new file with mode: 0644]
tools/testing/selftests/vm/.gitignore
tools/testing/selftests/vm/write_to_hugetlbfs.c
tools/testing/selftests/wireguard/netns.sh
tools/testing/selftests/wireguard/qemu/Makefile
tools/testing/selftests/wireguard/qemu/arch/powerpc64le.config
tools/testing/selftests/wireguard/qemu/debug.config
virt/kvm/arm/hyp/aarch32.c
virt/kvm/arm/psci.c
virt/kvm/arm/vgic/vgic-init.c
virt/kvm/arm/vgic/vgic-its.c
virt/kvm/arm/vgic/vgic-mmio-v2.c
virt/kvm/arm/vgic/vgic-mmio-v3.c
virt/kvm/arm/vgic/vgic-mmio.c
virt/kvm/arm/vgic/vgic-mmio.h
virt/kvm/kvm_main.c

index db3754a4101820c9bba4f3e6b5682747ae0b02be..4f906b4e9785f7628ff5fe79fd4da3654ffc7bdc 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -288,6 +288,8 @@ Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
 Takashi YOSHII <takashi.yoshii.zj@renesas.com>
 Will Deacon <will@kernel.org> <will.deacon@arm.com>
+Wolfram Sang <wsa@kernel.org> <wsa@the-dreams.de>
+Wolfram Sang <wsa@kernel.org> <w.sang@pengutronix.de>
 Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
 Yusuke Goda <goda.yusuke@renesas.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
index c00f9f11e3f3f6133d4b741244e034ae2a3c430e..8439d2ae689b4e2bc99f4110340c4cfef8f6d1e4 100644 (file)
@@ -182,12 +182,15 @@ fix_padding
        space-efficient. If this option is not present, large padding is
        used - that is for compatibility with older kernels.
 
-
-The journal mode (D/J), buffer_sectors, journal_watermark, commit_time can
-be changed when reloading the target (load an inactive table and swap the
-tables with suspend and resume). The other arguments should not be changed
-when reloading the target because the layout of disk data depend on them
-and the reloaded target would be non-functional.
+allow_discards
+       Allow block discard requests (a.k.a. TRIM) for the integrity device.
+       Discards are only allowed on devices using internal hash.
+
+The journal mode (D/J), buffer_sectors, journal_watermark, commit_time and
+allow_discards can be changed when reloading the target (load an inactive
+table and swap the tables with suspend and resume). The other arguments
+should not be changed when reloading the target because the layout of disk
+data depends on them and the reloaded target would be non-functional.
 
 
 The layout of the formatted block device:
index 8ebe46b1af39d88171acc6055759db0b8799ff20..5dfcc4592b23efe335673ea13ba34fbbe4da6338 100644 (file)
@@ -112,6 +112,20 @@ used when printing stack backtraces. The specifier takes into
 consideration the effect of compiler optimisations which may occur
 when tail-calls are used and marked with the noreturn GCC attribute.
 
+Probed Pointers from BPF / tracing
+----------------------------------
+
+::
+
+       %pks    kernel string
+       %pus    user string
+
+The ``k`` and ``u`` specifiers are used for printing previously probed
+memory from either kernel memory (k) or user memory (u). The subsequent
+``s`` specifier results in printing a string. For direct use in regular
+vsnprintf() the (k) and (u) annotations are ignored; however, when used
+from BPF's bpf_trace_printk(), for example, the string is read from the
+memory it points to without faulting.
+
 Kernel Pointers
 ---------------
 
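A minimal sketch of how the new specifiers can be exercised from BPF via
bpf_trace_printk(); the do_sys_openat2 kprobe and its argument layout are
illustrative assumptions here, not part of the patch itself::

    // SPDX-License-Identifier: GPL-2.0
    /* Sketch: print a user-space string using the new %pus specifier.
     * Build as a libbpf-style program, e.g. with -D__TARGET_ARCH_x86
     * so that PT_REGS_PARM2() resolves for the target architecture. */
    #include <linux/bpf.h>
    #include <linux/ptrace.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    SEC("kprobe/do_sys_openat2")
    int trace_open(struct pt_regs *ctx)
    {
            /* 2nd argument of do_sys_openat2() is the user filename */
            const char *filename = (const char *)PT_REGS_PARM2(ctx);
            const char fmt[] = "openat2: %pus\n";

            /* %pus probes the user memory without faulting */
            bpf_trace_printk(fmt, sizeof(fmt), filename);
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";
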
index e77b08ebcd06502c52d2e21ed37f6fee0e5aba18..ee1754739b4b1a18ed07570936ef045ada4fb2d7 100644 (file)
@@ -10,7 +10,8 @@ Required properties:
 - compatible :
        - "fsl,vf610-edma" for eDMA used similar to that on Vybrid vf610 SoC
        - "fsl,imx7ulp-edma" for eDMA2 used similar to that on i.mx7ulp
-       - "fsl,fsl,ls1028a-edma" for eDMA used similar to that on Vybrid vf610 SoC
+       - "fsl,ls1028a-edma" followed by "fsl,vf610-edma" for eDMA used on the
+         LS1028A SoC.
 - reg : Specifies base physical address(s) and size of the eDMA registers.
        The 1st region is eDMA control register's address and size.
        The 2nd and the 3rd regions are programmable channel multiplexing
index 86cfb599256ed5ddd67191164b2fcd01745ae714..371f187731983f2450f56f324f2e3ad23c5ec4df 100644 (file)
@@ -22,9 +22,7 @@ properties:
     const: socionext,uniphier-xdmac
 
   reg:
-    items:
-      - description: XDMAC base register region (offset and length)
-      - description: XDMAC extension register region (offset and length)
+    maxItems: 1
 
   interrupts:
     maxItems: 1
@@ -49,12 +47,13 @@ required:
   - reg
   - interrupts
   - "#dma-cells"
+  - dma-channels
 
 examples:
   - |
     xdmac: dma-controller@5fc10000 {
         compatible = "socionext,uniphier-xdmac";
-        reg = <0x5fc10000 0x1000>, <0x5fc20000 0x800>;
+        reg = <0x5fc10000 0x5300>;
         interrupts = <0 188 4>;
         #dma-cells = <2>;
         dma-channels = <16>;
diff --git a/Documentation/devicetree/bindings/iommu/allwinner,sun50i-h6-iommu.yaml b/Documentation/devicetree/bindings/iommu/allwinner,sun50i-h6-iommu.yaml
new file mode 100644 (file)
index 0000000..5e125cf
--- /dev/null
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iommu/allwinner,sun50i-h6-iommu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner H6 IOMMU Device Tree Bindings
+
+maintainers:
+  - Chen-Yu Tsai <wens@csie.org>
+  - Maxime Ripard <mripard@kernel.org>
+
+properties:
+  "#iommu-cells":
+    const: 1
+    description:
+      The content of the cell is the master ID.
+
+  compatible:
+    const: allwinner,sun50i-h6-iommu
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  resets:
+    maxItems: 1
+
+required:
+  - "#iommu-cells"
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - resets
+
+additionalProperties: false
+
+examples:
+  - |
+      #include <dt-bindings/interrupt-controller/arm-gic.h>
+      #include <dt-bindings/interrupt-controller/irq.h>
+
+      #include <dt-bindings/clock/sun50i-h6-ccu.h>
+      #include <dt-bindings/reset/sun50i-h6-ccu.h>
+
+      iommu: iommu@30f0000 {
+          compatible = "allwinner,sun50i-h6-iommu";
+          reg = <0x030f0000 0x10000>;
+          interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
+          clocks = <&ccu CLK_BUS_IOMMU>;
+          resets = <&ccu RST_BUS_IOMMU>;
+          #iommu-cells = <1>;
+      };
+
+...
index 5201bc15fdd67c1df1ba1b49715d4563cc14dbbd..cfd1afdc6e9401b9a0972398231542df6037548c 100644 (file)
@@ -110,6 +110,9 @@ Ethernet switch connected via MDIO to the host, CPU port wired to eth0:
                        #size-cells = <0>;
 
                        ports {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+
                                port0@0 {
                                        reg = <0>;
                                        label = "lan1";
index 5b58fc4e1268ce92bb852765e2403dc4d6cf227e..4574352d6ff4e8b9ab90bd51449bb59f5d4c01cb 100644 (file)
@@ -61,8 +61,8 @@ The ``ice`` driver reports the following versions
       - running
       - ICE OS Default Package
       - The name of the DDP package that is active in the device. The DDP
-        package is loaded by the driver during initialization. Each varation
-        of DDP package shall have a unique name.
+        package is loaded by the driver during initialization. Each
+        variation of the DDP package has a unique name.
     * - ``fw.app``
       - running
       - 1.3.1.0
index acb2f1b36350fec4eeab9a7b6129a0334f87945b..17a8e584f15f6ccfd770e934d35eb4702253166d 100644 (file)
@@ -84,15 +84,20 @@ Get a decent editor and don't leave whitespace at the end of lines.
 Coding style is all about readability and maintainability using commonly
 available tools.
 
-The limit on the length of lines is 80 columns and this is a strongly
-preferred limit.
-
-Statements longer than 80 columns will be broken into sensible chunks, unless
-exceeding 80 columns significantly increases readability and does not hide
-information. Descendants are always substantially shorter than the parent and
-are placed substantially to the right. The same applies to function headers
-with a long argument list. However, never break user-visible strings such as
-printk messages, because that breaks the ability to grep for them.
+The preferred limit on the length of a single line is 80 columns.
+
+Statements longer than 80 columns should be broken into sensible chunks,
+unless exceeding 80 columns significantly increases readability and does
+not hide information.
+
+Descendants are always substantially shorter than the parent and are
+placed substantially to the right.  A very commonly used style
+is to align descendants to a function open parenthesis.
+
+These same rules are applied to function headers with a long argument list.
+
+However, never break user-visible strings such as printk messages because
+that breaks the ability to grep for them.
 
 
 3) Placing Braces and Spaces
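A short illustration of the wrapping style described above; the function
and the enqueue_for_later() helper are made up for the example::

    #include <linux/netdevice.h>
    #include <linux/printk.h>
    #include <linux/skbuff.h>

    /* hypothetical helper, stubbed for the example */
    static int enqueue_for_later(struct net_device *dev, struct sk_buff *skb,
                                 unsigned int queue)
    {
            return 0;
    }

    static int example_xmit(struct net_device *dev, struct sk_buff *skb,
                            unsigned int queue, bool more)
    {
            /* descendants align with the open parenthesis above */
            if (more)
                    return enqueue_for_later(dev, skb, queue);

            /* user-visible strings are never broken, even past 80 columns */
            pr_warn("example_xmit: queue %u stalled, dropping oversized packet\n", queue);
            return 0;
    }
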
index 9e78cb858f861b7ca52b987fcaed9942f3c2610c..68d879a8009ecef069d46c9a241210483b83ef0a 100644 (file)
@@ -27,9 +27,8 @@ differences are:
 3. Raw Gadget provides a way to select a UDC device/driver to bind to,
    while GadgetFS currently binds to the first available UDC.
 
-4. Raw Gadget uses predictable endpoint names (handles) across different
-   UDCs (as long as UDCs have enough endpoints of each required transfer
-   type).
+4. Raw Gadget explicitly exposes information about endpoint addresses and
+   capabilities, allowing a user to write UDC-agnostic gadgets.
 
 5. Raw Gadget has ioctl-based interface instead of a filesystem-based one.
 
@@ -50,12 +49,36 @@ The typical usage of Raw Gadget looks like:
    Raw Gadget and react to those depending on what kind of USB device
    needs to be emulated.
 
+Note that some UDC drivers have fixed addresses assigned to endpoints, and
+therefore arbitrary endpoint addresses can't be used in the descriptors.
+Nevertheless, Raw Gadget provides a UDC-agnostic way to write USB gadgets.
+Once a USB_RAW_EVENT_CONNECT event is received via USB_RAW_IOCTL_EVENT_FETCH,
+the USB_RAW_IOCTL_EPS_INFO ioctl can be used to find out information about
+the endpoints that the UDC driver has. Based on that information, the user
+must choose UDC endpoints that will be used for the gadget being emulated,
+and properly assign addresses in the endpoint descriptors.
+
+You can find usage examples (along with a test suite) here:
+
+https://github.com/xairy/raw-gadget
+
+Internal details
+~~~~~~~~~~~~~~~~
+
+Currently every endpoint read/write ioctl submits a USB request and waits until
+its completion. This is the desired mode for coverage-guided fuzzing (as we'd
+like all USB request processing to happen during the lifetime of a syscall),
+and must be kept in the implementation. (This might be slow for real-world
+applications, thus the O_NONBLOCK improvement suggestion below.)
+
 Potential future improvements
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-- Implement ioctl's for setting/clearing halt status on endpoints.
-
-- Reporting more events (suspend, resume, etc.) through
-  USB_RAW_IOCTL_EVENT_FETCH.
+- Report more events (suspend, resume, etc.) through USB_RAW_IOCTL_EVENT_FETCH.
 
 - Support O_NONBLOCK I/O.
+
+- Support USB 3 features (accept SS endpoint companion descriptor when
+  enabling endpoints; allow providing stream_id for bulk transfers).
+
+- Support ISO transfer features (expose frame_number for completed requests).
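A sketch of the endpoint-discovery flow described above, based on the
usb_raw_eps_info layout in include/uapi/linux/usb/raw_gadget.h; fd is
assumed to be an open /dev/raw-gadget descriptor after a
USB_RAW_EVENT_CONNECT has been fetched, and error handling is omitted::

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/usb/raw_gadget.h>

    /* Pick a UDC endpoint usable as a bulk-IN endpoint for the gadget's
     * descriptors; returns the endpoint address, or -1 if none fits. */
    static int find_bulk_in_addr(int fd)
    {
            struct usb_raw_eps_info info;
            int i, num;

            memset(&info, 0, sizeof(info));
            num = ioctl(fd, USB_RAW_IOCTL_EPS_INFO, &info);
            for (i = 0; i < num; i++) {
                    if (info.eps[i].caps.type_bulk && info.eps[i].caps.dir_in)
                            return info.eps[i].addr; /* bEndpointAddress */
            }
            return -1;
    }
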
index dcc252634cf96f09d8f02da4b30afb36b924308a..b6833c7bb474192b1ac78b26ab073c9bfee5ada1 100644 (file)
@@ -28,3 +28,5 @@ KVM
    arm/index
 
    devices/index
+
+   running-nested-guests
diff --git a/Documentation/virt/kvm/running-nested-guests.rst b/Documentation/virt/kvm/running-nested-guests.rst
new file mode 100644 (file)
index 0000000..d0a1fc7
--- /dev/null
@@ -0,0 +1,276 @@
+==============================
+Running nested guests with KVM
+==============================
+
+A nested guest is a guest that runs inside another guest (the outer
+guest can be KVM-based or a different hypervisor).  The straightforward
+example is a KVM guest that in turn runs on a KVM guest (the rest of
+this document is built on this example)::
+
+              .----------------.  .----------------.
+              |                |  |                |
+              |      L2        |  |      L2        |
+              | (Nested Guest) |  | (Nested Guest) |
+              |                |  |                |
+              |----------------'--'----------------|
+              |                                    |
+              |       L1 (Guest Hypervisor)        |
+              |          KVM (/dev/kvm)            |
+              |                                    |
+      .------------------------------------------------------.
+      |                 L0 (Host Hypervisor)                 |
+      |                    KVM (/dev/kvm)                    |
+      |------------------------------------------------------|
+      |        Hardware (with virtualization extensions)     |
+      '------------------------------------------------------'
+
+Terminology:
+
+- L0 – level-0; the bare metal host, running KVM
+
+- L1 – level-1 guest; a VM running on L0; also called the "guest
+  hypervisor", as it itself is capable of running KVM.
+
+- L2 – level-2 guest; a VM running on L1, this is the "nested guest"
+
+.. note:: The above diagram is modelled after the x86 architecture;
+          s390x, ppc64 and other architectures are likely to have
+          a different design for nesting.
+
+          For example, s390x always has an LPAR (LogicalPARtition)
+          hypervisor running on bare metal, adding another layer and
+          resulting in at least four levels in a nested setup — L0 (bare
+          metal, running the LPAR hypervisor), L1 (host hypervisor), L2
+          (guest hypervisor), L3 (nested guest).
+
+          This document will stick with the three-level terminology (L0,
+          L1, and L2) for all architectures, and will largely focus on
+          x86.
+
+
+Use Cases
+---------
+
+There are several scenarios where nested KVM can be useful, to name a
+few:
+
+- As a developer, you want to test your software on different operating
+  systems (OSes).  Instead of renting multiple VMs from a Cloud
+  Provider, using nested KVM lets you rent a large enough "guest
+  hypervisor" (level-1 guest).  This in turn allows you to create
+  multiple nested guests (level-2 guests), running different OSes, on
+  which you can develop and test your software.
+
+- Live migration of "guest hypervisors" and their nested guests, for
+  load balancing, disaster recovery, etc.
+
+- VM image creation tools (e.g. ``virt-install``) often run
+  their own VM, and users expect these to work inside a VM.
+
+- Some OSes use virtualization internally for security (e.g. to let
+  applications run safely in isolation).
+
+
+Enabling "nested" (x86)
+-----------------------
+
+From Linux kernel v4.19 onwards, the ``nested`` KVM parameter is enabled
+by default for Intel and AMD.  (Though your Linux distribution might
+override this default.)
+
+In case you are running a Linux kernel older than v4.19, to enable
+nesting, set the ``nested`` KVM module parameter to ``Y`` or ``1``.  To
+persist this setting across reboots, you can add it in a config file, as
+shown below:
+
+1. On the bare metal host (L0), list the kernel modules and ensure that
+   the KVM modules are loaded::
+
+    $ lsmod | grep -i kvm
+    kvm_intel             133627  0
+    kvm                   435079  1 kvm_intel
+
+2. Show information for ``kvm_intel`` module::
+
+    $ modinfo kvm_intel | grep -i nested
+    parm:           nested:bool
+
+3. For the nested KVM configuration to persist across reboots, place the
+   below in ``/etc/modprobe.d/kvm_intel.conf`` (create the file if it
+   doesn't exist)::
+
+    $ cat /etc/modprobe.d/kvm_intel.conf
+    options kvm-intel nested=y
+
+4. Unload and re-load the KVM Intel module::
+
+    $ sudo rmmod kvm-intel
+    $ sudo modprobe kvm-intel
+
+5. Verify that the ``nested`` parameter for KVM is enabled::
+
+    $ cat /sys/module/kvm_intel/parameters/nested
+    Y
+
+For AMD hosts, the process is the same as above, except that the module
+name is ``kvm-amd``.
+
+
+Additional nested-related kernel parameters (x86)
+-------------------------------------------------
+
+If your hardware is sufficiently advanced (an Intel Haswell processor or
+newer, which has more recent hardware virt extensions), the following
+additional features will also be enabled by default: Shadow VMCS
+(Virtual Machine Control Structure) and APIC virtualization on your bare
+metal host (L0).  Parameters for Intel hosts::
+
+    $ cat /sys/module/kvm_intel/parameters/enable_shadow_vmcs
+    Y
+
+    $ cat /sys/module/kvm_intel/parameters/enable_apicv
+    Y
+
+    $ cat /sys/module/kvm_intel/parameters/ept
+    Y
+
+.. note:: If you suspect your L2 (i.e. nested guest) is running slower,
+          ensure the above are enabled (particularly
+          ``enable_shadow_vmcs`` and ``ept``).
+
+
+Starting a nested guest (x86)
+-----------------------------
+
+Once your bare metal host (L0) is configured for nesting, you should be
+able to start an L1 guest with::
+
+    $ qemu-kvm -cpu host [...]
+
+The above will pass through the host CPU's capabilities as-is to the
+guest; or, for better live migration compatibility, use a named CPU
+model supported by QEMU, e.g.::
+
+    $ qemu-kvm -cpu Haswell-noTSX-IBRS,vmx=on
+
+Either way, the guest hypervisor will subsequently be capable of running
+a nested guest with accelerated KVM.
+
+
+Enabling "nested" (s390x)
+-------------------------
+
+1. On the host hypervisor (L0), enable the ``nested`` parameter on
+   s390x::
+
+    $ rmmod kvm
+    $ modprobe kvm nested=1
+
+.. note:: On s390x, the kernel parameter ``hpage`` is mutually exclusive
+          with the ``nested`` parameter — i.e. to be able to enable
+          ``nested``, the ``hpage`` parameter *must* be disabled.
+
+2. The guest hypervisor (L1) must be provided with the ``sie`` CPU
+   feature — with QEMU, this can be done by using "host passthrough"
+   (via the command-line ``-cpu host``).
+
+3. Now the KVM module can be loaded in the L1 (guest hypervisor)::
+
+    $ modprobe kvm
+
+
+Live migration with nested KVM
+------------------------------
+
+Migrating an L1 guest, with a *live* nested guest in it, to another
+bare metal host, works as of Linux kernel 5.3 and QEMU 4.2.0 for
+Intel x86 systems, and even on older versions for s390x.
+
+On AMD systems, once an L1 guest has started an L2 guest, the L1 guest
+should no longer be migrated or saved (refer to QEMU documentation on
+"savevm"/"loadvm") until the L2 guest shuts down.  Attempting to migrate
+or save-and-load an L1 guest while an L2 guest is running will result in
+undefined behavior.  You might see a ``kernel BUG!`` entry in ``dmesg``, a
+kernel 'oops', or an outright kernel panic.  Such a migrated or loaded L1
+guest can no longer be considered stable or secure, and must be restarted.
+Migrating an L1 guest merely configured to support nesting, while not
+actually running L2 guests, is expected to function normally even on AMD
+systems but may fail once guests are started.
+
+Migrating an L2 guest is always expected to succeed, so all the following
+scenarios should work even on AMD systems:
+
+- Migrating a nested guest (L2) to another L1 guest on the *same* bare
+  metal host.
+
+- Migrating a nested guest (L2) to another L1 guest on a *different*
+  bare metal host.
+
+- Migrating a nested guest (L2) to a bare metal host.
+
+Reporting bugs from nested setups
+---------------------------------
+
+Debugging "nested" problems can involve sifting through log files across
+L0, L1 and L2; this can result in tedious back-and-forth between the bug
+reporter and the bug fixer.
+
+- Mention that you are in a "nested" setup.  If you are running any kind
+  of "nesting" at all, say so.  Unfortunately, this needs to be called
+  out because when reporting bugs, people tend to forget to even
+  *mention* that they're using nested virtualization.
+
+- Ensure you are actually running KVM on KVM.  Sometimes people do not
+  have KVM enabled for their guest hypervisor (L1), which results in
+  them running with pure emulation, or what QEMU calls "TCG", while
+  they think they're running nested KVM.  This confuses "nested virt"
+  (which could also mean QEMU on KVM) with "nested KVM" (KVM on KVM).
+
+Information to collect (generic)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The following is not an exhaustive list, but a very good starting point
+(a small script to collect most of it is sketched after the list):
+
+  - Kernel, libvirt, and QEMU version from L0
+
+  - Kernel, libvirt and QEMU version from L1
+
+  - QEMU command-line of L1 -- when using libvirt, you'll find it here:
+    ``/var/log/libvirt/qemu/instance.log``
+
+  - QEMU command-line of L2 -- as above, when using libvirt, get the
+    complete libvirt-generated QEMU command-line
+
+  - ``cat /proc/cpuinfo`` from L0
+
+  - ``cat /proc/cpuinfo`` from L1
+
+  - ``lscpu`` from L0
+
+  - ``lscpu`` from L1
+
+  - Full ``dmesg`` output from L0
+
+  - Full ``dmesg`` output from L1
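+
+Most of the above can be gathered with a small script, run once on L0
+and once in L1 (a minimal sketch assuming standard utilities; adjust
+the output file name as needed)::
+
+    $ for c in "uname -r" "lscpu" "cat /proc/cpuinfo" "dmesg"; do \
+          echo "== $c =="; $c; \
+      done > nested-debug-$(hostname).txt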
+
+x86-specific info to collect
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Both the commands below, ``x86info`` and ``dmidecode``, should be
+available on most Linux distributions under those names:
+
+  - Output of: ``x86info -a`` from L0
+
+  - Output of: ``x86info -a`` from L1
+
+  - Output of: ``dmidecode`` from L0
+
+  - Output of: ``dmidecode`` from L1
+
+s390x-specific info to collect
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Along with the generic details mentioned earlier, the following is
+also recommended:
+
+  - ``/proc/sysinfo`` from L1; this will also include the info from L0
index 26f281d9f32a4ad35d744a90353c93177b309287..50659d76976b712dffa0e14e287f2e142235116a 100644 (file)
@@ -3657,7 +3657,7 @@ L:        linux-btrfs@vger.kernel.org
 S:     Maintained
 W:     http://btrfs.wiki.kernel.org/
 Q:     http://patchwork.kernel.org/project/linux-btrfs/list/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux.git
 F:     Documentation/filesystems/btrfs.rst
 F:     fs/btrfs/
 F:     include/linux/btrfs*
@@ -3936,11 +3936,9 @@ F:       arch/powerpc/platforms/cell/
 CEPH COMMON CODE (LIBCEPH)
 M:     Ilya Dryomov <idryomov@gmail.com>
 M:     Jeff Layton <jlayton@kernel.org>
-M:     Sage Weil <sage@redhat.com>
 L:     ceph-devel@vger.kernel.org
 S:     Supported
 W:     http://ceph.com/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
 T:     git git://github.com/ceph/ceph-client.git
 F:     include/linux/ceph/
 F:     include/linux/crush/
@@ -3948,12 +3946,10 @@ F:      net/ceph/
 
 CEPH DISTRIBUTED FILE SYSTEM CLIENT (CEPH)
 M:     Jeff Layton <jlayton@kernel.org>
-M:     Sage Weil <sage@redhat.com>
 M:     Ilya Dryomov <idryomov@gmail.com>
 L:     ceph-devel@vger.kernel.org
 S:     Supported
 W:     http://ceph.com/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
 T:     git git://github.com/ceph/ceph-client.git
 F:     Documentation/filesystems/ceph.rst
 F:     fs/ceph/
@@ -5511,10 +5507,10 @@ F:      drivers/gpu/drm/vboxvideo/
 
 DRM DRIVER FOR VMWARE VIRTUAL GPU
 M:     "VMware Graphics" <linux-graphics-maintainer@vmware.com>
-M:     Thomas Hellstrom <thellstrom@vmware.com>
+M:     Roland Scheidegger <sroland@vmware.com>
 L:     dri-devel@lists.freedesktop.org
 S:     Supported
-T:     git git://people.freedesktop.org/~thomash/linux
+T:     git git://people.freedesktop.org/~sroland/linux
 F:     drivers/gpu/drm/vmwgfx/
 F:     include/uapi/drm/vmwgfx_drm.h
 
@@ -5935,9 +5931,9 @@ F:        lib/dynamic_debug.c
 DYNAMIC INTERRUPT MODERATION
 M:     Tal Gilboa <talgi@mellanox.com>
 S:     Maintained
+F:     Documentation/networking/net_dim.rst
 F:     include/linux/dim.h
 F:     lib/dim/
-F:     Documentation/networking/net_dim.rst
 
 DZ DECSTATION DZ11 SERIAL DRIVER
 M:     "Maciej W. Rozycki" <macro@linux-mips.org>
@@ -7119,9 +7115,10 @@ F:       include/uapi/asm-generic/
 
 GENERIC PHY FRAMEWORK
 M:     Kishon Vijay Abraham I <kishon@ti.com>
+M:     Vinod Koul <vkoul@kernel.org>
 L:     linux-kernel@vger.kernel.org
 S:     Supported
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kishon/linux-phy.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/phy/linux-phy.git
 F:     Documentation/devicetree/bindings/phy/
 F:     drivers/phy/
 F:     include/linux/phy/
@@ -7746,11 +7743,6 @@ L:       platform-driver-x86@vger.kernel.org
 S:     Orphan
 F:     drivers/platform/x86/tc1100-wmi.c
 
-HP100: Driver for HP 10/100 Mbit/s Voice Grade Network Adapter Series
-M:     Jaroslav Kysela <perex@perex.cz>
-S:     Obsolete
-F:     drivers/staging/hp/hp100.*
-
 HPET:  High Precision Event Timers driver
 M:     Clemens Ladisch <clemens@ladisch.de>
 S:     Maintained
@@ -7837,7 +7829,7 @@ T:        git git://linuxtv.org/media_tree.git
 F:     drivers/media/platform/sti/hva
 
 HWPOISON MEMORY FAILURE HANDLING
-M:     Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+M:     Naoya Horiguchi <naoya.horiguchi@nec.com>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     mm/hwpoison-inject.c
@@ -7949,7 +7941,7 @@ F:        Documentation/i2c/busses/i2c-parport.rst
 F:     drivers/i2c/busses/i2c-parport.c
 
 I2C SUBSYSTEM
-M:     Wolfram Sang <wsa@the-dreams.de>
+M:     Wolfram Sang <wsa@kernel.org>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
 W:     https://i2c.wiki.kernel.org/
@@ -9193,6 +9185,11 @@ L:       kexec@lists.infradead.org
 S:     Maintained
 W:     http://lse.sourceforge.net/kdump/
 F:     Documentation/admin-guide/kdump/
+F:     fs/proc/vmcore.c
+F:     include/linux/crash_core.h
+F:     include/linux/crash_dump.h
+F:     include/uapi/linux/vmcore.h
+F:     kernel/crash_*.c
 
 KEENE FM RADIO TRANSMITTER DRIVER
 M:     Hans Verkuil <hverkuil@xs4all.nl>
@@ -10670,6 +10667,13 @@ L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/mediatek/
 
+MEDIATEK I2C CONTROLLER DRIVER
+M:     Qii Wang <qii.wang@mediatek.com>
+L:     linux-i2c@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt
+F:     drivers/i2c/busses/i2c-mt65xx.c
+
 MEDIATEK JPEG DRIVER
 M:     Rick Chang <rick.chang@mediatek.com>
 M:     Bin Liu <bin.liu@mediatek.com>
@@ -11718,8 +11722,9 @@ F:      net/core/drop_monitor.c
 
 NETWORKING DRIVERS
 M:     "David S. Miller" <davem@davemloft.net>
+M:     Jakub Kicinski <kuba@kernel.org>
 L:     netdev@vger.kernel.org
-S:     Odd Fixes
+S:     Maintained
 W:     http://www.linuxfoundation.org/en/Net
 Q:     http://patchwork.ozlabs.org/project/netdev/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
@@ -14102,12 +14107,10 @@ F:    drivers/media/radio/radio-tea5777.c
 
 RADOS BLOCK DEVICE (RBD)
 M:     Ilya Dryomov <idryomov@gmail.com>
-M:     Sage Weil <sage@redhat.com>
 R:     Dongsheng Yang <dongsheng.yang@easystack.cn>
 L:     ceph-devel@vger.kernel.org
 S:     Supported
 W:     http://ceph.com/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
 T:     git git://github.com/ceph/ceph-client.git
 F:     Documentation/ABI/testing/sysfs-bus-rbd
 F:     drivers/block/rbd.c
@@ -14644,6 +14647,7 @@ F:      drivers/iommu/s390-iommu.c
 
 S390 IUCV NETWORK LAYER
 M:     Julian Wiedmann <jwi@linux.ibm.com>
+M:     Karsten Graul <kgraul@linux.ibm.com>
 M:     Ursula Braun <ubraun@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
@@ -14654,6 +14658,7 @@ F:      net/iucv/
 
 S390 NETWORK DRIVERS
 M:     Julian Wiedmann <jwi@linux.ibm.com>
+M:     Karsten Graul <kgraul@linux.ibm.com>
 M:     Ursula Braun <ubraun@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
index 679f302a8b8bd47288753e065af1273bbf149bfa..b668725a2a62a175f471b68bc035840f60dae5de 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 7
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION =
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
@@ -729,10 +729,6 @@ else ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
 KBUILD_CFLAGS += -Os
 endif
 
-ifdef CONFIG_CC_DISABLE_WARN_MAYBE_UNINITIALIZED
-KBUILD_CFLAGS   += -Wno-maybe-uninitialized
-endif
-
 # Tell gcc to never replace conditional load with a non-conditional one
 KBUILD_CFLAGS  += $(call cc-option,--param=allow-store-data-races=0)
 KBUILD_CFLAGS  += $(call cc-option,-fno-allow-store-data-races)
@@ -881,6 +877,17 @@ KBUILD_CFLAGS += -Wno-pointer-sign
 # disable stringop warnings in gcc 8+
 KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation)
 
+# We'll want to enable this eventually, but it's not going away for 5.7 at least
+KBUILD_CFLAGS += $(call cc-disable-warning, zero-length-bounds)
+KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds)
+KBUILD_CFLAGS += $(call cc-disable-warning, stringop-overflow)
+
+# Another good warning that we'll want to enable eventually
+KBUILD_CFLAGS += $(call cc-disable-warning, restrict)
+
+# Enabled with W=2, disabled by default as noisy
+KBUILD_CFLAGS += $(call cc-disable-warning, maybe-uninitialized)
+
 # disable invalid "can't wrap" optimizations for signed / pointers
 KBUILD_CFLAGS  += $(call cc-option,-fno-strict-overflow)
 
index 0974226fab550ff384d59c8514fcb79139e7c1d4..aa000075a575744427c42f2399e87b5eced91277 100644 (file)
@@ -65,6 +65,7 @@ CONFIG_DRM_UDL=y
 CONFIG_DRM_ETNAVIV=y
 CONFIG_FB=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_USB=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
 CONFIG_USB_OHCI_HCD=y
index e1aa212ca6ebd1203e34dfc0c0f310af47964f77..cd5636dfeb6f4119013ace57fa5662e129fe7947 100644 (file)
 
 /* clobbers r5 register */
 .macro DSP_EARLY_INIT
+#ifdef CONFIG_ISA_ARCV2
        lr      r5, [ARC_AUX_DSP_BUILD]
        bmsk    r5, r5, 7
        breq    r5, 0, 1f
        mov     r5, DSP_CTRL_DISABLED_ALL
        sr      r5, [ARC_AUX_DSP_CTRL]
 1:
+#endif
 .endm
 
 /* clobbers r10, r11 registers pair */
index ae0aa5323be1a6154bbe5bd4b6ff51d2a40199fe..0ff4c061056157b1968eb71a2c50a47f71b700a3 100644 (file)
 
 #ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
        __RESTORE_REGFILE_HARD
+
+       ; SP points to PC/STAT32: hw restores them despite NO_AUTOSAVE
        add     sp, sp, SZ_PT_REGS - 8
 #else
        add     sp, sp, PT_r0
index 75539670431a63c91b7e605bcf8467533469e93e..8c4fc4b54c1446b3bb41d4b035c405576ecfc53f 100644 (file)
@@ -3,9 +3,6 @@
 # Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 #
 
-# Pass UTS_MACHINE for user_regset definition
-CFLAGS_ptrace.o                += -DUTS_MACHINE='"$(UTS_MACHINE)"'
-
 obj-y  := arcksyms.o setup.o irq.o reset.o ptrace.o process.o devtree.o
 obj-y  += signal.o traps.o sys.o troubleshoot.o stacktrace.o disasm.o
 obj-$(CONFIG_ISA_ARCOMPACT)            += entry-compact.o intc-compact.o
index d5f3fcf273b533b3feffdf0997fc275aea6fa132..f49a054a1016cb5d0ae61a5037ce6225b25d786d 100644 (file)
@@ -253,7 +253,7 @@ static const struct user_regset arc_regsets[] = {
 };
 
 static const struct user_regset_view user_arc_view = {
-       .name           = UTS_MACHINE,
+       .name           = "arc",
        .e_machine      = EM_ARC_INUSE,
        .regsets        = arc_regsets,
        .n              = ARRAY_SIZE(arc_regsets)
index b2b1cb645d9e90c4d41cb7e03226eee1fe361a88..dad8a656a2f1b7a70ab30a0db62c19270b267a86 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/clocksource.h>
 #include <linux/console.h>
 #include <linux/module.h>
+#include <linux/sizes.h>
 #include <linux/cpu.h>
 #include <linux/of_clk.h>
 #include <linux/of_fdt.h>
@@ -424,12 +425,12 @@ static void arc_chk_core_config(void)
        if ((unsigned int)__arc_dccm_base != cpu->dccm.base_addr)
                panic("Linux built with incorrect DCCM Base address\n");
 
-       if (CONFIG_ARC_DCCM_SZ != cpu->dccm.sz)
+       if (CONFIG_ARC_DCCM_SZ * SZ_1K != cpu->dccm.sz)
                panic("Linux built with incorrect DCCM Size\n");
 #endif
 
 #ifdef CONFIG_ARC_HAS_ICCM
-       if (CONFIG_ARC_ICCM_SZ != cpu->iccm.sz)
+       if (CONFIG_ARC_ICCM_SZ * SZ_1K != cpu->iccm.sz)
                panic("Linux built with incorrect ICCM Size\n");
 #endif
 
index d2999503fb8a5f1095419a673bd317f4e4690c6c..3393558876a9b154ef723b8c0e025ca1f96ab633 100644 (file)
@@ -191,10 +191,9 @@ void show_regs(struct pt_regs *regs)
        if (user_mode(regs))
                show_faulting_vma(regs->ret); /* faulting code, not data */
 
-       pr_info("ECR: 0x%08lx EFA: 0x%08lx ERET: 0x%08lx\n",
-               regs->event, current->thread.fault_address, regs->ret);
-
-       pr_info("STAT32: 0x%08lx", regs->status32);
+       pr_info("ECR: 0x%08lx EFA: 0x%08lx ERET: 0x%08lx\nSTAT: 0x%08lx",
+               regs->event, current->thread.fault_address, regs->ret,
+               regs->status32);
 
 #define STS_BIT(r, bit)        r->status32 & STATUS_##bit##_MASK ? #bit" " : ""
 
@@ -210,11 +209,10 @@ void show_regs(struct pt_regs *regs)
                        (regs->status32 & STATUS_U_MASK) ? "U " : "K ",
                        STS_BIT(regs, DE), STS_BIT(regs, AE));
 #endif
-       pr_cont("  BTA: 0x%08lx\n", regs->bta);
-       pr_info("BLK: %pS\n SP: 0x%08lx  FP: 0x%08lx\n",
-               (void *)regs->blink, regs->sp, regs->fp);
+       pr_cont("  BTA: 0x%08lx\n  SP: 0x%08lx  FP: 0x%08lx BLK: %pS\n",
+               regs->bta, regs->sp, regs->fp, (void *)regs->blink);
        pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
-              regs->lp_start, regs->lp_end, regs->lp_count);
+               regs->lp_start, regs->lp_end, regs->lp_count);
 
        /* print regs->r0 thru regs->r12
         * Sequential printing was generating horrible code
index 27ea64b1fa3321c3d86f2c25b419e1fd1882b269..f87758a6851bd132124c88512a9fbccf7870c49d 100644 (file)
@@ -1178,11 +1178,9 @@ int arc_unwind(struct unwind_frame_info *frame)
 #endif
 
        /* update frame */
-#ifndef CONFIG_AS_CFI_SIGNAL_FRAME
        if (frame->call_frame
            && !UNW_DEFAULT_RA(state.regs[retAddrReg], state.dataAlign))
                frame->call_frame = 0;
-#endif
        cfa = FRAME_REG(state.cfa.reg, unsigned long) + state.cfa.offs;
        startLoc = min_t(unsigned long, UNW_SP(frame), cfa);
        endLoc = max_t(unsigned long, UNW_SP(frame), cfa);
index a931d0a256d01ab0e78d4b8402c77d2567792bff..a645bca5899a07fee05fb3a00e54bbb135cd46c4 100644 (file)
@@ -6,6 +6,7 @@
 
 menuconfig ARC_PLAT_EZNPS
        bool "\"EZchip\" ARC dev platform"
+       depends on ISA_ARCOMPACT
        select CPU_BIG_ENDIAN
        select CLKSRC_NPS if !PHYS_ADDR_T_64BIT
        select EZNPS_GIC
index 66a04f6f477530090070fb46a26baa7cc240165c..c77c93c485a081745acdd98c39d693ffc6eae3a6 100644 (file)
@@ -12,6 +12,7 @@ config ARM
        select ARCH_HAS_KEEPINITRD
        select ARCH_HAS_KCOV
        select ARCH_HAS_MEMBARRIER_SYNC_CORE
+       select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
        select ARCH_HAS_PTE_SPECIAL if ARM_LPAE
        select ARCH_HAS_PHYS_TO_DMA
        select ARCH_HAS_SETUP_DMA_OPS
index b247f399de711b36a98f9945d51231797e248659..f82b5962d97ecb79da906672d50a43683a472c69 100644 (file)
@@ -42,7 +42,7 @@ SECTIONS
   }
   .table : ALIGN(4) {
     _table_start = .;
-    LONG(ZIMAGE_MAGIC(2))
+    LONG(ZIMAGE_MAGIC(4))
     LONG(ZIMAGE_MAGIC(0x5a534c4b))
     LONG(ZIMAGE_MAGIC(__piggy_size_addr - _start))
     LONG(ZIMAGE_MAGIC(_kernel_bss_size))
index 811c8cae315b520f445964fc07dc00e16266b5f9..d692e3b2812a0e74b1014dec8487c28fd5c0f390 100644 (file)
 
 &cpsw_emac0 {
        phy-handle = <&ethphy0>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-rxid";
 };
 
 &elm {
index 9f66f96d09c91661a0917e4abcdf701086059ba4..a958f9ee4a5ab9e7b957b19cd10d1984a81594e2 100644 (file)
 
 &cpsw_emac0 {
        phy-handle = <&ethphy0>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-rxid";
 };
 
 &rtc {
index 25222497f82854a9edbe213c144d3a9f4daa9634..4d5a7ca2e25d4d52955fd52c8f5146909011d8e9 100644 (file)
 
 &cpsw_emac0 {
        phy-handle = <&ethphy0>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-rxid";
        dual_emac_res_vlan = <1>;
 };
 
 &cpsw_emac1 {
        phy-handle = <&ethphy1>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-rxid";
        dual_emac_res_vlan = <2>;
 };
 
index 669559c9c95b3a83c93e412d7f526cbc6131077c..c13756fa0f55a6dd345c7a5ca6aeea172aafddbc 100644 (file)
 
 &cpsw_port1 {
        phy-handle = <&ethphy0_sw>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-rxid";
        ti,dual-emac-pvid = <1>;
 };
 
 &cpsw_port2 {
        phy-handle = <&ethphy1_sw>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-rxid";
        ti,dual-emac-pvid = <2>;
 };
 
index fa0088025b2c5483a83112955a8c3fd0be3c5f6b..85c95cc551dd5dfad011986e044580686e9bf1ac 100644 (file)
@@ -40,3 +40,7 @@
        status = "okay";
        dual_emac;
 };
+
+&m_can0 {
+       status = "disabled";
+};
index a813a0cf3ff39a97af53723a5973fe721a1c0117..565675354de429984ce6b77ae4475d42d9c53b88 100644 (file)
 
 &cpsw_emac0 {
        phy-handle = <&phy0>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-rxid";
        dual_emac_res_vlan = <1>;
 };
 
 &cpsw_emac1 {
        phy-handle = <&phy1>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-rxid";
        dual_emac_res_vlan = <2>;
 };
 
index aa5e55f981792885292dcc28cdce6f29fd3c22ea..a3ff1237d1fac2a27db9c51bc783834043153ff7 100644 (file)
 
 &cpsw_emac0 {
        phy-handle = <&ethphy0>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-rxid";
        dual_emac_res_vlan = <1>;
 };
 
 &cpsw_emac1 {
        phy-handle = <&ethphy1>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-rxid";
        dual_emac_res_vlan = <2>;
 };
 
index 6142c672811e510f416d3c0cfca2b417d3c03c25..5e5f5ca3c86f1654e457b0608faf0b3985b7babc 100644 (file)
@@ -75,7 +75,7 @@
                timer@20200 {
                        compatible = "arm,cortex-a9-global-timer";
                        reg = <0x20200 0x100>;
-                       interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
                        clocks = <&periph_clk>;
                };
 
@@ -83,7 +83,7 @@
                        compatible = "arm,cortex-a9-twd-timer";
                        reg = <0x20600 0x20>;
                        interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) |
-                                                 IRQ_TYPE_LEVEL_HIGH)>;
+                                                 IRQ_TYPE_EDGE_RISING)>;
                        clocks = <&periph_clk>;
                };
 
@@ -91,7 +91,7 @@
                        compatible = "arm,cortex-a9-twd-wdt";
                        reg = <0x20620 0x20>;
                        interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(1) |
-                                                 IRQ_TYPE_LEVEL_HIGH)>;
+                                                 IRQ_TYPE_EDGE_RISING)>;
                        clocks = <&periph_clk>;
                };
 
index 4c3f606e5b8d8ebae250daef6d3a26c0f8488ff3..f65448c01e317ba00ad30dfcfd8137e7dc1cb798 100644 (file)
@@ -24,7 +24,7 @@
 
        leds {
                act {
-                       gpios = <&gpio 47 GPIO_ACTIVE_HIGH>;
+                       gpios = <&gpio 47 GPIO_ACTIVE_LOW>;
                };
        };
 
index 44ed5a79816453e81a47eeacdc13ec7b80bd666d..c28ca0540f034eba6972708bfe8ba0f317b76860 100644 (file)
 
                                        davinci_mdio: mdio@800 {
                                                compatible = "ti,cpsw-mdio", "ti,davinci_mdio";
-                                               clocks = <&alwon_ethernet_clkctrl DM814_ETHERNET_CPGMAC0_CLKCTRL 0>;
+                                               clocks = <&cpsw_125mhz_gclk>;
                                                clock-names = "fck";
                                                #address-cells = <1>;
                                                #size-cells = <0>;
index 4740989ed9c4a5ec03be808fe73a7f22aeae8b96..7191ee6a1b826adb92d7b29c58904ddb27389896 100644 (file)
                        #address-cells = <1>;
                        ranges = <0x51000000 0x51000000 0x3000
                                  0x0        0x20000000 0x10000000>;
+                       dma-ranges;
                        /**
                         * To enable PCI endpoint mode, disable the pcie1_rc
                         * node and enable pcie1_ep mode.
                                device_type = "pci";
                                ranges = <0x81000000 0 0          0x03000 0 0x00010000
                                          0x82000000 0 0x20013000 0x13000 0 0xffed000>;
-                               dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>;
                                bus-range = <0x00 0xff>;
                                #interrupt-cells = <1>;
                                num-lanes = <1>;
                        #address-cells = <1>;
                        ranges = <0x51800000 0x51800000 0x3000
                                  0x0        0x30000000 0x10000000>;
+                       dma-ranges;
                        status = "disabled";
                        pcie2_rc: pcie@51800000 {
                                reg = <0x51800000 0x2000>, <0x51802000 0x14c>, <0x1000 0x2000>;
                                device_type = "pci";
                                ranges = <0x81000000 0 0          0x03000 0 0x00010000
                                          0x82000000 0 0x30013000 0x13000 0 0xffed000>;
-                               dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>;
                                bus-range = <0x00 0xff>;
                                #interrupt-cells = <1>;
                                num-lanes = <1>;
index 0cd75dadf292c2702077b29b2ac45e29b1d6b4e9..188639738dc3e1c803bac924d0df20ecc31d817b 100644 (file)
@@ -75,8 +75,8 @@
        imx27-phycard-s-rdk {
                pinctrl_i2c1: i2c1grp {
                        fsl,pins = <
-                               MX27_PAD_I2C2_SDA__I2C2_SDA 0x0
-                               MX27_PAD_I2C2_SCL__I2C2_SCL 0x0
+                               MX27_PAD_I2C_DATA__I2C_DATA 0x0
+                               MX27_PAD_I2C_CLK__I2C_CLK 0x0
                        >;
                };
 
index 0d594e4bd559d46dbd6c29a382286fb12af341ea..a1173bf5bff5eb84485ef221a30c356b64ba0ec2 100644 (file)
@@ -38,7 +38,7 @@
 };
 
 &switch_ports {
-       /delete-node/ port@2;
+       /delete-node/ port@3;
 };
 
 &touchscreen {
index 95b8f2d7182144ba90d6fa0cce12712e42add476..fb0980190aa07e84499180e99d42580b9ca0805a 100644 (file)
        };
 };
 
-&clks {
-       assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
-                         <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
-       assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
-                                <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
-};
-
 &ldb {
        status = "okay";
 
index 611cb7ae7e556ec761af31bd7739c589d0405eac..8f762d9c5ae9962f68cce12c5b75938eca1716c4 100644 (file)
        };
 };
 
-&clks {
-       assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
-                         <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
-       assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
-                                <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
-};
-
 &ldb {
        status = "okay";
 
index e4cb118f88c6c4c7854a176be6ba9dc929ff3d79..1ea64ecf4291c4d661a4ed22b963a351de10585b 100644 (file)
        };
 };
 
-&clks {
-       assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
-                         <&clks IMX6QDL_CLK_LDB_DI1_SEL>,
-                         <&clks IMX6QDL_CLK_IPU1_DI0_PRE_SEL>,
-                         <&clks IMX6QDL_CLK_IPU2_DI0_PRE_SEL>;
-       assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
-                                <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
-                                <&clks IMX6QDL_CLK_PLL2_PFD2_396M>,
-                                <&clks IMX6QDL_CLK_PLL2_PFD2_396M>;
-};
-
 &ldb {
        fsl,dual-channel;
        status = "okay";
index fa27dcdf06f1b3284f6d19812f48ee6f7cd4087b..1938b04199c484892153b9f0d7705935e5464bb1 100644 (file)
                #interrupt-cells = <1>;
        };
 };
+
+&clks {
+       assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
+                         <&clks IMX6QDL_CLK_LDB_DI1_SEL>,
+                         <&clks IMX6QDL_CLK_IPU1_DI0_PRE_SEL>,
+                         <&clks IMX6QDL_CLK_IPU1_DI1_PRE_SEL>,
+                         <&clks IMX6QDL_CLK_IPU2_DI0_PRE_SEL>,
+                         <&clks IMX6QDL_CLK_IPU2_DI1_PRE_SEL>;
+       assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
+                                <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
+                                <&clks IMX6QDL_CLK_PLL2_PFD0_352M>,
+                                <&clks IMX6QDL_CLK_PLL2_PFD0_352M>,
+                                <&clks IMX6QDL_CLK_PLL2_PFD0_352M>,
+                                <&clks IMX6QDL_CLK_PLL2_PFD0_352M>;
+};
index ede2e0c999b1551aa3628adf1d3f1a38d2a335c0..e10f99278c77aa412f1751c87209286ef124df7a 100644 (file)
@@ -72,8 +72,6 @@
                adi,input-depth = <8>;
                adi,input-colorspace = "rgb";
                adi,input-clock = "1x";
-               adi,input-style = <1>;
-               adi,input-justification = "evenly";
 
                ports {
                        #address-cells = <1>;
index 15449c72c042b9e505cb54526f315dd392eb051f..b0ec14c421641aa0889a26ba6c3a5a303274a322 100644 (file)
        status = "okay";
 };
 
-&ssp3 {
+&ssp1 {
        status = "okay";
-       cs-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>;
+       cs-gpios = <&gpio 46 GPIO_ACTIVE_LOW>;
 
        firmware-flash@0 {
-               compatible = "st,m25p80", "jedec,spi-nor";
+               compatible = "winbond,w25q32", "jedec,spi-nor";
                reg = <0>;
-               spi-max-frequency = <40000000>;
+               spi-max-frequency = <104000000>;
                m25p,fast-read;
        };
 };
 
-&ssp4 {
-       cs-gpios = <&gpio 56 GPIO_ACTIVE_HIGH>;
+&ssp2 {
+       cs-gpios = <&gpio 56 GPIO_ACTIVE_LOW>;
        status = "okay";
 };
index 9b5087a95e736861badbfc6d59829492243f0455..826f0a5778598776d1b8e9abb2f7c31ee3592a70 100644 (file)
                        };
 
                        hsic_phy0: hsic-phy@f0001800 {
-                               compatible = "marvell,mmp3-hsic-phy",
-                                            "usb-nop-xceiv";
+                               compatible = "marvell,mmp3-hsic-phy";
                                reg = <0xf0001800 0x40>;
                                #phy-cells = <0>;
                                status = "disabled";
                        };
 
                        hsic_phy1: hsic-phy@f0002800 {
-                               compatible = "marvell,mmp3-hsic-phy",
-                                            "usb-nop-xceiv";
+                               compatible = "marvell,mmp3-hsic-phy";
                                reg = <0xf0002800 0x40>;
                                #phy-cells = <0>;
                                status = "disabled";
                };
 
                soc_clocks: clocks@d4050000 {
-                       compatible = "marvell,mmp2-clock";
+                       compatible = "marvell,mmp3-clock";
                        reg = <0xd4050000 0x1000>,
                              <0xd4282800 0x400>,
                              <0xd4015000 0x1000>;
index 9067e0ef4240f846e1ff4b7cc2ac80fb246cbb53..06fbffa81636b3c4a8441833a091ef724d46a7ce 100644 (file)
 };
 
 &mmc3 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&mmc3_pins>;
        vmmc-supply = <&wl12xx_vmmc>;
        /* uart2_tx.sdmmc3_dat1 pad as wakeirq */
        interrupts-extended = <&wakeupgen GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH
                >;
        };
 
+       /*
+        * Android uses PIN_OFF_INPUT_PULLDOWN | PIN_INPUT_PULLUP | MUX_MODE3
+        * for gpio_100, but the internal pull makes wlan flakey on some
+        * devices. Off mode value should be tested if we have off mode working
+        * later on.
+        */
+       mmc3_pins: pinmux_mmc3_pins {
+               pinctrl-single,pins = <
+               /* 0x4a10008e gpmc_wait2.gpio_100 d23 */
+               OMAP4_IOPAD(0x08e, PIN_INPUT | MUX_MODE3)
+
+               /* 0x4a100102 abe_mcbsp1_dx.sdmmc3_dat2 ab25 */
+               OMAP4_IOPAD(0x102, PIN_INPUT_PULLUP | MUX_MODE1)
+
+               /* 0x4a100104 abe_mcbsp1_fsx.sdmmc3_dat3 ac27 */
+               OMAP4_IOPAD(0x104, PIN_INPUT_PULLUP | MUX_MODE1)
+
+               /* 0x4a100118 uart2_cts.sdmmc3_clk ab26 */
+               OMAP4_IOPAD(0x118, PIN_INPUT | MUX_MODE1)
+
+               /* 0x4a10011a uart2_rts.sdmmc3_cmd ab27 */
+               OMAP4_IOPAD(0x11a, PIN_INPUT_PULLUP | MUX_MODE1)
+
+               /* 0x4a10011c uart2_rx.sdmmc3_dat0 aa25 */
+               OMAP4_IOPAD(0x11c, PIN_INPUT_PULLUP | MUX_MODE1)
+
+               /* 0x4a10011e uart2_tx.sdmmc3_dat1 aa26 */
+               OMAP4_IOPAD(0x11e, PIN_INPUT_PULLUP | MUX_MODE1)
+               >;
+       };
+
        /* gpmc_ncs0.gpio_50 */
        poweroff_gpio: pinmux_poweroff_pins {
                pinctrl-single,pins = <
 };
 
 /*
- * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for
- * uart1 wakeirq.
+ * The uart1 port is wired to mdm6600 with rts and cts. The modem uses gpio_149
+ * for wake-up events for both the USB PHY and the UART. We can use gpio_149
+ * pad as the shared wakeirq for the UART rather than the RX or CTS pad as we
+ * have gpio_149 trigger before the UART transfer starts.
  */
 &uart1 {
        pinctrl-names = "default";
        pinctrl-0 = <&uart1_pins>;
        interrupts-extended = <&wakeupgen GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH
-                              &omap4_pmx_core 0xfc>;
+                              &omap4_pmx_core 0x110>;
+       uart-has-rtscts;
+       current-speed = <115200>;
 };
 
 &uart3 {
index 72b79770e336a79ef33cf222e51ed6d34b5e19fc..cace4380749711d7ccb0fd54a7ca1fc414a49f94 100644 (file)
                        reg = <0xe803b000 0x30>;
                        interrupts = <GIC_SPI 56 IRQ_TYPE_EDGE_RISING>;
                        clocks = <&cpg CPG_MOD 36>;
-                       clock-names = "ostm0";
                        power-domains = <&cpg>;
                        status = "disabled";
                };
                        reg = <0xe803c000 0x30>;
                        interrupts = <GIC_SPI 57 IRQ_TYPE_EDGE_RISING>;
                        clocks = <&cpg CPG_MOD 35>;
-                       clock-names = "ostm1";
                        power-domains = <&cpg>;
                        status = "disabled";
                };
                        reg = <0xe803d000 0x30>;
                        interrupts = <GIC_SPI 58 IRQ_TYPE_EDGE_RISING>;
                        clocks = <&cpg CPG_MOD 34>;
-                       clock-names = "ostm2";
                        power-domains = <&cpg>;
                        status = "disabled";
                };
index a5cd31229fbde835a759797c97dba449b5684ed5..a3ba722a9d7fee7739b8105235d79a1ee7f18e0e 100644 (file)
        cmt1: timer@e6130000 {
                compatible = "renesas,r8a73a4-cmt1", "renesas,rcar-gen2-cmt1";
                reg = <0 0xe6130000 0 0x1004>;
-               interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A73A4_CLK_CMT1>;
                clock-names = "fck";
                power-domains = <&pd_c5>;
index ebc1ff64f530d42c21b818fec618ccdd8b4dc6d6..90feb2cf99601af9d33c514476387234e200b64d 100644 (file)
                cpg_clocks: cpg_clocks@e6150000 {
                        compatible = "renesas,r8a7740-cpg-clocks";
                        reg = <0xe6150000 0x10000>;
-                       clocks = <&extal1_clk>, <&extalr_clk>;
+                       clocks = <&extal1_clk>, <&extal2_clk>, <&extalr_clk>;
                        #clock-cells = <1>;
                        clock-output-names = "system", "pllc0", "pllc1",
                                             "pllc2", "r",
index 92aa26ba423c59584e4a35ceeef5c1d67f9f218b..b1f679da36b2ede19854f59ba9d087161e0f5d27 100644 (file)
@@ -84,8 +84,6 @@
                adi,input-depth = <8>;
                adi,input-colorspace = "rgb";
                adi,input-clock = "1x";
-               adi,input-style = <1>;
-               adi,input-justification = "evenly";
 
                ports {
                        #address-cells = <1>;
index 69745def44d4dcf179f3bcde6d6630db7cb86fba..bfe778c4c47bc18643f6fe0c5ba59c2064200ce1 100644 (file)
                        adi,input-depth = <8>;
                        adi,input-colorspace = "rgb";
                        adi,input-clock = "1x";
-                       adi,input-style = <1>;
-                       adi,input-justification = "evenly";
 
                        ports {
                                #address-cells = <1>;
index 4138efb2766d6e854a127b609d46870a27278b8b..6a457bc9280a4949fe750c9aab566a06ca9e0915 100644 (file)
                adi,input-depth = <8>;
                adi,input-colorspace = "rgb";
                adi,input-clock = "1x";
-               adi,input-style = <1>;
-               adi,input-justification = "evenly";
 
                ports {
                        #address-cells = <1>;
index 687167b70cb62f366ef136ce5cddd01d73014b1a..fc74c6cd6def57184dd9e9e108f969aed9907ce4 100644 (file)
                        adi,input-depth = <8>;
                        adi,input-colorspace = "rgb";
                        adi,input-clock = "1x";
-                       adi,input-style = <1>;
-                       adi,input-justification = "evenly";
 
                        ports {
                                #address-cells = <1>;
index a8e0335148a549fb3cc0b8847aeb3c07cb32c159..114bf1c4199b6aa42f7d60212fddea1a3ddf8a4b 100644 (file)
                        adi,input-depth = <8>;
                        adi,input-colorspace = "rgb";
                        adi,input-clock = "1x";
-                       adi,input-style = <1>;
-                       adi,input-justification = "evenly";
 
                        ports {
                                #address-cells = <1>;
index 248eb717eb3500b6d5963b76a395cf7b05fbb572..9368ac2cf5082ff99d692c5c79ee1b44f6e7787e 100644 (file)
                adi,input-depth = <8>;
                adi,input-colorspace = "rgb";
                adi,input-clock = "1x";
-               adi,input-style = <1>;
-               adi,input-justification = "evenly";
 
                ports {
                        #address-cells = <1>;
index bd2a63bdab3d650e20c9d3fa0fd00a576b0047af..ba2d2a58901261f6bf29f8eaf48324046be103de 100644 (file)
         */
        hdmi@3d {
                compatible = "adi,adv7513";
-               reg = <0x3d>, <0x2d>, <0x4d>, <0x5d>;
-               reg-names = "main", "cec", "edid", "packet";
+               reg = <0x3d>, <0x4d>, <0x2d>, <0x5d>;
+               reg-names = "main", "edid", "cec", "packet";
 
                adi,input-depth = <8>;
                adi,input-colorspace = "rgb";
                adi,input-clock = "1x";
-               adi,input-style = <1>;
-               adi,input-justification = "evenly";
 
                ports {
                        #address-cells = <1>;
 
        hdmi@39 {
                compatible = "adi,adv7513";
-               reg = <0x39>, <0x29>, <0x49>, <0x59>;
-               reg-names = "main", "cec", "edid", "packet";
+               reg = <0x39>, <0x49>, <0x29>, <0x59>;
+               reg-names = "main", "edid", "cec", "packet";
 
                adi,input-depth = <8>;
                adi,input-colorspace = "rgb";
                adi,input-clock = "1x";
-               adi,input-style = <1>;
-               adi,input-justification = "evenly";
 
                ports {
                        #address-cells = <1>;
index cfe06a74ce894a133fa511f01b0b85d71e7349a6..79baf06019f5a6ff2f186cc38ef26efeb54724b2 100644 (file)
                        adi,input-depth = <8>;
                        adi,input-colorspace = "rgb";
                        adi,input-clock = "1x";
-                       adi,input-style = <1>;
-                       adi,input-justification = "evenly";
 
                        ports {
                                #address-cells = <1>;
index 9aaa96ea994307c4f470752018708efd72444684..b8b0941f677c1cd4ed9fda1b5d4b9f90e8074843 100644 (file)
                        adi,input-depth = <8>;
                        adi,input-colorspace = "rgb";
                        adi,input-clock = "1x";
-                       adi,input-style = <1>;
-                       adi,input-justification = "evenly";
 
                        ports {
                                #address-cells = <1>;
index 781ac7583522d22c9fe5cb6f37efe36e35c4def7..d9a0c9a29b681c2715ceb71988bdb9527043f635 100644 (file)
                assigned-clocks = <&cru SCLK_GPU>;
                assigned-clock-rates = <100000000>;
                clocks = <&cru SCLK_GPU>, <&cru SCLK_GPU>;
-               clock-names = "core", "bus";
+               clock-names = "bus", "core";
                resets = <&cru SRST_GPU>;
                status = "disabled";
        };
index 5670b33fd1bd0a4c431fc138bc77a0ccdc128714..aed879db6c15211f9d5e0a420533b7b0ce87c852 100644 (file)
@@ -46,7 +46,7 @@
                #address-cells = <1>;
                #size-cells = <0>;
 
-               phy: phy@0 {
+               phy: ethernet-phy@0 {
                        compatible = "ethernet-phy-id1234.d400", "ethernet-phy-ieee802.3-c22";
                        reg = <0>;
                        clocks = <&cru SCLK_MAC_PHY>;
index 679fc2b00e5ac37064283121e2438b3dca0afcdb..933ef69da32ac19096a1b4c5276bad64163a4be1 100644 (file)
                #address-cells = <1>;
                #size-cells = <0>;
 
-               phy: phy@0 {
+               phy: ethernet-phy@0 {
                        compatible = "ethernet-phy-id1234.d400",
                                     "ethernet-phy-ieee802.3-c22";
                        reg = <0>;
index 06172ebbf0cec3e484b34a9de4a9a2ad09148f02..5485a9918da677bfe5bb04afb136ce563e261460 100644 (file)
                                  "pp1",
                                  "ppmmu1";
                clocks = <&cru ACLK_GPU>, <&cru ACLK_GPU>;
-               clock-names = "core", "bus";
+               clock-names = "bus", "core";
                resets = <&cru SRST_GPU_A>;
                status = "disabled";
        };
                        };
                };
 
-               spi-0 {
+               spi0 {
                        spi0_clk: spi0-clk {
                                rockchip,pins = <0 RK_PB1 2 &pcfg_pull_up>;
                        };
                        };
                };
 
-               spi-1 {
+               spi1 {
                        spi1_clk: spi1-clk {
                                rockchip,pins = <0 RK_PC7 2 &pcfg_pull_up>;
                        };
index f9fcb7e9657bcdef1a5edbedd732e0b68916ba85..d929b60517ab2c5e4442117b267592d2c1d3d6a7 100644 (file)
@@ -84,7 +84,7 @@
                compatible = "arm,mali-400";
                reg = <0x10090000 0x10000>;
                clocks = <&cru ACLK_GPU>, <&cru ACLK_GPU>;
-               clock-names = "core", "bus";
+               clock-names = "bus", "core";
                assigned-clocks = <&cru ACLK_GPU>;
                assigned-clock-rates = <100000000>;
                resets = <&cru SRST_GPU>;
index 11e2211f9007759d211feabb0d4e82a0e089a60a..84a3b055f2537360b58846c3551750bc1ef1dfca 100644 (file)
@@ -147,6 +147,7 @@ CONFIG_I2C_DAVINCI=y
 CONFIG_SPI=y
 CONFIG_SPI_DAVINCI=y
 CONFIG_SPI_SPIDEV=y
+CONFIG_PTP_1588_CLOCK=y
 CONFIG_PINCTRL_SINGLE=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
index 3cc3ca5fa0277e448ae22c59fcaa8147d8acc078..8b83d4a5d309c6a934a5f471638b0c909c690b2f 100644 (file)
@@ -274,6 +274,7 @@ CONFIG_SPI_TI_QSPI=m
 CONFIG_HSI=m
 CONFIG_OMAP_SSI=m
 CONFIG_SSI_PROTOCOL=m
+CONFIG_PTP_1588_CLOCK=y
 CONFIG_PINCTRL_SINGLE=y
 CONFIG_DEBUG_GPIO=y
 CONFIG_GPIO_SYSFS=y
index 6fdb0ac62b3d85076824ecbb93eae1139bd79a57..59da6c0b63b62bc0e6015fa1b1ccfc8d74e8b9d6 100644 (file)
@@ -91,9 +91,17 @@ void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
                return;
        }
 
-       kernel_neon_begin();
-       chacha_doneon(state, dst, src, bytes, nrounds);
-       kernel_neon_end();
+       do {
+               unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
+
+               kernel_neon_begin();
+               chacha_doneon(state, dst, src, todo, nrounds);
+               kernel_neon_end();
+
+               bytes -= todo;
+               src += todo;
+               dst += todo;
+       } while (bytes);
 }
 EXPORT_SYMBOL(chacha_crypt_arch);
 
index ae5aefc44a4d766531118102f1f836394e4a7b12..ffa8d73fe722c231720e86d3d6aeb91ab283754b 100644 (file)
@@ -30,7 +30,7 @@ static int nhpoly1305_neon_update(struct shash_desc *desc,
                return crypto_nhpoly1305_update(desc, src, srclen);
 
        do {
-               unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
+               unsigned int n = min_t(unsigned int, srclen, SZ_4K);
 
                kernel_neon_begin();
                crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon);
index ceec04ec2f4054eed9f2d8eea12dd87ce0585e72..13cfef4ae22e312dcb9f6b032f9d5c6bfbb27ee9 100644 (file)
@@ -160,13 +160,20 @@ void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
                unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE);
 
                if (static_branch_likely(&have_neon) && do_neon) {
-                       kernel_neon_begin();
-                       poly1305_blocks_neon(&dctx->h, src, len, 1);
-                       kernel_neon_end();
+                       do {
+                               unsigned int todo = min_t(unsigned int, len, SZ_4K);
+
+                               kernel_neon_begin();
+                               poly1305_blocks_neon(&dctx->h, src, todo, 1);
+                               kernel_neon_end();
+
+                               len -= todo;
+                               src += todo;
+                       } while (len);
                } else {
                        poly1305_blocks_arm(&dctx->h, src, len, 1);
+                       src += len;
                }
-               src += len;
                nbytes %= POLY1305_BLOCK_SIZE;
        }
 
index 99929122dad7529b5be4daeb8321dbf7b555c79a..3546d294d55faef787562bb17e486d9f506336a8 100644 (file)
 #endif
 
 #include <asm/ptrace.h>
-#include <asm/domain.h>
 #include <asm/opcodes-virt.h>
 #include <asm/asm-offsets.h>
 #include <asm/page.h>
 #include <asm/thread_info.h>
+#include <asm/uaccess-asm.h>
 
 #define IOMEM(x)       (x)
 
@@ -446,79 +446,6 @@ THUMB(     orr     \reg , \reg , #PSR_T_BIT        )
        .size \name , . - \name
        .endm
 
-       .macro  csdb
-#ifdef CONFIG_THUMB2_KERNEL
-       .inst.w 0xf3af8014
-#else
-       .inst   0xe320f014
-#endif
-       .endm
-
-       .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
-#ifndef CONFIG_CPU_USE_DOMAINS
-       adds    \tmp, \addr, #\size - 1
-       sbcscc  \tmp, \tmp, \limit
-       bcs     \bad
-#ifdef CONFIG_CPU_SPECTRE
-       movcs   \addr, #0
-       csdb
-#endif
-#endif
-       .endm
-
-       .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
-#ifdef CONFIG_CPU_SPECTRE
-       sub     \tmp, \limit, #1
-       subs    \tmp, \tmp, \addr       @ tmp = limit - 1 - addr
-       addhs   \tmp, \tmp, #1          @ if (tmp >= 0) {
-       subshs  \tmp, \tmp, \size       @ tmp = limit - (addr + size) }
-       movlo   \addr, #0               @ if (tmp < 0) addr = NULL
-       csdb
-#endif
-       .endm
-
-       .macro  uaccess_disable, tmp, isb=1
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
-       /*
-        * Whenever we re-enter userspace, the domains should always be
-        * set appropriately.
-        */
-       mov     \tmp, #DACR_UACCESS_DISABLE
-       mcr     p15, 0, \tmp, c3, c0, 0         @ Set domain register
-       .if     \isb
-       instr_sync
-       .endif
-#endif
-       .endm
-
-       .macro  uaccess_enable, tmp, isb=1
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
-       /*
-        * Whenever we re-enter userspace, the domains should always be
-        * set appropriately.
-        */
-       mov     \tmp, #DACR_UACCESS_ENABLE
-       mcr     p15, 0, \tmp, c3, c0, 0
-       .if     \isb
-       instr_sync
-       .endif
-#endif
-       .endm
-
-       .macro  uaccess_save, tmp
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
-       mrc     p15, 0, \tmp, c3, c0, 0
-       str     \tmp, [sp, #SVC_DACR]
-#endif
-       .endm
-
-       .macro  uaccess_restore
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
-       ldr     r0, [sp, #SVC_DACR]
-       mcr     p15, 0, r0, c3, c0, 0
-#endif
-       .endm
-
        .irp    c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
        .macro  ret\c, reg
 #if __LINUX_ARM_ARCH__ < 6
index e133da303a988b6235f4f60be24025c91f7af340..a9151884bc85976231178b2612422f0d5a8cc03d 100644 (file)
@@ -165,8 +165,13 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
        preempt_enable();
 #endif
 
-       if (!ret)
-               *oval = oldval;
+       /*
+        * Store unconditionally. If ret != 0 the extra store is the least
+        * of the worries but GCC cannot figure out that __futex_atomic_op()
+        * is either setting ret to -EFAULT or storing the old value in
+        * oldval which results in a uninitialized warning at the call site.
+        */
+       *oval = oldval;
 
        return ret;
 }
diff --git a/arch/arm/include/asm/uaccess-asm.h b/arch/arm/include/asm/uaccess-asm.h
new file mode 100644 (file)
index 0000000..907571f
--- /dev/null
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_UACCESS_ASM_H__
+#define __ASM_UACCESS_ASM_H__
+
+#include <asm/asm-offsets.h>
+#include <asm/domain.h>
+#include <asm/memory.h>
+#include <asm/thread_info.h>
+
+       .macro  csdb
+#ifdef CONFIG_THUMB2_KERNEL
+       .inst.w 0xf3af8014
+#else
+       .inst   0xe320f014
+#endif
+       .endm
+
+       .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
+#ifndef CONFIG_CPU_USE_DOMAINS
+       adds    \tmp, \addr, #\size - 1
+       sbcscc  \tmp, \tmp, \limit
+       bcs     \bad
+#ifdef CONFIG_CPU_SPECTRE
+       movcs   \addr, #0
+       csdb
+#endif
+#endif
+       .endm
+
+       .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
+#ifdef CONFIG_CPU_SPECTRE
+       sub     \tmp, \limit, #1
+       subs    \tmp, \tmp, \addr       @ tmp = limit - 1 - addr
+       addhs   \tmp, \tmp, #1          @ if (tmp >= 0) {
+       subshs  \tmp, \tmp, \size       @ tmp = limit - (addr + size) }
+       movlo   \addr, #0               @ if (tmp < 0) addr = NULL
+       csdb
+#endif
+       .endm
+
+       .macro  uaccess_disable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+       /*
+        * Whenever we re-enter userspace, the domains should always be
+        * set appropriately.
+        */
+       mov     \tmp, #DACR_UACCESS_DISABLE
+       mcr     p15, 0, \tmp, c3, c0, 0         @ Set domain register
+       .if     \isb
+       instr_sync
+       .endif
+#endif
+       .endm
+
+       .macro  uaccess_enable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+       /*
+        * Whenever we re-enter userspace, the domains should always be
+        * set appropriately.
+        */
+       mov     \tmp, #DACR_UACCESS_ENABLE
+       mcr     p15, 0, \tmp, c3, c0, 0
+       .if     \isb
+       instr_sync
+       .endif
+#endif
+       .endm
+
+#if defined(CONFIG_CPU_SW_DOMAIN_PAN) || defined(CONFIG_CPU_USE_DOMAINS)
+#define DACR(x...)     x
+#else
+#define DACR(x...)
+#endif
+
+       /*
+        * Save the address limit on entry to a privileged exception.
+        *
+        * If we are using the DACR for kernel access by the user accessors
+        * (CONFIG_CPU_USE_DOMAINS=y), always reset the DACR kernel domain
+        * back to client mode, whether or not \disable is set.
+        *
+        * If we are using SW PAN, set the DACR user domain to no access
+        * if \disable is set.
+        */
+       .macro  uaccess_entry, tsk, tmp0, tmp1, tmp2, disable
+       ldr     \tmp1, [\tsk, #TI_ADDR_LIMIT]
+       mov     \tmp2, #TASK_SIZE
+       str     \tmp2, [\tsk, #TI_ADDR_LIMIT]
+ DACR( mrc     p15, 0, \tmp0, c3, c0, 0)
+ DACR( str     \tmp0, [sp, #SVC_DACR])
+       str     \tmp1, [sp, #SVC_ADDR_LIMIT]
+       .if \disable && IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN)
+       /* kernel=client, user=no access */
+       mov     \tmp2, #DACR_UACCESS_DISABLE
+       mcr     p15, 0, \tmp2, c3, c0, 0
+       instr_sync
+       .elseif IS_ENABLED(CONFIG_CPU_USE_DOMAINS)
+       /* kernel=client */
+       bic     \tmp2, \tmp0, #domain_mask(DOMAIN_KERNEL)
+       orr     \tmp2, \tmp2, #domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT)
+       mcr     p15, 0, \tmp2, c3, c0, 0
+       instr_sync
+       .endif
+       .endm
+
+       /* Restore the user access state previously saved by uaccess_entry */
+       .macro  uaccess_exit, tsk, tmp0, tmp1
+       ldr     \tmp1, [sp, #SVC_ADDR_LIMIT]
+ DACR( ldr     \tmp0, [sp, #SVC_DACR])
+       str     \tmp1, [\tsk, #TI_ADDR_LIMIT]
+ DACR( mcr     p15, 0, \tmp0, c3, c0, 0)
+       .endm
+
+#undef DACR
+
+#endif /* __ASM_UACCESS_ASM_H__ */
index 4247ebf4b8934720a992b5e432bf5ed6bb260423..3c2faf2bd124e8f6383c247cf0ddff1f72a32dd0 100644 (file)
@@ -42,7 +42,7 @@ static int __init init_atags_procfs(void)
        size_t size;
 
        if (tag->hdr.tag != ATAG_CORE) {
-               pr_info("No ATAGs?");
+               pr_info("No ATAGs?\n");
                return -EINVAL;
        }
 
index 77f54830554c32599aa5de9a0e0fb52c4de5c5a6..55a47df0477383df6fd6447f89447a8ebd5598f4 100644 (file)
@@ -27,6 +27,7 @@
 #include <asm/unistd.h>
 #include <asm/tls.h>
 #include <asm/system_info.h>
+#include <asm/uaccess-asm.h>
 
 #include "entry-header.S"
 #include <asm/entry-macro-multi.S>
@@ -179,15 +180,7 @@ ENDPROC(__und_invalid)
        stmia   r7, {r2 - r6}
 
        get_thread_info tsk
-       ldr     r0, [tsk, #TI_ADDR_LIMIT]
-       mov     r1, #TASK_SIZE
-       str     r1, [tsk, #TI_ADDR_LIMIT]
-       str     r0, [sp, #SVC_ADDR_LIMIT]
-
-       uaccess_save r0
-       .if \uaccess
-       uaccess_disable r0
-       .endif
+       uaccess_entry tsk, r0, r1, r2, \uaccess
 
        .if \trace
 #ifdef CONFIG_TRACE_IRQFLAGS
index 32051ec5b33fa3dc41eb9c26e063cba7d502c048..40db0f9188b69e9e4323405c8c44e9a3fe153890 100644 (file)
@@ -6,6 +6,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/errno.h>
 #include <asm/thread_info.h>
+#include <asm/uaccess-asm.h>
 #include <asm/v7m.h>
 
 @ Bad Abort numbers
        blne    trace_hardirqs_off
 #endif
        .endif
-       ldr     r1, [sp, #SVC_ADDR_LIMIT]
-       uaccess_restore
-       str     r1, [tsk, #TI_ADDR_LIMIT]
+       uaccess_exit tsk, r0, r1
 
 #ifndef CONFIG_THUMB2_KERNEL
        @ ARM mode SVC restore
        @ on the stack remains correct).
        @
        .macro  svc_exit_via_fiq
-       ldr     r1, [sp, #SVC_ADDR_LIMIT]
-       uaccess_restore
-       str     r1, [tsk, #TI_ADDR_LIMIT]
+       uaccess_exit tsk, r0, r1
 #ifndef CONFIG_THUMB2_KERNEL
        @ ARM mode restore
        mov     r0, sp
index b606cded90cd52f1c49f864e8013908290dc0fcb..4cc6a7eff6359a23633587742bef6e3690e76ecf 100644 (file)
@@ -219,8 +219,8 @@ static struct undef_hook arm_break_hook = {
 };
 
 static struct undef_hook thumb_break_hook = {
-       .instr_mask     = 0xffff,
-       .instr_val      = 0xde01,
+       .instr_mask     = 0xffffffff,
+       .instr_val      = 0x0000de01,
        .cpsr_mask      = PSR_T_BIT,
        .cpsr_val       = PSR_T_BIT,
        .fn             = break_trap,
index ab35275b7ee3f3394edd9700bc38166a4cfb4f71..f0a50b9e61dfdf4e266dc0ff756309a5543473f9 100644 (file)
@@ -27,7 +27,8 @@ static void __iomem *gic_cpu_ctrl;
 #define GIC_CPU_CTRL                   0x00
 #define GIC_CPU_CTRL_ENABLE            1
 
-int __init ox820_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int __init ox820_boot_secondary(unsigned int cpu,
+               struct task_struct *idle)
 {
        /*
         * Write the address of secondary startup into the
index 40fb05d96c6072c9357cf69965ca006c0a5fdb27..5d513f461957b5e416113bada2688a8ee7f34179 100644 (file)
@@ -20,6 +20,7 @@ config ARM64
        select ARCH_HAS_KCOV
        select ARCH_HAS_KEEPINITRD
        select ARCH_HAS_MEMBARRIER_SYNC_CORE
+       select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
        select ARCH_HAS_PTE_DEVMAP
        select ARCH_HAS_PTE_SPECIAL
        select ARCH_HAS_SETUP_DMA_OPS
index 316e8a44391321dd592c6b05809e22b22f60092b..dc4ab6b434f9700ab855bd0f1b6fe1dd57ca94e1 100644 (file)
@@ -98,7 +98,7 @@
 };
 
 &codec_analog {
-       hpvcc-supply = <&reg_eldo1>;
+       cpvdd-supply = <&reg_eldo1>;
        status = "okay";
 };
 
index 31143fe64d91ff38acd52348dbead36c94358d7c..c26cc1fcaffdd402b166813e9f69f1e3c4d41c79 100644 (file)
                };
        };
 
-       sound_spdif {
-               compatible = "simple-audio-card";
-               simple-audio-card,name = "On-board SPDIF";
-
-               simple-audio-card,cpu {
-                       sound-dai = <&spdif>;
-               };
-
-               simple-audio-card,codec {
-                       sound-dai = <&spdif_out>;
-               };
-       };
-
-       spdif_out: spdif-out {
-               #sound-dai-cells = <0>;
-               compatible = "linux,spdif-dit";
-       };
-
        timer {
                compatible = "arm,armv8-timer";
                allwinner,erratum-unknown1;
index 0882ea215b88f3a600f2b581029b7c852bf92e06..c0aef7d6911705bbbe1db500cc55337c0ff9744a 100644 (file)
                                reg = <0x0 0xff400000 0x0 0x40000>;
                                interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clkc CLKID_USB1_DDR_BRIDGE>;
-                               clock-names = "ddr";
+                               clock-names = "otg";
                                phys = <&usb2_phy1>;
                                phy-names = "usb2-phy";
                                dr_mode = "peripheral";
index 783e5a397f863273cd4f17f23df332acde68f90d..55d39020ec72f5ad28dcdd5c9de243d9024eaccc 100644 (file)
@@ -1,4 +1,3 @@
-
 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
 /*
  * Copyright (c) 2019 BayLibre, SAS
index c33e85fbdaba1f1de25c7335a61970e0d44c8677..c6c8caed8327b12130d4233d757c57ef7cd7b9ca 100644 (file)
        clock-latency = <50000>;
 };
 
+&frddr_a {
+       status = "okay";
+};
+
 &frddr_b {
        status = "okay";
 };
index 325e448eb09c3ee43bedb2ae3c895012c6381242..06c5430eb92d1896cdda303eabcc9f0dacd64340 100644 (file)
 &usb {
        status = "okay";
        dr_mode = "host";
-       vbus-regulator = <&usb_pwr_en>;
+       vbus-supply = <&usb_pwr_en>;
 };
 
 &usb2_phy0 {
index 2a7f70b711498d508092c1d9bbbbe9ff9d9bc199..13d0570c7ed6d8dfc51c20ade02ee3e7ca32efaa 100644 (file)
 
                edma0: dma-controller@22c0000 {
                        #dma-cells = <2>;
-                       compatible = "fsl,ls1028a-edma";
+                       compatible = "fsl,ls1028a-edma", "fsl,vf610-edma";
                        reg = <0x0 0x22c0000 0x0 0x10000>,
                              <0x0 0x22d0000 0x0 0x10000>,
                              <0x0 0x22e0000 0x0 0x10000>;
index cc7152ecedd93aa5ea41340b7362fbac0feb43dd..8829628f757a6c7a1b6d727a3f069447e8c2c5cd 100644 (file)
 
                aips1: bus@30000000 {
                        compatible = "fsl,aips-bus", "simple-bus";
-                       reg = <0x301f0000 0x10000>;
+                       reg = <0x30000000 0x400000>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x30000000 0x30000000 0x400000>;
 
                aips2: bus@30400000 {
                        compatible = "fsl,aips-bus", "simple-bus";
-                       reg = <0x305f0000 0x10000>;
+                       reg = <0x30400000 0x400000>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x30400000 0x30400000 0x400000>;
 
                aips3: bus@30800000 {
                        compatible = "fsl,aips-bus", "simple-bus";
-                       reg = <0x309f0000 0x10000>;
+                       reg = <0x30800000 0x400000>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x30800000 0x30800000 0x400000>,
 
                aips4: bus@32c00000 {
                        compatible = "fsl,aips-bus", "simple-bus";
-                       reg = <0x32df0000 0x10000>;
+                       reg = <0x32c00000 0x400000>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x32c00000 0x32c00000 0x400000>;
index fa78f0163270a3e8d8a8c59a358c13952ddda768..43971abe218b1baf9ef8bbfeb85063072ed15a09 100644 (file)
 
                aips1: bus@30000000 {
                        compatible = "fsl,aips-bus", "simple-bus";
-                       reg = <0x301f0000 0x10000>;
+                       reg = <0x30000000 0x400000>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges;
 
                aips2: bus@30400000 {
                        compatible = "fsl,aips-bus", "simple-bus";
-                       reg = <0x305f0000 0x10000>;
+                       reg = <0x30400000 0x400000>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges;
 
                aips3: bus@30800000 {
                        compatible = "fsl,aips-bus", "simple-bus";
-                       reg = <0x309f0000 0x10000>;
+                       reg = <0x30800000 0x400000>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges;
                                reg = <0x30bd0000 0x10000>;
                                interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clk IMX8MN_CLK_SDMA1_ROOT>,
-                                        <&clk IMX8MN_CLK_SDMA1_ROOT>;
+                                        <&clk IMX8MN_CLK_AHB>;
                                clock-names = "ipg", "ahb";
                                #dma-cells = <3>;
                                fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin";
 
                aips4: bus@32c00000 {
                        compatible = "fsl,aips-bus", "simple-bus";
-                       reg = <0x32df0000 0x10000>;
+                       reg = <0x32c00000 0x400000>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges;
index da78f89b6c98be61e8fe42b44e7500c8bffd8995..319ab34cab3e5e7e37e6048fc54f704226c6acd8 100644 (file)
 #define MX8MP_IOMUXC_ENET_TXC__SIM_M_HADDR22                         0x070 0x2D0 0x000 0x7 0x0
 #define MX8MP_IOMUXC_ENET_RX_CTL__ENET_QOS_RGMII_RX_CTL              0x074 0x2D4 0x000 0x0 0x0
 #define MX8MP_IOMUXC_ENET_RX_CTL__AUDIOMIX_SAI7_TX_SYNC              0x074 0x2D4 0x540 0x2 0x0
-#define MX8MP_IOMUXC_ENET_RX_CTL__AUDIOMIX_BIT_STREAM03              0x074 0x2D4 0x4CC 0x3 0x0
+#define MX8MP_IOMUXC_ENET_RX_CTL__AUDIOMIX_BIT_STREAM03              0x074 0x2D4 0x4CC 0x3 0x1
 #define MX8MP_IOMUXC_ENET_RX_CTL__GPIO1_IO24                         0x074 0x2D4 0x000 0x5 0x0
 #define MX8MP_IOMUXC_ENET_RX_CTL__USDHC3_DATA2                       0x074 0x2D4 0x618 0x6 0x0
 #define MX8MP_IOMUXC_ENET_RX_CTL__SIM_M_HADDR23                      0x074 0x2D4 0x000 0x7 0x0
 #define MX8MP_IOMUXC_ENET_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK    0x078 0x2D8 0x000 0x0 0x0
 #define MX8MP_IOMUXC_ENET_RXC__ENET_QOS_RX_ER                        0x078 0x2D8 0x000 0x1 0x0
 #define MX8MP_IOMUXC_ENET_RXC__AUDIOMIX_SAI7_TX_BCLK                 0x078 0x2D8 0x53C 0x2 0x0
-#define MX8MP_IOMUXC_ENET_RXC__AUDIOMIX_BIT_STREAM02                 0x078 0x2D8 0x4C8 0x3 0x0
+#define MX8MP_IOMUXC_ENET_RXC__AUDIOMIX_BIT_STREAM02                 0x078 0x2D8 0x4C8 0x3 0x1
 #define MX8MP_IOMUXC_ENET_RXC__GPIO1_IO25                            0x078 0x2D8 0x000 0x5 0x0
 #define MX8MP_IOMUXC_ENET_RXC__USDHC3_DATA3                          0x078 0x2D8 0x61C 0x6 0x0
 #define MX8MP_IOMUXC_ENET_RXC__SIM_M_HADDR24                         0x078 0x2D8 0x000 0x7 0x0
 #define MX8MP_IOMUXC_ENET_RD0__ENET_QOS_RGMII_RD0                    0x07C 0x2DC 0x000 0x0 0x0
 #define MX8MP_IOMUXC_ENET_RD0__AUDIOMIX_SAI7_RX_DATA00               0x07C 0x2DC 0x534 0x2 0x0
-#define MX8MP_IOMUXC_ENET_RD0__AUDIOMIX_BIT_STREAM01                 0x07C 0x2DC 0x4C4 0x3 0x0
+#define MX8MP_IOMUXC_ENET_RD0__AUDIOMIX_BIT_STREAM01                 0x07C 0x2DC 0x4C4 0x3 0x1
 #define MX8MP_IOMUXC_ENET_RD0__GPIO1_IO26                            0x07C 0x2DC 0x000 0x5 0x0
 #define MX8MP_IOMUXC_ENET_RD0__USDHC3_DATA4                          0x07C 0x2DC 0x620 0x6 0x0
 #define MX8MP_IOMUXC_ENET_RD0__SIM_M_HADDR25                         0x07C 0x2DC 0x000 0x7 0x0
 #define MX8MP_IOMUXC_ENET_RD1__ENET_QOS_RGMII_RD1                    0x080 0x2E0 0x000 0x0 0x0
 #define MX8MP_IOMUXC_ENET_RD1__AUDIOMIX_SAI7_RX_SYNC                 0x080 0x2E0 0x538 0x2 0x0
-#define MX8MP_IOMUXC_ENET_RD1__AUDIOMIX_BIT_STREAM00                 0x080 0x2E0 0x4C0 0x3 0x0
+#define MX8MP_IOMUXC_ENET_RD1__AUDIOMIX_BIT_STREAM00                 0x080 0x2E0 0x4C0 0x3 0x1
 #define MX8MP_IOMUXC_ENET_RD1__GPIO1_IO27                            0x080 0x2E0 0x000 0x5 0x0
 #define MX8MP_IOMUXC_ENET_RD1__USDHC3_RESET_B                        0x080 0x2E0 0x000 0x6 0x0
 #define MX8MP_IOMUXC_ENET_RD1__SIM_M_HADDR26                         0x080 0x2E0 0x000 0x7 0x0
 #define MX8MP_IOMUXC_SD2_DATA0__I2C4_SDA                             0x0C8 0x328 0x5C0 0x2 0x1
 #define MX8MP_IOMUXC_SD2_DATA0__UART2_DCE_RX                         0x0C8 0x328 0x5F0 0x3 0x2
 #define MX8MP_IOMUXC_SD2_DATA0__UART2_DTE_TX                         0x0C8 0x328 0x000 0x3 0x0
-#define MX8MP_IOMUXC_SD2_DATA0__AUDIOMIX_BIT_STREAM00                0x0C8 0x328 0x4C0 0x4 0x1
+#define MX8MP_IOMUXC_SD2_DATA0__AUDIOMIX_BIT_STREAM00                0x0C8 0x328 0x4C0 0x4 0x2
 #define MX8MP_IOMUXC_SD2_DATA0__GPIO2_IO15                           0x0C8 0x328 0x000 0x5 0x0
 #define MX8MP_IOMUXC_SD2_DATA0__CCMSRCGPCMIX_OBSERVE2                0x0C8 0x328 0x000 0x6 0x0
 #define MX8MP_IOMUXC_SD2_DATA0__OBSERVE_MUX_OUT02                    0x0C8 0x328 0x000 0x7 0x0
 #define MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3                         0x0D4 0x334 0x000 0x0 0x0
 #define MX8MP_IOMUXC_SD2_DATA3__ECSPI2_MISO                          0x0D4 0x334 0x56C 0x2 0x0
 #define MX8MP_IOMUXC_SD2_DATA3__AUDIOMIX_SPDIF_IN                    0x0D4 0x334 0x544 0x3 0x1
-#define MX8MP_IOMUXC_SD2_DATA3__AUDIOMIX_BIT_STREAM03                0x0D4 0x334 0x4CC 0x4 0x1
+#define MX8MP_IOMUXC_SD2_DATA3__AUDIOMIX_BIT_STREAM03                0x0D4 0x334 0x4CC 0x4 0x2
 #define MX8MP_IOMUXC_SD2_DATA3__GPIO2_IO18                           0x0D4 0x334 0x000 0x5 0x0
 #define MX8MP_IOMUXC_SD2_DATA3__CCMSRCGPCMIX_EARLY_RESET             0x0D4 0x334 0x000 0x6 0x0
 #define MX8MP_IOMUXC_SD2_RESET_B__USDHC2_RESET_B                     0x0D8 0x338 0x000 0x0 0x0
 #define MX8MP_IOMUXC_SAI5_RXD0__AUDIOMIX_SAI1_TX_DATA02              0x134 0x394 0x000 0x1 0x0
 #define MX8MP_IOMUXC_SAI5_RXD0__PWM2_OUT                             0x134 0x394 0x000 0x2 0x0
 #define MX8MP_IOMUXC_SAI5_RXD0__I2C5_SCL                             0x134 0x394 0x5C4 0x3 0x1
-#define MX8MP_IOMUXC_SAI5_RXD0__AUDIOMIX_BIT_STREAM00                0x134 0x394 0x4C0 0x4 0x2
+#define MX8MP_IOMUXC_SAI5_RXD0__AUDIOMIX_BIT_STREAM00                0x134 0x394 0x4C0 0x4 0x3
 #define MX8MP_IOMUXC_SAI5_RXD0__GPIO3_IO21                           0x134 0x394 0x000 0x5 0x0
 #define MX8MP_IOMUXC_SAI5_RXD1__AUDIOMIX_SAI5_RX_DATA01              0x138 0x398 0x4FC 0x0 0x0
 #define MX8MP_IOMUXC_SAI5_RXD1__AUDIOMIX_SAI1_TX_DATA03              0x138 0x398 0x000 0x1 0x0
 #define MX8MP_IOMUXC_SAI5_RXD1__AUDIOMIX_SAI1_TX_SYNC                0x138 0x398 0x4D8 0x2 0x0
 #define MX8MP_IOMUXC_SAI5_RXD1__AUDIOMIX_SAI5_TX_SYNC                0x138 0x398 0x510 0x3 0x0
-#define MX8MP_IOMUXC_SAI5_RXD1__AUDIOMIX_BIT_STREAM01                0x138 0x398 0x4C4 0x4 0x2
+#define MX8MP_IOMUXC_SAI5_RXD1__AUDIOMIX_BIT_STREAM01                0x138 0x398 0x4C4 0x4 0x3
 #define MX8MP_IOMUXC_SAI5_RXD1__GPIO3_IO22                           0x138 0x398 0x000 0x5 0x0
 #define MX8MP_IOMUXC_SAI5_RXD1__CAN1_TX                              0x138 0x398 0x000 0x6 0x0
 #define MX8MP_IOMUXC_SAI5_RXD2__AUDIOMIX_SAI5_RX_DATA02              0x13C 0x39C 0x500 0x0 0x0
 #define MX8MP_IOMUXC_SAI5_RXD2__AUDIOMIX_SAI1_TX_DATA04              0x13C 0x39C 0x000 0x1 0x0
 #define MX8MP_IOMUXC_SAI5_RXD2__AUDIOMIX_SAI1_TX_SYNC                0x13C 0x39C 0x4D8 0x2 0x1
 #define MX8MP_IOMUXC_SAI5_RXD2__AUDIOMIX_SAI5_TX_BCLK                0x13C 0x39C 0x50C 0x3 0x0
-#define MX8MP_IOMUXC_SAI5_RXD2__AUDIOMIX_BIT_STREAM02                0x13C 0x39C 0x4C8 0x4 0x2
+#define MX8MP_IOMUXC_SAI5_RXD2__AUDIOMIX_BIT_STREAM02                0x13C 0x39C 0x4C8 0x4 0x3
 #define MX8MP_IOMUXC_SAI5_RXD2__GPIO3_IO23                           0x13C 0x39C 0x000 0x5 0x0
 #define MX8MP_IOMUXC_SAI5_RXD2__CAN1_RX                              0x13C 0x39C 0x54C 0x6 0x0
 #define MX8MP_IOMUXC_SAI5_RXD3__AUDIOMIX_SAI5_RX_DATA03              0x140 0x3A0 0x504 0x0 0x0
 #define MX8MP_IOMUXC_SAI5_RXD3__AUDIOMIX_SAI1_TX_DATA05              0x140 0x3A0 0x000 0x1 0x0
 #define MX8MP_IOMUXC_SAI5_RXD3__AUDIOMIX_SAI1_TX_SYNC                0x140 0x3A0 0x4D8 0x2 0x2
 #define MX8MP_IOMUXC_SAI5_RXD3__AUDIOMIX_SAI5_TX_DATA00              0x140 0x3A0 0x000 0x3 0x0
-#define MX8MP_IOMUXC_SAI5_RXD3__AUDIOMIX_BIT_STREAM03                0x140 0x3A0 0x4CC 0x4 0x2
+#define MX8MP_IOMUXC_SAI5_RXD3__AUDIOMIX_BIT_STREAM03                0x140 0x3A0 0x4CC 0x4 0x3
 #define MX8MP_IOMUXC_SAI5_RXD3__GPIO3_IO24                           0x140 0x3A0 0x000 0x5 0x0
 #define MX8MP_IOMUXC_SAI5_RXD3__CAN2_TX                              0x140 0x3A0 0x000 0x6 0x0
 #define MX8MP_IOMUXC_SAI5_MCLK__AUDIOMIX_SAI5_MCLK                   0x144 0x3A4 0x4F0 0x0 0x0
 #define MX8MP_IOMUXC_SAI1_RXD0__AUDIOMIX_SAI1_RX_DATA00              0x150 0x3B0 0x000 0x0 0x0
 #define MX8MP_IOMUXC_SAI1_RXD0__AUDIOMIX_SAI5_RX_DATA00              0x150 0x3B0 0x4F8 0x1 0x1
 #define MX8MP_IOMUXC_SAI1_RXD0__AUDIOMIX_SAI1_TX_DATA01              0x150 0x3B0 0x000 0x2 0x0
-#define MX8MP_IOMUXC_SAI1_RXD0__AUDIOMIX_BIT_STREAM00                0x150 0x3B0 0x4C0 0x3 0x3
+#define MX8MP_IOMUXC_SAI1_RXD0__AUDIOMIX_BIT_STREAM00                0x150 0x3B0 0x4C0 0x3 0x4
 #define MX8MP_IOMUXC_SAI1_RXD0__ENET1_1588_EVENT1_IN                 0x150 0x3B0 0x000 0x4 0x0
 #define MX8MP_IOMUXC_SAI1_RXD0__GPIO4_IO02                           0x150 0x3B0 0x000 0x5 0x0
 #define MX8MP_IOMUXC_SAI1_RXD1__AUDIOMIX_SAI1_RX_DATA01              0x154 0x3B4 0x000 0x0 0x0
 #define MX8MP_IOMUXC_SAI1_RXD1__AUDIOMIX_SAI5_RX_DATA01              0x154 0x3B4 0x4FC 0x1 0x1
-#define MX8MP_IOMUXC_SAI1_RXD1__AUDIOMIX_BIT_STREAM01                0x154 0x3B4 0x4C4 0x3 0x3
+#define MX8MP_IOMUXC_SAI1_RXD1__AUDIOMIX_BIT_STREAM01                0x154 0x3B4 0x4C4 0x3 0x4
 #define MX8MP_IOMUXC_SAI1_RXD1__ENET1_1588_EVENT1_OUT                0x154 0x3B4 0x000 0x4 0x0
 #define MX8MP_IOMUXC_SAI1_RXD1__GPIO4_IO03                           0x154 0x3B4 0x000 0x5 0x0
 #define MX8MP_IOMUXC_SAI1_RXD2__AUDIOMIX_SAI1_RX_DATA02              0x158 0x3B8 0x000 0x0 0x0
 #define MX8MP_IOMUXC_SAI1_RXD2__AUDIOMIX_SAI5_RX_DATA02              0x158 0x3B8 0x500 0x1 0x1
-#define MX8MP_IOMUXC_SAI1_RXD2__AUDIOMIX_BIT_STREAM02                0x158 0x3B8 0x4C8 0x3 0x3
+#define MX8MP_IOMUXC_SAI1_RXD2__AUDIOMIX_BIT_STREAM02                0x158 0x3B8 0x4C8 0x3 0x4
 #define MX8MP_IOMUXC_SAI1_RXD2__ENET1_MDC                            0x158 0x3B8 0x000 0x4 0x0
 #define MX8MP_IOMUXC_SAI1_RXD2__GPIO4_IO04                           0x158 0x3B8 0x000 0x5 0x0
 #define MX8MP_IOMUXC_SAI1_RXD3__AUDIOMIX_SAI1_RX_DATA03              0x15C 0x3BC 0x000 0x0 0x0
 #define MX8MP_IOMUXC_SAI1_RXD3__AUDIOMIX_SAI5_RX_DATA03              0x15C 0x3BC 0x504 0x1 0x1
-#define MX8MP_IOMUXC_SAI1_RXD3__AUDIOMIX_BIT_STREAM03                0x15C 0x3BC 0x4CC 0x3 0x3
+#define MX8MP_IOMUXC_SAI1_RXD3__AUDIOMIX_BIT_STREAM03                0x15C 0x3BC 0x4CC 0x3 0x4
 #define MX8MP_IOMUXC_SAI1_RXD3__ENET1_MDIO                           0x15C 0x3BC 0x57C 0x4 0x1
 #define MX8MP_IOMUXC_SAI1_RXD3__GPIO4_IO05                           0x15C 0x3BC 0x000 0x5 0x0
 #define MX8MP_IOMUXC_SAI1_RXD4__AUDIOMIX_SAI1_RX_DATA04              0x160 0x3C0 0x000 0x0 0x0
 #define MX8MP_IOMUXC_SAI2_RXFS__UART1_DCE_TX                         0x19C 0x3FC 0x000 0x4 0x0
 #define MX8MP_IOMUXC_SAI2_RXFS__UART1_DTE_RX                         0x19C 0x3FC 0x5E8 0x4 0x2
 #define MX8MP_IOMUXC_SAI2_RXFS__GPIO4_IO21                           0x19C 0x3FC 0x000 0x5 0x0
-#define MX8MP_IOMUXC_SAI2_RXFS__AUDIOMIX_BIT_STREAM02                0x19C 0x3FC 0x4C8 0x6 0x4
+#define MX8MP_IOMUXC_SAI2_RXFS__AUDIOMIX_BIT_STREAM02                0x19C 0x3FC 0x4C8 0x6 0x5
 #define MX8MP_IOMUXC_SAI2_RXFS__SIM_M_HSIZE00                        0x19C 0x3FC 0x000 0x7 0x0
 #define MX8MP_IOMUXC_SAI2_RXC__AUDIOMIX_SAI2_RX_BCLK                 0x1A0 0x400 0x000 0x0 0x0
 #define MX8MP_IOMUXC_SAI2_RXC__AUDIOMIX_SAI5_TX_BCLK                 0x1A0 0x400 0x50C 0x1 0x2
 #define MX8MP_IOMUXC_SAI2_RXC__UART1_DCE_RX                          0x1A0 0x400 0x5E8 0x4 0x3
 #define MX8MP_IOMUXC_SAI2_RXC__UART1_DTE_TX                          0x1A0 0x400 0x000 0x4 0x0
 #define MX8MP_IOMUXC_SAI2_RXC__GPIO4_IO22                            0x1A0 0x400 0x000 0x5 0x0
-#define MX8MP_IOMUXC_SAI2_RXC__AUDIOMIX_BIT_STREAM01                 0x1A0 0x400 0x4C4 0x6 0x4
+#define MX8MP_IOMUXC_SAI2_RXC__AUDIOMIX_BIT_STREAM01                 0x1A0 0x400 0x4C4 0x6 0x5
 #define MX8MP_IOMUXC_SAI2_RXC__SIM_M_HSIZE01                         0x1A0 0x400 0x000 0x7 0x0
 #define MX8MP_IOMUXC_SAI2_RXD0__AUDIOMIX_SAI2_RX_DATA00              0x1A4 0x404 0x000 0x0 0x0
 #define MX8MP_IOMUXC_SAI2_RXD0__AUDIOMIX_SAI5_TX_DATA00              0x1A4 0x404 0x000 0x1 0x0
 #define MX8MP_IOMUXC_SAI2_RXD0__UART1_DCE_RTS                        0x1A4 0x404 0x5E4 0x4 0x2
 #define MX8MP_IOMUXC_SAI2_RXD0__UART1_DTE_CTS                        0x1A4 0x404 0x000 0x4 0x0
 #define MX8MP_IOMUXC_SAI2_RXD0__GPIO4_IO23                           0x1A4 0x404 0x000 0x5 0x0
-#define MX8MP_IOMUXC_SAI2_RXD0__AUDIOMIX_BIT_STREAM03                0x1A4 0x404 0x4CC 0x6 0x4
+#define MX8MP_IOMUXC_SAI2_RXD0__AUDIOMIX_BIT_STREAM03                0x1A4 0x404 0x4CC 0x6 0x5
 #define MX8MP_IOMUXC_SAI2_RXD0__SIM_M_HSIZE02                        0x1A4 0x404 0x000 0x7 0x0
 #define MX8MP_IOMUXC_SAI2_TXFS__AUDIOMIX_SAI2_TX_SYNC                0x1A8 0x408 0x000 0x0 0x0
 #define MX8MP_IOMUXC_SAI2_TXFS__AUDIOMIX_SAI5_TX_DATA01              0x1A8 0x408 0x000 0x1 0x0
 #define MX8MP_IOMUXC_SAI2_TXFS__UART1_DCE_CTS                        0x1A8 0x408 0x000 0x4 0x0
 #define MX8MP_IOMUXC_SAI2_TXFS__UART1_DTE_RTS                        0x1A8 0x408 0x5E4 0x4 0x3
 #define MX8MP_IOMUXC_SAI2_TXFS__GPIO4_IO24                           0x1A8 0x408 0x000 0x5 0x0
-#define MX8MP_IOMUXC_SAI2_TXFS__AUDIOMIX_BIT_STREAM02                0x1A8 0x408 0x4C8 0x6 0x5
+#define MX8MP_IOMUXC_SAI2_TXFS__AUDIOMIX_BIT_STREAM02                0x1A8 0x408 0x4C8 0x6 0x6
 #define MX8MP_IOMUXC_SAI2_TXFS__SIM_M_HWRITE                         0x1A8 0x408 0x000 0x7 0x0
 #define MX8MP_IOMUXC_SAI2_TXC__AUDIOMIX_SAI2_TX_BCLK                 0x1AC 0x40C 0x000 0x0 0x0
 #define MX8MP_IOMUXC_SAI2_TXC__AUDIOMIX_SAI5_TX_DATA02               0x1AC 0x40C 0x000 0x1 0x0
 #define MX8MP_IOMUXC_SAI2_TXC__CAN1_RX                               0x1AC 0x40C 0x54C 0x3 0x1
 #define MX8MP_IOMUXC_SAI2_TXC__GPIO4_IO25                            0x1AC 0x40C 0x000 0x5 0x0
-#define MX8MP_IOMUXC_SAI2_TXC__AUDIOMIX_BIT_STREAM01                 0x1AC 0x40C 0x4C4 0x6 0x5
+#define MX8MP_IOMUXC_SAI2_TXC__AUDIOMIX_BIT_STREAM01                 0x1AC 0x40C 0x4C4 0x6 0x6
 #define MX8MP_IOMUXC_SAI2_TXC__SIM_M_HREADYOUT                       0x1AC 0x40C 0x000 0x7 0x0
 #define MX8MP_IOMUXC_SAI2_TXD0__AUDIOMIX_SAI2_TX_DATA00              0x1B0 0x410 0x000 0x0 0x0
 #define MX8MP_IOMUXC_SAI2_TXD0__AUDIOMIX_SAI5_TX_DATA03              0x1B0 0x410 0x000 0x1 0x0
 #define MX8MP_IOMUXC_SAI3_RXFS__AUDIOMIX_SAI3_RX_DATA01              0x1B8 0x418 0x000 0x3 0x0
 #define MX8MP_IOMUXC_SAI3_RXFS__AUDIOMIX_SPDIF_IN                    0x1B8 0x418 0x544 0x4 0x2
 #define MX8MP_IOMUXC_SAI3_RXFS__GPIO4_IO28                           0x1B8 0x418 0x000 0x5 0x0
-#define MX8MP_IOMUXC_SAI3_RXFS__AUDIOMIX_BIT_STREAM00                0x1B8 0x418 0x4C0 0x6 0x4
+#define MX8MP_IOMUXC_SAI3_RXFS__AUDIOMIX_BIT_STREAM00                0x1B8 0x418 0x4C0 0x6 0x5
 #define MX8MP_IOMUXC_SAI3_RXFS__TPSMP_HTRANS00                       0x1B8 0x418 0x000 0x7 0x0
 #define MX8MP_IOMUXC_SAI3_RXC__AUDIOMIX_SAI3_RX_BCLK                 0x1BC 0x41C 0x000 0x0 0x0
 #define MX8MP_IOMUXC_SAI3_RXC__AUDIOMIX_SAI2_RX_DATA02               0x1BC 0x41C 0x000 0x1 0x0
 #define MX8MP_IOMUXC_SAI3_RXD__UART2_DCE_RTS                         0x1C0 0x420 0x5EC 0x4 0x3
 #define MX8MP_IOMUXC_SAI3_RXD__UART2_DTE_CTS                         0x1C0 0x420 0x000 0x4 0x0
 #define MX8MP_IOMUXC_SAI3_RXD__GPIO4_IO30                            0x1C0 0x420 0x000 0x5 0x0
-#define MX8MP_IOMUXC_SAI3_RXD__AUDIOMIX_BIT_STREAM01                 0x1C0 0x420 0x4C4 0x6 0x6
+#define MX8MP_IOMUXC_SAI3_RXD__AUDIOMIX_BIT_STREAM01                 0x1C0 0x420 0x4C4 0x6 0x7
 #define MX8MP_IOMUXC_SAI3_RXD__TPSMP_HDATA00                         0x1C0 0x420 0x000 0x7 0x0
 #define MX8MP_IOMUXC_SAI3_TXFS__AUDIOMIX_SAI3_TX_SYNC                0x1C4 0x424 0x4EC 0x0 0x1
 #define MX8MP_IOMUXC_SAI3_TXFS__AUDIOMIX_SAI2_TX_DATA01              0x1C4 0x424 0x000 0x1 0x0
 #define MX8MP_IOMUXC_SAI3_TXFS__UART2_DCE_RX                         0x1C4 0x424 0x5F0 0x4 0x4
 #define MX8MP_IOMUXC_SAI3_TXFS__UART2_DTE_TX                         0x1C4 0x424 0x000 0x4 0x0
 #define MX8MP_IOMUXC_SAI3_TXFS__GPIO4_IO31                           0x1C4 0x424 0x000 0x5 0x0
-#define MX8MP_IOMUXC_SAI3_TXFS__AUDIOMIX_BIT_STREAM03                0x1C4 0x424 0x4CC 0x6 0x5
+#define MX8MP_IOMUXC_SAI3_TXFS__AUDIOMIX_BIT_STREAM03                0x1C4 0x424 0x4CC 0x6 0x6
 #define MX8MP_IOMUXC_SAI3_TXFS__TPSMP_HDATA01                        0x1C4 0x424 0x000 0x7 0x0
 #define MX8MP_IOMUXC_SAI3_TXC__AUDIOMIX_SAI3_TX_BCLK                 0x1C8 0x428 0x4E8 0x0 0x1
 #define MX8MP_IOMUXC_SAI3_TXC__AUDIOMIX_SAI2_TX_DATA02               0x1C8 0x428 0x000 0x1 0x0
 #define MX8MP_IOMUXC_SAI3_TXC__UART2_DCE_TX                          0x1C8 0x428 0x000 0x4 0x0
 #define MX8MP_IOMUXC_SAI3_TXC__UART2_DTE_RX                          0x1C8 0x428 0x5F0 0x4 0x5
 #define MX8MP_IOMUXC_SAI3_TXC__GPIO5_IO00                            0x1C8 0x428 0x000 0x5 0x0
-#define MX8MP_IOMUXC_SAI3_TXC__AUDIOMIX_BIT_STREAM02                 0x1C8 0x428 0x4C8 0x6 0x6
+#define MX8MP_IOMUXC_SAI3_TXC__AUDIOMIX_BIT_STREAM02                 0x1C8 0x428 0x4C8 0x6 0x7
 #define MX8MP_IOMUXC_SAI3_TXC__TPSMP_HDATA02                         0x1C8 0x428 0x000 0x7 0x0
 #define MX8MP_IOMUXC_SAI3_TXD__AUDIOMIX_SAI3_TX_DATA00               0x1CC 0x42C 0x000 0x0 0x0
 #define MX8MP_IOMUXC_SAI3_TXD__AUDIOMIX_SAI2_TX_DATA03               0x1CC 0x42C 0x000 0x1 0x0
index 9b1616e59d5826b5fa1a606d77adedecd1393156..9f6ba763238dee729e3126b82e89f5929a0dfb0b 100644 (file)
 
                aips1: bus@30000000 {
                        compatible = "fsl,aips-bus", "simple-bus";
-                       reg = <0x301f0000 0x10000>;
+                       reg = <0x30000000 0x400000>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges;
 
                aips2: bus@30400000 {
                        compatible = "fsl,aips-bus", "simple-bus";
-                       reg = <0x305f0000 0x400000>;
+                       reg = <0x30400000 0x400000>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges;
 
                aips3: bus@30800000 {
                        compatible = "fsl,aips-bus", "simple-bus";
-                       reg = <0x309f0000 0x400000>;
+                       reg = <0x30800000 0x400000>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges;
index 75b384217a239296b0775f45bda218b8532f2e0b..bab88369be1baac6d81d109049ab881586c7fd25 100644 (file)
 
                bus@30000000 { /* AIPS1 */
                        compatible = "fsl,aips-bus", "simple-bus";
-                       reg = <0x301f0000 0x10000>;
+                       reg = <0x30000000 0x400000>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x30000000 0x30000000 0x400000>;
 
                bus@30400000 { /* AIPS2 */
                        compatible = "fsl,aips-bus", "simple-bus";
-                       reg = <0x305f0000 0x10000>;
+                       reg = <0x30400000 0x400000>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x30400000 0x30400000 0x400000>;
 
                bus@30800000 { /* AIPS3 */
                        compatible = "fsl,aips-bus", "simple-bus";
-                       reg = <0x309f0000 0x10000>;
+                       reg = <0x30800000 0x400000>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x30800000 0x30800000 0x400000>,
 
                bus@32c00000 { /* AIPS4 */
                        compatible = "fsl,aips-bus", "simple-bus";
-                       reg = <0x32df0000 0x10000>;
+                       reg = <0x32c00000 0x400000>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x32c00000 0x32c00000 0x400000>;
index ccb8e88a60c5a5b2512558a876f1c1b1c20558b5..d819e44d94a8d12a8747619c78916d06104c05bd 100644 (file)
                                      "venc_lt_sel";
                        assigned-clocks = <&topckgen CLK_TOP_VENC_SEL>,
                                          <&topckgen CLK_TOP_VENC_LT_SEL>;
-                       assigned-clock-parents = <&topckgen CLK_TOP_VENCPLL_D2>,
-                                                <&topckgen CLK_TOP_UNIVPLL1_D2>;
+                       assigned-clock-parents = <&topckgen CLK_TOP_VCODECPLL>,
+                                                <&topckgen CLK_TOP_VCODECPLL_370P5>;
                };
 
                jpegdec: jpegdec@18004000 {
index af87350b5547b9de6a56862b9035f35911b4fd49..c4abbccf2bed041bdfd0592cfa50debd68c80343 100644 (file)
        s11 {
                qcom,saw-leader;
                regulator-always-on;
-               regulator-min-microvolt = <1230000>;
-               regulator-max-microvolt = <1230000>;
+               regulator-min-microvolt = <980000>;
+               regulator-max-microvolt = <980000>;
        };
 };
 
        status = "okay";
 };
 
+&q6asmdai {
+       dai@0 {
+               reg = <0>;
+       };
+
+       dai@1 {
+               reg = <1>;
+       };
+
+       dai@2 {
+               reg = <2>;
+       };
+};
+
 &sound {
        compatible = "qcom,apq8096-sndcard";
        model = "DB820c";
-       audio-routing = "RX_BIAS", "MCLK";
+       audio-routing = "RX_BIAS", "MCLK",
+               "MM_DL1",  "MultiMedia1 Playback",
+               "MM_DL2",  "MultiMedia2 Playback",
+               "MultiMedia3 Capture", "MM_UL3";
 
        mm1-dai-link {
                link-name = "MultiMedia1";
index 14827adebd94a0a98f590e9d52d412def15f0aba..98634d5c44405e22d265332c9b82c62832eb5696 100644 (file)
                                                reg = <APR_SVC_ASM>;
                                                q6asmdai: dais {
                                                        compatible = "qcom,q6asm-dais";
+                                                       #address-cells = <1>;
+                                                       #size-cells = <0>;
                                                        #sound-dai-cells = <1>;
                                                        iommus = <&lpass_q6_smmu 1>;
                                                };
index a2e05926b429110005e04794b59e3a614c663f1b..21fd6f8d57993ae97d08a6f0d797c8d376540a5f 100644 (file)
 &q6asmdai {
        dai@0 {
                reg = <0>;
-               direction = <2>;
        };
 
        dai@1 {
                reg = <1>;
-               direction = <2>;
        };
 
        dai@2 {
                reg = <2>;
-               direction = <1>;
        };
 
        dai@3 {
index 3b617a75fafa92e8199c1b654d5351e1cf3d8326..51a670ad15b24291bee60d5218ef5889528fa142 100644 (file)
 &q6asmdai {
        dai@0 {
                reg = <0>;
-               direction = <2>;
        };
 
        dai@1 {
                reg = <1>;
-               direction = <1>;
        };
 };
 
index 2afb91ec9c8d92cd9fc8016325780deefb3cb3c0..ac2156ab3e6261749208ddd40fdb8b4687646f21 100644 (file)
                adi,input-depth = <8>;
                adi,input-colorspace = "rgb";
                adi,input-clock = "1x";
-               adi,input-style = <1>;
-               adi,input-justification = "evenly";
 
                ports {
                        #address-cells = <1>;
index d7c7b9156e08241cfc8fe4f194cb7a79847b2c24..01c4ba0f7be1caff29424f39b50e6f63d1048032 100644 (file)
                adi,input-depth = <8>;
                adi,input-colorspace = "rgb";
                adi,input-clock = "1x";
-               adi,input-style = <1>;
-               adi,input-justification = "evenly";
 
                ports {
                        #address-cells = <1>;
index 3dde028e22a6d9c9f4ba6c691d1b507f56f37cde..ef8350a062af0ba9e773334682a462a1996c8a5c 100644 (file)
                adi,input-depth = <8>;
                adi,input-colorspace = "rgb";
                adi,input-clock = "1x";
-               adi,input-style = <1>;
-               adi,input-justification = "evenly";
 
                ports {
                        #address-cells = <1>;
index adbfd8f07d06a3a46a1ee26f49166adea1f231bd..6dff0469322390214b12c3d78206eef523db08eb 100644 (file)
                adi,input-depth = <8>;
                adi,input-colorspace = "rgb";
                adi,input-clock = "1x";
-               adi,input-style = <1>;
-               adi,input-justification = "evenly";
 
                ports {
                        #address-cells = <1>;
index e01b0508a18fa91ae83772bcbb3d9f1d2b53a2bf..d672b320bc14568531b254d011e37f7311fa7f55 100644 (file)
                ipmmu_vip0: mmu@e7b00000 {
                        compatible = "renesas,ipmmu-r8a77980";
                        reg = <0 0xe7b00000 0 0x1000>;
+                       renesas,ipmmu-main = <&ipmmu_mm 4>;
                        power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
                        #iommu-cells = <1>;
                };
                ipmmu_vip1: mmu@e7960000 {
                        compatible = "renesas,ipmmu-r8a77980";
                        reg = <0 0xe7960000 0 0x1000>;
+                       renesas,ipmmu-main = <&ipmmu_mm 11>;
                        power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
                        #iommu-cells = <1>;
                };
index 4fd2b14fbb8b5a13ba1387474c202248673db705..dc24cec46ae18eedb67ff1b5eb21653d49791620 100644 (file)
                adi,input-depth = <8>;
                adi,input-colorspace = "rgb";
                adi,input-clock = "1x";
-               adi,input-style = <1>;
-               adi,input-justification = "evenly";
 
                ports {
                        #address-cells = <1>;
index 67634cb01d6b68567e23d1554b969915181df26c..79c73a99d2fed77f9b244940894f3325313b460d 100644 (file)
 
        hdmi-encoder@39 {
                compatible = "adi,adv7511w";
-               reg = <0x39>, <0x3f>, <0x38>, <0x3c>;
-               reg-names = "main", "edid", "packet", "cec";
+               reg = <0x39>, <0x3f>, <0x3c>, <0x38>;
+               reg-names = "main", "edid", "cec", "packet";
                interrupt-parent = <&gpio1>;
                interrupts = <28 IRQ_TYPE_LEVEL_LOW>;
 
                adi,input-depth = <8>;
                adi,input-colorspace = "rgb";
                adi,input-clock = "1x";
-               adi,input-style = <1>;
-               adi,input-justification = "evenly";
 
                ports {
                        #address-cells = <1>;
index f809dd6d5dc3c72624df03efeeb08e6ad9af3968..adc9b8bf5eaa6b234ebf684d8a19bc20f65d2f1a 100644 (file)
        };
 
        arm-pmu {
-               compatible = "arm,cortex-a53-pmu";
+               compatible = "arm,cortex-a35-pmu";
                interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
index ac43bc3f7031417a25bb4cf158ffcf82933ddc40..ac7f694079d075a6f0acb87ec63b5e997c553b34 100644 (file)
        };
 
        arm-pmu {
-               compatible = "arm,cortex-a53-pmu";
+               compatible = "arm,cortex-a35-pmu";
                interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>,
index 49c4b96da3d4038799871db0cb208ef654d24c14..ac29c2744d08649bd42e5db416e2c8a5c98eaed3 100644 (file)
 &gmac2phy {
        phy-supply = <&vcc_phy>;
        clock_in_out = "output";
-       assigned-clocks = <&cru SCLK_MAC2PHY_SRC>;
        assigned-clock-rates = <50000000>;
        assigned-clocks = <&cru SCLK_MAC2PHY>;
        assigned-clock-parents = <&cru SCLK_MAC2PHY_SRC>;
-
+       status = "okay";
 };
 
 &i2c1 {
        status = "okay";
 
-       rk805: rk805@18 {
+       rk805: pmic@18 {
                compatible = "rockchip,rk805";
                reg = <0x18>;
                interrupt-parent = <&gpio2>;
index bf3e546f5266d74d3d22b5c78e46c571d17517f5..ebf3eb222e1fc73a8fead2caf5aba55be2486cb7 100644 (file)
 &i2c1 {
        status = "okay";
 
-       rk805: rk805@18 {
+       rk805: pmic@18 {
                compatible = "rockchip,rk805";
                reg = <0x18>;
                interrupt-parent = <&gpio2>;
index 7e88d88aab985349d93e6caa95e0133917fbbffd..a4d591d91533186ce502f861ff647195d2c63cff 100644 (file)
        grf: syscon@ff100000 {
                compatible = "rockchip,rk3328-grf", "syscon", "simple-mfd";
                reg = <0x0 0xff100000 0x0 0x1000>;
-               #address-cells = <1>;
-               #size-cells = <1>;
 
                io_domains: io-domains {
                        compatible = "rockchip,rk3328-io-voltage-domain";
                };
 
                gmac2phy {
-                       fephyled_speed100: fephyled-speed100 {
-                               rockchip,pins = <0 RK_PD7 1 &pcfg_pull_none>;
-                       };
-
                        fephyled_speed10: fephyled-speed10 {
                                rockchip,pins = <0 RK_PD6 1 &pcfg_pull_none>;
                        };
                                rockchip,pins = <0 RK_PD6 2 &pcfg_pull_none>;
                        };
 
-                       fephyled_rxm0: fephyled-rxm0 {
-                               rockchip,pins = <0 RK_PD5 1 &pcfg_pull_none>;
-                       };
-
-                       fephyled_txm0: fephyled-txm0 {
-                               rockchip,pins = <0 RK_PD5 2 &pcfg_pull_none>;
-                       };
-
-                       fephyled_linkm0: fephyled-linkm0 {
-                               rockchip,pins = <0 RK_PD4 1 &pcfg_pull_none>;
-                       };
-
                        fephyled_rxm1: fephyled-rxm1 {
                                rockchip,pins = <2 RK_PD1 2 &pcfg_pull_none>;
                        };
index 5ea281b55fe2735808c3a12628e3f6c8463bcc52..c49982dfd8fc12ce203d125c8d710b0318563140 100644 (file)
                        "Speaker", "Speaker Amplifier OUTL",
                        "Speaker", "Speaker Amplifier OUTR";
 
-               simple-audio-card,hp-det-gpio = <&gpio0 RK_PB0 GPIO_ACTIVE_LOW>;
+               simple-audio-card,hp-det-gpio = <&gpio0 RK_PB0 GPIO_ACTIVE_HIGH>;
                simple-audio-card,aux-devs = <&speaker_amp>;
                simple-audio-card,pin-switches = "Speaker";
 
        fusb0: fusb30x@22 {
                compatible = "fcs,fusb302";
                reg = <0x22>;
-               fcs,int_n = <&gpio1 RK_PA2 GPIO_ACTIVE_HIGH>;
+               interrupt-parent = <&gpio1>;
+               interrupts = <RK_PA2 IRQ_TYPE_LEVEL_LOW>;
                pinctrl-names = "default";
                pinctrl-0 = <&fusb0_int_gpio>;
                vbus-supply = <&vbus_typec>;
 
        dc-charger {
                dc_det_gpio: dc-det-gpio {
-                       rockchip,pins = <4 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>;
+                       rockchip,pins = <4 RK_PD0 RK_FUNC_GPIO &pcfg_pull_up>;
                };
        };
 
        es8316 {
                hp_det_gpio: hp-det-gpio {
-                       rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_down>;
+                       rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>;
                };
        };
 
index 74f2c3d490953770e22b1ec8f92fbb949c2a9141..1448f358ed0aa191ff3400eda934eee3203af728 100644 (file)
                reset-names = "usb3-otg";
                status = "disabled";
 
-               usbdrd_dwc3_0: dwc3 {
+               usbdrd_dwc3_0: usb@fe800000 {
                        compatible = "snps,dwc3";
                        reg = <0x0 0xfe800000 0x0 0x100000>;
                        interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH 0>;
                reset-names = "usb3-otg";
                status = "disabled";
 
-               usbdrd_dwc3_1: dwc3 {
+               usbdrd_dwc3_1: usb@fe900000 {
                        compatible = "snps,dwc3";
                        reg = <0x0 0xfe900000 0x0 0x100000>;
                        interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH 0>;
        pmugrf: syscon@ff320000 {
                compatible = "rockchip,rk3399-pmugrf", "syscon", "simple-mfd";
                reg = <0x0 0xff320000 0x0 0x1000>;
-               #address-cells = <1>;
-               #size-cells = <1>;
 
                pmu_io_domains: io-domains {
                        compatible = "rockchip,rk3399-pmu-io-voltage-domain";
        gpu: gpu@ff9a0000 {
                compatible = "rockchip,rk3399-mali", "arm,mali-t860";
                reg = <0x0 0xff9a0000 0x0 0x10000>;
-               interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH 0>,
-                            <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH 0>,
-                            <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH 0>;
-               interrupt-names = "gpu", "job", "mmu";
+               interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH 0>,
+                            <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH 0>,
+                            <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH 0>;
+               interrupt-names = "job", "mmu", "gpu";
                clocks = <&cru ACLK_GPU>;
                #cooling-cells = <2>;
                power-domains = <&power RK3399_PD_GPU>;
index 24e534d8504549e7f0ef4d5d8d197372c0062b5a..03d0189f7d68cedbb9aada001060a6256f34a146 100644 (file)
@@ -208,7 +208,7 @@ CONFIG_PCIE_QCOM=y
 CONFIG_PCIE_ARMADA_8K=y
 CONFIG_PCIE_KIRIN=y
 CONFIG_PCIE_HISI_STB=y
-CONFIG_PCIE_TEGRA194=m
+CONFIG_PCIE_TEGRA194_HOST=m
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_FW_LOADER_USER_HELPER=y
@@ -567,6 +567,7 @@ CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
 CONFIG_MEDIA_SDR_SUPPORT=y
 CONFIG_MEDIA_CONTROLLER=y
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_MEDIA_PLATFORM_SUPPORT=y
 # CONFIG_DVB_NET is not set
 CONFIG_MEDIA_USB_SUPPORT=y
 CONFIG_USB_VIDEO_CLASS=m
@@ -610,8 +611,9 @@ CONFIG_DRM_MSM=m
 CONFIG_DRM_TEGRA=m
 CONFIG_DRM_PANEL_LVDS=m
 CONFIG_DRM_PANEL_SIMPLE=m
-CONFIG_DRM_DUMB_VGA_DAC=m
+CONFIG_DRM_SIMPLE_BRIDGE=m
 CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA=m
+CONFIG_DRM_DISPLAY_CONNECTOR=m
 CONFIG_DRM_SII902X=m
 CONFIG_DRM_THINE_THC63LVD1024=m
 CONFIG_DRM_TI_SN65DSI86=m
@@ -848,7 +850,8 @@ CONFIG_QCOM_APR=m
 CONFIG_ARCH_R8A774A1=y
 CONFIG_ARCH_R8A774B1=y
 CONFIG_ARCH_R8A774C0=y
-CONFIG_ARCH_R8A7795=y
+CONFIG_ARCH_R8A77950=y
+CONFIG_ARCH_R8A77951=y
 CONFIG_ARCH_R8A77960=y
 CONFIG_ARCH_R8A77961=y
 CONFIG_ARCH_R8A77965=y
index 37ca3e8898484572d37ae0222cf13100c4dce952..af2bbca38e70fb3514bf594cafffbf907c8eeffa 100644 (file)
@@ -87,9 +87,17 @@ void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
            !crypto_simd_usable())
                return chacha_crypt_generic(state, dst, src, bytes, nrounds);
 
-       kernel_neon_begin();
-       chacha_doneon(state, dst, src, bytes, nrounds);
-       kernel_neon_end();
+       do {
+               unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
+
+               kernel_neon_begin();
+               chacha_doneon(state, dst, src, todo, nrounds);
+               kernel_neon_end();
+
+               bytes -= todo;
+               src += todo;
+               dst += todo;
+       } while (bytes);
 }
 EXPORT_SYMBOL(chacha_crypt_arch);
 
index 895d3727c1fbcf3691293d5bf700623068a149c7..c5405e6a6db76f7cbbbccda47cc3a116c1c11e45 100644 (file)
@@ -30,7 +30,7 @@ static int nhpoly1305_neon_update(struct shash_desc *desc,
                return crypto_nhpoly1305_update(desc, src, srclen);
 
        do {
-               unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
+               unsigned int n = min_t(unsigned int, srclen, SZ_4K);
 
                kernel_neon_begin();
                crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon);
index e97b092f56b8fcae5531be3db610737b22f7a057..f33ada70c4ed8f84171b202e3f447880e0e7696f 100644 (file)
@@ -143,13 +143,20 @@ void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
                unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE);
 
                if (static_branch_likely(&have_neon) && crypto_simd_usable()) {
-                       kernel_neon_begin();
-                       poly1305_blocks_neon(&dctx->h, src, len, 1);
-                       kernel_neon_end();
+                       do {
+                               unsigned int todo = min_t(unsigned int, len, SZ_4K);
+
+                               kernel_neon_begin();
+                               poly1305_blocks_neon(&dctx->h, src, todo, 1);
+                               kernel_neon_end();
+
+                               len -= todo;
+                               src += todo;
+                       } while (len);
                } else {
                        poly1305_blocks(&dctx->h, src, len, 1);
+                       src += len;
                }
-               src += len;
                nbytes %= POLY1305_BLOCK_SIZE;
        }
 
index 32fc8061aa76ffcca455ee85790058c2a6ada1ed..bc5c7b091152056fbcef59719bd4245799897c23 100644 (file)
@@ -304,7 +304,7 @@ do {                                                                        \
                __p = uaccess_mask_ptr(__p);                            \
                __raw_get_user((x), __p, (err));                        \
        } else {                                                        \
-               (x) = 0; (err) = -EFAULT;                               \
+               (x) = (__force __typeof__(x))0; (err) = -EFAULT;        \
        }                                                               \
 } while (0)
 
index 8e9c924423b4ead238f73c1db04fe81bf6979b15..a0b144cfaea716c0dda21d3d935147c3e0bcdb48 100644 (file)
@@ -177,6 +177,7 @@ void machine_kexec(struct kimage *kimage)
         * the offline CPUs. Therefore, we must use the __* variant here.
         */
        __flush_icache_range((uintptr_t)reboot_code_buffer,
+                            (uintptr_t)reboot_code_buffer +
                             arm64_relocate_new_kernel_size);
 
        /* Flush the kimage list and its buffers. */
index b3d3005d9515de52dc5527696049aaffcc9633dd..e7b01904f18017cc1cf49ac39a1eb07a29561691 100644 (file)
@@ -1829,10 +1829,11 @@ static void tracehook_report_syscall(struct pt_regs *regs,
 
 int syscall_trace_enter(struct pt_regs *regs)
 {
-       if (test_thread_flag(TIF_SYSCALL_TRACE) ||
-               test_thread_flag(TIF_SYSCALL_EMU)) {
+       unsigned long flags = READ_ONCE(current_thread_info()->flags);
+
+       if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
                tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
-               if (!in_syscall(regs) || test_thread_flag(TIF_SYSCALL_EMU))
+               if (!in_syscall(regs) || (flags & _TIF_SYSCALL_EMU))
                        return -1;
        }
 
index 061f60fe452f773e233c0bffc68b0c614db905a4..bb813d06114a3d9fbaa3995c8a5d1e64c2bfa886 100644 (file)
@@ -176,7 +176,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
                panic("CPU%u detected unsupported configuration\n", cpu);
        }
 
-       return ret;
+       return -EIO;
 }
 
 static void init_gic_priority_masking(void)
index dd2514bb1511fe4fa72271bb9af38564c4fe5607..3862cad2410cfab14a3d028882b7e856cffb344f 100644 (file)
@@ -32,7 +32,7 @@ UBSAN_SANITIZE                        := n
 OBJECT_FILES_NON_STANDARD      := y
 KCOV_INSTRUMENT                        := n
 
-CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny
+CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny -fasynchronous-unwind-tables
 
 ifneq ($(c-gettimeofday-y),)
   CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
index 23ebe51410f06a3435915bddaa98b688209d0819..50a279d3ddd783ad751fefa4790b26aeb4adcfdb 100644 (file)
@@ -200,6 +200,13 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        }
 
        memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
+
+       if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
+               int i;
+
+               for (i = 0; i < 16; i++)
+                       *vcpu_reg32(vcpu, i) = (u32)*vcpu_reg32(vcpu, i);
+       }
 out:
        return err;
 }
index d22d0534dd600e24651e9e067894f0ddf88824e6..90186cf6473e0223ab965b24d5df0d1f7e677a5f 100644 (file)
@@ -18,6 +18,7 @@
 
 #define CPU_GP_REG_OFFSET(x)   (CPU_GP_REGS + x)
 #define CPU_XREG_OFFSET(x)     CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
+#define CPU_SP_EL0_OFFSET      (CPU_XREG_OFFSET(30) + 8)
 
        .text
        .pushsection    .hyp.text, "ax"
        ldp     x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
 .endm
 
+.macro save_sp_el0 ctxt, tmp
+       mrs     \tmp,   sp_el0
+       str     \tmp,   [\ctxt, #CPU_SP_EL0_OFFSET]
+.endm
+
+.macro restore_sp_el0 ctxt, tmp
+       ldr     \tmp,     [\ctxt, #CPU_SP_EL0_OFFSET]
+       msr     sp_el0, \tmp
+.endm
+
 /*
  * u64 __guest_enter(struct kvm_vcpu *vcpu,
  *                  struct kvm_cpu_context *host_ctxt);
@@ -60,6 +71,9 @@ SYM_FUNC_START(__guest_enter)
        // Store the host regs
        save_callee_saved_regs x1
 
+       // Save the host's sp_el0
+       save_sp_el0     x1, x2
+
        // Now the host state is stored, so if we have a pending RAS SError it
        // must affect the host. If any asynchronous exception is pending we defer
        // the guest entry. The DSB isn't necessary before v8.2 as any SError
@@ -83,6 +97,9 @@ alternative_else_nop_endif
        // when this feature is enabled for kernel code.
        ptrauth_switch_to_guest x29, x0, x1, x2
 
+       // Restore the guest's sp_el0
+       restore_sp_el0 x29, x0
+
        // Restore guest regs x0-x17
        ldp     x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
        ldp     x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
@@ -130,6 +147,9 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
        // Store the guest regs x18-x29, lr
        save_callee_saved_regs x1
 
+       // Store the guest's sp_el0
+       save_sp_el0     x1, x2
+
        get_host_ctxt   x2, x3
 
        // Macro ptrauth_switch_to_host format:
@@ -139,6 +159,9 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
        // when this feature is enabled for kernel code.
        ptrauth_switch_to_host x1, x2, x3, x4, x5
 
+       // Restore the host's sp_el0
+       restore_sp_el0 x2, x3
+
        // Now restore the host regs
        restore_callee_saved_regs x2
 
index c2a13ab3c471241012188b80aac24298cd4cd186..9c5cfb04170ee531c2f3e3fd80a8ab77f6fe4174 100644 (file)
@@ -198,7 +198,6 @@ SYM_CODE_END(__hyp_panic)
 .macro invalid_vector  label, target = __hyp_panic
        .align  2
 SYM_CODE_START(\label)
-\label:
        b \target
 SYM_CODE_END(\label)
 .endm
index 75b1925763f16ef89ede0c53c4eea9e567bc234a..6d2df9fe0b5d2dcc2e1c9b98229079da666c0c50 100644 (file)
@@ -15,8 +15,9 @@
 /*
  * Non-VHE: Both host and guest must save everything.
  *
- * VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and pstate,
- * which are handled as part of the el2 return state) on every switch.
+ * VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and
+ * pstate, which are handled as part of the el2 return state) on every
+ * switch (sp_el0 is now handled in the assembly code).
  * tpidr_el0 and tpidrro_el0 only need to be switched when going
  * to host userspace or a different VCPU.  EL1 registers only need to be
  * switched when potentially going to run a different VCPU.  The latter two
 static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
 {
        ctxt->sys_regs[MDSCR_EL1]       = read_sysreg(mdscr_el1);
-
-       /*
-        * The host arm64 Linux uses sp_el0 to point to 'current' and it must
-        * therefore be saved/restored on every entry/exit to/from the guest.
-        */
-       ctxt->gp_regs.regs.sp           = read_sysreg(sp_el0);
 }
 
 static void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
@@ -99,12 +94,6 @@ NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);
 static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
 {
        write_sysreg(ctxt->sys_regs[MDSCR_EL1],   mdscr_el1);
-
-       /*
-        * The host arm64 Linux uses sp_el0 to point to 'current' and it must
-        * therefore be saved/restored on every entry/exit to/from the guest.
-        */
-       write_sysreg(ctxt->gp_regs.regs.sp,       sp_el0);
 }
 
 static void __hyp_text __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
index bbeb6a5a6ba6dea94c69e85413ec94534bd3985f..0be3355e34997544aa43cd652c733a996b9e3ff6 100644 (file)
@@ -230,6 +230,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
                ptep = (pte_t *)pudp;
        } else if (sz == (CONT_PTE_SIZE)) {
                pmdp = pmd_alloc(mm, pudp, addr);
+               if (!pmdp)
+                       return NULL;
 
                WARN_ON(addr & (sz - 1));
                /*
index 94545d50d40fafe80a9996d749da97487c6a8842..bd31ab12f77de93afe292c9f839169a74d2e5e71 100644 (file)
@@ -8,6 +8,7 @@ config CSKY
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
        select ARCH_USE_BUILTIN_BSWAP
        select ARCH_USE_QUEUED_RWLOCKS if NR_CPUS>2
+       select ARCH_WANT_FRAME_POINTERS if !CPU_CK610
        select COMMON_CLK
        select CLKSRC_MMIO
        select CSKY_MPINTC if CPU_CK860
@@ -38,6 +39,7 @@ config CSKY
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_COPY_THREAD_TLS
+       select HAVE_DEBUG_BUGVERBOSE
        select HAVE_DYNAMIC_FTRACE
        select HAVE_DYNAMIC_FTRACE_WITH_REGS
        select HAVE_FUNCTION_TRACER
index fb1bbbd91954cdac76b8d42c4bca78acc4a7b947..37f593a4bf53612dd8ff5492bde0e24bec13f574 100644 (file)
@@ -47,7 +47,7 @@ ifeq ($(CSKYABI),abiv2)
 KBUILD_CFLAGS += -mno-stack-size
 endif
 
-ifdef CONFIG_STACKTRACE
+ifdef CONFIG_FRAME_POINTER
 KBUILD_CFLAGS += -mbacktrace
 endif
 
index 5056ebb902d18336ac8208da2cbdeee256b920da..13c23e2c707ccfae861fedddfb16374a527a9776 100644 (file)
@@ -80,7 +80,6 @@
 .endm
 
 .macro RESTORE_ALL
-       psrclr  ie
        ldw     lr, (sp, 4)
        ldw     a0, (sp, 8)
        mtcr    a0, epc
         *   BA     Reserved  C   D   V
         */
        cprcr   r6, cpcr30
-       lsri    r6, 28
-       lsli    r6, 28
+       lsri    r6, 29
+       lsli    r6, 29
        addi    r6, 0xe
        cpwcr   r6, cpcr30
 
        movi    r6, 0
        cpwcr   r6, cpcr31
 .endm
-
-.macro ANDI_R3 rx, imm
-       lsri    \rx, 3
-       andi    \rx, (\imm >> 3)
-.endm
 #endif /* __ASM_CSKY_ENTRY_H */
index a99aff555a0ac1e979ed4aa8378ab80cc1257b87..4fdd6c12e7ff8571cf82ca66061d4d3a4d39b991 100644 (file)
@@ -13,6 +13,8 @@
 #define LSAVE_A1       28
 #define LSAVE_A2       32
 #define LSAVE_A3       36
+#define LSAVE_A4       40
+#define LSAVE_A5       44
 
 #define KSPTOUSP
 #define USPTOKSP
@@ -63,7 +65,6 @@
 .endm
 
 .macro RESTORE_ALL
-       psrclr  ie
        ldw     tls, (sp, 0)
        ldw     lr, (sp, 4)
        ldw     a0, (sp, 8)
         */
        mfcr    r6, cr<30, 15> /* Get MSA0 */
 2:
-       lsri    r6, 28
-       lsli    r6, 28
+       lsri    r6, 29
+       lsli    r6, 29
        addi    r6, 0x1ce
        mtcr    r6, cr<30, 15> /* Set MSA0 */
 
        jmpi    3f /* jump to va */
 3:
 .endm
-
-.macro ANDI_R3 rx, imm
-       lsri    \rx, 3
-       andi    \rx, (\imm >> 3)
-.endm
 #endif /* __ASM_CSKY_ENTRY_H */
index 9331c7ed5958ab0b80e265b17f35cf0e4e3aa3a6..911512bf480f3ef9102e3d24b072dc2a37fd6d07 100644 (file)
@@ -103,6 +103,8 @@ ENTRY(_mcount)
        mov     a0, lr
        subi    a0, 4
        ldw     a1, (sp, 24)
+       lrw     a2, function_trace_op
+       ldw     a2, (a2, 0)
 
        jsr     r26
 
index c6bcd7f7c720bbc2485bb32697881787c07edfb1..24442d8e86f9742a5e0259cb9c16dff3bbafde9c 100644 (file)
@@ -41,8 +41,7 @@ extern struct cpuinfo_csky cpu_data[];
 #define TASK_UNMAPPED_BASE      (TASK_SIZE / 3)
 
 struct thread_struct {
-       unsigned long  ksp;       /* kernel stack pointer */
-       unsigned long  sr;        /* saved status register */
+       unsigned long  sp;        /* kernel stack pointer */
        unsigned long  trap_no;   /* trap number */
 
        /* FPU regs */
@@ -50,8 +49,7 @@ struct thread_struct {
 };
 
 #define INIT_THREAD  { \
-       .ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
-       .sr = DEFAULT_PSR_VALUE, \
+       .sp = sizeof(init_stack) + (unsigned long) &init_stack, \
 }
 
 /*
index aae5aa96cf54480ad03aaf7c87b73afee75d20c2..bcfb7070e48d379e0b1895ec18e071908d3217eb 100644 (file)
@@ -58,6 +58,16 @@ static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
        return regs->usp;
 }
 
+static inline unsigned long frame_pointer(struct pt_regs *regs)
+{
+       return regs->regs[4];
+}
+static inline void frame_pointer_set(struct pt_regs *regs,
+                                    unsigned long val)
+{
+       regs->regs[4] = val;
+}
+
 extern int regs_query_register_offset(const char *name);
 extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
                                                unsigned int n);
index 442fedad026094f2c2e3b7262b2b61b12523e362..8980e4e643919d47861e2a381447f144c0f665d8 100644 (file)
@@ -38,7 +38,13 @@ struct thread_info {
 #define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
 
 #define thread_saved_fp(tsk) \
-       ((unsigned long)(((struct switch_stack *)(tsk->thread.ksp))->r8))
+       ((unsigned long)(((struct switch_stack *)(tsk->thread.sp))->r8))
+
+#define thread_saved_sp(tsk) \
+       ((unsigned long)(tsk->thread.sp))
+
+#define thread_saved_lr(tsk) \
+       ((unsigned long)(((struct switch_stack *)(tsk->thread.sp))->r15))
 
 static inline struct thread_info *current_thread_info(void)
 {
@@ -54,10 +60,10 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SIGPENDING         0       /* signal pending */
 #define TIF_NOTIFY_RESUME      1       /* callback before returning to user */
 #define TIF_NEED_RESCHED       2       /* rescheduling necessary */
-#define TIF_SYSCALL_TRACE      3       /* syscall trace active */
-#define TIF_SYSCALL_TRACEPOINT 4       /* syscall tracepoint instrumentation */
-#define TIF_SYSCALL_AUDIT      5       /* syscall auditing */
-#define TIF_UPROBE             6       /* uprobe breakpoint or singlestep */
+#define TIF_UPROBE             3       /* uprobe breakpoint or singlestep */
+#define TIF_SYSCALL_TRACE      4       /* syscall trace active */
+#define TIF_SYSCALL_TRACEPOINT 5       /* syscall tracepoint instrumentation */
+#define TIF_SYSCALL_AUDIT      6       /* syscall auditing */
 #define TIF_POLLING_NRFLAG     16      /* poll_idle() is TIF_NEED_RESCHED */
 #define TIF_MEMDIE             18      /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK    20      /* restore signal mask in do_signal() */
@@ -75,4 +81,10 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_RESTORE_SIGMASK   (1 << TIF_RESTORE_SIGMASK)
 #define _TIF_SECCOMP           (1 << TIF_SECCOMP)
 
+#define _TIF_WORK_MASK         (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+                                _TIF_NOTIFY_RESUME | _TIF_UPROBE)
+
+#define _TIF_SYSCALL_WORK      (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+                                _TIF_SYSCALL_TRACEPOINT)
+
 #endif /* _ASM_CSKY_THREAD_INFO_H */
index abefa125b93cfd774d0559c9b582762dc8b51bfa..1633ffe5ae15a7ef2d3c44c1211791cbfafea788 100644 (file)
@@ -253,7 +253,7 @@ do {                                                                \
 
 extern int __get_user_bad(void);
 
-#define __copy_user(to, from, n)                       \
+#define ___copy_to_user(to, from, n)                   \
 do {                                                   \
        int w0, w1, w2, w3;                             \
        asm volatile(                                   \
@@ -288,31 +288,34 @@ do {                                                      \
        "       subi    %0, 4           \n"             \
        "       br      3b              \n"             \
        "5:     cmpnei  %0, 0           \n"  /* 1B */   \
-       "       bf      8f              \n"             \
+       "       bf      13f             \n"             \
        "       ldb     %3, (%2, 0)     \n"             \
        "6:     stb     %3, (%1, 0)     \n"             \
        "       addi    %2,  1          \n"             \
        "       addi    %1,  1          \n"             \
        "       subi    %0,  1          \n"             \
        "       br      5b              \n"             \
-       "7:     br      8f              \n"             \
+       "7:     subi    %0,  4          \n"             \
+       "8:     subi    %0,  4          \n"             \
+       "12:    subi    %0,  4          \n"             \
+       "       br      13f             \n"             \
        ".section __ex_table, \"a\"     \n"             \
        ".align   2                     \n"             \
-       ".long    2b, 7b                \n"             \
-       ".long    9b, 7b                \n"             \
-       ".long   10b, 7b                \n"             \
+       ".long    2b, 13f               \n"             \
+       ".long    4b, 13f               \n"             \
+       ".long    6b, 13f               \n"             \
+       ".long    9b, 12b               \n"             \
+       ".long   10b, 8b                \n"             \
        ".long   11b, 7b                \n"             \
-       ".long    4b, 7b                \n"             \
-       ".long    6b, 7b                \n"             \
        ".previous                      \n"             \
-       "8:                             \n"             \
+       "13:                            \n"             \
        : "=r"(n), "=r"(to), "=r"(from), "=r"(w0),      \
          "=r"(w1), "=r"(w2), "=r"(w3)                  \
        : "0"(n), "1"(to), "2"(from)                    \
        : "memory");                                    \
 } while (0)
 
-#define __copy_user_zeroing(to, from, n)               \
+#define ___copy_from_user(to, from, n)                 \
 do {                                                   \
        int tmp;                                        \
        int nsave;                                      \
@@ -355,22 +358,22 @@ do {                                                      \
        "       addi    %1,  1          \n"             \
        "       subi    %0,  1          \n"             \
        "       br      5b              \n"             \
-       "8:     mov     %3, %0          \n"             \
-       "       movi    %4, 0           \n"             \
-       "9:     stb     %4, (%1, 0)     \n"             \
-       "       addi    %1, 1           \n"             \
-       "       subi    %3, 1           \n"             \
-       "       cmpnei  %3, 0           \n"             \
-       "       bt      9b              \n"             \
-       "       br      7f              \n"             \
+       "8:     stw     %3, (%1, 0)     \n"             \
+       "       subi    %0, 4           \n"             \
+       "       bf      7f              \n"             \
+       "9:     subi    %0, 8           \n"             \
+       "       bf      7f              \n"             \
+       "13:    stw     %3, (%1, 8)     \n"             \
+       "       subi    %0, 12          \n"             \
+       "       bf      7f              \n"             \
        ".section __ex_table, \"a\"     \n"             \
        ".align   2                     \n"             \
-       ".long    2b, 8b                \n"             \
+       ".long    2b, 7f                \n"             \
+       ".long    4b, 7f                \n"             \
+       ".long    6b, 7f                \n"             \
        ".long   10b, 8b                \n"             \
-       ".long   11b, 8b                \n"             \
-       ".long   12b, 8b                \n"             \
-       ".long    4b, 8b                \n"             \
-       ".long    6b, 8b                \n"             \
+       ".long   11b, 9b                \n"             \
+       ".long   12b,13b                \n"             \
        ".previous                      \n"             \
        "7:                             \n"             \
        : "=r"(n), "=r"(to), "=r"(from), "=r"(nsave),   \
index fd6d9dc8b7f3b1adb713d1549d85d9052b5012b1..37f37c0e934a8f36c95f7e2e0654ecf63b08e503 100644 (file)
@@ -3,7 +3,7 @@ extra-y := head.o vmlinux.lds
 
 obj-y += entry.o atomic.o signal.o traps.o irq.o time.o vdso.o
 obj-y += power.o syscall.o syscall_table.o setup.o
-obj-y += process.o cpu-probe.o ptrace.o dumpstack.o
+obj-y += process.o cpu-probe.o ptrace.o stacktrace.o
 obj-y += probes/
 
 obj-$(CONFIG_MODULES)                  += module.o
index f8be348df9e49a656d0e7099724313a522d7f20b..17479860d43dcbb045b6959a3cb8022590814d22 100644 (file)
@@ -18,8 +18,7 @@ int main(void)
        DEFINE(TASK_ACTIVE_MM,    offsetof(struct task_struct, active_mm));
 
        /* offsets into the thread struct */
-       DEFINE(THREAD_KSP,        offsetof(struct thread_struct, ksp));
-       DEFINE(THREAD_SR,         offsetof(struct thread_struct, sr));
+       DEFINE(THREAD_KSP,        offsetof(struct thread_struct, sp));
        DEFINE(THREAD_FESR,       offsetof(struct thread_struct, user_fp.fesr));
        DEFINE(THREAD_FCR,        offsetof(struct thread_struct, user_fp.fcr));
        DEFINE(THREAD_FPREG,      offsetof(struct thread_struct, user_fp.vr));
diff --git a/arch/csky/kernel/dumpstack.c b/arch/csky/kernel/dumpstack.c
deleted file mode 100644 (file)
index d67f977..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
-
-#include <linux/ptrace.h>
-
-int kstack_depth_to_print = 48;
-
-void show_trace(unsigned long *stack)
-{
-       unsigned long *stack_end;
-       unsigned long *stack_start;
-       unsigned long *fp;
-       unsigned long addr;
-
-       addr = (unsigned long) stack & THREAD_MASK;
-       stack_start = (unsigned long *) addr;
-       stack_end = (unsigned long *) (addr + THREAD_SIZE);
-
-       fp = stack;
-       pr_info("\nCall Trace:");
-
-       while (fp > stack_start && fp < stack_end) {
-#ifdef CONFIG_STACKTRACE
-               addr    = fp[1];
-               fp      = (unsigned long *) fp[0];
-#else
-               addr    = *fp++;
-#endif
-               if (__kernel_text_address(addr))
-                       pr_cont("\n[<%08lx>] %pS", addr, (void *)addr);
-       }
-       pr_cont("\n");
-}
-
-void show_stack(struct task_struct *task, unsigned long *stack)
-{
-       if (!stack) {
-               if (task)
-                       stack = (unsigned long *)thread_saved_fp(task);
-               else
-#ifdef CONFIG_STACKTRACE
-                       asm volatile("mov %0, r8\n":"=r"(stack)::"memory");
-#else
-                       stack = (unsigned long *)&stack;
-#endif
-       }
-
-       show_trace(stack);
-}
index 364819536f2e42d48c13df5c9a37a550d17a40dd..f13800383a19aab64d09750dc5ec01cc1bea02d4 100644 (file)
@@ -128,39 +128,41 @@ tlbop_end 1
 ENTRY(csky_systemcall)
        SAVE_ALL TRAP0_SIZE
        zero_fp
-#ifdef CONFIG_RSEQ_DEBUG
-       mov     a0, sp
-       jbsr    rseq_syscall
-#endif
        psrset  ee, ie
 
-       lrw     r11, __NR_syscalls
-       cmphs   syscallid, r11          /* Check nr of syscall */
-       bt      ret_from_exception
+       lrw     r9, __NR_syscalls
+       cmphs   syscallid, r9         /* Check nr of syscall */
+       bt      1f
 
-       lrw     r13, sys_call_table
-       ixw     r13, syscallid
-       ldw     r11, (r13)
-       cmpnei  r11, 0
+       lrw     r9, sys_call_table
+       ixw     r9, syscallid
+       ldw     syscallid, (r9)
+       cmpnei  syscallid, 0
        bf      ret_from_exception
 
        mov     r9, sp
        bmaski  r10, THREAD_SHIFT
        andn    r9, r10
-       ldw     r12, (r9, TINFO_FLAGS)
-       ANDI_R3 r12, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
-       cmpnei  r12, 0
+       ldw     r10, (r9, TINFO_FLAGS)
+       lrw     r9, _TIF_SYSCALL_WORK
+       and     r10, r9
+       cmpnei  r10, 0
        bt      csky_syscall_trace
 #if defined(__CSKYABIV2__)
        subi    sp, 8
        stw     r5, (sp, 0x4)
        stw     r4, (sp, 0x0)
-       jsr     r11                      /* Do system call */
+       jsr     syscallid                      /* Do system call */
        addi    sp, 8
 #else
-       jsr     r11
+       jsr     syscallid
 #endif
        stw     a0, (sp, LSAVE_A0)      /* Save return value */
+1:
+#ifdef CONFIG_DEBUG_RSEQ
+       mov     a0, sp
+       jbsr    rseq_syscall
+#endif
        jmpi    ret_from_exception
 
 csky_syscall_trace:
@@ -173,18 +175,23 @@ csky_syscall_trace:
        ldw     a3, (sp, LSAVE_A3)
 #if defined(__CSKYABIV2__)
        subi    sp, 8
-       stw     r5, (sp, 0x4)
-       stw     r4, (sp, 0x0)
+       ldw     r9, (sp, LSAVE_A4)
+       stw     r9, (sp, 0x0)
+       ldw     r9, (sp, LSAVE_A5)
+       stw     r9, (sp, 0x4)
+       jsr     syscallid                     /* Do system call */
+       addi    sp, 8
 #else
        ldw     r6, (sp, LSAVE_A4)
        ldw     r7, (sp, LSAVE_A5)
-#endif
-       jsr     r11                     /* Do system call */
-#if defined(__CSKYABIV2__)
-       addi    sp, 8
+       jsr     syscallid                     /* Do system call */
 #endif
        stw     a0, (sp, LSAVE_A0)      /* Save return value */
 
+#ifdef CONFIG_DEBUG_RSEQ
+       mov     a0, sp
+       jbsr    rseq_syscall
+#endif
        mov     a0, sp                  /* right now, sp --> pt_regs */
        jbsr    syscall_trace_exit
        br      ret_from_exception
@@ -200,18 +207,20 @@ ENTRY(ret_from_fork)
        mov     r9, sp
        bmaski  r10, THREAD_SHIFT
        andn    r9, r10
-       ldw     r12, (r9, TINFO_FLAGS)
-       ANDI_R3 r12, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
-       cmpnei  r12, 0
+       ldw     r10, (r9, TINFO_FLAGS)
+       lrw     r9, _TIF_SYSCALL_WORK
+       and     r10, r9
+       cmpnei  r10, 0
        bf      ret_from_exception
        mov     a0, sp                  /* sp = pt_regs pointer */
        jbsr    syscall_trace_exit
 
 ret_from_exception:
-       ld      syscallid, (sp, LSAVE_PSR)
-       btsti   syscallid, 31
-       bt      1f
+       psrclr  ie
+       ld      r9, (sp, LSAVE_PSR)
+       btsti   r9, 31
 
+       bt      1f
        /*
         * Load address of current->thread_info, then get address of task_struct.
         * Get task_needresched in task_struct.
@@ -220,11 +229,24 @@ ret_from_exception:
        bmaski  r10, THREAD_SHIFT
        andn    r9, r10
 
-       ldw     r12, (r9, TINFO_FLAGS)
-       andi    r12, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | _TIF_UPROBE)
-       cmpnei  r12, 0
+       ldw     r10, (r9, TINFO_FLAGS)
+       lrw     r9, _TIF_WORK_MASK
+       and     r10, r9
+       cmpnei  r10, 0
        bt      exit_work
 1:
+#ifdef CONFIG_PREEMPTION
+       mov     r9, sp
+       bmaski  r10, THREAD_SHIFT
+       andn    r9, r10
+
+       ldw     r10, (r9, TINFO_PREEMPT)
+       cmpnei  r10, 0
+       bt      2f
+       jbsr    preempt_schedule_irq    /* irq en/disable is done inside */
+2:
+#endif
+
 #ifdef CONFIG_TRACE_IRQFLAGS
        ld      r10, (sp, LSAVE_PSR)
        btsti   r10, 6
@@ -235,14 +257,15 @@ ret_from_exception:
        RESTORE_ALL
 
 exit_work:
-       lrw     syscallid, ret_from_exception
-       mov     lr, syscallid
+       lrw     r9, ret_from_exception
+       mov     lr, r9
 
-       btsti   r12, TIF_NEED_RESCHED
+       btsti   r10, TIF_NEED_RESCHED
        bt      work_resched
 
+       psrset  ie
        mov     a0, sp
-       mov     a1, r12
+       mov     a1, r10
        jmpi    do_notify_resume
 
 work_resched:
@@ -291,34 +314,10 @@ ENTRY(csky_irq)
        jbsr    trace_hardirqs_off
 #endif
 
-#ifdef CONFIG_PREEMPTION
-       mov     r9, sp                  /* Get current stack  pointer */
-       bmaski  r10, THREAD_SHIFT
-       andn    r9, r10                 /* Get thread_info */
-
-       /*
-        * Get task_struct->stack.preempt_count for current,
-        * and increase 1.
-        */
-       ldw     r12, (r9, TINFO_PREEMPT)
-       addi    r12, 1
-       stw     r12, (r9, TINFO_PREEMPT)
-#endif
 
        mov     a0, sp
        jbsr    csky_do_IRQ
 
-#ifdef CONFIG_PREEMPTION
-       subi    r12, 1
-       stw     r12, (r9, TINFO_PREEMPT)
-       cmpnei  r12, 0
-       bt      2f
-       ldw     r12, (r9, TINFO_FLAGS)
-       btsti   r12, TIF_NEED_RESCHED
-       bf      2f
-       jbsr    preempt_schedule_irq    /* irq en/disable is done inside */
-#endif
-2:
        jmpi    ret_from_exception
 
 /*
@@ -330,11 +329,6 @@ ENTRY(__switch_to)
        lrw     a3, TASK_THREAD
        addu    a3, a0
 
-       mfcr    a2, psr                 /* Save PSR value */
-       stw     a2, (a3, THREAD_SR)     /* Save PSR in task struct */
-       bclri   a2, 6                   /* Disable interrupts */
-       mtcr    a2, psr
-
        SAVE_SWITCH_STACK
 
        stw     sp, (a3, THREAD_KSP)
@@ -345,12 +339,9 @@ ENTRY(__switch_to)
 
        ldw     sp, (a3, THREAD_KSP)    /* Set next kernel sp */
 
-       ldw     a2, (a3, THREAD_SR)     /* Set next PSR */
-       mtcr    a2, psr
-
 #if  defined(__CSKYABIV2__)
-       addi    r7, a1, TASK_THREAD_INFO
-       ldw     tls, (r7, TINFO_TP_VALUE)
+       addi    a3, a1, TASK_THREAD_INFO
+       ldw     tls, (a3, TINFO_TP_VALUE)
 #endif
 
        RESTORE_SWITCH_STACK
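
Two behavioural changes ride along here: csky_irq no longer open-codes
preempt_count bumping around csky_do_IRQ, and ret_from_exception instead
checks TINFO_PREEMPT once, with interrupts hard-disabled by the psrclr
above. Roughly, in C (a sketch, not the generated code):

	/* Kernel-mode return path, IRQs already disabled. */
	if (IS_ENABLED(CONFIG_PREEMPTION) && !preempt_count())
		preempt_schedule_irq();	/* re-enables IRQs internally */
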
index 44628e3f7fa689cf6ec9856dfb79bc4221da5a60..3c425b84e3be6f59b04b9eb7a64ea61ccf4df78a 100644 (file)
@@ -202,6 +202,7 @@ int ftrace_disable_ftrace_graph_caller(void)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
+#ifdef CONFIG_DYNAMIC_FTRACE
 #ifndef CONFIG_CPU_HAS_ICACHE_INS
 struct ftrace_modify_param {
        int command;
@@ -231,6 +232,7 @@ void arch_ftrace_update_code(int command)
        stop_machine(__ftrace_modify_code, &param, cpu_online_mask);
 }
 #endif
+#endif /* CONFIG_DYNAMIC_FTRACE */
 
 /* _mcount is defined in abi's mcount.S */
 EXPORT_SYMBOL(_mcount);
index e68ff375c8f88fe71a5006bf465df25ab3873655..ab55e98ee8f62dc3f98f4d6529d7c05775670878 100644 (file)
@@ -12,12 +12,17 @@ struct stackframe {
 
 static int unwind_frame_kernel(struct stackframe *frame)
 {
-       if (kstack_end((void *)frame->fp))
+       unsigned long low = (unsigned long)task_stack_page(current);
+       unsigned long high = low + THREAD_SIZE;
+
+       if (unlikely(frame->fp < low || frame->fp > high))
                return -EPERM;
-       if (frame->fp & 0x3 || frame->fp < TASK_SIZE)
+
+       if (kstack_end((void *)frame->fp) || frame->fp & 0x3)
                return -EPERM;
 
        *frame = *(struct stackframe *)frame->fp;
+
        if (__kernel_text_address(frame->lr)) {
                int graph = 0;
 
index b3a56c260e3e14347ea6fdb3e3e4bd3351346643..1a9e0961b2b5b8a6ade8b583d6cbc299815c82a4 100644 (file)
 
 #define UPROBE_TRAP_NR UINT_MAX
 
+bool is_swbp_insn(uprobe_opcode_t *insn)
+{
+       return (*insn & 0xffff) == UPROBE_SWBP_INSN;
+}
+
 unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
 {
        return instruction_pointer(regs);
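
The override exists because csky's breakpoint is a 16-bit instruction, so
only the low halfword of the probed slot is compared. The generic definition
this replaces is a weak symbol in kernel/events/uprobes.c, which (quoted from
memory) compares the full opcode:

	bool __weak is_swbp_insn(uprobe_opcode_t *insn)
	{
		return *insn == UPROBE_SWBP_INSN;
	}
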
index f7b231ca269a0dbc2b5fd76dbffcb5429fb01c6d..8b3fad062ab2d8549c7aa1d594356d88f572b1ef 100644 (file)
@@ -35,7 +35,7 @@ void flush_thread(void){}
  */
 unsigned long thread_saved_pc(struct task_struct *tsk)
 {
-       struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
+       struct switch_stack *sw = (struct switch_stack *)tsk->thread.sp;
 
        return sw->r15;
 }
@@ -56,8 +56,8 @@ int copy_thread_tls(unsigned long clone_flags,
        childstack = ((struct switch_stack *) childregs) - 1;
        memset(childstack, 0, sizeof(struct switch_stack));
 
-       /* setup ksp for switch_to !!! */
-       p->thread.ksp = (unsigned long)childstack;
+       /* setup thread.sp for switch_to !!! */
+       p->thread.sp = (unsigned long)childstack;
 
        if (unlikely(p->flags & PF_KTHREAD)) {
                memset(childregs, 0, sizeof(struct pt_regs));
@@ -98,37 +98,6 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *pr_regs)
        return 1;
 }
 
-unsigned long get_wchan(struct task_struct *p)
-{
-       unsigned long lr;
-       unsigned long *fp, *stack_start, *stack_end;
-       int count = 0;
-
-       if (!p || p == current || p->state == TASK_RUNNING)
-               return 0;
-
-       stack_start = (unsigned long *)end_of_stack(p);
-       stack_end = (unsigned long *)(task_stack_page(p) + THREAD_SIZE);
-
-       fp = (unsigned long *) thread_saved_fp(p);
-       do {
-               if (fp < stack_start || fp > stack_end)
-                       return 0;
-#ifdef CONFIG_STACKTRACE
-               lr = fp[1];
-               fp = (unsigned long *)fp[0];
-#else
-               lr = *fp++;
-#endif
-               if (!in_sched_functions(lr) &&
-                   __kernel_text_address(lr))
-                       return lr;
-       } while (count++ < 16);
-
-       return 0;
-}
-EXPORT_SYMBOL(get_wchan);
-
 #ifndef CONFIG_CPU_PM_NONE
 void arch_cpu_idle(void)
 {
index 21ac2608f2057b630d4cba4c87eae7dde00db9d9..5a82230bddf988409bbb4e0640ebe95cb003b537 100644 (file)
@@ -41,6 +41,9 @@ static void singlestep_disable(struct task_struct *tsk)
 
        regs = task_pt_regs(tsk);
        regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_RUN;
+
+       /* Enable irq */
+       regs->sr |= BIT(6);
 }
 
 static void singlestep_enable(struct task_struct *tsk)
@@ -49,6 +52,9 @@ static void singlestep_enable(struct task_struct *tsk)
 
        regs = task_pt_regs(tsk);
        regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_SI;
+
+       /* Disable irq */
+       regs->sr &= ~BIT(6);
 }
 
 /*
index fec777a643f13d38b419899d9abe6a6daab52300..92809e1da723c8d4bc149cf9a56e70d262e688fd 100644 (file)
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. */
 
 #include <linux/sched/debug.h>
 #include <linux/sched/task_stack.h>
 #include <linux/stacktrace.h>
 #include <linux/ftrace.h>
+#include <linux/ptrace.h>
 
-void save_stack_trace(struct stack_trace *trace)
+#ifdef CONFIG_FRAME_POINTER
+
+struct stackframe {
+       unsigned long fp;
+       unsigned long ra;
+};
+
+void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
+                            bool (*fn)(unsigned long, void *), void *arg)
 {
-       save_stack_trace_tsk(current, trace);
+       unsigned long fp, sp, pc;
+
+       if (regs) {
+               fp = frame_pointer(regs);
+               sp = user_stack_pointer(regs);
+               pc = instruction_pointer(regs);
+       } else if (task == NULL || task == current) {
+               const register unsigned long current_sp __asm__ ("sp");
+               const register unsigned long current_fp __asm__ ("r8");
+               fp = current_fp;
+               sp = current_sp;
+               pc = (unsigned long)walk_stackframe;
+       } else {
+               /* task blocked in __switch_to */
+               fp = thread_saved_fp(task);
+               sp = thread_saved_sp(task);
+               pc = thread_saved_lr(task);
+       }
+
+       for (;;) {
+               unsigned long low, high;
+               struct stackframe *frame;
+
+               if (unlikely(!__kernel_text_address(pc) || fn(pc, arg)))
+                       break;
+
+               /* Validate frame pointer */
+               low = sp;
+               high = ALIGN(sp, THREAD_SIZE);
+               if (unlikely(fp < low || fp > high || fp & 0x3))
+                       break;
+               /* Unwind stack frame */
+               frame = (struct stackframe *)fp;
+               sp = fp;
+               fp = frame->fp;
+               pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
+                                          (unsigned long *)(fp - 8));
+       }
 }
-EXPORT_SYMBOL_GPL(save_stack_trace);
 
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+#else /* !CONFIG_FRAME_POINTER */
+
+static void notrace walk_stackframe(struct task_struct *task,
+       struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
 {
-       unsigned long *fp, *stack_start, *stack_end;
-       unsigned long addr;
-       int skip = trace->skip;
-       int savesched;
-       int graph_idx = 0;
+       unsigned long sp, pc;
+       unsigned long *ksp;
 
-       if (tsk == current) {
-               asm volatile("mov %0, r8\n":"=r"(fp));
-               savesched = 1;
+       if (regs) {
+               sp = user_stack_pointer(regs);
+               pc = instruction_pointer(regs);
+       } else if (task == NULL || task == current) {
+               const register unsigned long current_sp __asm__ ("sp");
+               sp = current_sp;
+               pc = (unsigned long)walk_stackframe;
        } else {
-               fp = (unsigned long *)thread_saved_fp(tsk);
-               savesched = 0;
+               /* task blocked in __switch_to */
+               sp = thread_saved_sp(task);
+               pc = thread_saved_lr(task);
        }
 
-       addr = (unsigned long) fp & THREAD_MASK;
-       stack_start = (unsigned long *) addr;
-       stack_end = (unsigned long *) (addr + THREAD_SIZE);
-
-       while (fp > stack_start && fp < stack_end) {
-               unsigned long lpp, fpp;
+       if (unlikely(sp & 0x3))
+               return;
 
-               fpp = fp[0];
-               lpp = fp[1];
-               if (!__kernel_text_address(lpp))
+       ksp = (unsigned long *)sp;
+       while (!kstack_end(ksp)) {
+               if (__kernel_text_address(pc) && unlikely(fn(pc, arg)))
                        break;
-               else
-                       lpp = ftrace_graph_ret_addr(tsk, &graph_idx, lpp, NULL);
-
-               if (savesched || !in_sched_functions(lpp)) {
-                       if (skip) {
-                               skip--;
-                       } else {
-                               trace->entries[trace->nr_entries++] = lpp;
-                               if (trace->nr_entries >= trace->max_entries)
-                                       break;
-                       }
-               }
-               fp = (unsigned long *)fpp;
+               pc = (*ksp++) - 0x4;
        }
 }
+#endif /* CONFIG_FRAME_POINTER */
+
+static bool print_trace_address(unsigned long pc, void *arg)
+{
+       print_ip_sym(pc);
+       return false;
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp)
+{
+       pr_cont("Call Trace:\n");
+       walk_stackframe(task, NULL, print_trace_address, NULL);
+}
+
+static bool save_wchan(unsigned long pc, void *arg)
+{
+       if (!in_sched_functions(pc)) {
+               unsigned long *p = arg;
+               *p = pc;
+               return true;
+       }
+       return false;
+}
+
+unsigned long get_wchan(struct task_struct *task)
+{
+       unsigned long pc = 0;
+
+       if (likely(task && task != current && task->state != TASK_RUNNING))
+               walk_stackframe(task, NULL, save_wchan, &pc);
+       return pc;
+}
+
+#ifdef CONFIG_STACKTRACE
+static bool __save_trace(unsigned long pc, void *arg, bool nosched)
+{
+       struct stack_trace *trace = arg;
+
+       if (unlikely(nosched && in_sched_functions(pc)))
+               return false;
+       if (unlikely(trace->skip > 0)) {
+               trace->skip--;
+               return false;
+       }
+
+       trace->entries[trace->nr_entries++] = pc;
+       return (trace->nr_entries >= trace->max_entries);
+}
+
+static bool save_trace(unsigned long pc, void *arg)
+{
+       return __save_trace(pc, arg, false);
+}
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+       walk_stackframe(tsk, NULL, save_trace, trace);
+}
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void save_stack_trace(struct stack_trace *trace)
+{
+       save_stack_trace_tsk(NULL, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+#endif /* CONFIG_STACKTRACE */
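
The rewrite funnels show_stack(), get_wchan() and save_stack_trace_tsk()
through a single callback-driven unwinder; a callback returning true stops
the walk. A hypothetical additional consumer, counting frames, would look
like:

	static bool count_frame(unsigned long pc, void *arg)
	{
		(*(unsigned int *)arg)++;
		return false;			/* keep walking */
	}

	static unsigned int stack_depth(struct task_struct *task)
	{
		unsigned int depth = 0;

		walk_stackframe(task, NULL, count_frame, &depth);
		return depth;
	}
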
index 647a23986fb502bb7fad776cb05bce854ef956f3..3c9bd645e6431db71e861aa307943330a57f534d 100644 (file)
@@ -7,10 +7,7 @@
 unsigned long raw_copy_from_user(void *to, const void *from,
                        unsigned long n)
 {
-       if (access_ok(from, n))
-               __copy_user_zeroing(to, from, n);
-       else
-               memset(to, 0, n);
+       ___copy_from_user(to, from, n);
        return n;
 }
 EXPORT_SYMBOL(raw_copy_from_user);
@@ -18,8 +15,7 @@ EXPORT_SYMBOL(raw_copy_from_user);
 unsigned long raw_copy_to_user(void *to, const void *from,
                        unsigned long n)
 {
-       if (access_ok(to, n))
-               __copy_user(to, from, n);
+       ___copy_to_user(to, from, n);
        return n;
 }
 EXPORT_SYMBOL(raw_copy_to_user);
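
Dropping access_ok() here is safe because the generic copy_{from,to}_user()
wrappers perform that check before calling the raw helpers; the raw helpers
only have to keep the contract of returning the number of bytes not copied.
A hypothetical caller, for illustration:

	if (copy_from_user(kbuf, ubuf, len))	/* nonzero => partial copy */
		return -EFAULT;
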
index 410a769ece9581f96bec0382567b313a17169401..3eb397415381077d2e5d01954f6945f4cc48b3d2 100644 (file)
@@ -6,7 +6,7 @@
 #define _ASM_IA64_DEVICE_H
 
 struct dev_archdata {
-#ifdef CONFIG_INTEL_IOMMU
+#ifdef CONFIG_IOMMU_API
        void *iommu; /* hook for IOMMU specific extension */
 #endif
 };
index 5224fb38d766d5e76ed6055c80a8c31ce1b17df4..01d7071b23f7ad3f70736dd0ae88fbca0fcaeed1 100644 (file)
@@ -562,7 +562,7 @@ void __init mem_init(void)
                        > BITS_PER_LONG);
 
        high_memory = __va((max_pfn << PAGE_SHIFT));
-       set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1);
+       set_max_mapnr(max_low_pfn);
        memblock_free_all();
 
 #ifdef CONFIG_PA11
index 924c541a926008bd1bb2fab1f1b33b7acf0c408f..b29d7cb38368bf2ea88254e2ca0ca2b2564a2385 100644 (file)
@@ -126,11 +126,12 @@ config PPC
        select ARCH_HAS_MMIOWB                  if PPC64
        select ARCH_HAS_PHYS_TO_DMA
        select ARCH_HAS_PMEM_API
+       select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
        select ARCH_HAS_PTE_DEVMAP              if PPC_BOOK3S_64
        select ARCH_HAS_PTE_SPECIAL
        select ARCH_HAS_MEMBARRIER_CALLBACKS
        select ARCH_HAS_SCALED_CPUTIME          if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
-       select ARCH_HAS_STRICT_KERNEL_RWX       if ((PPC_BOOK3S_64 || PPC32) && !HIBERNATION)
+       select ARCH_HAS_STRICT_KERNEL_RWX       if (PPC32 && !HIBERNATION)
        select ARCH_HAS_TICK_BROADCAST          if GENERIC_CLOCKEVENTS_BROADCAST
        select ARCH_HAS_UACCESS_FLUSHCACHE
        select ARCH_HAS_UACCESS_MCSAFE          if PPC64
index 34a7215ae81ecee0ffb34feee20d6b4a7a3ddaa0..2a0a467d29855adc644dbbda59575489d123a957 100644 (file)
@@ -17,9 +17,9 @@
  * updating the accessed and modified bits in the page table tree.
  */
 
-#define _PAGE_USER     0x001   /* usermode access allowed */
-#define _PAGE_RW       0x002   /* software: user write access allowed */
-#define _PAGE_PRESENT  0x004   /* software: pte contains a translation */
+#define _PAGE_PRESENT  0x001   /* software: pte contains a translation */
+#define _PAGE_HASHPTE  0x002   /* hash_page has made an HPTE for this pte */
+#define _PAGE_USER     0x004   /* usermode access allowed */
 #define _PAGE_GUARDED  0x008   /* G: prohibit speculative access */
 #define _PAGE_COHERENT 0x010   /* M: enforce memory coherence (SMP systems) */
 #define _PAGE_NO_CACHE 0x020   /* I: cache inhibit */
@@ -27,7 +27,7 @@
 #define _PAGE_DIRTY    0x080   /* C: page changed */
 #define _PAGE_ACCESSED 0x100   /* R: page referenced */
 #define _PAGE_EXEC     0x200   /* software: exec allowed */
-#define _PAGE_HASHPTE  0x400   /* hash_page has made an HPTE for this pte */
+#define _PAGE_RW       0x400   /* software: user write access allowed */
 #define _PAGE_SPECIAL  0x800   /* software: Special page */
 
 #ifdef CONFIG_PTE_64BIT
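
The renumbering lines the Linux PTE bits up a fixed rotate away from the hash
PTE's PP bits, which is what lets the head_32.S and hash_low.S hunks below do
each conversion with a single rlwinm/rlwimi. Worked examples (IBM bit 0 is
the MSB):

	_PAGE_USER = 0x004 -> IBM bit 29; rotl by 32-2 = 30 -> bit 31 (PP lsb)
	_PAGE_RW   = 0x400 -> IBM bit 21; rotl by 32-9 = 23 -> bit 30 (PP msb)
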
index 3c0ba22dc36001ea2b64e3b47a9a31bc19fb82b3..db0a1c2815875c7d2efb0adc9867ebe8591b2709 100644 (file)
@@ -75,7 +75,7 @@
 
 .macro kuap_check      current, gpr
 #ifdef CONFIG_PPC_KUAP_DEBUG
-       lwz     \gpr2, KUAP(thread)
+       lwz     \gpr, KUAP(thread)
 999:   twnei   \gpr, 0
        EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
 #endif
index e0e71777961ff87cd9fb08115322d1d7c769f038..3a0db7b0b46efcd9780ed23a8ec7a9852ec18cdc 100644 (file)
@@ -250,9 +250,27 @@ static inline bool arch_irqs_disabled(void)
        }                                                               \
 } while(0)
 
+static inline bool __lazy_irq_pending(u8 irq_happened)
+{
+       return !!(irq_happened & ~PACA_IRQ_HARD_DIS);
+}
+
+/*
+ * Check if a lazy IRQ is pending. Should be called with IRQs hard disabled.
+ */
 static inline bool lazy_irq_pending(void)
 {
-       return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
+       return __lazy_irq_pending(get_paca()->irq_happened);
+}
+
+/*
+ * Check if a lazy IRQ is pending, with no debugging checks.
+ * Should be called with IRQs hard disabled.
+ * For use in RI disabled code or other constrained situations.
+ */
+static inline bool lazy_irq_pending_nocheck(void)
+{
+       return __lazy_irq_pending(local_paca->irq_happened);
 }
 
 /*
index 2f500debae214f03759d26c896d9e237e5bcdc4d..0969285996cb311dadb6cf2f7742a83425b1e072 100644 (file)
@@ -166,13 +166,17 @@ do {                                                              \
 ({                                                             \
        long __pu_err;                                          \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);           \
+       __typeof__(*(ptr)) __pu_val = (x);                      \
+       __typeof__(size) __pu_size = (size);                    \
+                                                               \
        if (!is_kernel_addr((unsigned long)__pu_addr))          \
                might_fault();                                  \
-       __chk_user_ptr(ptr);                                    \
+       __chk_user_ptr(__pu_addr);                              \
        if (do_allow)                                                           \
-               __put_user_size((x), __pu_addr, (size), __pu_err);              \
+               __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err);      \
        else                                                                    \
-               __put_user_size_allowed((x), __pu_addr, (size), __pu_err);      \
+               __put_user_size_allowed(__pu_val, __pu_addr, __pu_size, __pu_err); \
+                                                               \
        __pu_err;                                               \
 })
 
@@ -180,9 +184,13 @@ do {                                                               \
 ({                                                                     \
        long __pu_err = -EFAULT;                                        \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
+       __typeof__(*(ptr)) __pu_val = (x);                              \
+       __typeof__(size) __pu_size = (size);                            \
+                                                                       \
        might_fault();                                                  \
-       if (access_ok(__pu_addr, size))                 \
-               __put_user_size((x), __pu_addr, (size), __pu_err);      \
+       if (access_ok(__pu_addr, __pu_size))                            \
+               __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
+                                                                       \
        __pu_err;                                                       \
 })
 
@@ -190,8 +198,12 @@ do {                                                               \
 ({                                                             \
        long __pu_err;                                          \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);           \
-       __chk_user_ptr(ptr);                                    \
-       __put_user_size((x), __pu_addr, (size), __pu_err);      \
+       __typeof__(*(ptr)) __pu_val = (x);                      \
+       __typeof__(size) __pu_size = (size);                    \
+                                                               \
+       __chk_user_ptr(__pu_addr);                              \
+       __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
+                                                               \
        __pu_err;                                               \
 })
 
@@ -283,15 +295,18 @@ do {                                                              \
        long __gu_err;                                          \
        __long_type(*(ptr)) __gu_val;                           \
        __typeof__(*(ptr)) __user *__gu_addr = (ptr);   \
-       __chk_user_ptr(ptr);                                    \
+       __typeof__(size) __gu_size = (size);                    \
+                                                               \
+       __chk_user_ptr(__gu_addr);                              \
        if (!is_kernel_addr((unsigned long)__gu_addr))          \
                might_fault();                                  \
        barrier_nospec();                                       \
        if (do_allow)                                                           \
-               __get_user_size(__gu_val, __gu_addr, (size), __gu_err);         \
+               __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err);      \
        else                                                                    \
-               __get_user_size_allowed(__gu_val, __gu_addr, (size), __gu_err); \
+               __get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
        (x) = (__typeof__(*(ptr)))__gu_val;                     \
+                                                               \
        __gu_err;                                               \
 })
 
@@ -300,12 +315,15 @@ do {                                                              \
        long __gu_err = -EFAULT;                                        \
        __long_type(*(ptr)) __gu_val = 0;                               \
        __typeof__(*(ptr)) __user *__gu_addr = (ptr);           \
+       __typeof__(size) __gu_size = (size);                            \
+                                                                       \
        might_fault();                                                  \
-       if (access_ok(__gu_addr, (size))) {             \
+       if (access_ok(__gu_addr, __gu_size)) {                          \
                barrier_nospec();                                       \
-               __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+               __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
        }                                                               \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                             \
+                                                                       \
        __gu_err;                                                       \
 })
 
@@ -314,10 +332,13 @@ do {                                                              \
        long __gu_err;                                          \
        __long_type(*(ptr)) __gu_val;                           \
        __typeof__(*(ptr)) __user *__gu_addr = (ptr);   \
-       __chk_user_ptr(ptr);                                    \
+       __typeof__(size) __gu_size = (size);                    \
+                                                               \
+       __chk_user_ptr(__gu_addr);                              \
        barrier_nospec();                                       \
-       __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+       __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
+                                                               \
        __gu_err;                                               \
 })
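
The new __pu_val/__pu_size/__gu_size temporaries guarantee each macro
argument is evaluated exactly once, and before user access is opened, so a
side-effecting or potentially faulting argument expression cannot run inside
the user-access window. Hypothetical example of what this protects against:

	/* fetch_len() (hypothetical) now runs once, before user access
	 * is allowed, not inside the window: */
	err = __put_user(fetch_len(), p);
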
 
index 1c4385852d3ddb2bdc77ada8fcb9e4ea75ca9662..244542ae2a915698a246df455f9416040e7ed163 100644 (file)
@@ -162,6 +162,9 @@ UBSAN_SANITIZE_kprobes.o := n
 GCOV_PROFILE_kprobes-ftrace.o := n
 KCOV_INSTRUMENT_kprobes-ftrace.o := n
 UBSAN_SANITIZE_kprobes-ftrace.o := n
+GCOV_PROFILE_syscall_64.o := n
+KCOV_INSTRUMENT_syscall_64.o := n
+UBSAN_SANITIZE_syscall_64.o := n
 UBSAN_SANITIZE_vdso.o := n
 
 # Necessary for booting with kcov enabled on book3e machines
index 9a1e5d636dea999713cca517523fba51ca96b1ac..b3c9f15089b64c0be411c679cf2ec4eaddeba494 100644 (file)
@@ -472,15 +472,17 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 #ifdef CONFIG_PPC_BOOK3S
        /*
         * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
-        * touched, AMR not set, no exit work created, then this can be used.
+        * touched, no exit work created, then this can be used.
         */
        .balign IFETCH_ALIGN_BYTES
        .globl fast_interrupt_return
 fast_interrupt_return:
 _ASM_NOKPROBE_SYMBOL(fast_interrupt_return)
+       kuap_check_amr r3, r4
        ld      r4,_MSR(r1)
        andi.   r0,r4,MSR_PR
        bne     .Lfast_user_interrupt_return
+       kuap_restore_amr r3
        andi.   r0,r4,MSR_RI
        li      r3,0 /* 0 return value, no EMULATE_STACK_STORE */
        bne+    .Lfast_kernel_interrupt_return
index 728ccb0f560ce7302258e9b6fa788d27c7ca8e31..ebeebab74b564b7960a5270f180721d430d04dd0 100644 (file)
@@ -971,6 +971,7 @@ EXC_COMMON_BEGIN(system_reset_common)
        ld      r10,SOFTE(r1)
        stb     r10,PACAIRQSOFTMASK(r13)
 
+       kuap_restore_amr r10
        EXCEPTION_RESTORE_REGS
        RFI_TO_USER_OR_KERNEL
 
@@ -2410,6 +2411,7 @@ EXC_COMMON_BEGIN(facility_unavailable_common)
        GEN_COMMON facility_unavailable
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      facility_unavailable_exception
+       REST_NVGPRS(r1) /* instruction emulation may change GPRs */
        b       interrupt_return
 
        GEN_KVM facility_unavailable
@@ -2439,6 +2441,7 @@ EXC_COMMON_BEGIN(h_facility_unavailable_common)
        GEN_COMMON h_facility_unavailable
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      facility_unavailable_exception
+       REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */
        b       interrupt_return
 
        GEN_KVM h_facility_unavailable
index daaa153950c28890127c273404f6bf6c163033e0..97c887950c3ca19d3156b8cc1e7aca6ceaef79f7 100644 (file)
@@ -348,7 +348,7 @@ BEGIN_MMU_FTR_SECTION
        andis.  r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
 #endif
        bne     handle_page_fault_tramp_2       /* if not, try to put a PTE */
-       rlwinm  r3, r5, 32 - 24, 30, 30         /* DSISR_STORE -> _PAGE_RW */
+       rlwinm  r3, r5, 32 - 15, 21, 21         /* DSISR_STORE -> _PAGE_RW */
        bl      hash_page
        b       handle_page_fault_tramp_1
 FTR_SECTION_ELSE
@@ -497,6 +497,7 @@ InstructionTLBMiss:
        andc.   r1,r1,r0                /* check access & ~permission */
        bne-    InstructionAddressInvalid /* return if access not permitted */
        /* Convert linux-style PTE to low word of PPC-style PTE */
+       rlwimi  r0,r0,32-2,31,31        /* _PAGE_USER -> PP lsb */
        ori     r1, r1, 0xe06           /* clear out reserved bits */
        andc    r1, r0, r1              /* PP = user? 1 : 0 */
 BEGIN_FTR_SECTION
@@ -564,8 +565,9 @@ DataLoadTLBMiss:
         * we would need to update the pte atomically with lwarx/stwcx.
         */
        /* Convert linux-style PTE to low word of PPC-style PTE */
-       rlwinm  r1,r0,0,30,30           /* _PAGE_RW -> PP msb */
-       rlwimi  r0,r0,1,30,30           /* _PAGE_USER -> PP msb */
+       rlwinm  r1,r0,32-9,30,30        /* _PAGE_RW -> PP msb */
+       rlwimi  r0,r0,32-1,30,30        /* _PAGE_USER -> PP msb */
+       rlwimi  r0,r0,32-1,31,31        /* _PAGE_USER -> PP lsb */
        ori     r1,r1,0xe04             /* clear out reserved bits */
        andc    r1,r0,r1                /* PP = user? rw? 1: 3: 0 */
 BEGIN_FTR_SECTION
@@ -643,6 +645,7 @@ DataStoreTLBMiss:
         * we would need to update the pte atomically with lwarx/stwcx.
         */
        /* Convert linux-style PTE to low word of PPC-style PTE */
+       rlwimi  r0,r0,32-2,31,31        /* _PAGE_USER -> PP lsb */
        li      r1,0xe06                /* clear out reserved bits & PP msb */
        andc    r1,r0,r1                /* PP = user? 1: 0 */
 BEGIN_FTR_SECTION
index 9bb663977e84e586bcbec5c48f96c43233a9043d..2cec543c38f01d9c4f7cb2db577828dea548a388 100644 (file)
@@ -344,8 +344,9 @@ _ENTRY(saved_ksp_limit)
 /* 0x0C00 - System Call Exception */
        START_EXCEPTION(0x0C00, SystemCall)
        SYSCALL_ENTRY   0xc00
+/*     Trap_0D is commented out to get more space for system call exception */
 
-       EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_STD)
+/*     EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_STD) */
        EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_STD)
        EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_STD)
 
index e34116255ced819eeb2c936c7dd6d8e743dca7da..957abd592075481048dea9e310790a1744977b92 100644 (file)
@@ -19,12 +19,12 @@ bool arch_ima_get_secureboot(void)
  * to be stored as an xattr or as an appended signature.
  *
  * To avoid duplicate signature verification as much as possible, the IMA
- * policy rule for module appraisal is added only if CONFIG_MODULE_SIG_FORCE
+ * policy rule for module appraisal is added only if CONFIG_MODULE_SIG
  * is not enabled.
  */
 static const char *const secure_rules[] = {
        "appraise func=KEXEC_KERNEL_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig",
-#ifndef CONFIG_MODULE_SIG_FORCE
+#ifndef CONFIG_MODULE_SIG
        "appraise func=MODULE_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig",
 #endif
        NULL
@@ -50,7 +50,7 @@ static const char *const secure_and_trusted_rules[] = {
        "measure func=KEXEC_KERNEL_CHECK template=ima-modsig",
        "measure func=MODULE_CHECK template=ima-modsig",
        "appraise func=KEXEC_KERNEL_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig",
-#ifndef CONFIG_MODULE_SIG_FORCE
+#ifndef CONFIG_MODULE_SIG
        "appraise func=MODULE_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig",
 #endif
        NULL
index c74295a7765be884b5488d0b8bdbc4c605b50531..7b7c89cad901b8e2455dfc3f751931c8b8265dcb 100644 (file)
@@ -35,6 +35,8 @@ notrace long system_call_exception(long r3, long r4, long r5,
        BUG_ON(!FULL_REGS(regs));
        BUG_ON(regs->softe != IRQS_ENABLED);
 
+       kuap_check_amr();
+
        account_cpu_user_entry();
 
 #ifdef CONFIG_PPC_SPLPAR
@@ -47,8 +49,6 @@ notrace long system_call_exception(long r3, long r4, long r5,
        }
 #endif
 
-       kuap_check_amr();
-
        /*
         * This is not required for the syscall exit path, but makes the
         * stack frame look nicer. If this was initialised in the first stack
@@ -117,6 +117,8 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
        unsigned long ti_flags;
        unsigned long ret = 0;
 
+       kuap_check_amr();
+
        regs->result = r3;
 
        /* Check whether the syscall is issued inside a restartable sequence */
@@ -189,7 +191,7 @@ again:
 
        /* This pattern matches prep_irq_for_idle */
        __hard_EE_RI_disable();
-       if (unlikely(lazy_irq_pending())) {
+       if (unlikely(lazy_irq_pending_nocheck())) {
                __hard_RI_enable();
                trace_hardirqs_off();
                local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
@@ -204,8 +206,6 @@ again:
        local_paca->tm_scratch = regs->msr;
 #endif
 
-       kuap_check_amr();
-
        account_cpu_user_exit();
 
        return ret;
@@ -228,6 +228,8 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
        BUG_ON(!FULL_REGS(regs));
        BUG_ON(regs->softe != IRQS_ENABLED);
 
+       kuap_check_amr();
+
        local_irq_save(flags);
 
 again:
@@ -264,7 +266,7 @@ again:
 
        trace_hardirqs_on();
        __hard_EE_RI_disable();
-       if (unlikely(lazy_irq_pending())) {
+       if (unlikely(lazy_irq_pending_nocheck())) {
                __hard_RI_enable();
                trace_hardirqs_off();
                local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
@@ -292,8 +294,6 @@ again:
        local_paca->tm_scratch = regs->msr;
 #endif
 
-       kuap_check_amr();
-
        account_cpu_user_exit();
 
        return ret;
@@ -313,6 +313,8 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
        BUG_ON(regs->msr & MSR_PR);
        BUG_ON(!FULL_REGS(regs));
 
+       kuap_check_amr();
+
        if (unlikely(*ti_flagsp & _TIF_EMULATE_STACK_STORE)) {
                clear_bits(_TIF_EMULATE_STACK_STORE, ti_flagsp);
                ret = 1;
@@ -334,7 +336,7 @@ again:
 
                trace_hardirqs_on();
                __hard_EE_RI_disable();
-               if (unlikely(lazy_irq_pending())) {
+               if (unlikely(lazy_irq_pending_nocheck())) {
                        __hard_RI_enable();
                        irq_soft_mask_set(IRQS_ALL_DISABLED);
                        trace_hardirqs_off();
index a3951567118a9bbabd256ea418e57b346cc5c1a7..e7f8f9f1b3f46ba3da4b84fc3c44eb2560f91b96 100644 (file)
@@ -218,11 +218,11 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
        blr
 
        /*
-        * invalid clock
+        * syscall fallback
         */
 99:
-       li      r3, EINVAL
-       crset   so
+       li      r0,__NR_clock_getres
+       sc
        blr
   .cfi_endproc
 V_FUNCTION_END(__kernel_clock_getres)
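
Instead of returning EINVAL for any clock the vDSO does not know, the
fallback now issues the real clock_getres syscall, so valid-but-unhandled
clock IDs get the kernel's answer and only genuinely invalid ones fail.
Userspace is unchanged; a hypothetical caller:

	struct timespec res;

	if (clock_getres(CLOCK_BOOTTIME, &res))	/* may fall back to sc */
		perror("clock_getres");
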
index e15166b0a16db0fc805e1c1193e81bf83ee73d94..ad2f172c26a6c61100c7fc104fbbc75ae5177042 100644 (file)
@@ -521,6 +521,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_IMMEDIATE_EXIT:
+       case KVM_CAP_SET_GUEST_DEBUG:
                r = 1;
                break;
        case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
index 6d236080cb1ad0b5c28e54d744b86e3b3040575c..877d880890fe467f31fc814d82da15c4a7a0dcf9 100644 (file)
@@ -35,7 +35,7 @@ mmu_hash_lock:
 /*
  * Load a PTE into the hash table, if possible.
  * The address is in r4, and r3 contains an access flag:
- * _PAGE_RW (0x002) if a write.
+ * _PAGE_RW (0x400) if a write.
  * r9 contains the SRR1 value, from which we use the MSR_PR bit.
  * SPRG_THREAD contains the physical address of the current task's thread.
  *
@@ -69,7 +69,7 @@ _GLOBAL(hash_page)
        blt+    112f                    /* assume user more likely */
        lis     r5, (swapper_pg_dir - PAGE_OFFSET)@ha   /* if kernel address, use */
        addi    r5 ,r5 ,(swapper_pg_dir - PAGE_OFFSET)@l        /* kernel page table */
-       rlwimi  r3,r9,32-14,31,31       /* MSR_PR -> _PAGE_USER */
+       rlwimi  r3,r9,32-12,29,29       /* MSR_PR -> _PAGE_USER */
 112:
 #ifndef CONFIG_PTE_64BIT
        rlwimi  r5,r4,12,20,29          /* insert top 10 bits of address */
@@ -94,7 +94,7 @@ _GLOBAL(hash_page)
 #else
        rlwimi  r8,r4,23,20,28          /* compute pte address */
 #endif
-       rlwinm  r0,r3,6,24,24           /* _PAGE_RW access -> _PAGE_DIRTY */
+       rlwinm  r0,r3,32-3,24,24        /* _PAGE_RW access -> _PAGE_DIRTY */
        ori     r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
 
        /*
@@ -310,9 +310,11 @@ Hash_msk = (((1 << Hash_bits) - 1) * 64)
 
 _GLOBAL(create_hpte)
        /* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
+       rlwinm  r8,r5,32-9,30,30        /* _PAGE_RW -> PP msb */
        rlwinm  r0,r5,32-6,30,30        /* _PAGE_DIRTY -> PP msb */
-       and     r8,r5,r0                /* writable if _RW & _DIRTY */
-       rlwimi  r5,r5,1,30,30           /* _PAGE_USER -> PP msb */
+       and     r8,r8,r0                /* writable if _RW & _DIRTY */
+       rlwimi  r5,r5,32-1,30,30        /* _PAGE_USER -> PP msb */
+       rlwimi  r5,r5,32-2,31,31        /* _PAGE_USER -> PP lsb */
        ori     r8,r8,0xe04             /* clear out reserved bits */
        andc    r8,r5,r8                /* PP = user? (rw&dirty? 1: 3): 0 */
 BEGIN_FTR_SECTION
@@ -564,7 +566,7 @@ _GLOBAL(flush_hash_pages)
 33:    lwarx   r8,0,r5                 /* fetch the pte flags word */
        andi.   r0,r8,_PAGE_HASHPTE
        beq     8f                      /* done if HASHPTE is already clear */
-       rlwinm  r8,r8,0,~_PAGE_HASHPTE  /* clear HASHPTE bit */
+       rlwinm  r8,r8,0,31,29           /* clear HASHPTE bit */
        stwcx.  r8,0,r5                 /* update the pte */
        bne-    33b
 
index 62f7bfeb709eb1cdabaccce261320dd0191b3c48..a31e1a41913a5dc9c08825bfe7f766283e2056e3 100644 (file)
@@ -54,13 +54,13 @@ config RISCV
        select GENERIC_ARCH_TOPOLOGY if SMP
        select ARCH_HAS_PTE_SPECIAL
        select ARCH_HAS_MMIOWB
-       select ARCH_HAS_DEBUG_VIRTUAL
+       select ARCH_HAS_DEBUG_VIRTUAL if MMU
        select HAVE_EBPF_JIT if MMU
        select EDAC_SUPPORT
        select ARCH_HAS_GIGANTIC_PAGE
        select ARCH_HAS_SET_DIRECT_MAP
        select ARCH_HAS_SET_MEMORY
-       select ARCH_HAS_STRICT_KERNEL_RWX
+       select ARCH_HAS_STRICT_KERNEL_RWX if MMU
        select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
        select SPARSEMEM_STATIC if 32BIT
        select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
@@ -136,6 +136,7 @@ config ARCH_SUPPORTS_DEBUG_PAGEALLOC
        def_bool y
 
 config SYS_SUPPORTS_HUGETLBFS
+       depends on MMU
        def_bool y
 
 config STACKTRACE_SUPPORT
index 216286db81c94ceb124713ab81f0e28dec3365b2..d646332e44f15daa18f6913132f9684d49afec2d 100644 (file)
@@ -11,14 +11,15 @@ config SOC_SIFIVE
          This enables support for SiFive SoC platform hardware.
 
 config SOC_VIRT
-       bool "QEMU Virt Machine"
-       select POWER_RESET_SYSCON
-       select POWER_RESET_SYSCON_POWEROFF
-       select GOLDFISH
-       select RTC_DRV_GOLDFISH
-       select SIFIVE_PLIC
-       help
-         This enables support for QEMU Virt Machine.
+       bool "QEMU Virt Machine"
+       select POWER_RESET
+       select POWER_RESET_SYSCON
+       select POWER_RESET_SYSCON_POWEROFF
+       select GOLDFISH
+       select RTC_DRV_GOLDFISH if RTC_CLASS
+       select SIFIVE_PLIC
+       help
+         This enables support for QEMU Virt Machine.
 
 config SOC_KENDRYTE
        bool "Kendryte K210 SoC"
index 8e18d2c64399df91e0619852bfd5c9a4757cdc37..cec462e198ced58c82ecf530115fb079d06b03b1 100644 (file)
 #define CAUSE_IRQ_FLAG         (_AC(1, UL) << (__riscv_xlen - 1))
 
 /* Interrupt causes (minus the high bit) */
-#define IRQ_U_SOFT             0
 #define IRQ_S_SOFT             1
 #define IRQ_M_SOFT             3
-#define IRQ_U_TIMER            4
 #define IRQ_S_TIMER            5
 #define IRQ_M_TIMER            7
-#define IRQ_U_EXT              8
 #define IRQ_S_EXT              9
 #define IRQ_M_EXT              11
 
index 1bb0cd04aec386d5462f7a2eb0f8cd3dbb36c467..5ce50468aff13f0af68bf7a615944bb0e8b7e341 100644 (file)
@@ -8,6 +8,7 @@
 #ifndef _ASM_RISCV_HWCAP_H
 #define _ASM_RISCV_HWCAP_H
 
+#include <linux/bits.h>
 #include <uapi/asm/hwcap.h>
 
 #ifndef __ASSEMBLY__
@@ -22,6 +23,27 @@ enum {
 };
 
 extern unsigned long elf_hwcap;
+
+#define RISCV_ISA_EXT_a                ('a' - 'a')
+#define RISCV_ISA_EXT_c                ('c' - 'a')
+#define RISCV_ISA_EXT_d                ('d' - 'a')
+#define RISCV_ISA_EXT_f                ('f' - 'a')
+#define RISCV_ISA_EXT_h                ('h' - 'a')
+#define RISCV_ISA_EXT_i                ('i' - 'a')
+#define RISCV_ISA_EXT_m                ('m' - 'a')
+#define RISCV_ISA_EXT_s                ('s' - 'a')
+#define RISCV_ISA_EXT_u                ('u' - 'a')
+
+#define RISCV_ISA_EXT_MAX      64
+
+unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);
+
+#define riscv_isa_extension_mask(ext) BIT_MASK(RISCV_ISA_EXT_##ext)
+
+bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit);
+#define riscv_isa_extension_available(isa_bitmap, ext) \
+       __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext)
+
 #endif
 
 #endif /* _ASM_RISCV_HWCAP_H */
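
riscv_isa_extension_available() is the consumer-facing check: pass NULL to
test the host bitmap, or an explicit bitmap (e.g. a guest's). A hypothetical
driver probe using it:

	/* Bail out unless the host ISA has the A (atomics) extension. */
	if (!riscv_isa_extension_available(NULL, a))
		return -ENODEV;
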
index a2c809df2733f52238e227331b5bc7679dcc1534..56053c9838b2fcdafd09f7e90b3b0d143e3ffad2 100644 (file)
@@ -16,6 +16,8 @@
 
 #ifndef CONFIG_MMU
 #define pgprot_noncached(x)    (x)
+#define pgprot_writecombine(x) (x)
+#define pgprot_device(x)       (x)
 #endif /* CONFIG_MMU */
 
 /* Generic IO read/write.  These perform native-endian accesses. */
index bb4091ff4a21fb3229aba4e298f7e6bd2b07e239..0b2333e71fdc5d52b06f2b96d88ab7f39ff37d70 100644 (file)
@@ -9,6 +9,7 @@
  */
 #define mmiowb()       __asm__ __volatile__ ("fence o,w" : : : "memory");
 
+#include <linux/smp.h>
 #include <asm-generic/mmiowb.h>
 
 #endif /* _ASM_RISCV_MMIOWB_H */
index 0234048b12bc979a225e70f687f39ce9c9b1a28d..062efd3a1d5d1a6059d914e94a3d645daca81af2 100644 (file)
 #include <linux/ptrace.h>
 #include <linux/interrupt.h>
 
+#ifdef CONFIG_RISCV_BASE_PMU
 #define RISCV_BASE_COUNTERS    2
 
 /*
  * The RISCV_MAX_COUNTERS parameter should be specified.
  */
 
-#ifdef CONFIG_RISCV_BASE_PMU
 #define RISCV_MAX_COUNTERS     2
-#endif
-
-#ifndef RISCV_MAX_COUNTERS
-#error "Please provide a valid RISCV_MAX_COUNTERS for the PMU."
-#endif
 
 /*
  * These are the indexes of bits in counteren register *minus* 1,
@@ -82,6 +77,7 @@ struct riscv_pmu {
        int             irq;
 };
 
+#endif
 #ifdef CONFIG_PERF_EVENTS
 #define perf_arch_bpf_user_pt_regs(regs) (struct user_regs_struct *)regs
 #endif
index 9c188ad2e52d2f94245fd7b4984b5f4c74d3a1f7..35b60035b6b0640313938df1cd871ebd3ea004ac 100644 (file)
@@ -470,12 +470,15 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
 
 #else /* CONFIG_MMU */
 
+#define PAGE_SHARED            __pgprot(0)
 #define PAGE_KERNEL            __pgprot(0)
 #define swapper_pg_dir         NULL
 #define VMALLOC_START          0
 
 #define TASK_SIZE 0xffffffffUL
 
+static inline void __kernel_map_pages(struct page *page, int numpages, int enable) {}
+
 #endif /* !CONFIG_MMU */
 
 #define kern_addr_valid(addr)   (1) /* FIXME */
index c38df4771c095d1c03dba34d6c51bdf4dac374c2..4c5bae7ca01c8ab1b03634658894200b4a8d70bf 100644 (file)
@@ -22,14 +22,6 @@ static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
 static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
 #endif
 
-#ifdef CONFIG_STRICT_KERNEL_RWX
-void set_kernel_text_ro(void);
-void set_kernel_text_rw(void);
-#else
-static inline void set_kernel_text_ro(void) { }
-static inline void set_kernel_text_rw(void) { }
-#endif
-
 int set_direct_map_invalid_noflush(struct page *page);
 int set_direct_map_default_noflush(struct page *page);
 
index 86c83081044fb5bc6e50b7fe26210b416a0a1123..d8bbd3207100db2a2408aa35fadc1ac01b387e06 100644 (file)
@@ -43,7 +43,7 @@ obj-$(CONFIG_MODULE_SECTIONS) += module-sections.o
 obj-$(CONFIG_FUNCTION_TRACER)  += mcount.o ftrace.o
 obj-$(CONFIG_DYNAMIC_FTRACE)   += mcount-dyn.o
 
-obj-$(CONFIG_PERF_EVENTS)      += perf_event.o
+obj-$(CONFIG_RISCV_BASE_PMU)   += perf_event.o
 obj-$(CONFIG_PERF_EVENTS)      += perf_callchain.o
 obj-$(CONFIG_HAVE_PERF_REGS)   += perf_regs.o
 obj-$(CONFIG_RISCV_SBI)                += sbi.o
index c4c33bf02369740cb2e653b4c81b1119b5fad43a..0ec22354018ce33481ff39caa3b224d36d50ee12 100644 (file)
@@ -15,8 +15,8 @@
 
 const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init;
 
-void *__cpu_up_stack_pointer[NR_CPUS];
-void *__cpu_up_task_pointer[NR_CPUS];
+void *__cpu_up_stack_pointer[NR_CPUS] __section(.data);
+void *__cpu_up_task_pointer[NR_CPUS] __section(.data);
 
 extern const struct cpu_operations cpu_ops_sbi;
 extern const struct cpu_operations cpu_ops_spinwait;
index a5ad00043104d3c16275f271649af64755c8de0e..ac202f44a67024291c0afda96b2568c3d004f75e 100644 (file)
@@ -6,6 +6,7 @@
  * Copyright (C) 2017 SiFive
  */
 
+#include <linux/bitmap.h>
 #include <linux/of.h>
 #include <asm/processor.h>
 #include <asm/hwcap.h>
 #include <asm/switch_to.h>
 
 unsigned long elf_hwcap __read_mostly;
+
+/* Host ISA bitmap */
+static DECLARE_BITMAP(riscv_isa, RISCV_ISA_EXT_MAX) __read_mostly;
+
 #ifdef CONFIG_FPU
 bool has_fpu __read_mostly;
 #endif
 
+/**
+ * riscv_isa_extension_base() - Get base extension word
+ *
+ * @isa_bitmap: ISA bitmap to use
+ * Return: base extension word as unsigned long value
+ *
+ * NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used.
+ */
+unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap)
+{
+       if (!isa_bitmap)
+               return riscv_isa[0];
+       return isa_bitmap[0];
+}
+EXPORT_SYMBOL_GPL(riscv_isa_extension_base);
+
+/**
+ * __riscv_isa_extension_available() - Check whether given extension
+ * is available or not
+ *
+ * @isa_bitmap: ISA bitmap to use
+ * @bit: bit position of the desired extension
+ * Return: true or false
+ *
+ * NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used.
+ */
+bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit)
+{
+       const unsigned long *bmap = (isa_bitmap) ? isa_bitmap : riscv_isa;
+
+       if (bit >= RISCV_ISA_EXT_MAX)
+               return false;
+
+       return test_bit(bit, bmap) ? true : false;
+}
+EXPORT_SYMBOL_GPL(__riscv_isa_extension_available);
+
 void riscv_fill_hwcap(void)
 {
        struct device_node *node;
        const char *isa;
-       size_t i;
+       char print_str[BITS_PER_LONG + 1];
+       size_t i, j, isa_len;
        static unsigned long isa2hwcap[256] = {0};
 
        isa2hwcap['i'] = isa2hwcap['I'] = COMPAT_HWCAP_ISA_I;
@@ -33,8 +76,11 @@ void riscv_fill_hwcap(void)
 
        elf_hwcap = 0;
 
+       bitmap_zero(riscv_isa, RISCV_ISA_EXT_MAX);
+
        for_each_of_cpu_node(node) {
                unsigned long this_hwcap = 0;
+               unsigned long this_isa = 0;
 
                if (riscv_of_processor_hartid(node) < 0)
                        continue;
@@ -44,8 +90,24 @@ void riscv_fill_hwcap(void)
                        continue;
                }
 
-               for (i = 0; i < strlen(isa); ++i)
+               i = 0;
+               isa_len = strlen(isa);
+#if IS_ENABLED(CONFIG_32BIT)
+               if (!strncmp(isa, "rv32", 4))
+                       i += 4;
+#elif IS_ENABLED(CONFIG_64BIT)
+               if (!strncmp(isa, "rv64", 4))
+                       i += 4;
+#endif
+               for (; i < isa_len; ++i) {
                        this_hwcap |= isa2hwcap[(unsigned char)(isa[i])];
+                       /*
+                        * TODO: X, Y and Z extension parsing for Host ISA
+                        * bitmap will be added in-future.
+                        * bitmap will be added in the future.
+                       if ('a' <= isa[i] && isa[i] < 'x')
+                               this_isa |= (1UL << (isa[i] - 'a'));
+               }
 
                /*
                 * All "okay" hart should have same isa. Set HWCAP based on
@@ -56,6 +118,11 @@ void riscv_fill_hwcap(void)
                        elf_hwcap &= this_hwcap;
                else
                        elf_hwcap = this_hwcap;
+
+               if (riscv_isa[0])
+                       riscv_isa[0] &= this_isa;
+               else
+                       riscv_isa[0] = this_isa;
        }
 
        /* We don't support systems with F but without D, so mask those out
@@ -65,7 +132,17 @@ void riscv_fill_hwcap(void)
                elf_hwcap &= ~COMPAT_HWCAP_ISA_F;
        }
 
-       pr_info("elf_hwcap is 0x%lx\n", elf_hwcap);
+       memset(print_str, 0, sizeof(print_str));
+       for (i = 0, j = 0; i < BITS_PER_LONG; i++)
+               if (riscv_isa[0] & BIT_MASK(i))
+                       print_str[j++] = (char)('a' + i);
+       pr_info("riscv: ISA extensions %s\n", print_str);
+
+       memset(print_str, 0, sizeof(print_str));
+       for (i = 0, j = 0; i < BITS_PER_LONG; i++)
+               if (elf_hwcap & BIT_MASK(i))
+                       print_str[j++] = (char)('a' + i);
+       pr_info("riscv: ELF capabilities %s\n", print_str);
 
 #ifdef CONFIG_FPU
        if (elf_hwcap & (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D))
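
The two exported helpers above give the rest of the kernel a cheap query
interface for single-letter extensions. A minimal caller sketch, assuming the
'a'-relative bit layout established by the parsing loop (passing NULL selects
the Host ISA bitmap):

        /* Hypothetical caller: do all harts implement the "c"
         * (compressed) extension? */
        if (__riscv_isa_extension_available(NULL, 'c' - 'a'))
                pr_info("compressed instructions available on all harts\n");
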
index 91626d9ae5f23d5304d3eabd289c01b09a559991..c835f0362d94e96b31db24dde30eff715cb142e1 100644 (file)
@@ -147,7 +147,7 @@ static int riscv_map_hw_event(u64 config)
        return riscv_pmu->hw_events[config];
 }
 
-int riscv_map_cache_decode(u64 config, unsigned int *type,
+static int riscv_map_cache_decode(u64 config, unsigned int *type,
                           unsigned int *op, unsigned int *result)
 {
        return -ENOENT;
@@ -342,7 +342,7 @@ static void riscv_pmu_del(struct perf_event *event, int flags)
 
 static DEFINE_MUTEX(pmc_reserve_mutex);
 
-irqreturn_t riscv_base_pmu_handle_irq(int irq_num, void *dev)
+static irqreturn_t riscv_base_pmu_handle_irq(int irq_num, void *dev)
 {
        return IRQ_NONE;
 }
@@ -361,7 +361,7 @@ static int reserve_pmc_hardware(void)
        return err;
 }
 
-void release_pmc_hardware(void)
+static void release_pmc_hardware(void)
 {
        mutex_lock(&pmc_reserve_mutex);
        if (riscv_pmu->irq >= 0)
@@ -464,7 +464,7 @@ static const struct of_device_id riscv_pmu_of_ids[] = {
        { /* sentinel value */ }
 };
 
-int __init init_hw_perf_events(void)
+static int __init init_hw_perf_events(void)
 {
        struct device_node *node = of_find_node_by_type(NULL, "pmu");
        const struct of_device_id *of_id;
index 610c11e91606878985ef243cc3151ffcec09a5c3..824d117cf202b34a3f538e9c1b766873b8548bb7 100644 (file)
@@ -22,7 +22,7 @@
 #include <asm/switch_to.h>
 #include <asm/thread_info.h>
 
-unsigned long gp_in_global __asm__("gp");
+register unsigned long gp_in_global __asm__("gp");
 
 extern asmlinkage void ret_from_fork(void);
 extern asmlinkage void ret_from_kernel_thread(void);
index 7c24da59bccf619ee4b5c3725290a68c6f531440..f383ef5672b20f1a43c90a33aa06d955f34d5482 100644 (file)
@@ -102,7 +102,7 @@ void sbi_shutdown(void)
 {
        sbi_ecall(SBI_EXT_0_1_SHUTDOWN, 0, 0, 0, 0, 0, 0, 0);
 }
-EXPORT_SYMBOL(sbi_set_timer);
+EXPORT_SYMBOL(sbi_shutdown);
 
 /**
  * sbi_clear_ipi() - Clear any pending IPIs for the calling hart.
@@ -113,7 +113,7 @@ void sbi_clear_ipi(void)
 {
        sbi_ecall(SBI_EXT_0_1_CLEAR_IPI, 0, 0, 0, 0, 0, 0, 0);
 }
-EXPORT_SYMBOL(sbi_shutdown);
+EXPORT_SYMBOL(sbi_clear_ipi);
 
 /**
  * sbi_set_timer_v01() - Program the timer for next timer event.
@@ -167,6 +167,11 @@ static int __sbi_rfence_v01(int fid, const unsigned long *hart_mask,
 
        return result;
 }
+
+static void sbi_set_power_off(void)
+{
+       pm_power_off = sbi_shutdown;
+}
 #else
 static void __sbi_set_timer_v01(uint64_t stime_value)
 {
@@ -191,6 +196,8 @@ static int __sbi_rfence_v01(int fid, const unsigned long *hart_mask,
 
        return 0;
 }
+
+static void sbi_set_power_off(void) {}
 #endif /* CONFIG_RISCV_SBI_V01 */
 
 static void __sbi_set_timer_v02(uint64_t stime_value)
@@ -540,16 +547,12 @@ static inline long sbi_get_firmware_version(void)
        return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION);
 }
 
-static void sbi_power_off(void)
-{
-       sbi_shutdown();
-}
 
 int __init sbi_init(void)
 {
        int ret;
 
-       pm_power_off = sbi_power_off;
+       sbi_set_power_off();
        ret = sbi_get_spec_version();
        if (ret > 0)
                sbi_spec_version = ret;
index e0a6293093f12ad51919842de90d47030547eea8..a65a8fa0c22d690f600afd5404bd519d48034808 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <linux/cpu.h>
 #include <linux/interrupt.h>
+#include <linux/module.h>
 #include <linux/profile.h>
 #include <linux/smp.h>
 #include <linux/sched.h>
@@ -63,6 +64,7 @@ void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
        for_each_cpu(cpu, in)
                cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
 }
+EXPORT_SYMBOL_GPL(riscv_cpuid_to_hartid_mask);
 
 bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
 {
index 02087fe539c61514b6e276029231e717b31126b6..837b9b38f825cf254bc1963325e3abd670e1ef97 100644 (file)
@@ -12,6 +12,8 @@
 #include <linux/stacktrace.h>
 #include <linux/ftrace.h>
 
+register unsigned long sp_in_global __asm__("sp");
+
 #ifdef CONFIG_FRAME_POINTER
 
 struct stackframe {
@@ -19,8 +21,6 @@ struct stackframe {
        unsigned long ra;
 };
 
-register unsigned long sp_in_global __asm__("sp");
-
 void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
                             bool (*fn)(unsigned long, void *), void *arg)
 {
@@ -65,7 +65,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
 
 #else /* !CONFIG_FRAME_POINTER */
 
-static void notrace walk_stackframe(struct task_struct *task,
+void notrace walk_stackframe(struct task_struct *task,
        struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
 {
        unsigned long sp, pc;
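
The declaration that moved to the top of the file uses the GNU C global
register variable extension: the identifier is pinned to the named register
for the whole translation unit, so the live stack pointer can be read from
plain C by both unwinder variants. A hedged sketch of the idiom (current_sp()
is a hypothetical wrapper):

        /* GNU C global register variable: always reads the current
         * RISC-V stack pointer, no inline asm needed at call sites. */
        register unsigned long sp_in_global __asm__("sp");

        static unsigned long current_sp(void)
        {
                return sp_in_global;
        }
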
index 33b16f4212f7a5fceb7feeff0a462e295bb4c0c0..4c8b2a4a6a7096bbaaa4d67b8ea1cfafbcb7eaf1 100644 (file)
@@ -12,7 +12,7 @@ vdso-syms += getcpu
 vdso-syms += flush_icache
 
 # Files to link into the vdso
-obj-vdso = $(patsubst %, %.o, $(vdso-syms))
+obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o
 
 # Build rules
 targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-dummy.o
@@ -33,15 +33,15 @@ $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
        $(call if_changed,vdsold)
 
 # We also create a special relocatable object that should mirror the symbol
-# table and layout of the linked DSO.  With ld -R we can then refer to
-# these symbols in the kernel code rather than hand-coded addresses.
+# table and layout of the linked DSO. With ld --just-symbols we can then
+# refer to these symbols in the kernel code rather than hand-coded addresses.
 
 SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
        -Wl,--build-id -Wl,--hash-style=both
 $(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE
        $(call if_changed,vdsold)
 
-LDFLAGS_vdso-syms.o := -r -R
+LDFLAGS_vdso-syms.o := -r --just-symbols
 $(obj)/vdso-syms.o: $(obj)/vdso-dummy.o FORCE
        $(call if_changed,ld)
 
diff --git a/arch/riscv/kernel/vdso/note.S b/arch/riscv/kernel/vdso/note.S
new file mode 100644 (file)
index 0000000..2a956c9
--- /dev/null
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
+ * Here we can supply some information useful to userland.
+ */
+
+#include <linux/elfnote.h>
+#include <linux/version.h>
+
+ELFNOTE_START(Linux, 0, "a")
+       .long LINUX_VERSION_CODE
+ELFNOTE_END
index b55be44ff9bdf3ebad11a952b56b366ab2310ed7..736de6c8739fcf3ab005864f5ed5701c0b0f6fd1 100644 (file)
@@ -47,7 +47,7 @@ static void setup_zero_page(void)
        memset((void *)empty_zero_page, 0, PAGE_SIZE);
 }
 
-#ifdef CONFIG_DEBUG_VM
+#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
 static inline void print_mlk(char *name, unsigned long b, unsigned long t)
 {
        pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld kB)\n", name, b, t,
@@ -150,7 +150,8 @@ void __init setup_bootmem(void)
        memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
 
        set_max_mapnr(PFN_DOWN(mem_size));
-       max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
+       max_pfn = PFN_DOWN(memblock_end_of_DRAM());
+       max_low_pfn = max_pfn;
 
 #ifdef CONFIG_BLK_DEV_INITRD
        setup_initrd();
@@ -501,22 +502,6 @@ static inline void setup_vm_final(void)
 #endif /* CONFIG_MMU */
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
-void set_kernel_text_rw(void)
-{
-       unsigned long text_start = (unsigned long)_text;
-       unsigned long text_end = (unsigned long)_etext;
-
-       set_memory_rw(text_start, (text_end - text_start) >> PAGE_SHIFT);
-}
-
-void set_kernel_text_ro(void)
-{
-       unsigned long text_start = (unsigned long)_text;
-       unsigned long text_end = (unsigned long)_etext;
-
-       set_memory_ro(text_start, (text_end - text_start) >> PAGE_SHIFT);
-}
-
 void mark_rodata_ro(void)
 {
        unsigned long text_start = (unsigned long)_text;
index cd060b5dd8fdd1f1595a47e1edeca75ec8651aa4..e4dc64cc9c555c11abb747e642faf6fecc910d71 100644 (file)
@@ -8,6 +8,10 @@
 #include <linux/slab.h>
 #include <asm/pci_insn.h>
 
+/* I/O size constraints */
+#define ZPCI_MAX_READ_SIZE     8
+#define ZPCI_MAX_WRITE_SIZE    128
+
 /* I/O Map */
 #define ZPCI_IOMAP_SHIFT               48
 #define ZPCI_IOMAP_ADDR_BASE           0x8000000000000000UL
@@ -140,7 +144,8 @@ static inline int zpci_memcpy_fromio(void *dst,
 
        while (n > 0) {
                size = zpci_get_max_write_size((u64 __force) src,
-                                              (u64) dst, n, 8);
+                                              (u64) dst, n,
+                                              ZPCI_MAX_READ_SIZE);
                rc = zpci_read_single(dst, src, size);
                if (rc)
                        break;
@@ -161,7 +166,8 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst,
 
        while (n > 0) {
                size = zpci_get_max_write_size((u64 __force) dst,
-                                              (u64) src, n, 128);
+                                              (u64) src, n,
+                                              ZPCI_MAX_WRITE_SIZE);
                if (size > 8) /* main path */
                        rc = zpci_write_block(dst, src, size);
                else
index 8415ae7d2a23f5173e1628839ef3a68703b31890..f9e4baa64b675caa5ad5e1d9eccd3d01716fe721 100644 (file)
@@ -151,7 +151,7 @@ static int kexec_file_add_initrd(struct kimage *image,
                buf.mem += crashk_res.start;
        buf.memsz = buf.bufsz;
 
-       data->parm->initrd_start = buf.mem;
+       data->parm->initrd_start = data->memsz;
        data->parm->initrd_size = buf.memsz;
        data->memsz += buf.memsz;
 
index d5035de9020e738819a06b9e0ecf74b77879ac4d..b7182cec48dc4b260e8e1dfcb32cccadb8acef2d 100644 (file)
@@ -28,6 +28,7 @@ int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val,
                break;
        case R_390_64:          /* Direct 64 bit.  */
        case R_390_GLOB_DAT:
+       case R_390_JMP_SLOT:
                *(u64 *)loc = val;
                break;
        case R_390_PC16:        /* PC relative 16 bit.  */
index 5dcf9ff1282868bb9bb945a4d0f99117ad2c7fdb..d05bb040fd427c1b25e1c9693930aa2ec01f0412 100644 (file)
@@ -545,6 +545,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_S390_AIS:
        case KVM_CAP_S390_AIS_MIGRATION:
        case KVM_CAP_S390_VCPU_RESETS:
+       case KVM_CAP_SET_GUEST_DEBUG:
                r = 1;
                break;
        case KVM_CAP_S390_HPAGE_1M:
index 69a824f9ef0b46638cf0c921ca5bf0a0aba28be3..8938936424150569eb1bcd78ce1fa0e54e1eacec 100644 (file)
@@ -626,10 +626,12 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
         * available for the guest are AQIC and TAPQ with the t bit set.
         * Since we do not set IC.3 (FIII), we will currently only intercept
         * the AQIC function code.
+        * Note: running nested under z/VM can result in intercepts for other
+        * function codes, e.g. PQAP(QCI). We do not support this and bail out.
         */
        reg0 = vcpu->run->s.regs.gprs[0];
        fc = (reg0 >> 24) & 0xff;
-       if (WARN_ON_ONCE(fc != 0x03))
+       if (fc != 0x03)
                return -EOPNOTSUPP;
 
        /* PQAP instruction is allowed for guest kernel only */
index c4f8039a35e8dda0bc20999b7db089e3ab09b613..0267405ab7c69fec93f73df3ed35f381b03c7f38 100644 (file)
@@ -64,10 +64,13 @@ mm_segment_t enable_sacf_uaccess(void)
 {
        mm_segment_t old_fs;
        unsigned long asce, cr;
+       unsigned long flags;
 
        old_fs = current->thread.mm_segment;
        if (old_fs & 1)
                return old_fs;
+       /* protect against a concurrent page table upgrade */
+       local_irq_save(flags);
        current->thread.mm_segment |= 1;
        asce = S390_lowcore.kernel_asce;
        if (likely(old_fs == USER_DS)) {
@@ -83,6 +86,7 @@ mm_segment_t enable_sacf_uaccess(void)
                __ctl_load(asce, 7, 7);
                set_cpu_flag(CIF_ASCE_SECONDARY);
        }
+       local_irq_restore(flags);
        return old_fs;
 }
 EXPORT_SYMBOL(enable_sacf_uaccess);
index f01daddcbc5eb5baecf71b6b7d11a2ec7dfe2fc2..4632d4e26b66218d0bc63a368b16eaddf48a6eb4 100644 (file)
@@ -159,10 +159,13 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                rste &= ~_SEGMENT_ENTRY_NOEXEC;
 
        /* Set correct table type for 2G hugepages */
-       if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
-               rste |= _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE;
-       else
+       if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
+               if (likely(pte_present(pte)))
+                       rste |= _REGION3_ENTRY_LARGE;
+               rste |= _REGION_ENTRY_TYPE_R3;
+       } else if (likely(pte_present(pte)))
                rste |= _SEGMENT_ENTRY_LARGE;
+
        clear_huge_pte_skeys(mm, rste);
        pte_val(*ptep) = rste;
 }
index 498c98a312f44c0318d3a0ee051b0a65fd5078f6..fff169d6471188cd8f58669316848e34969d12cd 100644 (file)
@@ -70,8 +70,20 @@ static void __crst_table_upgrade(void *arg)
 {
        struct mm_struct *mm = arg;
 
-       if (current->active_mm == mm)
-               set_user_asce(mm);
+       /* we must change all active ASCEs to avoid the creation of new TLBs */
+       if (current->active_mm == mm) {
+               S390_lowcore.user_asce = mm->context.asce;
+               if (current->thread.mm_segment == USER_DS) {
+                       __ctl_load(S390_lowcore.user_asce, 1, 1);
+                       /* Mark user-ASCE present in CR1 */
+                       clear_cpu_flag(CIF_ASCE_PRIMARY);
+               }
+               if (current->thread.mm_segment == USER_DS_SACF) {
+                       __ctl_load(S390_lowcore.user_asce, 7, 7);
+                       /* enable_sacf_uaccess does all or nothing */
+                       WARN_ON(!test_cpu_flag(CIF_ASCE_SECONDARY));
+               }
+       }
        __tlb_flush_local();
 }
 
index 7d42a8794f10d6bdc5133150411c4f42c1a52e39..020a2c514d9612b1ad6f69b691f6b720274f2fea 100644 (file)
 #include <linux/mm.h>
 #include <linux/errno.h>
 #include <linux/pci.h>
+#include <asm/pci_io.h>
+#include <asm/pci_debug.h>
+
+static inline void zpci_err_mmio(u8 cc, u8 status, u64 offset)
+{
+       struct {
+               u64 offset;
+               u8 cc;
+               u8 status;
+       } data = {offset, cc, status};
+
+       zpci_err_hex(&data, sizeof(data));
+}
+
+static inline int __pcistb_mio_inuser(
+               void __iomem *ioaddr, const void __user *src,
+               u64 len, u8 *status)
+{
+       int cc = -ENXIO;
+
+       asm volatile (
+               "       sacf 256\n"
+               "0:     .insn   rsy,0xeb00000000d4,%[len],%[ioaddr],%[src]\n"
+               "1:     ipm     %[cc]\n"
+               "       srl     %[cc],28\n"
+               "2:     sacf 768\n"
+               EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+               : [cc] "+d" (cc), [len] "+d" (len)
+               : [ioaddr] "a" (ioaddr), [src] "Q" (*((u8 __force *)src))
+               : "cc", "memory");
+       *status = len >> 24 & 0xff;
+       return cc;
+}
+
+static inline int __pcistg_mio_inuser(
+               void __iomem *ioaddr, const void __user *src,
+               u64 ulen, u8 *status)
+{
+       register u64 addr asm("2") = (u64 __force) ioaddr;
+       register u64 len asm("3") = ulen;
+       int cc = -ENXIO;
+       u64 val = 0;
+       u64 cnt = ulen;
+       u8 tmp;
+
+       /*
+        * copy 0 < @len <= 8 bytes from @src into the rightmost bytes of
+        * a register, then store it to PCI at @ioaddr while in secondary
+        * address space. pcistg then uses the user mappings.
+        */
+       asm volatile (
+               "       sacf    256\n"
+               "0:     llgc    %[tmp],0(%[src])\n"
+               "       sllg    %[val],%[val],8\n"
+               "       aghi    %[src],1\n"
+               "       ogr     %[val],%[tmp]\n"
+               "       brctg   %[cnt],0b\n"
+               "1:     .insn   rre,0xb9d40000,%[val],%[ioaddr]\n"
+               "2:     ipm     %[cc]\n"
+               "       srl     %[cc],28\n"
+               "3:     sacf    768\n"
+               EX_TABLE(0b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b)
+               :
+               [src] "+a" (src), [cnt] "+d" (cnt),
+               [val] "+d" (val), [tmp] "=d" (tmp),
+               [len] "+d" (len), [cc] "+d" (cc),
+               [ioaddr] "+a" (addr)
+               :: "cc", "memory");
+       *status = len >> 24 & 0xff;
+
+       /* did we read everything from user memory? */
+       if (!cc && cnt != 0)
+               cc = -EFAULT;
+
+       return cc;
+}
+
+static inline int __memcpy_toio_inuser(void __iomem *dst,
+                                  const void __user *src, size_t n)
+{
+       int size, rc = 0;
+       u8 status = 0;
+       mm_segment_t old_fs;
+
+       if (!src)
+               return -EINVAL;
+
+       old_fs = enable_sacf_uaccess();
+       while (n > 0) {
+               size = zpci_get_max_write_size((u64 __force) dst,
+                                              (u64 __force) src, n,
+                                              ZPCI_MAX_WRITE_SIZE);
+               if (size > 8) /* main path */
+                       rc = __pcistb_mio_inuser(dst, src, size, &status);
+               else
+                       rc = __pcistg_mio_inuser(dst, src, size, &status);
+               if (rc)
+                       break;
+               src += size;
+               dst += size;
+               n -= size;
+       }
+       disable_sacf_uaccess(old_fs);
+       if (rc)
+               zpci_err_mmio(rc, status, (__force u64) dst);
+       return rc;
+}
 
 static long get_pfn(unsigned long user_addr, unsigned long access,
                    unsigned long *pfn)
@@ -46,6 +153,20 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
 
        if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
                return -EINVAL;
+
+       /*
+        * Only support write access to MIO capable devices on a MIO enabled
+        * system. Otherwise we would have to check for every address if it is
+        * a special ZPCI_ADDR and we would have to do a get_pfn() which we
+        * don't need for MIO capable devices.
+        */
+       if (static_branch_likely(&have_mio)) {
+               ret = __memcpy_toio_inuser((void  __iomem *) mmio_addr,
+                                       user_buffer,
+                                       length);
+               return ret;
+       }
+
        if (length > 64) {
                buf = kmalloc(length, GFP_KERNEL);
                if (!buf)
@@ -56,7 +177,8 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
        ret = get_pfn(mmio_addr, VM_WRITE, &pfn);
        if (ret)
                goto out;
-       io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
+       io_addr = (void __iomem *)((pfn << PAGE_SHIFT) |
+                       (mmio_addr & ~PAGE_MASK));
 
        ret = -EFAULT;
        if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
@@ -72,6 +194,78 @@ out:
        return ret;
 }
 
+static inline int __pcilg_mio_inuser(
+               void __user *dst, const void __iomem *ioaddr,
+               u64 ulen, u8 *status)
+{
+       register u64 addr asm("2") = (u64 __force) ioaddr;
+       register u64 len asm("3") = ulen;
+       u64 cnt = ulen;
+       int shift = ulen * 8;
+       int cc = -ENXIO;
+       u64 val, tmp;
+
+       /*
+        * read 0 < @len <= 8 bytes from the PCI memory mapped at @ioaddr (in
+        * user space) into a register using pcilg, then store these bytes at
+        * user address @dst
+        */
+       asm volatile (
+               "       sacf    256\n"
+               "0:     .insn   rre,0xb9d60000,%[val],%[ioaddr]\n"
+               "1:     ipm     %[cc]\n"
+               "       srl     %[cc],28\n"
+               "       ltr     %[cc],%[cc]\n"
+               "       jne     4f\n"
+               "2:     ahi     %[shift],-8\n"
+               "       srlg    %[tmp],%[val],0(%[shift])\n"
+               "3:     stc     %[tmp],0(%[dst])\n"
+               "       aghi    %[dst],1\n"
+               "       brctg   %[cnt],2b\n"
+               "4:     sacf    768\n"
+               EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b)
+               :
+               [cc] "+d" (cc), [val] "=d" (val), [len] "+d" (len),
+               [dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp),
+               [shift] "+d" (shift)
+               :
+               [ioaddr] "a" (addr)
+               : "cc", "memory");
+
+       /* did we write everything to the user space buffer? */
+       if (!cc && cnt != 0)
+               cc = -EFAULT;
+
+       *status = len >> 24 & 0xff;
+       return cc;
+}
+
+static inline int __memcpy_fromio_inuser(void __user *dst,
+                                    const void __iomem *src,
+                                    unsigned long n)
+{
+       int size, rc = 0;
+       u8 status;
+       mm_segment_t old_fs;
+
+       old_fs = enable_sacf_uaccess();
+       while (n > 0) {
+               size = zpci_get_max_write_size((u64 __force) src,
+                                              (u64 __force) dst, n,
+                                              ZPCI_MAX_READ_SIZE);
+               rc = __pcilg_mio_inuser(dst, src, size, &status);
+               if (rc)
+                       break;
+               src += size;
+               dst += size;
+               n -= size;
+       }
+       disable_sacf_uaccess(old_fs);
+       if (rc)
+               zpci_err_mmio(rc, status, (__force u64) dst);
+       return rc;
+}
+
 SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
                void __user *, user_buffer, size_t, length)
 {
@@ -86,12 +280,27 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
 
        if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
                return -EINVAL;
+
+       /*
+        * Only support read access to MIO capable devices on a MIO enabled
+        * system. Otherwise we would have to check for every address if it is
+        * a special ZPCI_ADDR and we would have to do a get_pfn() which we
+        * don't need for MIO capable devices.
+        */
+       if (static_branch_likely(&have_mio)) {
+               ret = __memcpy_fromio_inuser(
+                               user_buffer, (const void __iomem *)mmio_addr,
+                               length);
+               return ret;
+       }
+
        if (length > 64) {
                buf = kmalloc(length, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
-       } else
+       } else {
                buf = local_buf;
+       }
 
        ret = get_pfn(mmio_addr, VM_READ, &pfn);
        if (ret)
index 3da561453260cbbc3f4e6da45fe267dfc0a053d2..ef01ced9e16963fd0f678014d49e4a10f5fe30bb 100644 (file)
@@ -2,6 +2,8 @@
 #ifndef __ASM_SH_SOCKIOS_H
 #define __ASM_SH_SOCKIOS_H
 
+#include <linux/time_types.h>
+
 /* Socket-level I/O control calls. */
 #define FIOGETOWN      _IOR('f', 123, int)
 #define FIOSETOWN      _IOW('f', 124, int)
index b7c94de70ccac87bdb31f4c4366b86b9ac281df3..a8c2f2615fc6fe817d20b3492cea172f28c1325b 100644 (file)
@@ -331,9 +331,9 @@ static void __init srmmu_nocache_init(void)
 
        while (vaddr < srmmu_nocache_end) {
                pgd = pgd_offset_k(vaddr);
-               p4d = p4d_offset(__nocache_fix(pgd), vaddr);
-               pud = pud_offset(__nocache_fix(p4d), vaddr);
-               pmd = pmd_offset(__nocache_fix(pgd), vaddr);
+               p4d = p4d_offset(pgd, vaddr);
+               pud = pud_offset(p4d, vaddr);
+               pmd = pmd_offset(__nocache_fix(pud), vaddr);
                pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);
 
                pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);
index 91f35b266abac69f5b319f4e71294b9a20f3dff3..d29d5fdd98fac4ef7118e281ef852b58d234823d 100644 (file)
@@ -17,7 +17,7 @@
 #define TRANS_TAP_LEN strlen(TRANS_TAP)
 
 #define TRANS_GRE "gre"
-#define TRANS_GRE_LEN strlen(TRANS_RAW)
+#define TRANS_GRE_LEN strlen(TRANS_GRE)
 
 #define TRANS_L2TPV3 "l2tpv3"
 #define TRANS_L2TPV3_LEN strlen(TRANS_L2TPV3)
index 7a3208c47cfcbed276e6827bb2d4ed721282a432..36b33d62a35d8e4b8f3ecd625b65a53df9aa123c 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #include <asm-generic/xor.h>
-#include <shared/timer-internal.h>
+#include <linux/time-internal.h>
 
 /* pick an arbitrary one - measuring isn't possible with inf-cpu */
 #define XOR_SELECT_TEMPLATE(x) \
index 0a12d5a092177b7f255242025cd11d9c22736d8f..3d91f89fd85276041cfbcb3353ea26a3e10686e3 100644 (file)
@@ -11,6 +11,7 @@
 #include <sysdep/ptrace_user.h>
 #include <sysdep/syscalls.h>
 #include <linux/time-internal.h>
+#include <asm/unistd.h>
 
 void handle_syscall(struct uml_pt_regs *r)
 {
index 1197b5596d5ad8959d242bd5f1666690e368de7d..2d3f963fd6f13e14c943d9e732abded33e532c3f 100644 (file)
@@ -68,6 +68,7 @@ config X86
        select ARCH_HAS_KCOV                    if X86_64
        select ARCH_HAS_MEM_ENCRYPT
        select ARCH_HAS_MEMBARRIER_SYNC_CORE
+       select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
        select ARCH_HAS_PMEM_API                if X86_64
        select ARCH_HAS_PTE_DEVMAP              if X86_64
        select ARCH_HAS_PTE_SPECIAL
index 8f8c8e386cea7929a508952acc770a977a4a5cc4..c8b8c1a8d1fc0a052b2b1e28b41964db8ad1671f 100644 (file)
@@ -59,14 +59,14 @@ u8 buf[SETUP_SECT_MAX*512];
 #define PECOFF_COMPAT_RESERVE 0x0
 #endif
 
-unsigned long efi32_stub_entry;
-unsigned long efi64_stub_entry;
-unsigned long efi_pe_entry;
-unsigned long efi32_pe_entry;
-unsigned long kernel_info;
-unsigned long startup_64;
-unsigned long _ehead;
-unsigned long _end;
+static unsigned long efi32_stub_entry;
+static unsigned long efi64_stub_entry;
+static unsigned long efi_pe_entry;
+static unsigned long efi32_pe_entry;
+static unsigned long kernel_info;
+static unsigned long startup_64;
+static unsigned long _ehead;
+static unsigned long _end;
 
 /*----------------------------------------------------------------------*/
 
index 06ef2d4a470171aecc249d7bbddedccebf67e04e..6737bcea1fa148a129cbc334c92778889dc41963 100644 (file)
@@ -32,16 +32,16 @@ void blake2s_compress_arch(struct blake2s_state *state,
                           const u32 inc)
 {
        /* SIMD disables preemption, so relax after processing each page. */
-       BUILD_BUG_ON(PAGE_SIZE / BLAKE2S_BLOCK_SIZE < 8);
+       BUILD_BUG_ON(SZ_4K / BLAKE2S_BLOCK_SIZE < 8);
 
        if (!static_branch_likely(&blake2s_use_ssse3) || !crypto_simd_usable()) {
                blake2s_compress_generic(state, block, nblocks, inc);
                return;
        }
 
-       for (;;) {
+       do {
                const size_t blocks = min_t(size_t, nblocks,
-                                           PAGE_SIZE / BLAKE2S_BLOCK_SIZE);
+                                           SZ_4K / BLAKE2S_BLOCK_SIZE);
 
                kernel_fpu_begin();
                if (IS_ENABLED(CONFIG_AS_AVX512) &&
@@ -52,10 +52,8 @@ void blake2s_compress_arch(struct blake2s_state *state,
                kernel_fpu_end();
 
                nblocks -= blocks;
-               if (!nblocks)
-                       break;
                block += blocks * BLAKE2S_BLOCK_SIZE;
-       }
+       } while (nblocks);
 }
 EXPORT_SYMBOL(blake2s_compress_arch);
 
index b412c21ee06e29a7d29aa94b6ada32e4da09484d..22250091cdbec81e24d9a9e2345e2966b511a2a0 100644 (file)
@@ -153,9 +153,17 @@ void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
            bytes <= CHACHA_BLOCK_SIZE)
                return chacha_crypt_generic(state, dst, src, bytes, nrounds);
 
-       kernel_fpu_begin();
-       chacha_dosimd(state, dst, src, bytes, nrounds);
-       kernel_fpu_end();
+       do {
+               unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
+
+               kernel_fpu_begin();
+               chacha_dosimd(state, dst, src, todo, nrounds);
+               kernel_fpu_end();
+
+               bytes -= todo;
+               src += todo;
+               dst += todo;
+       } while (bytes);
 }
 EXPORT_SYMBOL(chacha_crypt_arch);
 
index f7567cbd35b6958dbac7e54f4a4406b98bf0fa79..80fcb85736e1d388830eb08dd6054c43d302c8d1 100644 (file)
@@ -29,7 +29,7 @@ static int nhpoly1305_avx2_update(struct shash_desc *desc,
                return crypto_nhpoly1305_update(desc, src, srclen);
 
        do {
-               unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
+               unsigned int n = min_t(unsigned int, srclen, SZ_4K);
 
                kernel_fpu_begin();
                crypto_nhpoly1305_update_helper(desc, src, n, _nh_avx2);
index a661ede3b5cfac684f38249bf26ad3ca24e00b72..cc6b7c1a2705d48c9c39463e1362f178cc3c0b02 100644 (file)
@@ -29,7 +29,7 @@ static int nhpoly1305_sse2_update(struct shash_desc *desc,
                return crypto_nhpoly1305_update(desc, src, srclen);
 
        do {
-               unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
+               unsigned int n = min_t(unsigned int, srclen, SZ_4K);
 
                kernel_fpu_begin();
                crypto_nhpoly1305_update_helper(desc, src, n, _nh_sse2);
index 6dfec19f7d579aa7b717dc511861324e3d3a86e8..dfe921efa9b25cf2816564a84bd50412a5d73770 100644 (file)
@@ -91,8 +91,8 @@ static void poly1305_simd_blocks(void *ctx, const u8 *inp, size_t len,
        struct poly1305_arch_internal *state = ctx;
 
        /* SIMD disables preemption, so relax after processing each page. */
-       BUILD_BUG_ON(PAGE_SIZE < POLY1305_BLOCK_SIZE ||
-                    PAGE_SIZE % POLY1305_BLOCK_SIZE);
+       BUILD_BUG_ON(SZ_4K < POLY1305_BLOCK_SIZE ||
+                    SZ_4K % POLY1305_BLOCK_SIZE);
 
        if (!static_branch_likely(&poly1305_use_avx) ||
            (len < (POLY1305_BLOCK_SIZE * 18) && !state->is_base2_26) ||
@@ -102,8 +102,8 @@ static void poly1305_simd_blocks(void *ctx, const u8 *inp, size_t len,
                return;
        }
 
-       for (;;) {
-               const size_t bytes = min_t(size_t, len, PAGE_SIZE);
+       do {
+               const size_t bytes = min_t(size_t, len, SZ_4K);
 
                kernel_fpu_begin();
                if (IS_ENABLED(CONFIG_AS_AVX512) && static_branch_likely(&poly1305_use_avx512))
@@ -113,11 +113,10 @@ static void poly1305_simd_blocks(void *ctx, const u8 *inp, size_t len,
                else
                        poly1305_blocks_avx(ctx, inp, bytes, padbit);
                kernel_fpu_end();
+
                len -= bytes;
-               if (!len)
-                       break;
                inp += bytes;
-       }
+       } while (len);
 }
 
 static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
index 0789e13ece905c1c8c006fe27d4df96ef98d98cf..1c7f13bb67286238cb555ff1238557b6458b179e 100644 (file)
@@ -98,13 +98,6 @@ For 32-bit we have the following conventions - kernel is built with
 #define SIZEOF_PTREGS  21*8
 
 .macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
-       /*
-        * Push registers and sanitize registers of values that a
-        * speculation attack might otherwise want to exploit. The
-        * lower registers are likely clobbered well before they
-        * could be put to use in a speculative execution gadget.
-        * Interleave XOR with PUSH for better uop scheduling:
-        */
        .if \save_ret
        pushq   %rsi            /* pt_regs->si */
        movq    8(%rsp), %rsi   /* temporarily store the return address in %rsi */
@@ -114,34 +107,43 @@ For 32-bit we have the following conventions - kernel is built with
        pushq   %rsi            /* pt_regs->si */
        .endif
        pushq   \rdx            /* pt_regs->dx */
-       xorl    %edx, %edx      /* nospec   dx */
        pushq   %rcx            /* pt_regs->cx */
-       xorl    %ecx, %ecx      /* nospec   cx */
        pushq   \rax            /* pt_regs->ax */
        pushq   %r8             /* pt_regs->r8 */
-       xorl    %r8d, %r8d      /* nospec   r8 */
        pushq   %r9             /* pt_regs->r9 */
-       xorl    %r9d, %r9d      /* nospec   r9 */
        pushq   %r10            /* pt_regs->r10 */
-       xorl    %r10d, %r10d    /* nospec   r10 */
        pushq   %r11            /* pt_regs->r11 */
-       xorl    %r11d, %r11d    /* nospec   r11*/
        pushq   %rbx            /* pt_regs->rbx */
-       xorl    %ebx, %ebx      /* nospec   rbx*/
        pushq   %rbp            /* pt_regs->rbp */
-       xorl    %ebp, %ebp      /* nospec   rbp*/
        pushq   %r12            /* pt_regs->r12 */
-       xorl    %r12d, %r12d    /* nospec   r12*/
        pushq   %r13            /* pt_regs->r13 */
-       xorl    %r13d, %r13d    /* nospec   r13*/
        pushq   %r14            /* pt_regs->r14 */
-       xorl    %r14d, %r14d    /* nospec   r14*/
        pushq   %r15            /* pt_regs->r15 */
-       xorl    %r15d, %r15d    /* nospec   r15*/
        UNWIND_HINT_REGS
+
        .if \save_ret
        pushq   %rsi            /* return address on top of stack */
        .endif
+
+       /*
+        * Sanitize registers of values that a speculation attack might
+        * otherwise want to exploit. The lower registers are likely clobbered
+        * well before they could be put to use in a speculative execution
+        * gadget.
+        */
+       xorl    %edx,  %edx     /* nospec dx  */
+       xorl    %ecx,  %ecx     /* nospec cx  */
+       xorl    %r8d,  %r8d     /* nospec r8  */
+       xorl    %r9d,  %r9d     /* nospec r9  */
+       xorl    %r10d, %r10d    /* nospec r10 */
+       xorl    %r11d, %r11d    /* nospec r11 */
+       xorl    %ebx,  %ebx     /* nospec rbx */
+       xorl    %ebp,  %ebp     /* nospec rbp */
+       xorl    %r12d, %r12d    /* nospec r12 */
+       xorl    %r13d, %r13d    /* nospec r13 */
+       xorl    %r14d, %r14d    /* nospec r14 */
+       xorl    %r15d, %r15d    /* nospec r15 */
+
 .endm
 
 .macro POP_REGS pop_rdi=1 skip_r11rcx=0
index 0e9504fabe52609917c4aa3462f4faff1b1e32b5..3063aa9090f9a7927143fb929fde7ec7a8150d81 100644 (file)
@@ -249,7 +249,6 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
         */
 syscall_return_via_sysret:
        /* rcx and r11 are already restored (see code above) */
-       UNWIND_HINT_EMPTY
        POP_REGS pop_rdi=0 skip_r11rcx=1
 
        /*
@@ -258,6 +257,7 @@ syscall_return_via_sysret:
         */
        movq    %rsp, %rdi
        movq    PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
+       UNWIND_HINT_EMPTY
 
        pushq   RSP-RDI(%rdi)   /* RSP */
        pushq   (%rdi)          /* RDI */
@@ -279,8 +279,7 @@ SYM_CODE_END(entry_SYSCALL_64)
  * %rdi: prev task
  * %rsi: next task
  */
-SYM_CODE_START(__switch_to_asm)
-       UNWIND_HINT_FUNC
+SYM_FUNC_START(__switch_to_asm)
        /*
         * Save callee-saved registers
         * This must match the order in inactive_task_frame
@@ -321,7 +320,7 @@ SYM_CODE_START(__switch_to_asm)
        popq    %rbp
 
        jmp     __switch_to
-SYM_CODE_END(__switch_to_asm)
+SYM_FUNC_END(__switch_to_asm)
 
 /*
  * A newly forked process directly context switches into this address.
@@ -512,7 +511,7 @@ SYM_CODE_END(spurious_entries_start)
  * +----------------------------------------------------+
  */
 SYM_CODE_START(interrupt_entry)
-       UNWIND_HINT_FUNC
+       UNWIND_HINT_IRET_REGS offset=16
        ASM_CLAC
        cld
 
@@ -544,9 +543,9 @@ SYM_CODE_START(interrupt_entry)
        pushq   5*8(%rdi)               /* regs->eflags */
        pushq   4*8(%rdi)               /* regs->cs */
        pushq   3*8(%rdi)               /* regs->ip */
+       UNWIND_HINT_IRET_REGS
        pushq   2*8(%rdi)               /* regs->orig_ax */
        pushq   8(%rdi)                 /* return address */
-       UNWIND_HINT_FUNC
 
        movq    (%rdi), %rdi
        jmp     2f
@@ -637,6 +636,7 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
         */
        movq    %rsp, %rdi
        movq    PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
+       UNWIND_HINT_EMPTY
 
        /* Copy the IRET frame to the trampoline stack. */
        pushq   6*8(%rdi)       /* SS */
@@ -1739,7 +1739,7 @@ SYM_CODE_START(rewind_stack_do_exit)
 
        movq    PER_CPU_VAR(cpu_current_top_of_stack), %rax
        leaq    -PTREGS_SIZE(%rax), %rsp
-       UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE
+       UNWIND_HINT_REGS
 
        call    do_exit
 SYM_CODE_END(rewind_stack_do_exit)
index 624f5d9b0f79f9c37e2b6e9fad21a6ee31ea7e3f..acf76b466db6b1332e829b1863fa5171f4920664 100644 (file)
@@ -73,7 +73,8 @@ static int hv_cpu_init(unsigned int cpu)
        struct page *pg;
 
        input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
-       pg = alloc_page(GFP_KERNEL);
+       /* hv_cpu_init() can be called with IRQs disabled from hv_resume() */
+       pg = alloc_page(irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
        if (unlikely(!pg))
                return -ENOMEM;
        *input_arg = page_address(pg);
@@ -225,10 +226,18 @@ static int hv_cpu_die(unsigned int cpu)
 
        rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
        if (re_ctrl.target_vp == hv_vp_index[cpu]) {
-               /* Reassign to some other online CPU */
+               /*
+                * Reassign reenlightenment notifications to some other online
+                * CPU or just disable the feature if there are no online CPUs
+                * left (happens on hibernation).
+                */
                new_cpu = cpumask_any_but(cpu_online_mask, cpu);
 
-               re_ctrl.target_vp = hv_vp_index[new_cpu];
+               if (new_cpu < nr_cpu_ids)
+                       re_ctrl.target_vp = hv_vp_index[new_cpu];
+               else
+                       re_ctrl.enabled = 0;
+
                wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
        }
 
@@ -254,6 +263,7 @@ static int __init hv_pci_init(void)
 static int hv_suspend(void)
 {
        union hv_x64_msr_hypercall_contents hypercall_msr;
+       int ret;
 
        /*
         * Reset the hypercall page as it is going to be invalidated
@@ -270,12 +280,17 @@ static int hv_suspend(void)
        hypercall_msr.enable = 0;
        wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
 
-       return 0;
+       ret = hv_cpu_die(0);
+       return ret;
 }
 
 static void hv_resume(void)
 {
        union hv_x64_msr_hypercall_contents hypercall_msr;
+       int ret;
+
+       ret = hv_cpu_init(0);
+       WARN_ON(ret);
 
        /* Re-enable the hypercall page */
        rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
@@ -286,8 +301,16 @@ static void hv_resume(void)
 
        hv_hypercall_pg = hv_hypercall_pg_saved;
        hv_hypercall_pg_saved = NULL;
+
+       /*
+        * Reenlightenment notifications are disabled by hv_cpu_die(0);
+        * re-enable them here if hv_reenlightenment_cb was previously set.
+        */
+       if (hv_reenlightenment_cb)
+               set_hv_tscchange_cb(hv_reenlightenment_cb);
 }
 
+/* Note: when the ops are called, only CPU0 is online and IRQs are disabled. */
 static struct syscore_ops hv_syscore_ops = {
        .suspend        = hv_suspend,
        .resume         = hv_resume,
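
Syscore callbacks run late in suspend, when only the boot CPU is online and
interrupts are off, which is why hv_suspend()/hv_resume() may call
hv_cpu_die(0)/hv_cpu_init(0) directly. The registration site is not part of
this hunk; a sketch of the usual pattern, with a hypothetical init function
name:

        static int __init hv_example_init(void)
        {
                /* hook the ops into the suspend/resume path */
                register_syscore_ops(&hv_syscore_ops);
                return 0;
        }
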
index 53f246e9df5a60da73143cbc1f8a53185ccfc26b..0367efdc5b7a8eaf724949c840c6ec3f8ceaf643 100644 (file)
@@ -52,9 +52,9 @@ static __always_inline void
 arch_set_bit(long nr, volatile unsigned long *addr)
 {
        if (__builtin_constant_p(nr)) {
-               asm volatile(LOCK_PREFIX "orb %1,%0"
+               asm volatile(LOCK_PREFIX "orb %b1,%0"
                        : CONST_MASK_ADDR(nr, addr)
-                       : "iq" (CONST_MASK(nr) & 0xff)
+                       : "iq" (CONST_MASK(nr))
                        : "memory");
        } else {
                asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
@@ -72,9 +72,9 @@ static __always_inline void
 arch_clear_bit(long nr, volatile unsigned long *addr)
 {
        if (__builtin_constant_p(nr)) {
-               asm volatile(LOCK_PREFIX "andb %1,%0"
+               asm volatile(LOCK_PREFIX "andb %b1,%0"
                        : CONST_MASK_ADDR(nr, addr)
-                       : "iq" (CONST_MASK(nr) ^ 0xff));
+                       : "iq" (~CONST_MASK(nr)));
        } else {
                asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
                        : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
@@ -123,9 +123,9 @@ static __always_inline void
 arch_change_bit(long nr, volatile unsigned long *addr)
 {
        if (__builtin_constant_p(nr)) {
-               asm volatile(LOCK_PREFIX "xorb %1,%0"
+               asm volatile(LOCK_PREFIX "xorb %b1,%0"
                        : CONST_MASK_ADDR(nr, addr)
-                       : "iq" ((u8)CONST_MASK(nr)));
+                       : "iq" (CONST_MASK(nr)));
        } else {
                asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
                        : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
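
The rewritten constraints rely on CONST_MASK(nr) being a byte-sized mask;
assuming the kernel's usual definition (not shown in this hunk), a
stand-alone sketch of the immediates the three operations end up using for a
constant bit index:

        #include <stdio.h>

        /* Assumed: mirrors the kernel's CONST_MASK() byte mask. */
        #define CONST_MASK(nr) (1 << ((nr) & 7))

        int main(void)
        {
                /* bit 11 lives in byte 1 of the bitmap */
                printf("set:   orb  $0x%02x\n", CONST_MASK(11));                  /* 0x08 */
                printf("clear: andb $0x%02x\n", (unsigned char)~CONST_MASK(11));  /* 0xf7 */
                printf("xor:   xorb $0x%02x\n", CONST_MASK(11));                  /* 0x08 */
                return 0;
        }
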
index 7e31f7f1bb062c8fdce18323ea308daf5c6ce647..49bd6cf3eec919d8896797b83d81b7600b6e456a 100644 (file)
@@ -3,7 +3,7 @@
 #define _ASM_X86_DEVICE_H
 
 struct dev_archdata {
-#if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU)
+#ifdef CONFIG_IOMMU_API
        void *iommu; /* hook for IOMMU specific extension */
 #endif
 };
index 00f7cf45e6999b8edb776784effcb6bcda778ddb..8e95aa4b0d172362263c178de77c7b324de39263 100644 (file)
@@ -74,7 +74,7 @@
 #define MAX_DMA_PFN   ((16UL * 1024 * 1024) >> PAGE_SHIFT)
 
 /* 4GB broken PCI/AGP hardware bus master zone */
-#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
+#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT))
 
 #ifdef CONFIG_X86_32
 /* The maximum address that we can perform a DMA transfer to on this platform */
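
Both forms of MAX_DMA32_PFN denote the first PFN above the 32-bit boundary;
the shifted form simply avoids spelling out a product that would overflow a
32-bit unsigned long. A quick equivalence check, assuming the common 4 KiB
page size:

        #include <stdio.h>

        #define PAGE_SHIFT 12  /* assumed */

        int main(void)
        {
                unsigned long old_form = (4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT;
                unsigned long new_form = 1UL << (32 - PAGE_SHIFT);

                /* both print 100000 on an LP64 host */
                printf("%lx %lx\n", old_form, new_form);
                return 0;
        }
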
index 85be2f5062728f13f61e84ed9efa6d622e96f5dd..84b9449be0801b168d7038c87c105e93d48a8d96 100644 (file)
@@ -56,16 +56,23 @@ struct dyn_arch_ftrace {
 
 #ifndef __ASSEMBLY__
 
+#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
+extern void set_ftrace_ops_ro(void);
+#else
+static inline void set_ftrace_ops_ro(void) { }
+#endif
+
 #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
 static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
 {
        /*
         * Compare the symbol name with the system call name. Skip the
-        * "__x64_sys", "__ia32_sys" or simple "sys" prefix.
+        * "__x64_sys", "__ia32_sys", "__do_sys" or simple "sys" prefix.
         */
        return !strcmp(sym + 3, name + 3) ||
                (!strncmp(sym, "__x64_", 6) && !strcmp(sym + 9, name + 3)) ||
-               (!strncmp(sym, "__ia32_", 7) && !strcmp(sym + 10, name + 3));
+               (!strncmp(sym, "__ia32_", 7) && !strcmp(sym + 10, name + 3)) ||
+               (!strncmp(sym, "__do_sys", 8) && !strcmp(sym + 8, name + 3));
 }
 
 #ifndef COMPILE_OFFSETS
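
The new "__do_sys" arm matches symbols where the syscall body is emitted
directly: both strings keep a common "sys" spelling, so sym + 8 lines up with
name + 3. A stand-alone sketch of the comparison, with illustrative symbol
names:

        #include <stdio.h>
        #include <string.h>

        static int match(const char *sym, const char *name)
        {
                return !strcmp(sym + 3, name + 3) ||
                       (!strncmp(sym, "__x64_", 6) && !strcmp(sym + 9, name + 3)) ||
                       (!strncmp(sym, "__ia32_", 7) && !strcmp(sym + 10, name + 3)) ||
                       (!strncmp(sym, "__do_sys", 8) && !strcmp(sym + 8, name + 3));
        }

        int main(void)
        {
                printf("%d\n", match("__x64_sys_openat", "sys_openat")); /* 1 */
                printf("%d\n", match("__do_sys_openat", "sys_openat"));  /* 1 */
                printf("%d\n", match("__x64_sys_close", "sys_openat"));  /* 0 */
                return 0;
        }
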
index 07344d82e88ee6b28e4bb2040932eb1135a3b4a3..ac1a99ffbd8d263e4dc2e8a6cba3b589a9913d58 100644 (file)
@@ -17,7 +17,7 @@ struct task_struct;
 
 #ifdef CONFIG_X86_IOPL_IOPERM
 void io_bitmap_share(struct task_struct *tsk);
-void io_bitmap_exit(void);
+void io_bitmap_exit(struct task_struct *tsk);
 
 void native_tss_update_io_bitmap(void);
 
@@ -29,7 +29,7 @@ void native_tss_update_io_bitmap(void);
 
 #else
 static inline void io_bitmap_share(struct task_struct *tsk) { }
-static inline void io_bitmap_exit(void) { }
+static inline void io_bitmap_exit(struct task_struct *tsk) { }
 static inline void tss_update_io_bitmap(void) { }
 #endif
 
index 42a2d0d3984ab43317d0e67ad227223699233621..0a6b35353fc794c7ef6dcb4fbc38915221f1ed79 100644 (file)
@@ -578,6 +578,7 @@ struct kvm_vcpu_arch {
        unsigned long cr4;
        unsigned long cr4_guest_owned_bits;
        unsigned long cr8;
+       u32 host_pkru;
        u32 pkru;
        u32 hflags;
        u64 efer;
@@ -1093,8 +1094,6 @@ struct kvm_x86_ops {
        void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
-       u64 (*get_dr6)(struct kvm_vcpu *vcpu);
-       void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
        void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
        void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
        void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
@@ -1449,6 +1448,7 @@ bool kvm_rdpmc(struct kvm_vcpu *vcpu);
 
 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
+void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, unsigned long payload);
 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
@@ -1663,8 +1663,8 @@ void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
 static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
 {
        /* We can only post Fixed and LowPrio IRQs */
-       return (irq->delivery_mode == dest_Fixed ||
-               irq->delivery_mode == dest_LowestPrio);
+       return (irq->delivery_mode == APIC_DM_FIXED ||
+               irq->delivery_mode == APIC_DM_LOWEST);
 }
 
 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
index 1c42ecbe75cb4d2b430f12e4dfa6a95c7128739f..d30805ed323ef9d682fec37bdd9fc5a8e2f30cf6 100644 (file)
@@ -35,6 +35,8 @@ typedef int (*hyperv_fill_flush_list_func)(
        rdmsrl(HV_X64_MSR_SINT0 + int_num, val)
 #define hv_set_synint_state(int_num, val) \
        wrmsrl(HV_X64_MSR_SINT0 + int_num, val)
+#define hv_recommend_using_aeoi() \
+       (!(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED))
 
 #define hv_get_crash_ctl(val) \
        rdmsrl(HV_X64_MSR_CRASH_CTL, val)
index 91e29b6a86a5e9203c7e432f01c0046bf0bc561e..9804a7957f4e99fa547923995562c415e451c573 100644 (file)
 /*
  * Initialize the stackprotector canary value.
  *
- * NOTE: this must only be called from functions that never return,
+ * NOTE: this must only be called from functions that never return
  * and it must always be inlined.
+ *
+ * In addition, it should be called from a compilation unit for which
+ * stack protector is disabled. Alternatively, the caller should not end
+ * with a function call that gets tail-call optimized, as that would
+ * lead to checking a modified canary value.
  */
 static __always_inline void boot_init_stack_canary(void)
 {
index 499578f7e6d7bb5020fe0c503c70b1be0f5b95f1..70fc159ebe6959fead369c46d8a143812a1bc058 100644 (file)
@@ -19,7 +19,7 @@ struct unwind_state {
 #if defined(CONFIG_UNWINDER_ORC)
        bool signal, full_regs;
        unsigned long sp, bp, ip;
-       struct pt_regs *regs;
+       struct pt_regs *regs, *prev_regs;
 #elif defined(CONFIG_UNWINDER_FRAME_POINTER)
        bool got_irq;
        unsigned long *bp, *orig_sp, ip;
index 196fdd02b8b1b3d71ac994715f7dee8fa8b5fa52..be5e2e747f507657efc74f5ed2b68ed262103fda 100644 (file)
@@ -2,8 +2,15 @@
 #ifndef _UAPI_ASM_X86_UNISTD_H
 #define _UAPI_ASM_X86_UNISTD_H
 
-/* x32 syscall flag bit */
-#define __X32_SYSCALL_BIT      0x40000000UL
+/*
+ * x32 syscall flag bit.  Some user programs expect syscall NR macros
+ * and __X32_SYSCALL_BIT to have type int, even though syscall numbers
+ * are, for practical purposes, unsigned long.
+ *
+ * Fortunately, expressions like (nr & ~__X32_SYSCALL_BIT) do the right
+ * thing regardless.
+ */
+#define __X32_SYSCALL_BIT      0x40000000
 
 #ifndef __KERNEL__
 # ifdef __i386__
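
With the constant typed as int, masking expressions still behave because the
complement sign-extends when widened to long. A small demonstration, using an
illustrative syscall number:

        #include <stdio.h>

        #define __X32_SYSCALL_BIT      0x40000000

        int main(void)
        {
                long nr = __X32_SYSCALL_BIT + 39;  /* illustrative x32 number */

                /* ~__X32_SYSCALL_BIT is the int 0xbfffffff; widening to
                 * long sign-extends, so every bit of nr except bit 30
                 * survives the mask. */
                printf("base nr = %ld\n", nr & ~__X32_SYSCALL_BIT);  /* 39 */
                return 0;
        }
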
index 81b9c63dae1bdee98a90701dbc9026d09bb1b359..e53dda210cd734235eace14d0deafb65bc5e8ec0 100644 (file)
@@ -352,8 +352,6 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
                 * According to Intel, MFENCE can do the serialization here.
                 */
                asm volatile("mfence" : : : "memory");
-
-               printk_once(KERN_DEBUG "TSC deadline timer enabled\n");
                return;
        }
 
@@ -546,7 +544,7 @@ static struct clock_event_device lapic_clockevent = {
 };
 static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
 
-static u32 hsx_deadline_rev(void)
+static __init u32 hsx_deadline_rev(void)
 {
        switch (boot_cpu_data.x86_stepping) {
        case 0x02: return 0x3a; /* EP */
@@ -556,7 +554,7 @@ static u32 hsx_deadline_rev(void)
        return ~0U;
 }
 
-static u32 bdx_deadline_rev(void)
+static __init u32 bdx_deadline_rev(void)
 {
        switch (boot_cpu_data.x86_stepping) {
        case 0x02: return 0x00000011;
@@ -568,7 +566,7 @@ static u32 bdx_deadline_rev(void)
        return ~0U;
 }
 
-static u32 skx_deadline_rev(void)
+static __init u32 skx_deadline_rev(void)
 {
        switch (boot_cpu_data.x86_stepping) {
        case 0x03: return 0x01000136;
@@ -581,7 +579,7 @@ static u32 skx_deadline_rev(void)
        return ~0U;
 }
 
-static const struct x86_cpu_id deadline_match[] = {
+static const struct x86_cpu_id deadline_match[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL( HASWELL_X,          &hsx_deadline_rev),
        X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_X,        0x0b000020),
        X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_D,        &bdx_deadline_rev),
@@ -603,18 +601,19 @@ static const struct x86_cpu_id deadline_match[] = {
        {},
 };
 
-static void apic_check_deadline_errata(void)
+static __init bool apic_validate_deadline_timer(void)
 {
        const struct x86_cpu_id *m;
        u32 rev;
 
-       if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER) ||
-           boot_cpu_has(X86_FEATURE_HYPERVISOR))
-               return;
+       if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
+               return false;
+       if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+               return true;
 
        m = x86_match_cpu(deadline_match);
        if (!m)
-               return;
+               return true;
 
        /*
         * Function pointers will have the MSB set due to address layout,
@@ -626,11 +625,12 @@ static void apic_check_deadline_errata(void)
                rev = (u32)m->driver_data;
 
        if (boot_cpu_data.microcode >= rev)
-               return;
+               return true;
 
        setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
        pr_err(FW_BUG "TSC_DEADLINE disabled due to Errata; "
               "please update microcode to version: 0x%x (or later)\n", rev);
+       return false;
 }
 
 /*
@@ -2092,7 +2092,8 @@ void __init init_apic_mappings(void)
 {
        unsigned int new_apicid;
 
-       apic_check_deadline_errata();
+       if (apic_validate_deadline_timer())
+               pr_debug("TSC deadline timer available\n");
 
        if (x2apic_mode) {
                boot_cpu_physical_apicid = read_apic_id();
index 87b97897a881035966bbef4693df91e8d46b547e..460ae7f66818c7505c5c3aaf3f1be37284e2cd6c 100644 (file)
@@ -183,7 +183,8 @@ recursion_check:
         */
        if (visit_mask) {
                if (*visit_mask & (1UL << info->type)) {
-                       printk_deferred_once(KERN_WARNING "WARNING: stack recursion on stack type %d\n", info->type);
+                       if (task == current)
+                               printk_deferred_once(KERN_WARNING "WARNING: stack recursion on stack type %d\n", info->type);
                        goto unknown;
                }
                *visit_mask |= 1UL << info->type;
index 32b153d387486835a4814c62d76eed6cc7a37dac..6a54e83d55898351d0dc48d90d74c4554d8eea0c 100644 (file)
@@ -957,18 +957,31 @@ static inline bool xfeatures_mxcsr_quirk(u64 xfeatures)
        return true;
 }
 
-/*
- * This is similar to user_regset_copyout(), but will not add offset to
- * the source data pointer or increment pos, count, kbuf, and ubuf.
- */
-static inline void
-__copy_xstate_to_kernel(void *kbuf, const void *data,
-                       unsigned int offset, unsigned int size, unsigned int size_total)
+static void fill_gap(unsigned to, void **kbuf, unsigned *pos, unsigned *count)
 {
-       if (offset < size_total) {
-               unsigned int copy = min(size, size_total - offset);
+       if (*pos < to) {
+               unsigned size = to - *pos;
+
+               if (size > *count)
+                       size = *count;
+               memcpy(*kbuf, (void *)&init_fpstate.xsave + *pos, size);
+               *kbuf += size;
+               *pos += size;
+               *count -= size;
+       }
+}
 
-               memcpy(kbuf + offset, data, copy);
+static void copy_part(unsigned offset, unsigned size, void *from,
+                       void **kbuf, unsigned *pos, unsigned *count)
+{
+       fill_gap(offset, kbuf, pos, count);
+       if (size > *count)
+               size = *count;
+       if (size) {
+               memcpy(*kbuf, from, size);
+               *kbuf += size;
+               *pos += size;
+               *count -= size;
        }
 }
 
@@ -981,8 +994,9 @@ __copy_xstate_to_kernel(void *kbuf, const void *data,
  */
 int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total)
 {
-       unsigned int offset, size;
        struct xstate_header header;
+       const unsigned off_mxcsr = offsetof(struct fxregs_state, mxcsr);
+       unsigned count = size_total;
        int i;
 
        /*
@@ -998,46 +1012,42 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int of
        header.xfeatures = xsave->header.xfeatures;
        header.xfeatures &= ~XFEATURE_MASK_SUPERVISOR;
 
+       if (header.xfeatures & XFEATURE_MASK_FP)
+               copy_part(0, off_mxcsr,
+                         &xsave->i387, &kbuf, &offset_start, &count);
+       if (header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM))
+               copy_part(off_mxcsr, MXCSR_AND_FLAGS_SIZE,
+                         &xsave->i387.mxcsr, &kbuf, &offset_start, &count);
+       if (header.xfeatures & XFEATURE_MASK_FP)
+               copy_part(offsetof(struct fxregs_state, st_space), 128,
+                         &xsave->i387.st_space, &kbuf, &offset_start, &count);
+       if (header.xfeatures & XFEATURE_MASK_SSE)
+               copy_part(xstate_offsets[XFEATURE_MASK_SSE], 256,
+                         &xsave->i387.xmm_space, &kbuf, &offset_start, &count);
+       /*
+        * Fill xsave->i387.sw_reserved value for ptrace frame:
+        */
+       copy_part(offsetof(struct fxregs_state, sw_reserved), 48,
+                 xstate_fx_sw_bytes, &kbuf, &offset_start, &count);
        /*
         * Copy xregs_state->header:
         */
-       offset = offsetof(struct xregs_state, header);
-       size = sizeof(header);
-
-       __copy_xstate_to_kernel(kbuf, &header, offset, size, size_total);
+       copy_part(offsetof(struct xregs_state, header), sizeof(header),
+                 &header, &kbuf, &offset_start, &count);
 
-       for (i = 0; i < XFEATURE_MAX; i++) {
+       for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
                /*
                 * Copy only in-use xstates:
                 */
                if ((header.xfeatures >> i) & 1) {
                        void *src = __raw_xsave_addr(xsave, i);
 
-                       offset = xstate_offsets[i];
-                       size = xstate_sizes[i];
-
-                       /* The next component has to fit fully into the output buffer: */
-                       if (offset + size > size_total)
-                               break;
-
-                       __copy_xstate_to_kernel(kbuf, src, offset, size, size_total);
+                       copy_part(xstate_offsets[i], xstate_sizes[i],
+                                 src, &kbuf, &offset_start, &count);
                }
 
        }
-
-       if (xfeatures_mxcsr_quirk(header.xfeatures)) {
-               offset = offsetof(struct fxregs_state, mxcsr);
-               size = MXCSR_AND_FLAGS_SIZE;
-               __copy_xstate_to_kernel(kbuf, &xsave->i387.mxcsr, offset, size, size_total);
-       }
-
-       /*
-        * Fill xsave->i387.sw_reserved value for ptrace frame:
-        */
-       offset = offsetof(struct fxregs_state, sw_reserved);
-       size = sizeof(xstate_fx_sw_bytes);
-
-       __copy_xstate_to_kernel(kbuf, xstate_fx_sw_bytes, offset, size, size_total);
+       fill_gap(size_total, &kbuf, &offset_start, &count);
 
        return 0;
 }
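
The rewrite above replaces the bounds-checked __copy_xstate_to_kernel() calls
with a cursor over the output window: fill_gap() backfills any skipped range
from init_fpstate, and copy_part() clamps each component to the bytes the
caller actually asked for. A minimal userspace sketch of that pattern, with
invented sizes and an all-zero array standing in for init_fpstate.xsave:

#include <stdio.h>
#include <string.h>

static unsigned char init[64];          /* backfill source (stand-in) */

static void fill_gap(unsigned to, unsigned char **kbuf,
                     unsigned *pos, unsigned *count)
{
        if (*pos < to) {
                unsigned size = to - *pos;

                if (size > *count)
                        size = *count;
                memcpy(*kbuf, init + *pos, size);
                *kbuf += size; *pos += size; *count -= size;
        }
}

static void copy_part(unsigned offset, unsigned size, const void *from,
                      unsigned char **kbuf, unsigned *pos, unsigned *count)
{
        fill_gap(offset, kbuf, pos, count);     /* catch the cursor up */
        if (size > *count)
                size = *count;                  /* clamp to what's left */
        if (size) {
                memcpy(*kbuf, from, size);
                *kbuf += size; *pos += size; *count -= size;
        }
}

int main(void)
{
        unsigned char out[64], *kbuf = out;
        unsigned pos = 0, count = sizeof(out);
        const char a[] = "AAAA", b[] = "BBBB";

        copy_part(8, 4, a, &kbuf, &pos, &count);    /* bytes 0-7 backfilled */
        copy_part(20, 4, b, &kbuf, &pos, &count);   /* bytes 12-19 backfilled */
        fill_gap(sizeof(out), &kbuf, &pos, &count); /* pad out the tail */
        printf("wrote %u bytes, %u left\n", pos, count);
        return 0;
}

Components that only partly fit are now truncated instead of dropped, and
skipped ranges are backfilled rather than left uninitialized.
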
index 37a0aeaf89e771b63bccbeab92979469cbd96ecb..b0e641793be4fb280cec2e83d8c99ea7bd922ae3 100644 (file)
@@ -407,7 +407,8 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
 
        set_vm_flush_reset_perms(trampoline);
 
-       set_memory_ro((unsigned long)trampoline, npages);
+       if (likely(system_state != SYSTEM_BOOTING))
+               set_memory_ro((unsigned long)trampoline, npages);
        set_memory_x((unsigned long)trampoline, npages);
        return (unsigned long)trampoline;
 fail:
@@ -415,6 +416,32 @@ fail:
        return 0;
 }
 
+void set_ftrace_ops_ro(void)
+{
+       struct ftrace_ops *ops;
+       unsigned long start_offset;
+       unsigned long end_offset;
+       unsigned long npages;
+       unsigned long size;
+
+       do_for_each_ftrace_op(ops, ftrace_ops_list) {
+               if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
+                       continue;
+
+               if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
+                       start_offset = (unsigned long)ftrace_regs_caller;
+                       end_offset = (unsigned long)ftrace_regs_caller_end;
+               } else {
+                       start_offset = (unsigned long)ftrace_caller;
+                       end_offset = (unsigned long)ftrace_epilogue;
+               }
+               size = end_offset - start_offset;
+               size = size + RET_SIZE + sizeof(void *);
+               npages = DIV_ROUND_UP(size, PAGE_SIZE);
+               set_memory_ro((unsigned long)ops->trampoline, npages);
+       } while_for_each_ftrace_op(ops);
+}
+
 static unsigned long calc_trampoline_call_offset(bool save_regs)
 {
        unsigned long start_offset;
index a53e7b4a741925ba9048fcbba3030bccc9f3766e..e2fab3ceb09fb7da1d307e69af014b24ea65735f 100644 (file)
@@ -33,15 +33,15 @@ void io_bitmap_share(struct task_struct *tsk)
        set_tsk_thread_flag(tsk, TIF_IO_BITMAP);
 }
 
-static void task_update_io_bitmap(void)
+static void task_update_io_bitmap(struct task_struct *tsk)
 {
-       struct thread_struct *t = &current->thread;
+       struct thread_struct *t = &tsk->thread;
 
        if (t->iopl_emul == 3 || t->io_bitmap) {
                /* TSS update is handled on exit to user space */
-               set_thread_flag(TIF_IO_BITMAP);
+               set_tsk_thread_flag(tsk, TIF_IO_BITMAP);
        } else {
-               clear_thread_flag(TIF_IO_BITMAP);
+               clear_tsk_thread_flag(tsk, TIF_IO_BITMAP);
                /* Invalidate TSS */
                preempt_disable();
                tss_update_io_bitmap();
@@ -49,12 +49,12 @@ static void task_update_io_bitmap(void)
        }
 }
 
-void io_bitmap_exit(void)
+void io_bitmap_exit(struct task_struct *tsk)
 {
-       struct io_bitmap *iobm = current->thread.io_bitmap;
+       struct io_bitmap *iobm = tsk->thread.io_bitmap;
 
-       current->thread.io_bitmap = NULL;
-       task_update_io_bitmap();
+       tsk->thread.io_bitmap = NULL;
+       task_update_io_bitmap(tsk);
        if (iobm && refcount_dec_and_test(&iobm->refcnt))
                kfree(iobm);
 }
@@ -102,7 +102,7 @@ long ksys_ioperm(unsigned long from, unsigned long num, int turn_on)
                if (!iobm)
                        return -ENOMEM;
                refcount_set(&iobm->refcnt, 1);
-               io_bitmap_exit();
+               io_bitmap_exit(current);
        }
 
        /*
@@ -134,7 +134,7 @@ long ksys_ioperm(unsigned long from, unsigned long num, int turn_on)
        }
        /* All permissions dropped? */
        if (max_long == UINT_MAX) {
-               io_bitmap_exit();
+               io_bitmap_exit(current);
                return 0;
        }
 
@@ -192,7 +192,7 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
        }
 
        t->iopl_emul = level;
-       task_update_io_bitmap();
+       task_update_io_bitmap(current);
 
        return 0;
 }
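
The ioperm/iopl hunks above convert io_bitmap_exit() and
task_update_io_bitmap() from acting implicitly on current to taking an
explicit task pointer, so exit_thread() can release the bitmap of the task
actually being torn down. A toy model of the same refactor shape, with
simplified refcounting and invented types:

#include <stdio.h>
#include <stdlib.h>

struct io_bitmap { int refcnt; };
struct task      { struct io_bitmap *iobm; };

static void io_bitmap_exit(struct task *tsk)    /* was: implicit current */
{
        struct io_bitmap *iobm = tsk->iobm;

        tsk->iobm = NULL;
        if (iobm && --iobm->refcnt == 0) {
                free(iobm);
                puts("bitmap freed");
        }
}

int main(void)
{
        struct task dying = { .iobm = malloc(sizeof(struct io_bitmap)) };

        dying.iobm->refcnt = 1;
        io_bitmap_exit(&dying);  /* clean up *that* task, not the caller */
        return 0;
}

Callers that really do mean the current task simply pass current, as the
ksys_ioperm() and iopl hunks show.
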
index 9da70b279dad8c36a68b6c4c5f2862ad394d849f..35638f1c5791f17247d9c046351435cb75f119f6 100644 (file)
@@ -96,7 +96,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 }
 
 /*
- * Free current thread data structures etc..
+ * Free thread data structures etc.
  */
 void exit_thread(struct task_struct *tsk)
 {
@@ -104,7 +104,7 @@ void exit_thread(struct task_struct *tsk)
        struct fpu *fpu = &t->fpu;
 
        if (test_thread_flag(TIF_IO_BITMAP))
-               io_bitmap_exit();
+               io_bitmap_exit(tsk);
 
        free_vm86(t);
 
index 8c89e4d9ad28da008a4bb5c5d8abb95993edd4b2..2f24c334a938b3f0cbccb0de4f944f127e6e5503 100644 (file)
@@ -266,6 +266,14 @@ static void notrace start_secondary(void *unused)
 
        wmb();
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+
+       /*
+        * Prevent tail call to cpu_startup_entry() because the stack protector
+        * guard has been changed a couple of function calls up, in
+        * boot_init_stack_canary() and must not be checked before tail calling
+        * another function.
+        */
+       prevent_tail_call_optimization();
 }
 
 /**
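
prevent_tail_call_optimization() exists because a new-enough compiler (the
problem surfaced with gcc 10) may turn the final call of a function that
never returns into a tail jump, running the epilogue, including the
stack-protector check, after boot_init_stack_canary() has already rewritten
the guard value. A hedged, self-contained model; upstream implements the
macro as mb(), and the empty asm below is only an illustrative stand-in:

#include <stdio.h>

/* Stand-in: a compiler barrier that keeps the preceding call a real call. */
#define prevent_tail_call_optimization() __asm__ __volatile__("" ::: "memory")

static void boot_init_stack_canary_model(void)
{
        puts("stack-protector guard rewritten");
}

static void cpu_startup_entry_model(void)
{
        puts("enter idle loop (never returns upstream)");
}

void start_secondary_model(void)
{
        boot_init_stack_canary_model();
        cpu_startup_entry_model();
        /*
         * Tail-calling the function above would run this function's
         * epilogue first, comparing the canary saved at entry against
         * the freshly rewritten guard: a guaranteed panic at boot.
         * The barrier below forces a real call instead.
         */
        prevent_tail_call_optimization();
}

int main(void)
{
        start_secondary_model();
        return 0;
}
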
index a224b5ab103fa60ddd64e0a9a459c53fdb521d77..54226110bc7fd707c56c8d5715b3e776a8586c21 100644 (file)
@@ -344,6 +344,9 @@ bad_address:
        if (IS_ENABLED(CONFIG_X86_32))
                goto the_end;
 
+       if (state->task != current)
+               goto the_end;
+
        if (state->regs) {
                printk_deferred_once(KERN_WARNING
                        "WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",
index e9cc182aa97eed1d6d23858f5a89b2f663b1c6e9..7f969b2d240fd9a59a02eea197237dad70d8342b 100644 (file)
@@ -8,19 +8,21 @@
 #include <asm/orc_lookup.h>
 
 #define orc_warn(fmt, ...) \
-       printk_deferred_once(KERN_WARNING pr_fmt("WARNING: " fmt), ##__VA_ARGS__)
+       printk_deferred_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__)
+
+#define orc_warn_current(args...)                                      \
+({                                                                     \
+       if (state->task == current)                                     \
+               orc_warn(args);                                         \
+})
 
 extern int __start_orc_unwind_ip[];
 extern int __stop_orc_unwind_ip[];
 extern struct orc_entry __start_orc_unwind[];
 extern struct orc_entry __stop_orc_unwind[];
 
-static DEFINE_MUTEX(sort_mutex);
-int *cur_orc_ip_table = __start_orc_unwind_ip;
-struct orc_entry *cur_orc_table = __start_orc_unwind;
-
-unsigned int lookup_num_blocks;
-bool orc_init;
+static bool orc_init __ro_after_init;
+static unsigned int lookup_num_blocks __ro_after_init;
 
 static inline unsigned long orc_ip(const int *ip)
 {
@@ -142,9 +144,6 @@ static struct orc_entry *orc_find(unsigned long ip)
 {
        static struct orc_entry *orc;
 
-       if (!orc_init)
-               return NULL;
-
        if (ip == 0)
                return &null_orc_entry;
 
@@ -189,6 +188,10 @@ static struct orc_entry *orc_find(unsigned long ip)
 
 #ifdef CONFIG_MODULES
 
+static DEFINE_MUTEX(sort_mutex);
+static int *cur_orc_ip_table = __start_orc_unwind_ip;
+static struct orc_entry *cur_orc_table = __start_orc_unwind;
+
 static void orc_sort_swap(void *_a, void *_b, int size)
 {
        struct orc_entry *orc_a, *orc_b;
@@ -317,12 +320,19 @@ EXPORT_SYMBOL_GPL(unwind_get_return_address);
 
 unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
 {
+       struct task_struct *task = state->task;
+
        if (unwind_done(state))
                return NULL;
 
        if (state->regs)
                return &state->regs->ip;
 
+       if (task != current && state->sp == task->thread.sp) {
+               struct inactive_task_frame *frame = (void *)task->thread.sp;
+               return &frame->ret_addr;
+       }
+
        if (state->sp)
                return (unsigned long *)state->sp - 1;
 
@@ -381,9 +391,38 @@ static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr
        return true;
 }
 
+/*
+ * If state->regs is non-NULL and points to a full pt_regs, just get the reg
+ * value from state->regs.
+ *
+ * Otherwise, if state->regs just points to IRET regs, and the previous frame
+ * had full regs, it's safe to get the value from the previous regs.  This can
+ * happen when early/late IRQ entry code gets interrupted by an NMI.
+ */
+static bool get_reg(struct unwind_state *state, unsigned int reg_off,
+                   unsigned long *val)
+{
+       unsigned int reg = reg_off/8;
+
+       if (!state->regs)
+               return false;
+
+       if (state->full_regs) {
+               *val = ((unsigned long *)state->regs)[reg];
+               return true;
+       }
+
+       if (state->prev_regs) {
+               *val = ((unsigned long *)state->prev_regs)[reg];
+               return true;
+       }
+
+       return false;
+}
+
 bool unwind_next_frame(struct unwind_state *state)
 {
-       unsigned long ip_p, sp, orig_ip = state->ip, prev_sp = state->sp;
+       unsigned long ip_p, sp, tmp, orig_ip = state->ip, prev_sp = state->sp;
        enum stack_type prev_type = state->stack_info.type;
        struct orc_entry *orc;
        bool indirect = false;
@@ -445,43 +484,39 @@ bool unwind_next_frame(struct unwind_state *state)
                break;
 
        case ORC_REG_R10:
-               if (!state->regs || !state->full_regs) {
-                       orc_warn("missing regs for base reg R10 at ip %pB\n",
-                                (void *)state->ip);
+               if (!get_reg(state, offsetof(struct pt_regs, r10), &sp)) {
+                       orc_warn_current("missing R10 value at %pB\n",
+                                        (void *)state->ip);
                        goto err;
                }
-               sp = state->regs->r10;
                break;
 
        case ORC_REG_R13:
-               if (!state->regs || !state->full_regs) {
-                       orc_warn("missing regs for base reg R13 at ip %pB\n",
-                                (void *)state->ip);
+               if (!get_reg(state, offsetof(struct pt_regs, r13), &sp)) {
+                       orc_warn_current("missing R13 value at %pB\n",
+                                        (void *)state->ip);
                        goto err;
                }
-               sp = state->regs->r13;
                break;
 
        case ORC_REG_DI:
-               if (!state->regs || !state->full_regs) {
-                       orc_warn("missing regs for base reg DI at ip %pB\n",
-                                (void *)state->ip);
+               if (!get_reg(state, offsetof(struct pt_regs, di), &sp)) {
+                       orc_warn_current("missing RDI value at %pB\n",
+                                        (void *)state->ip);
                        goto err;
                }
-               sp = state->regs->di;
                break;
 
        case ORC_REG_DX:
-               if (!state->regs || !state->full_regs) {
-                       orc_warn("missing regs for base reg DX at ip %pB\n",
-                                (void *)state->ip);
+               if (!get_reg(state, offsetof(struct pt_regs, dx), &sp)) {
+                       orc_warn_current("missing DX value at %pB\n",
+                                        (void *)state->ip);
                        goto err;
                }
-               sp = state->regs->dx;
                break;
 
        default:
-               orc_warn("unknown SP base reg %d for ip %pB\n",
+               orc_warn("unknown SP base reg %d at %pB\n",
                         orc->sp_reg, (void *)state->ip);
                goto err;
        }
@@ -504,44 +539,48 @@ bool unwind_next_frame(struct unwind_state *state)
 
                state->sp = sp;
                state->regs = NULL;
+               state->prev_regs = NULL;
                state->signal = false;
                break;
 
        case ORC_TYPE_REGS:
                if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
-                       orc_warn("can't dereference registers at %p for ip %pB\n",
-                                (void *)sp, (void *)orig_ip);
+                       orc_warn_current("can't access registers at %pB\n",
+                                        (void *)orig_ip);
                        goto err;
                }
 
                state->regs = (struct pt_regs *)sp;
+               state->prev_regs = NULL;
                state->full_regs = true;
                state->signal = true;
                break;
 
        case ORC_TYPE_REGS_IRET:
                if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
-                       orc_warn("can't dereference iret registers at %p for ip %pB\n",
-                                (void *)sp, (void *)orig_ip);
+                       orc_warn_current("can't access iret registers at %pB\n",
+                                        (void *)orig_ip);
                        goto err;
                }
 
+               if (state->full_regs)
+                       state->prev_regs = state->regs;
                state->regs = (void *)sp - IRET_FRAME_OFFSET;
                state->full_regs = false;
                state->signal = true;
                break;
 
        default:
-               orc_warn("unknown .orc_unwind entry type %d for ip %pB\n",
+               orc_warn("unknown .orc_unwind entry type %d at %pB\n",
                         orc->type, (void *)orig_ip);
-               break;
+               goto err;
        }
 
        /* Find BP: */
        switch (orc->bp_reg) {
        case ORC_REG_UNDEFINED:
-               if (state->regs && state->full_regs)
-                       state->bp = state->regs->bp;
+               if (get_reg(state, offsetof(struct pt_regs, bp), &tmp))
+                       state->bp = tmp;
                break;
 
        case ORC_REG_PREV_SP:
@@ -564,8 +603,8 @@ bool unwind_next_frame(struct unwind_state *state)
        if (state->stack_info.type == prev_type &&
            on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) &&
            state->sp <= prev_sp) {
-               orc_warn("stack going in the wrong direction? ip=%pB\n",
-                        (void *)orig_ip);
+               orc_warn_current("stack going in the wrong direction? at %pB\n",
+                                (void *)orig_ip);
                goto err;
        }
 
@@ -588,17 +627,20 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
        memset(state, 0, sizeof(*state));
        state->task = task;
 
+       if (!orc_init)
+               goto err;
+
        /*
         * Refuse to unwind the stack of a task while it's executing on another
         * CPU.  This check is racy, but that's ok: the unwinder has other
         * checks to prevent it from going off the rails.
         */
        if (task_on_another_cpu(task))
-               goto done;
+               goto err;
 
        if (regs) {
                if (user_mode(regs))
-                       goto done;
+                       goto the_end;
 
                state->ip = regs->ip;
                state->sp = regs->sp;
@@ -631,6 +673,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
                 * generate some kind of backtrace if this happens.
                 */
                void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
+               state->error = true;
                if (get_stack_info(next_page, state->task, &state->stack_info,
                                   &state->stack_mask))
                        return;
@@ -651,13 +694,14 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
        /* Otherwise, skip ahead to the user-specified starting frame: */
        while (!unwind_done(state) &&
               (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
-                       state->sp <= (unsigned long)first_frame))
+                       state->sp < (unsigned long)first_frame))
                unwind_next_frame(state);
 
        return;
 
-done:
+err:
+       state->error = true;
+the_end:
        state->stack_info.type = STACK_TYPE_UNKNOWN;
-       return;
 }
 EXPORT_SYMBOL_GPL(__unwind_start);
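
The new get_reg() centralizes the fallback that every ORC_REG_* case used to
open-code: prefer the current frame's full pt_regs, and when the frame only
carries the partial IRET register set, borrow the previous frame's full copy
saved in state->prev_regs. A simplified standalone model, with invented
types:

#include <stdbool.h>
#include <stdio.h>

struct regs  { unsigned long r[16]; };

struct state {
        struct regs *regs;       /* current frame's registers, if any  */
        struct regs *prev_regs;  /* last full set before an IRET frame */
        bool full_regs;
};

static bool get_reg(struct state *st, unsigned idx, unsigned long *val)
{
        if (!st->regs)
                return false;
        if (st->full_regs) {             /* current frame has everything */
                *val = st->regs->r[idx];
                return true;
        }
        if (st->prev_regs) {             /* borrow the interrupted frame */
                *val = st->prev_regs->r[idx];
                return true;
        }
        return false;
}

int main(void)
{
        struct regs full = { .r = { [10] = 0xdead } };
        struct regs iret = { { 0 } };
        struct state st = { &iret, &full, false };
        unsigned long v;

        if (get_reg(&st, 10, &v))
                printf("r10 = %#lx (borrowed from prev_regs)\n", v);
        return 0;
}

This is what lets the ORC unwinder recover R10/R13/RDI/RDX across an NMI
that interrupted IRQ entry code, instead of warning and giving up.
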
index bcefa9d4e57ef333e803abfd6f87c39fd878cd92..54d4b98b49e182ac5abd3910ac7a97ad60c251f1 100644 (file)
@@ -1427,7 +1427,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
         */
        kvm_make_vcpus_request_mask(kvm,
                                    KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP,
-                                   vcpu_mask, &hv_vcpu->tlb_flush);
+                                   NULL, vcpu_mask, &hv_vcpu->tlb_flush);
 
 ret_success:
        /* We always do full TLB flush, set rep_done = rep_cnt. */
index 750ff0b294047cf83b2c28f72cfbd057375fc7b1..d057376bd3d33ceb24155d6b2d19a7e5fa18a71a 100644 (file)
@@ -225,12 +225,12 @@ static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
        }
 
        /*
-        * AMD SVM AVIC accelerate EOI write and do not trap,
-        * in-kernel IOAPIC will not be able to receive the EOI.
-        * In this case, we do lazy update of the pending EOI when
-        * trying to set IOAPIC irq.
+        * AMD SVM AVIC accelerates EOI write iff the interrupt is edge
+        * triggered, in which case the in-kernel IOAPIC will not be able
+        * to receive the EOI.  In this case, we do a lazy update of the
+        * pending EOI when trying to set IOAPIC irq.
         */
-       if (kvm_apicv_activated(ioapic->kvm))
+       if (edge && kvm_apicv_activated(ioapic->kvm))
                ioapic_lazy_update_eoi(ioapic, irq);
 
        /*
index 90a1ca93962782fd39d0ea7a0368fb5b47829086..9a2a62e5afebe9e17c099bbe04e204135c1c2ff6 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/kernel.h>
 
 #include <asm/msr-index.h>
+#include <asm/debugreg.h>
 
 #include "kvm_emulate.h"
 #include "trace.h"
@@ -267,7 +268,7 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
        svm->vmcb->save.rsp = nested_vmcb->save.rsp;
        svm->vmcb->save.rip = nested_vmcb->save.rip;
        svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
-       svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
+       svm->vcpu.arch.dr6  = nested_vmcb->save.dr6;
        svm->vmcb->save.cpl = nested_vmcb->save.cpl;
 
        svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
@@ -482,7 +483,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
        nested_vmcb->save.rsp    = vmcb->save.rsp;
        nested_vmcb->save.rax    = vmcb->save.rax;
        nested_vmcb->save.dr7    = vmcb->save.dr7;
-       nested_vmcb->save.dr6    = vmcb->save.dr6;
+       nested_vmcb->save.dr6    = svm->vcpu.arch.dr6;
        nested_vmcb->save.cpl    = vmcb->save.cpl;
 
        nested_vmcb->control.int_ctl           = vmcb->control.int_ctl;
@@ -606,26 +607,45 @@ static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
 /* DB exceptions for our internal use must not cause vmexit */
 static int nested_svm_intercept_db(struct vcpu_svm *svm)
 {
-       unsigned long dr6;
+       unsigned long dr6 = svm->vmcb->save.dr6;
+
+       /* Always catch it and pass it to userspace if debugging.  */
+       if (svm->vcpu.guest_debug &
+           (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
+               return NESTED_EXIT_HOST;
 
        /* if we're not singlestepping, it's not ours */
        if (!svm->nmi_singlestep)
-               return NESTED_EXIT_DONE;
+               goto reflected_db;
 
        /* if it's not a singlestep exception, it's not ours */
-       if (kvm_get_dr(&svm->vcpu, 6, &dr6))
-               return NESTED_EXIT_DONE;
        if (!(dr6 & DR6_BS))
-               return NESTED_EXIT_DONE;
+               goto reflected_db;
 
        /* if the guest is singlestepping, it should get the vmexit */
        if (svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF) {
                disable_nmi_singlestep(svm);
-               return NESTED_EXIT_DONE;
+               goto reflected_db;
        }
 
        /* it's ours, the nested hypervisor must not see this one */
        return NESTED_EXIT_HOST;
+
+reflected_db:
+       /*
+        * Synchronize guest DR6 here just like in kvm_deliver_exception_payload;
+        * it will be moved into the nested VMCB by nested_svm_vmexit.  Once
+        * exceptions are moved to svm_check_nested_events, all this stuff
+        * will just go away and we can return NESTED_EXIT_HOST
+        * unconditionally.  db_interception will queue the exception, which
+        * will be processed by svm_check_nested_events if a nested vmexit is
+        * required, and we will just use kvm_deliver_exception_payload to copy
+        * the payload to DR6 before vmexit.
+        */
+       WARN_ON(svm->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT);
+       svm->vcpu.arch.dr6 &= ~(DR_TRAP_BITS | DR6_RTM);
+       svm->vcpu.arch.dr6 |= dr6 & ~DR6_FIXED_1;
+       return NESTED_EXIT_DONE;
 }
 
 static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
@@ -682,6 +702,9 @@ static int nested_svm_intercept(struct vcpu_svm *svm)
                if (svm->nested.intercept_exceptions & excp_bits) {
                        if (exit_code == SVM_EXIT_EXCP_BASE + DB_VECTOR)
                                vmexit = nested_svm_intercept_db(svm);
+                       else if (exit_code == SVM_EXIT_EXCP_BASE + BP_VECTOR &&
+                                svm->vcpu.guest_debug & KVM_GUESTDBG_USE_SW_BP)
+                               vmexit = NESTED_EXIT_HOST;
                        else
                                vmexit = NESTED_EXIT_DONE;
                }
index cf912b4aaba8e419f73c00ee26a760e7752ec1f1..89f7f3aebd31b102eee3e20443455e850fcddef2 100644 (file)
@@ -345,7 +345,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
                return NULL;
 
        /* Pin the user virtual address. */
-       npinned = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
+       npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
        if (npinned != npages) {
                pr_err("SEV: Failure locking %lu pages.\n", npages);
                goto err;
index 2f379bacbb26a3750157f80456a8d6321fa53ad2..a862c768fd542695614d335b033dd18d1c3f86f9 100644 (file)
@@ -1672,17 +1672,14 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
        mark_dirty(svm->vmcb, VMCB_ASID);
 }
 
-static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
+static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value)
 {
-       return to_svm(vcpu)->vmcb->save.dr6;
-}
-
-static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
-{
-       struct vcpu_svm *svm = to_svm(vcpu);
+       struct vmcb *vmcb = svm->vmcb;
 
-       svm->vmcb->save.dr6 = value;
-       mark_dirty(svm->vmcb, VMCB_DR);
+       if (unlikely(value != vmcb->save.dr6)) {
+               vmcb->save.dr6 = value;
+               mark_dirty(vmcb, VMCB_DR);
+       }
 }
 
 static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
@@ -1693,9 +1690,12 @@ static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
        get_debugreg(vcpu->arch.db[1], 1);
        get_debugreg(vcpu->arch.db[2], 2);
        get_debugreg(vcpu->arch.db[3], 3);
-       vcpu->arch.dr6 = svm_get_dr6(vcpu);
+       /*
+        * We cannot reset svm->vmcb->save.dr6 to DR6_FIXED_1|DR6_RTM here,
+        * because db_interception might need it.  We can do it before vmentry.
+        */
+       vcpu->arch.dr6 = svm->vmcb->save.dr6;
        vcpu->arch.dr7 = svm->vmcb->save.dr7;
-
        vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
        set_dr_intercepts(svm);
 }
@@ -1739,7 +1739,8 @@ static int db_interception(struct vcpu_svm *svm)
        if (!(svm->vcpu.guest_debug &
              (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
                !svm->nmi_singlestep) {
-               kvm_queue_exception(&svm->vcpu, DB_VECTOR);
+               u32 payload = (svm->vmcb->save.dr6 ^ DR6_RTM) & ~DR6_FIXED_1;
+               kvm_queue_exception_p(&svm->vcpu, DB_VECTOR, payload);
                return 1;
        }
 
@@ -1752,6 +1753,8 @@ static int db_interception(struct vcpu_svm *svm)
        if (svm->vcpu.guest_debug &
            (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
                kvm_run->exit_reason = KVM_EXIT_DEBUG;
+               kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6;
+               kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7;
                kvm_run->debug.arch.pc =
                        svm->vmcb->save.cs.base + svm->vmcb->save.rip;
                kvm_run->debug.arch.exception = DB_VECTOR;
@@ -3315,6 +3318,15 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 
        svm->vmcb->save.cr2 = vcpu->arch.cr2;
 
+       /*
+        * Run with all-zero DR6 unless needed, so that we can get the exact cause
+        * of a #DB.
+        */
+       if (unlikely(svm->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
+               svm_set_dr6(svm, vcpu->arch.dr6);
+       else
+               svm_set_dr6(svm, DR6_FIXED_1 | DR6_RTM);
+
        clgi();
        kvm_load_guest_xsave_state(vcpu);
 
@@ -3929,8 +3941,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
        .set_idt = svm_set_idt,
        .get_gdt = svm_get_gdt,
        .set_gdt = svm_set_gdt,
-       .get_dr6 = svm_get_dr6,
-       .set_dr6 = svm_set_dr6,
        .set_dr7 = svm_set_dr7,
        .sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
        .cache_reg = svm_cache_reg,
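
Note how svm_set_dr6() now skips both the VMCB write and the clean-bit
invalidation when the value is unchanged; that matters because svm_vcpu_run()
sets DR6 on every entry. A small model of the write-if-changed pattern, with
stand-in field and flag names:

#include <stdio.h>

struct vmcb_model {
        unsigned long dr6;
        unsigned int  clean_bits;   /* bit set: CPU may use cached copy */
};

#define VMCB_DR_BIT (1u << 0)

static void set_dr6(struct vmcb_model *v, unsigned long value)
{
        if (value != v->dr6) {      /* common case: no change, no write */
                v->dr6 = value;
                v->clean_bits &= ~VMCB_DR_BIT;  /* mark_dirty()         */
        }
}

int main(void)
{
        struct vmcb_model v = { 0xffff0ff0, ~0u };

        set_dr6(&v, 0xffff0ff0);    /* no-op, stays clean        */
        set_dr6(&v, 0xffff4ff0);    /* real change, marked dirty */
        printf("clean_bits = %#x\n", v.clean_bits);
        return 0;
}
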
index fd78ffbde644479348510b25c8fbf8433291509a..e44f33c82332505b057ae016cf0d5a5c0cba23d0 100644 (file)
@@ -5165,7 +5165,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
         */
                break;
        default:
-               BUG_ON(1);
+               BUG();
                break;
        }
 
index 87f3f24fef37b45980fae9bfbdb19e9485c87a61..51d1a82742fd5ba11b0b18f545b37edf5d3e6767 100644 (file)
@@ -82,6 +82,9 @@ SYM_FUNC_START(vmx_vmexit)
        /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
        FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
 
+       /* Clear RFLAGS.CF and RFLAGS.ZF to preserve VM-Exit, i.e. !VM-Fail. */
+       or $1, %_ASM_AX
+
        pop %_ASM_AX
 .Lvmexit_skip_rsb:
 #endif
index c2c6335a998c2f09f7f608b38e52403765c65e22..89c766fad889ea2581678d1cd966dea556418529 100644 (file)
@@ -1372,7 +1372,6 @@ void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
        vmx_vcpu_pi_load(vcpu, cpu);
 
-       vmx->host_pkru = read_pkru();
        vmx->host_debugctlmsr = get_debugctlmsr();
 }
 
@@ -4677,15 +4676,13 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
                dr6 = vmcs_readl(EXIT_QUALIFICATION);
                if (!(vcpu->guest_debug &
                      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
-                       vcpu->arch.dr6 &= ~DR_TRAP_BITS;
-                       vcpu->arch.dr6 |= dr6 | DR6_RTM;
                        if (is_icebp(intr_info))
                                WARN_ON(!skip_emulated_instruction(vcpu));
 
-                       kvm_queue_exception(vcpu, DB_VECTOR);
+                       kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
                        return 1;
                }
-               kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
+               kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
                kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
                /* fall through */
        case BP_VECTOR:
@@ -4929,16 +4926,14 @@ static int handle_dr(struct kvm_vcpu *vcpu)
                 * guest debugging itself.
                 */
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
-                       vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
+                       vcpu->run->debug.arch.dr6 = DR6_BD | DR6_RTM | DR6_FIXED_1;
                        vcpu->run->debug.arch.dr7 = dr7;
                        vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
                        vcpu->run->debug.arch.exception = DB_VECTOR;
                        vcpu->run->exit_reason = KVM_EXIT_DEBUG;
                        return 0;
                } else {
-                       vcpu->arch.dr6 &= ~DR_TRAP_BITS;
-                       vcpu->arch.dr6 |= DR6_BD | DR6_RTM;
-                       kvm_queue_exception(vcpu, DB_VECTOR);
+                       kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BD);
                        return 1;
                }
        }
@@ -4969,15 +4964,6 @@ static int handle_dr(struct kvm_vcpu *vcpu)
        return kvm_skip_emulated_instruction(vcpu);
 }
 
-static u64 vmx_get_dr6(struct kvm_vcpu *vcpu)
-{
-       return vcpu->arch.dr6;
-}
-
-static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
-{
-}
-
 static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
 {
        get_debugreg(vcpu->arch.db[0], 0);
@@ -6577,11 +6563,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
        kvm_load_guest_xsave_state(vcpu);
 
-       if (static_cpu_has(X86_FEATURE_PKU) &&
-           kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
-           vcpu->arch.pkru != vmx->host_pkru)
-               __write_pkru(vcpu->arch.pkru);
-
        pt_guest_enter(vmx);
 
        if (vcpu_to_pmu(vcpu)->version)
@@ -6671,18 +6652,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
        pt_guest_exit(vmx);
 
-       /*
-        * eager fpu is enabled if PKEY is supported and CR4 is switched
-        * back on host, so it is safe to read guest PKRU from current
-        * XSAVE.
-        */
-       if (static_cpu_has(X86_FEATURE_PKU) &&
-           kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
-               vcpu->arch.pkru = rdpkru();
-               if (vcpu->arch.pkru != vmx->host_pkru)
-                       __write_pkru(vmx->host_pkru);
-       }
-
        kvm_load_host_xsave_state(vcpu);
 
        vmx->nested.nested_run_pending = 0;
@@ -7740,8 +7709,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
        .set_idt = vmx_set_idt,
        .get_gdt = vmx_get_gdt,
        .set_gdt = vmx_set_gdt,
-       .get_dr6 = vmx_get_dr6,
-       .set_dr6 = vmx_set_dr6,
        .set_dr7 = vmx_set_dr7,
        .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
        .cache_reg = vmx_cache_reg,
index c5835f9cb9ad327aba58c1cab2a47934f5807d53..c17e6eb9ad43d84fece0aeb875bfd903e69a252c 100644 (file)
@@ -572,11 +572,12 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 }
 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
 
-static void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
-                                 unsigned long payload)
+void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
+                          unsigned long payload)
 {
        kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false);
 }
+EXPORT_SYMBOL_GPL(kvm_queue_exception_p);
 
 static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
                                    u32 error_code, unsigned long payload)
@@ -836,11 +837,25 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
                    vcpu->arch.ia32_xss != host_xss)
                        wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
        }
+
+       if (static_cpu_has(X86_FEATURE_PKU) &&
+           (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) ||
+            (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU)) &&
+           vcpu->arch.pkru != vcpu->arch.host_pkru)
+               __write_pkru(vcpu->arch.pkru);
 }
 EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);
 
 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
 {
+       if (static_cpu_has(X86_FEATURE_PKU) &&
+           (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) ||
+            (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU))) {
+               vcpu->arch.pkru = rdpkru();
+               if (vcpu->arch.pkru != vcpu->arch.host_pkru)
+                       __write_pkru(vcpu->arch.host_pkru);
+       }
+
        if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {
 
                if (vcpu->arch.xcr0 != host_xcr0)
@@ -926,19 +941,6 @@ EXPORT_SYMBOL_GPL(kvm_set_xcr);
        __reserved_bits;                                \
 })
 
-static u64 kvm_host_cr4_reserved_bits(struct cpuinfo_x86 *c)
-{
-       u64 reserved_bits = __cr4_reserved_bits(cpu_has, c);
-
-       if (kvm_cpu_cap_has(X86_FEATURE_LA57))
-               reserved_bits &= ~X86_CR4_LA57;
-
-       if (kvm_cpu_cap_has(X86_FEATURE_UMIP))
-               reserved_bits &= ~X86_CR4_UMIP;
-
-       return reserved_bits;
-}
-
 static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
        if (cr4 & cr4_reserved_bits)
@@ -1058,12 +1060,6 @@ static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
        }
 }
 
-static void kvm_update_dr6(struct kvm_vcpu *vcpu)
-{
-       if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
-               kvm_x86_ops.set_dr6(vcpu, vcpu->arch.dr6);
-}
-
 static void kvm_update_dr7(struct kvm_vcpu *vcpu)
 {
        unsigned long dr7;
@@ -1103,7 +1099,6 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
                if (val & 0xffffffff00000000ULL)
                        return -1; /* #GP */
                vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
-               kvm_update_dr6(vcpu);
                break;
        case 5:
                /* fall through */
@@ -1139,10 +1134,7 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
        case 4:
                /* fall through */
        case 6:
-               if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
-                       *val = vcpu->arch.dr6;
-               else
-                       *val = kvm_x86_ops.get_dr6(vcpu);
+               *val = vcpu->arch.dr6;
                break;
        case 5:
                /* fall through */
@@ -3385,6 +3377,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_GET_MSR_FEATURES:
        case KVM_CAP_MSR_PLATFORM_INFO:
        case KVM_CAP_EXCEPTION_PAYLOAD:
+       case KVM_CAP_SET_GUEST_DEBUG:
                r = 1;
                break;
        case KVM_CAP_SYNC_REGS:
@@ -3570,6 +3563,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
        kvm_x86_ops.vcpu_load(vcpu, cpu);
 
+       /* Save host pkru register if supported */
+       vcpu->arch.host_pkru = read_pkru();
+
        /* Apply any externally detected TSC adjustments (due to suspend) */
        if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
                adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
@@ -3763,7 +3759,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
        unsigned bank_num = mcg_cap & 0xff, bank;
 
        r = -EINVAL;
-       if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
+       if (!bank_num || bank_num > KVM_MAX_MCE_BANKS)
                goto out;
        if (mcg_cap & ~(kvm_mce_cap_supported | 0xff | 0xff0000))
                goto out;
@@ -4021,7 +4017,6 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
        memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
        kvm_update_dr0123(vcpu);
        vcpu->arch.dr6 = dbgregs->dr6;
-       kvm_update_dr6(vcpu);
        vcpu->arch.dr7 = dbgregs->dr7;
        kvm_update_dr7(vcpu);
 
@@ -6671,7 +6666,7 @@ static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu)
 
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
                kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM;
-               kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
+               kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu);
                kvm_run->debug.arch.exception = DB_VECTOR;
                kvm_run->exit_reason = KVM_EXIT_DEBUG;
                return 0;
@@ -6731,9 +6726,7 @@ static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
                                           vcpu->arch.db);
 
                if (dr6 != 0) {
-                       vcpu->arch.dr6 &= ~DR_TRAP_BITS;
-                       vcpu->arch.dr6 |= dr6 | DR6_RTM;
-                       kvm_queue_exception(vcpu, DB_VECTOR);
+                       kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
                        *r = 1;
                        return true;
                }
@@ -8054,7 +8047,7 @@ void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
        zalloc_cpumask_var(&cpus, GFP_ATOMIC);
 
        kvm_make_vcpus_request_mask(kvm, KVM_REQ_SCAN_IOAPIC,
-                                   vcpu_bitmap, cpus);
+                                   NULL, vcpu_bitmap, cpus);
 
        free_cpumask_var(cpus);
 }
@@ -8084,6 +8077,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv);
  */
 void kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit)
 {
+       struct kvm_vcpu *except;
        unsigned long old, new, expected;
 
        if (!kvm_x86_ops.check_apicv_inhibit_reasons ||
@@ -8108,7 +8102,17 @@ void kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit)
        trace_kvm_apicv_update_request(activate, bit);
        if (kvm_x86_ops.pre_update_apicv_exec_ctrl)
                kvm_x86_ops.pre_update_apicv_exec_ctrl(kvm, activate);
-       kvm_make_all_cpus_request(kvm, KVM_REQ_APICV_UPDATE);
+
+       /*
+        * Sending request to update APICV for all other vcpus,
+        * while update the calling vcpu immediately instead of
+        * waiting for another #VMEXIT to handle the request.
+        */
+       except = kvm_get_running_vcpu();
+       kvm_make_all_cpus_request_except(kvm, KVM_REQ_APICV_UPDATE,
+                                        except);
+       if (except)
+               kvm_vcpu_update_apicv(except);
 }
 EXPORT_SYMBOL_GPL(kvm_request_apicv_update);
 
@@ -8432,7 +8436,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
                kvm_x86_ops.sync_dirty_debug_regs(vcpu);
                kvm_update_dr0123(vcpu);
-               kvm_update_dr6(vcpu);
                kvm_update_dr7(vcpu);
                vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
        }
@@ -9493,7 +9496,6 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
        memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
        kvm_update_dr0123(vcpu);
        vcpu->arch.dr6 = DR6_INIT;
-       kvm_update_dr6(vcpu);
        vcpu->arch.dr7 = DR7_FIXED_1;
        kvm_update_dr7(vcpu);
 
@@ -9675,7 +9677,9 @@ int kvm_arch_hardware_setup(void *opaque)
        if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
                supported_xss = 0;
 
-       cr4_reserved_bits = kvm_host_cr4_reserved_bits(&boot_cpu_data);
+#define __kvm_cpu_cap_has(UNUSED_, f) kvm_cpu_cap_has(f)
+       cr4_reserved_bits = __cr4_reserved_bits(__kvm_cpu_cap_has, UNUSED_);
+#undef __kvm_cpu_cap_has
 
        if (kvm_has_tsc_control) {
                /*
@@ -9707,7 +9711,8 @@ int kvm_arch_check_processor_compat(void *opaque)
 
        WARN_ON(!irqs_disabled());
 
-       if (kvm_host_cr4_reserved_bits(c) != cr4_reserved_bits)
+       if (__cr4_reserved_bits(cpu_has, c) !=
+           __cr4_reserved_bits(cpu_has, &boot_cpu_data))
                return -EIO;
 
        return ops->check_processor_compatibility();
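
The cr4_reserved_bits hunk reuses a single generic macro with two
differently-shaped capability predicates by defining a throwaway adapter
macro that discards the unused cpuinfo argument. A sketch of the trick with
an invented feature list (uses GCC statement expressions, as the kernel
does):

#include <stdio.h>

#define FEATURE_A 0
#define FEATURE_B 1

#define RESERVED_BITS(has, ctx)                 \
({                                              \
        unsigned bits = 0;                      \
        if (!has(ctx, FEATURE_A)) bits |= 1u;   \
        if (!has(ctx, FEATURE_B)) bits |= 2u;   \
        bits;                                   \
})

struct cpuinfo { unsigned caps; };
static int cpu_has(const struct cpuinfo *c, int f) { return (c->caps >> f) & 1; }

static unsigned global_caps = 1u << FEATURE_A;    /* no per-cpu context */
static int cap_has(int f) { return (global_caps >> f) & 1; }

int main(void)
{
        struct cpuinfo c = { .caps = 3 };

        printf("per-cpu reserved: %#x\n", RESERVED_BITS(cpu_has, &c));

#define adapted_cap_has(UNUSED_, f) cap_has(f)    /* drop the ctx argument */
        printf("global reserved:  %#x\n",
               RESERVED_BITS(adapted_cap_has, UNUSED_));
#undef adapted_cap_has
        return 0;
}

The UNUSED_ token is never evaluated; it only exists to satisfy the macro's
two-argument shape, exactly as in the kvm_arch_hardware_setup() hunk.
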
index 3b289c2f75cdf9148cb7a63975b5fbd0db61a5e3..8b5f73f5e207c3e1a0d18a5cbf9ba0049088a309 100644 (file)
@@ -54,6 +54,7 @@
 #include <asm/init.h>
 #include <asm/uv/uv.h>
 #include <asm/setup.h>
+#include <asm/ftrace.h>
 
 #include "mm_internal.h"
 
@@ -1291,6 +1292,8 @@ void mark_rodata_ro(void)
        all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
        set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);
 
+       set_ftrace_ops_ro();
+
 #ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
        set_memory_rw(start, (end-start) >> PAGE_SHIFT);
index 109325d77b3e8f0cef8c77d6466965a70cc896fa..43fd19b3f1185caf9ab7d3a8eafc0216a7afc5b7 100644 (file)
@@ -372,7 +372,7 @@ static void enter_uniprocessor(void)
        int cpu;
        int err;
 
-       if (downed_cpus == NULL &&
+       if (!cpumask_available(downed_cpus) &&
            !alloc_cpumask_var(&downed_cpus, GFP_KERNEL)) {
                pr_notice("Failed to allocate mask\n");
                goto out;
@@ -402,7 +402,7 @@ static void leave_uniprocessor(void)
        int cpu;
        int err;
 
-       if (downed_cpus == NULL || cpumask_weight(downed_cpus) == 0)
+       if (!cpumask_available(downed_cpus) || cpumask_weight(downed_cpus) == 0)
                return;
        pr_notice("Re-enabling CPUs...\n");
        for_each_cpu(cpu, downed_cpus) {
index 59eca6a94ce7961ace4cd6588a4f7cc5df48294b..b8c55a2e402d3d5a6e53da82cec81a78f68fc675 100644 (file)
@@ -43,7 +43,8 @@ struct cpa_data {
        unsigned long   pfn;
        unsigned int    flags;
        unsigned int    force_split             : 1,
-                       force_static_prot       : 1;
+                       force_static_prot       : 1,
+                       force_flush_all         : 1;
        struct page     **pages;
 };
 
@@ -355,10 +356,10 @@ static void cpa_flush(struct cpa_data *data, int cache)
                return;
        }
 
-       if (cpa->numpages <= tlb_single_page_flush_ceiling)
-               on_each_cpu(__cpa_flush_tlb, cpa, 1);
-       else
+       if (cpa->force_flush_all || cpa->numpages > tlb_single_page_flush_ceiling)
                flush_tlb_all();
+       else
+               on_each_cpu(__cpa_flush_tlb, cpa, 1);
 
        if (!cache)
                return;
@@ -1598,6 +1599,8 @@ static int cpa_process_alias(struct cpa_data *cpa)
                alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
                alias_cpa.curpage = 0;
 
+               cpa->force_flush_all = 1;
+
                ret = __change_page_attr_set_clr(&alias_cpa, 0);
                if (ret)
                        return ret;
@@ -1618,6 +1621,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
                alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
                alias_cpa.curpage = 0;
 
+               cpa->force_flush_all = 1;
                /*
                 * The high mapping range is imprecise, so ignore the
                 * return value.
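
The force_flush_all bit layers a per-operation override onto the existing
"few pages, targeted flush; many pages, global flush" heuristic: once an
alias range is processed, the targeted path no longer covers everything
that changed, so the whole TLB must go. A toy model of the decision, with
an illustrative ceiling value:

#include <stdbool.h>
#include <stdio.h>

#define FLUSH_CEILING 33   /* stands in for tlb_single_page_flush_ceiling */

struct cpa { unsigned numpages; bool force_flush_all; };

static void cpa_flush(const struct cpa *c)
{
        if (c->force_flush_all || c->numpages > FLUSH_CEILING)
                puts("flush_tlb_all()");
        else
                puts("per-page TLB flush");
}

int main(void)
{
        struct cpa small = { 4, false };
        struct cpa alias = { 4, true };

        cpa_flush(&small);  /* targeted flush suffices       */
        cpa_flush(&alias);  /* aliases changed: flush it all */
        return 0;
}
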
index 8fb8a50a28b4cae96f750381257a9369527ee43a..f2adb63b2d7cc4d7263531b27cd3e026e1f2d99b 100644 (file)
@@ -93,6 +93,7 @@ asmlinkage __visible void cpu_bringup_and_idle(void)
        cpu_bringup();
        boot_init_stack_canary();
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+       prevent_tail_call_optimization();
 }
 
 void xen_smp_intr_free_pv(unsigned int cpu)
index 78ba57efd16b5fe3dca4af5108c7e7cce46b2def..3d411716d7ee4d2a4c57df277adb95ce32d2ab8d 100644 (file)
 #include <linux/ioprio.h>
 #include <linux/sbitmap.h>
 #include <linux/delay.h>
+#include <linux/backing-dev.h>
 
 #include "blk.h"
 #include "blk-mq.h"
@@ -4976,8 +4977,9 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
        ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
        switch (ioprio_class) {
        default:
-               dev_err(bfqq->bfqd->queue->backing_dev_info->dev,
-                       "bfq: bad prio class %d\n", ioprio_class);
+               pr_err("bdi %s: bfq: bad prio class %d\n",
+                               bdi_dev_name(bfqq->bfqd->queue->backing_dev_info),
+                               ioprio_class);
                /* fall through */
        case IOPRIO_CLASS_NONE:
                /*
index c5dc833212e1d1a58fdf55f4686c421e3e34a55d..930212c1a5129c18b0d685827a48ebeb799774fc 100644 (file)
@@ -496,7 +496,7 @@ const char *blkg_dev_name(struct blkcg_gq *blkg)
 {
        /* some drivers (floppy) instantiate a queue w/o disk registered */
        if (blkg->q->backing_dev_info->dev)
-               return dev_name(blkg->q->backing_dev_info->dev);
+               return bdi_dev_name(blkg->q->backing_dev_info);
        return NULL;
 }
 
index 7e4a1da0715ea85c4f1340522b97e5ec1f4d9d04..9bfaee050c825f2ccb16981f693b4cc16f7557a9 100644 (file)
@@ -891,14 +891,11 @@ generic_make_request_checks(struct bio *bio)
        }
 
        /*
-        * Non-mq queues do not honor REQ_NOWAIT, so complete a bio
-        * with BLK_STS_AGAIN status in order to catch -EAGAIN and
-        * to give a chance to the caller to repeat request gracefully.
+        * For a REQ_NOWAIT based request, return -EOPNOTSUPP
+        * if the queue is not a request based queue.
         */
-       if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q)) {
-               status = BLK_STS_AGAIN;
-               goto end_io;
-       }
+       if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q))
+               goto not_supported;
 
        if (should_fail_bio(bio))
                goto end_io;
index 3ab0c1c704b68f0a9d5574d2a76ea252f3cc950d..7c1fe605d0d6cb2a3de837977653c187b626c040 100644 (file)
@@ -466,7 +466,7 @@ struct ioc_gq {
         */
        atomic64_t                      vtime;
        atomic64_t                      done_vtime;
-       atomic64_t                      abs_vdebt;
+       u64                             abs_vdebt;
        u64                             last_vtime;
 
        /*
@@ -1142,7 +1142,7 @@ static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now)
        struct iocg_wake_ctx ctx = { .iocg = iocg };
        u64 margin_ns = (u64)(ioc->period_us *
                              WAITQ_TIMER_MARGIN_PCT / 100) * NSEC_PER_USEC;
-       u64 abs_vdebt, vdebt, vshortage, expires, oexpires;
+       u64 vdebt, vshortage, expires, oexpires;
        s64 vbudget;
        u32 hw_inuse;
 
@@ -1152,18 +1152,15 @@ static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now)
        vbudget = now->vnow - atomic64_read(&iocg->vtime);
 
        /* pay off debt */
-       abs_vdebt = atomic64_read(&iocg->abs_vdebt);
-       vdebt = abs_cost_to_cost(abs_vdebt, hw_inuse);
+       vdebt = abs_cost_to_cost(iocg->abs_vdebt, hw_inuse);
        if (vdebt && vbudget > 0) {
                u64 delta = min_t(u64, vbudget, vdebt);
                u64 abs_delta = min(cost_to_abs_cost(delta, hw_inuse),
-                                   abs_vdebt);
+                                   iocg->abs_vdebt);
 
                atomic64_add(delta, &iocg->vtime);
                atomic64_add(delta, &iocg->done_vtime);
-               atomic64_sub(abs_delta, &iocg->abs_vdebt);
-               if (WARN_ON_ONCE(atomic64_read(&iocg->abs_vdebt) < 0))
-                       atomic64_set(&iocg->abs_vdebt, 0);
+               iocg->abs_vdebt -= abs_delta;
        }
 
        /*
@@ -1219,12 +1216,18 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
        u64 expires, oexpires;
        u32 hw_inuse;
 
+       lockdep_assert_held(&iocg->waitq.lock);
+
        /* debt-adjust vtime */
        current_hweight(iocg, NULL, &hw_inuse);
-       vtime += abs_cost_to_cost(atomic64_read(&iocg->abs_vdebt), hw_inuse);
+       vtime += abs_cost_to_cost(iocg->abs_vdebt, hw_inuse);
 
-       /* clear or maintain depending on the overage */
-       if (time_before_eq64(vtime, now->vnow)) {
+       /*
+        * Clear or maintain depending on the overage. Non-zero vdebt is what
+        * guarantees that @iocg is online and future iocg_kick_delay() will
+        * clear use_delay. Don't leave it on when there's no vdebt.
+        */
+       if (!iocg->abs_vdebt || time_before_eq64(vtime, now->vnow)) {
                blkcg_clear_delay(blkg);
                return false;
        }
@@ -1258,9 +1261,12 @@ static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer)
 {
        struct ioc_gq *iocg = container_of(timer, struct ioc_gq, delay_timer);
        struct ioc_now now;
+       unsigned long flags;
 
+       spin_lock_irqsave(&iocg->waitq.lock, flags);
        ioc_now(iocg->ioc, &now);
        iocg_kick_delay(iocg, &now, 0);
+       spin_unlock_irqrestore(&iocg->waitq.lock, flags);
 
        return HRTIMER_NORESTART;
 }
@@ -1368,14 +1374,13 @@ static void ioc_timer_fn(struct timer_list *timer)
         * should have woken up in the last period and expire idle iocgs.
         */
        list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
-               if (!waitqueue_active(&iocg->waitq) &&
-                   !atomic64_read(&iocg->abs_vdebt) && !iocg_is_idle(iocg))
+               if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
+                   !iocg_is_idle(iocg))
                        continue;
 
                spin_lock(&iocg->waitq.lock);
 
-               if (waitqueue_active(&iocg->waitq) ||
-                   atomic64_read(&iocg->abs_vdebt)) {
+               if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt) {
                        /* might be oversleeping vtime / hweight changes, kick */
                        iocg_kick_waitq(iocg, &now);
                        iocg_kick_delay(iocg, &now, 0);
@@ -1718,28 +1723,49 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
         * tests are racy but the races aren't systemic - we only miss once
         * in a while which is fine.
         */
-       if (!waitqueue_active(&iocg->waitq) &&
-           !atomic64_read(&iocg->abs_vdebt) &&
+       if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
            time_before_eq64(vtime + cost, now.vnow)) {
                iocg_commit_bio(iocg, bio, cost);
                return;
        }
 
        /*
-        * We're over budget.  If @bio has to be issued regardless,
-        * remember the abs_cost instead of advancing vtime.
-        * iocg_kick_waitq() will pay off the debt before waking more IOs.
+        * We activated above but w/o any synchronization. Deactivation is
+        * synchronized with waitq.lock and we won't get deactivated as long
+        * as we're waiting or have debt, so we're good if we're activated
+        * here. In the unlikely case that we aren't, just issue the IO.
+        */
+       spin_lock_irq(&iocg->waitq.lock);
+
+       if (unlikely(list_empty(&iocg->active_list))) {
+               spin_unlock_irq(&iocg->waitq.lock);
+               iocg_commit_bio(iocg, bio, cost);
+               return;
+       }
+
+       /*
+        * We're over budget. If @bio has to be issued regardless, remember
+        * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
+        * off the debt before waking more IOs.
+        *
         * This way, the debt is continuously paid off each period with the
-        * actual budget available to the cgroup.  If we just wound vtime,
-        * we would incorrectly use the current hw_inuse for the entire
-        * amount which, for example, can lead to the cgroup staying
-        * blocked for a long time even with substantially raised hw_inuse.
+        * actual budget available to the cgroup. If we just wound vtime, we
+        * would incorrectly use the current hw_inuse for the entire amount
+        * which, for example, can lead to the cgroup staying blocked for a
+        * long time even with substantially raised hw_inuse.
+        *
+        * An iocg with vdebt should stay online so that the timer can keep
+        * deducting its vdebt and [de]activate use_delay mechanism
+        * accordingly. We don't want to race against the timer trying to
+        * clear them and leave @iocg inactive w/ dangling use_delay heavily
+        * penalizing the cgroup and its descendants.
         */
        if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) {
-               atomic64_add(abs_cost, &iocg->abs_vdebt);
+               iocg->abs_vdebt += abs_cost;
                if (iocg_kick_delay(iocg, &now, cost))
                        blkcg_schedule_throttle(rqos->q,
                                        (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
+               spin_unlock_irq(&iocg->waitq.lock);
                return;
        }
 
@@ -1756,20 +1782,6 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
         * All waiters are on iocg->waitq and the wait states are
         * synchronized using waitq.lock.
         */
-       spin_lock_irq(&iocg->waitq.lock);
-
-       /*
-        * We activated above but w/o any synchronization.  Deactivation is
-        * synchronized with waitq.lock and we won't get deactivated as
-        * long as we're waiting, so we're good if we're activated here.
-        * In the unlikely case that we are deactivated, just issue the IO.
-        */
-       if (unlikely(list_empty(&iocg->active_list))) {
-               spin_unlock_irq(&iocg->waitq.lock);
-               iocg_commit_bio(iocg, bio, cost);
-               return;
-       }
-
        init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
        wait.wait.private = current;
        wait.bio = bio;
@@ -1801,6 +1813,7 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
        struct ioc_now now;
        u32 hw_inuse;
        u64 abs_cost, cost;
+       unsigned long flags;
 
        /* bypass if disabled or for root cgroup */
        if (!ioc->enabled || !iocg->level)
@@ -1820,15 +1833,28 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
                iocg->cursor = bio_end;
 
        /*
-        * Charge if there's enough vtime budget and the existing request
-        * has cost assigned.  Otherwise, account it as debt.  See debt
-        * handling in ioc_rqos_throttle() for details.
+        * Charge if there's enough vtime budget and the existing request has
+        * cost assigned.
         */
        if (rq->bio && rq->bio->bi_iocost_cost &&
-           time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow))
+           time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
                iocg_commit_bio(iocg, bio, cost);
-       else
-               atomic64_add(abs_cost, &iocg->abs_vdebt);
+               return;
+       }
+
+       /*
+        * Otherwise, account it as debt if @iocg is online, which it should
+        * be for the vast majority of cases. See debt handling in
+        * ioc_rqos_throttle() for details.
+        */
+       spin_lock_irqsave(&iocg->waitq.lock, flags);
+       if (likely(!list_empty(&iocg->active_list))) {
+               iocg->abs_vdebt += abs_cost;
+               iocg_kick_delay(iocg, &now, cost);
+       } else {
+               iocg_commit_bio(iocg, bio, cost);
+       }
+       spin_unlock_irqrestore(&iocg->waitq.lock, flags);
 }
 
 static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
@@ -1998,7 +2024,6 @@ static void ioc_pd_init(struct blkg_policy_data *pd)
        iocg->ioc = ioc;
        atomic64_set(&iocg->vtime, now.vnow);
        atomic64_set(&iocg->done_vtime, now.vnow);
-       atomic64_set(&iocg->abs_vdebt, 0);
        atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
        INIT_LIST_HEAD(&iocg->active_list);
        iocg->hweight_active = HWEIGHT_WHOLE;
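
The iocost hunks demote abs_vdebt from an atomic64_t to a plain u64 guarded
by iocg->waitq.lock, so debt bookkeeping and waitqueue state change under
one lock instead of racing through mixed atomics. A userspace sketch of the
same pattern, with a pthread mutex standing in for the waitqueue lock (toy
structure; build with -pthread):

#include <pthread.h>
#include <stdio.h>

struct iocg_model {
        pthread_mutex_t    waitq_lock;  /* pre-existing lock       */
        unsigned long long abs_vdebt;   /* plain field, no atomics */
};

static void add_debt(struct iocg_model *g, unsigned long long cost)
{
        pthread_mutex_lock(&g->waitq_lock);
        g->abs_vdebt += cost;           /* safe: lock held         */
        pthread_mutex_unlock(&g->waitq_lock);
}

static void pay_debt(struct iocg_model *g, unsigned long long budget)
{
        pthread_mutex_lock(&g->waitq_lock);
        if (budget > g->abs_vdebt)
                budget = g->abs_vdebt;  /* never underflow         */
        g->abs_vdebt -= budget;
        pthread_mutex_unlock(&g->waitq_lock);
}

int main(void)
{
        struct iocg_model g = { PTHREAD_MUTEX_INITIALIZER, 0 };

        add_debt(&g, 100);
        pay_debt(&g, 140);
        printf("abs_vdebt = %llu\n", g.abs_vdebt);
        return 0;
}

With the lock held, the old WARN_ON_ONCE underflow clamp in iocg_kick_waitq()
collapses into a simple min(), as the hunk above shows.
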
index bc1ded1331b148b5317a662f6f3a97a86ec3f4d1..9ef48a8cff867268830e0b06ac2c8d91f8a521fc 100644 (file)
@@ -496,7 +496,7 @@ int blk_drop_partitions(struct gendisk *disk, struct block_device *bdev)
 
        if (!disk_part_scan_enabled(disk))
                return 0;
-       if (bdev->bd_part_count || bdev->bd_openers > 1)
+       if (bdev->bd_part_count)
                return -EBUSY;
        res = invalidate_partition(disk, 0);
        if (res)
index 376d7ed3f1f8715d0da884425fabcf41b74149a9..3c734b81b3a20108a4b7292bf17d2e6c179fcfb1 100644 (file)
@@ -287,7 +287,7 @@ static void exit_tfm(struct crypto_skcipher *tfm)
        crypto_free_skcipher(ctx->child);
 }
 
-static void free(struct skcipher_instance *inst)
+static void free_inst(struct skcipher_instance *inst)
 {
        crypto_drop_skcipher(skcipher_instance_ctx(inst));
        kfree(inst);
@@ -400,12 +400,12 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
        inst->alg.encrypt = encrypt;
        inst->alg.decrypt = decrypt;
 
-       inst->free = free;
+       inst->free = free_inst;
 
        err = skcipher_register_instance(tmpl, inst);
        if (err) {
 err_free_inst:
-               free(inst);
+               free_inst(inst);
        }
        return err;
 }
index dbdd8af629e6963fd4b8de6d9b15d26fa5232ee9..6d8cea94b3cfbbc276014c3b831c640a99e0fd6f 100644 (file)
@@ -322,7 +322,7 @@ static void exit_tfm(struct crypto_skcipher *tfm)
        crypto_free_cipher(ctx->tweak);
 }
 
-static void free(struct skcipher_instance *inst)
+static void free_inst(struct skcipher_instance *inst)
 {
        crypto_drop_skcipher(skcipher_instance_ctx(inst));
        kfree(inst);
@@ -434,12 +434,12 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
        inst->alg.encrypt = encrypt;
        inst->alg.decrypt = decrypt;
 
-       inst->free = free;
+       inst->free = free_inst;
 
        err = skcipher_register_instance(tmpl, inst);
        if (err) {
 err_free_inst:
-               free(inst);
+               free_inst(inst);
        }
        return err;
 }
index b2263ec67b432e14cca0df18434fed68cab0373e..5832bc10aca8d5ff93354df2bcf59d00fd67640d 100644 (file)
@@ -273,13 +273,13 @@ int acpi_device_set_power(struct acpi_device *device, int state)
  end:
        if (result) {
                dev_warn(&device->dev, "Failed to change power state to %s\n",
-                        acpi_power_state_string(state));
+                        acpi_power_state_string(target_state));
        } else {
                device->power.state = target_state;
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Device [%s] transitioned to %s\n",
                                  device->pnp.bus_id,
-                                 acpi_power_state_string(state)));
+                                 acpi_power_state_string(target_state)));
        }
 
        return result;
index b4c0152e92aa124ac0d92a778cf49a448f4a03cd..1af2125e17d53578602808a67321e8a89b794e61 100644 (file)
@@ -1994,23 +1994,35 @@ void acpi_ec_set_gpe_wake_mask(u8 action)
                acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action);
 }
 
-bool acpi_ec_other_gpes_active(void)
-{
-       return acpi_any_gpe_status_set(first_ec ? first_ec->gpe : U32_MAX);
-}
-
 bool acpi_ec_dispatch_gpe(void)
 {
        u32 ret;
 
        if (!first_ec)
+               return acpi_any_gpe_status_set(U32_MAX);
+
+       /*
+        * Report wakeup if the status bit is set for any enabled GPE other
+        * than the EC one.
+        */
+       if (acpi_any_gpe_status_set(first_ec->gpe))
+               return true;
+
+       if (ec_no_wakeup)
                return false;
 
+       /*
+        * Dispatch the EC GPE in-band, but do not report wakeup in any case
+        * to allow the caller to process events properly after that.
+        */
        ret = acpi_dispatch_gpe(NULL, first_ec->gpe);
        if (ret == ACPI_INTERRUPT_HANDLED) {
                pm_pr_dbg("EC GPE dispatched\n");
-               return true;
+
+               /* Flush the event and query workqueues. */
+               acpi_ec_flush_work();
        }
+
        return false;
 }
 #endif /* CONFIG_PM_SLEEP */
index e387517d3354605b7f248e55ebf04d3356379959..43411a7457cd7f4520eb80bd05aa73d724db616d 100644 (file)
@@ -202,7 +202,6 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
 
 #ifdef CONFIG_PM_SLEEP
 void acpi_ec_flush_work(void);
-bool acpi_ec_other_gpes_active(void);
 bool acpi_ec_dispatch_gpe(void);
 #endif
 
index 4edc8a3ce40fdc50039f0460953b3e422d193efe..fd9d4e8318e9469c807538272abd6f854c927b04 100644 (file)
@@ -980,13 +980,6 @@ static int acpi_s2idle_prepare_late(void)
        return 0;
 }
 
-static void acpi_s2idle_sync(void)
-{
-       /* The EC driver uses special workqueues that need to be flushed. */
-       acpi_ec_flush_work();
-       acpi_os_wait_events_complete(); /* synchronize Notify handling */
-}
-
 static bool acpi_s2idle_wake(void)
 {
        if (!acpi_sci_irq_valid())
@@ -1013,22 +1006,12 @@ static bool acpi_s2idle_wake(void)
                if (acpi_check_wakeup_handlers())
                        return true;
 
-               /*
-                * If the status bit is set for any enabled GPE other than the
-                * EC one, the wakeup is regarded as a genuine one.
-                */
-               if (acpi_ec_other_gpes_active())
+               /* Check non-EC GPE wakeups and dispatch the EC GPE. */
+               if (acpi_ec_dispatch_gpe())
                        return true;
 
                /*
-                * If the EC GPE status bit has not been set, the wakeup is
-                * regarded as a spurious one.
-                */
-               if (!acpi_ec_dispatch_gpe())
-                       return false;
-
-               /*
-                * Cancel the wakeup and process all pending events in case
+                * Cancel the SCI wakeup and process all pending events in case
                 * there are any wakeup ones in there.
                 *
                 * Note that if any non-EC GPEs are active at this point, the
@@ -1036,8 +1019,7 @@ static bool acpi_s2idle_wake(void)
                 * should be missed by canceling the wakeup here.
                 */
                pm_system_cancel_wakeup();
-
-               acpi_s2idle_sync();
+               acpi_os_wait_events_complete();
 
                /*
                 * The SCI is in the "suspended" state now and it cannot produce
@@ -1070,7 +1052,8 @@ static void acpi_s2idle_restore(void)
         * of GPEs.
         */
        acpi_os_wait_events_complete(); /* synchronize GPE processing */
-       acpi_s2idle_sync();
+       acpi_ec_flush_work(); /* flush the EC driver's workqueues */
+       acpi_os_wait_events_complete(); /* synchronize Notify handling */
 
        s2idle_wakeup = false;
 
index fe1523664816a49712c88b4afcdc9ee65011b955..8558b629880b17ae4906f6ee22dfa3a10e6d2e29 100644 (file)
@@ -645,6 +645,7 @@ static void amba_device_initialize(struct amba_device *dev, const char *name)
        dev->dev.release = amba_device_release;
        dev->dev.bus = &amba_bustype;
        dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
+       dev->dev.dma_parms = &dev->dma_parms;
        dev->res.name = dev_name(&dev->dev);
 }
 
index e977041047845ddc05a1edcdd754a2247cc84d09..dcfbe7251dc43b86edd347c371c306d70d32e39d 100644 (file)
@@ -256,7 +256,8 @@ static int try_to_bring_up_master(struct master *master,
        ret = master->ops->bind(master->dev);
        if (ret < 0) {
                devres_release_group(master->dev, NULL);
-               dev_info(master->dev, "master bind failed: %d\n", ret);
+               if (ret != -EPROBE_DEFER)
+                       dev_info(master->dev, "master bind failed: %d\n", ret);
                return ret;
        }
 
@@ -611,8 +612,9 @@ static int component_bind(struct component *component, struct master *master,
                devres_release_group(component->dev, NULL);
                devres_release_group(master->dev, NULL);
 
-               dev_err(master->dev, "failed to bind %s (ops %ps): %d\n",
-                       dev_name(component->dev), component->ops, ret);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(master->dev, "failed to bind %s (ops %ps): %d\n",
+                               dev_name(component->dev), component->ops, ret);
        }
 
        return ret;
index 139cdf7e73271a02182cf4f7cdba707ded7cce55..0cad34f1eedea8dcc88c51ab371aa4e1e978ac8a 100644 (file)
@@ -365,6 +365,7 @@ struct device_link *device_link_add(struct device *consumer,
                                link->flags |= DL_FLAG_STATELESS;
                                goto reorder;
                        } else {
+                               link->flags |= DL_FLAG_STATELESS;
                                goto out;
                        }
                }
@@ -433,12 +434,16 @@ struct device_link *device_link_add(struct device *consumer,
            flags & DL_FLAG_PM_RUNTIME)
                pm_runtime_resume(supplier);
 
+       list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
+       list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);
+
        if (flags & DL_FLAG_SYNC_STATE_ONLY) {
                dev_dbg(consumer,
                        "Linked as a sync state only consumer to %s\n",
                        dev_name(supplier));
                goto out;
        }
+
 reorder:
        /*
         * Move the consumer and all of the devices depending on it to the end
@@ -449,12 +454,9 @@ reorder:
         */
        device_reorder_to_tail(consumer, NULL);
 
-       list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
-       list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);
-
        dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier));
 
- out:
+out:
        device_pm_unlock();
        device_links_write_unlock();
 
@@ -829,6 +831,13 @@ static void __device_links_supplier_defer_sync(struct device *sup)
                list_add_tail(&sup->links.defer_sync, &deferred_sync);
 }
 
+static void device_link_drop_managed(struct device_link *link)
+{
+       link->flags &= ~DL_FLAG_MANAGED;
+       WRITE_ONCE(link->status, DL_STATE_NONE);
+       kref_put(&link->kref, __device_link_del);
+}
+
 /**
  * device_links_driver_bound - Update device links after probing its driver.
  * @dev: Device to update the links for.
@@ -842,7 +851,7 @@ static void __device_links_supplier_defer_sync(struct device *sup)
  */
 void device_links_driver_bound(struct device *dev)
 {
-       struct device_link *link;
+       struct device_link *link, *ln;
        LIST_HEAD(sync_list);
 
        /*
@@ -882,18 +891,35 @@ void device_links_driver_bound(struct device *dev)
        else
                __device_links_queue_sync_state(dev, &sync_list);
 
-       list_for_each_entry(link, &dev->links.suppliers, c_node) {
+       list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
+               struct device *supplier;
+
                if (!(link->flags & DL_FLAG_MANAGED))
                        continue;
 
-               WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
-               WRITE_ONCE(link->status, DL_STATE_ACTIVE);
+               supplier = link->supplier;
+               if (link->flags & DL_FLAG_SYNC_STATE_ONLY) {
+                       /*
+                        * When DL_FLAG_SYNC_STATE_ONLY is set, it means no
+                        * other DL_MANAGED_LINK_FLAGS have been set. So, it's
+                        * save to drop the managed link completely.
+                        */
+                       device_link_drop_managed(link);
+               } else {
+                       WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
+                       WRITE_ONCE(link->status, DL_STATE_ACTIVE);
+               }
 
+               /*
+                * This needs to be done even for the deleted
+                * DL_FLAG_SYNC_STATE_ONLY device link in case it was the last
+                * device link that was preventing the supplier from getting a
+                * sync_state() call.
+                */
                if (defer_sync_state_count)
-                       __device_links_supplier_defer_sync(link->supplier);
+                       __device_links_supplier_defer_sync(supplier);
                else
-                       __device_links_queue_sync_state(link->supplier,
-                                                       &sync_list);
+                       __device_links_queue_sync_state(supplier, &sync_list);
        }
 
        dev->links.status = DL_DEV_DRIVER_BOUND;
@@ -903,13 +929,6 @@ void device_links_driver_bound(struct device *dev)
        device_links_flush_sync_list(&sync_list, dev);
 }
 
-static void device_link_drop_managed(struct device_link *link)
-{
-       link->flags &= ~DL_FLAG_MANAGED;
-       WRITE_ONCE(link->status, DL_STATE_NONE);
-       kref_put(&link->kref, __device_link_del);
-}
-
 /**
  * __device_links_no_driver - Update links of a device without a driver.
  * @dev: Device without a drvier.
@@ -2370,6 +2389,11 @@ u32 fw_devlink_get_flags(void)
        return fw_devlink_flags;
 }
 
+static bool fw_devlink_is_permissive(void)
+{
+       return fw_devlink_flags == DL_FLAG_SYNC_STATE_ONLY;
+}
+
 /**
  * device_add - add device to device hierarchy.
  * @dev: device.
@@ -2524,7 +2548,7 @@ int device_add(struct device *dev)
        if (fw_devlink_flags && is_fwnode_dev &&
            fwnode_has_op(dev->fwnode, add_links)) {
                fw_ret = fwnode_call_int_op(dev->fwnode, add_links, dev);
-               if (fw_ret == -ENODEV)
+               if (fw_ret == -ENODEV && !fw_devlink_is_permissive())
                        device_link_wait_for_mandatory_supplier(dev);
                else if (fw_ret)
                        device_link_wait_for_optional_supplier(dev);
index 06ec0e851fa168f734641514475659e07d7d9e3c..94037be7f5d75cc88cd2f1321a8ee66c8f37b210 100644 (file)
@@ -224,17 +224,9 @@ static int deferred_devs_show(struct seq_file *s, void *data)
 }
 DEFINE_SHOW_ATTRIBUTE(deferred_devs);
 
-#ifdef CONFIG_MODULES
-/*
- * In the case of modules, set the default probe timeout to
- * 30 seconds to give userland some time to load needed modules
- */
-int driver_deferred_probe_timeout = 30;
-#else
-/* In the case of !modules, no probe timeout needed */
-int driver_deferred_probe_timeout = -1;
-#endif
+int driver_deferred_probe_timeout;
 EXPORT_SYMBOL_GPL(driver_deferred_probe_timeout);
+static DECLARE_WAIT_QUEUE_HEAD(probe_timeout_waitqueue);
 
 static int __init deferred_probe_timeout_setup(char *str)
 {
@@ -266,8 +258,8 @@ int driver_deferred_probe_check_state(struct device *dev)
                return -ENODEV;
        }
 
-       if (!driver_deferred_probe_timeout) {
-               dev_WARN(dev, "deferred probe timeout, ignoring dependency");
+       if (!driver_deferred_probe_timeout && initcalls_done) {
+               dev_warn(dev, "deferred probe timeout, ignoring dependency");
                return -ETIMEDOUT;
        }
 
@@ -284,6 +276,7 @@ static void deferred_probe_timeout_work_func(struct work_struct *work)
 
        list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe)
                dev_info(private->device, "deferred probe pending");
+       wake_up(&probe_timeout_waitqueue);
 }
 static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
 
@@ -658,6 +651,9 @@ int driver_probe_done(void)
  */
 void wait_for_device_probe(void)
 {
+       /* wait for probe timeout */
+       wait_event(probe_timeout_waitqueue, !driver_deferred_probe_timeout);
+
        /* wait for the deferred probe workqueue to finish */
        flush_work(&deferred_probe_work);
 
index 5255550b7c34f1aca050d76785c09713049b2e1c..b27d0f6c18c9c0eeec0c46ade6db1c338ec0b1a4 100644 (file)
@@ -380,6 +380,8 @@ struct platform_object {
  */
 static void setup_pdev_dma_masks(struct platform_device *pdev)
 {
+       pdev->dev.dma_parms = &pdev->dma_parms;
+
        if (!pdev->dev.coherent_dma_mask)
                pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
        if (!pdev->dev.dma_mask) {
index 8efd8778e2095e4a4ce6606f36effb5882339cd4..ce9e33603a4d9540df90f2c7065332a0a4d112ae 100644 (file)
@@ -1535,6 +1535,13 @@ static void null_config_discard(struct nullb *nullb)
 {
        if (nullb->dev->discard == false)
                return;
+
+       if (nullb->dev->zoned) {
+               nullb->dev->discard = false;
+               pr_info("discard option is ignored in zoned mode\n");
+               return;
+       }
+
        nullb->q->limits.discard_granularity = nullb->dev->blocksize;
        nullb->q->limits.discard_alignment = nullb->dev->blocksize;
        blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
index 9e4bcdad1a80f700562a447cca6c533b828369d0..ed5458f2d367de26264fb754fbfa6a1598eca269 100644 (file)
@@ -23,6 +23,10 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
                pr_err("zone_size must be power-of-two\n");
                return -EINVAL;
        }
+       if (dev->zone_size > dev->size) {
+               pr_err("Zone size larger than device capacity\n");
+               return -EINVAL;
+       }
 
        dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT;
        dev->nr_zones = dev_size >>
index 93468b7c67019b3af6dd16cf639f11bb8dd77337..9d21bf0f155eed65fa150d71e6b92104fd701960 100644 (file)
@@ -33,6 +33,15 @@ struct virtio_blk_vq {
 } ____cacheline_aligned_in_smp;
 
 struct virtio_blk {
+       /*
+        * This mutex must be held by anything that may run after
+        * virtblk_remove() sets vblk->vdev to NULL.
+        *
+        * blk-mq, virtqueue processing, and sysfs attribute code paths are
+        * shut down before vblk->vdev is set to NULL and therefore do not need
+        * to hold this mutex.
+        */
+       struct mutex vdev_mutex;
        struct virtio_device *vdev;
 
        /* The disk structure for the kernel. */
@@ -44,6 +53,13 @@ struct virtio_blk {
        /* Process context for config space updates */
        struct work_struct config_work;
 
+       /*
+        * Tracks references from block_device_operations open/release and
+        * virtio_driver probe/remove so this object can be freed once no
+        * longer in use.
+        */
+       refcount_t refs;
+
        /* What host tells us, plus 2 for header & tailer. */
        unsigned int sg_elems;
 
@@ -295,10 +311,55 @@ out:
        return err;
 }
 
+static void virtblk_get(struct virtio_blk *vblk)
+{
+       refcount_inc(&vblk->refs);
+}
+
+static void virtblk_put(struct virtio_blk *vblk)
+{
+       if (refcount_dec_and_test(&vblk->refs)) {
+               ida_simple_remove(&vd_index_ida, vblk->index);
+               mutex_destroy(&vblk->vdev_mutex);
+               kfree(vblk);
+       }
+}
+
+static int virtblk_open(struct block_device *bd, fmode_t mode)
+{
+       struct virtio_blk *vblk = bd->bd_disk->private_data;
+       int ret = 0;
+
+       mutex_lock(&vblk->vdev_mutex);
+
+       if (vblk->vdev)
+               virtblk_get(vblk);
+       else
+               ret = -ENXIO;
+
+       mutex_unlock(&vblk->vdev_mutex);
+       return ret;
+}
+
+static void virtblk_release(struct gendisk *disk, fmode_t mode)
+{
+       struct virtio_blk *vblk = disk->private_data;
+
+       virtblk_put(vblk);
+}
+
 /* We provide getgeo only to please some old bootloader/partitioning tools */
 static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
 {
        struct virtio_blk *vblk = bd->bd_disk->private_data;
+       int ret = 0;
+
+       mutex_lock(&vblk->vdev_mutex);
+
+       if (!vblk->vdev) {
+               ret = -ENXIO;
+               goto out;
+       }
 
        /* see if the host passed in geometry config */
        if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
@@ -314,11 +375,15 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
                geo->sectors = 1 << 5;
                geo->cylinders = get_capacity(bd->bd_disk) >> 11;
        }
-       return 0;
+out:
+       mutex_unlock(&vblk->vdev_mutex);
+       return ret;
 }
 
 static const struct block_device_operations virtblk_fops = {
        .owner  = THIS_MODULE,
+       .open = virtblk_open,
+       .release = virtblk_release,
        .getgeo = virtblk_getgeo,
 };
 
@@ -655,6 +720,10 @@ static int virtblk_probe(struct virtio_device *vdev)
                goto out_free_index;
        }
 
+       /* This reference is dropped in virtblk_remove(). */
+       refcount_set(&vblk->refs, 1);
+       mutex_init(&vblk->vdev_mutex);
+
        vblk->vdev = vdev;
        vblk->sg_elems = sg_elems;
 
@@ -820,8 +889,6 @@ out:
 static void virtblk_remove(struct virtio_device *vdev)
 {
        struct virtio_blk *vblk = vdev->priv;
-       int index = vblk->index;
-       int refc;
 
        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);
@@ -831,18 +898,21 @@ static void virtblk_remove(struct virtio_device *vdev)
 
        blk_mq_free_tag_set(&vblk->tag_set);
 
+       mutex_lock(&vblk->vdev_mutex);
+
        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);
 
-       refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref);
+       /* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
+       vblk->vdev = NULL;
+
        put_disk(vblk->disk);
        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);
-       kfree(vblk);
 
-       /* Only free device id if we don't have any users */
-       if (refc == 1)
-               ida_simple_remove(&vd_index_ida, index);
+       mutex_unlock(&vblk->vdev_mutex);
+
+       virtblk_put(vblk);
 }
 
 #ifdef CONFIG_PM_SLEEP
index b38359c480eacecdb8c324cc98ea260ad59e6cef..1f8c82603179de38ac1395b4923822cf9d7dcecb 100644 (file)
@@ -291,6 +291,7 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
        }
 
        /* Setup cmd context */
+       ret = -ENOMEM;
        mhi_ctxt->cmd_ctxt = mhi_alloc_coherent(mhi_cntrl,
                                                sizeof(*mhi_ctxt->cmd_ctxt) *
                                                NR_OF_CMD_RINGS,
@@ -812,10 +813,9 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
        if (!mhi_cntrl)
                return -EINVAL;
 
-       if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put)
-               return -EINVAL;
-
-       if (!mhi_cntrl->status_cb || !mhi_cntrl->link_status)
+       if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
+           !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
+           !mhi_cntrl->write_reg)
                return -EINVAL;
 
        ret = parse_config(mhi_cntrl, config);
@@ -1101,6 +1101,7 @@ static int mhi_driver_probe(struct device *dev)
                }
        }
 
+       ret = -EINVAL;
        if (dl_chan) {
                /*
                 * If channel supports LPM notifications then status_cb should
index 5deadfaa053a811212e7d15256159dda3b39d0f5..095d95bc0e3741c77970124ac898e734aedcb64e 100644 (file)
@@ -11,9 +11,6 @@
 
 extern struct bus_type mhi_bus_type;
 
-/* MHI MMIO register mapping */
-#define PCI_INVALID_READ(val) (val == U32_MAX)
-
 #define MHIREGLEN (0x0)
 #define MHIREGLEN_MHIREGLEN_MASK (0xFFFFFFFF)
 #define MHIREGLEN_MHIREGLEN_SHIFT (0)
index eb4256b81406a2e9144926effb5eabcb3f9e6ee9..97e06cc586e4f7cda57d3e22be9f585269d09d17 100644 (file)
 int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
                              void __iomem *base, u32 offset, u32 *out)
 {
-       u32 tmp = readl(base + offset);
-
-       /* If there is any unexpected value, query the link status */
-       if (PCI_INVALID_READ(tmp) &&
-           mhi_cntrl->link_status(mhi_cntrl))
-               return -EIO;
-
-       *out = tmp;
-
-       return 0;
+       return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
 }
 
 int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
@@ -49,7 +40,7 @@ int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
 void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
                   u32 offset, u32 val)
 {
-       writel(val, base + offset);
+       mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
 }
 
 void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
@@ -294,7 +285,7 @@ void mhi_create_devices(struct mhi_controller *mhi_cntrl)
                    !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
                        continue;
                mhi_dev = mhi_alloc_device(mhi_cntrl);
-               if (!mhi_dev)
+               if (IS_ERR(mhi_dev))
                        return;
 
                mhi_dev->dev_type = MHI_DEVICE_XFER;
@@ -336,7 +327,8 @@ void mhi_create_devices(struct mhi_controller *mhi_cntrl)
 
                /* Channel name is same for both UL and DL */
                mhi_dev->chan_name = mhi_chan->name;
-               dev_set_name(&mhi_dev->dev, "%04x_%s", mhi_chan->chan,
+               dev_set_name(&mhi_dev->dev, "%s_%s",
+                            dev_name(mhi_cntrl->cntrl_dev),
                             mhi_dev->chan_name);
 
                /* Init wakeup source if available */
index 52690cb5c89cb5a4602494d04aed8513e4dfb1d4..dc83d65f7784e26657deda2f83ae7a355918f44c 100644 (file)
@@ -902,7 +902,11 @@ int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
                           MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
                           msecs_to_jiffies(mhi_cntrl->timeout_ms));
 
-       return (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -EIO;
+       ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
+       if (ret)
+               mhi_power_down(mhi_cntrl, false);
+
+       return ret;
 }
 EXPORT_SYMBOL(mhi_sync_power_up);
 
index b7145f370d3bb08b1eb230f96ac1db69783e6ee8..2704470e021dc8bc600fdb4de691216dd8f987d3 100644 (file)
@@ -1947,8 +1947,8 @@ static int ssif_adapter_handler(struct device *adev, void *opaque)
        if (adev->type != &i2c_adapter_type)
                return 0;
 
-       addr_info->added_client = i2c_new_device(to_i2c_adapter(adev),
-                                                &addr_info->binfo);
+       addr_info->added_client = i2c_new_client_device(to_i2c_adapter(adev),
+                                                       &addr_info->binfo);
 
        if (!addr_info->adapter_name)
                return 1; /* Only try the first I2C adapter by default. */
index 39c59f063aa07eb5661a80bac9d2f7b38fb8a0bd..2dfb30b963c47c7ed3c35b5cc8e17b6a379252c8 100644 (file)
@@ -3519,6 +3519,9 @@ static int __clk_core_init(struct clk_core *core)
 out:
        clk_pm_runtime_put(core);
 unlock:
+       if (ret)
+               hlist_del_init(&core->child_node);
+
        clk_prepare_unlock();
 
        if (!ret)
index 11ec6f46646782fe6be893757d9e28e63feabdc6..abb121f8de527707c906b5a8b6bec621c887b110 100644 (file)
@@ -377,6 +377,7 @@ config SM_GCC_8150
 
 config SM_GCC_8250
        tristate "SM8250 Global Clock Controller"
+       select QCOM_GDSC
        help
          Support for the global clock controller on SM8250 devices.
          Say Y if you want to use peripheral devices such as UART,
index ef98fdc51755c5cc46849bcf7aaafdfc5d362ce1..732bc7c937e6d80c4cded5cf17b0e8ec16c7b48d 100644 (file)
@@ -76,8 +76,7 @@ static struct clk_alpha_pll_postdiv gpll0_out_even = {
        .clkr.hw.init = &(struct clk_init_data){
                .name = "gpll0_out_even",
                .parent_data = &(const struct clk_parent_data){
-                       .fw_name = "bi_tcxo",
-                       .name = "bi_tcxo",
+                       .hw = &gpll0.clkr.hw,
                },
                .num_parents = 1,
                .ops = &clk_trion_pll_postdiv_ops,
index d17cfb7a3ff4bbee6ac134f9d83a4401b20e25ce..d7243c09cc843a7ebd1e15a5f1bf926a4bf5faca 100644 (file)
@@ -156,8 +156,6 @@ PNAME(mux_i2s_out_p)                = { "i2s1_pre", "xin12m" };
 PNAME(mux_i2s2_p)              = { "i2s2_src", "i2s2_frac", "xin12m" };
 PNAME(mux_sclk_spdif_p)                = { "sclk_spdif_src", "spdif_frac", "xin12m" };
 
-PNAME(mux_aclk_gpu_pre_p)      = { "cpll_gpu", "gpll_gpu", "hdmiphy_gpu", "usb480m_gpu" };
-
 PNAME(mux_uart0_p)             = { "uart0_src", "uart0_frac", "xin24m" };
 PNAME(mux_uart1_p)             = { "uart1_src", "uart1_frac", "xin24m" };
 PNAME(mux_uart2_p)             = { "uart2_src", "uart2_frac", "xin24m" };
@@ -468,16 +466,9 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
                        RK2928_CLKSEL_CON(24), 6, 10, DFLAGS,
                        RK2928_CLKGATE_CON(2), 8, GFLAGS),
 
-       GATE(0, "cpll_gpu", "cpll", 0,
-                       RK2928_CLKGATE_CON(3), 13, GFLAGS),
-       GATE(0, "gpll_gpu", "gpll", 0,
-                       RK2928_CLKGATE_CON(3), 13, GFLAGS),
-       GATE(0, "hdmiphy_gpu", "hdmiphy", 0,
-                       RK2928_CLKGATE_CON(3), 13, GFLAGS),
-       GATE(0, "usb480m_gpu", "usb480m", 0,
+       COMPOSITE(0, "aclk_gpu_pre", mux_pll_src_4plls_p, 0,
+                       RK2928_CLKSEL_CON(34), 5, 2, MFLAGS, 0, 5, DFLAGS,
                        RK2928_CLKGATE_CON(3), 13, GFLAGS),
-       COMPOSITE_NOGATE(0, "aclk_gpu_pre", mux_aclk_gpu_pre_p, 0,
-                       RK2928_CLKSEL_CON(34), 5, 2, MFLAGS, 0, 5, DFLAGS),
 
        COMPOSITE(SCLK_SPI0, "sclk_spi0", mux_pll_src_2plls_p, 0,
                        RK2928_CLKSEL_CON(25), 8, 1, MFLAGS, 0, 7, DFLAGS,
@@ -582,8 +573,8 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
        GATE(0, "pclk_peri_noc", "pclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(12), 2, GFLAGS),
 
        /* PD_GPU */
-       GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(13), 14, GFLAGS),
-       GATE(0, "aclk_gpu_noc", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(13), 15, GFLAGS),
+       GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(7), 14, GFLAGS),
+       GATE(0, "aclk_gpu_noc", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(7), 15, GFLAGS),
 
        /* PD_BUS */
        GATE(0, "sclk_initmem_mbist", "aclk_cpu", 0, RK2928_CLKGATE_CON(8), 1, GFLAGS),
index 64e229ddf2a58e412e222b37f2d6f10a856070d3..e931319dcc9d5dfc98212f2a7e2d1d57cfed7c66 100644 (file)
@@ -1292,7 +1292,7 @@ static struct tegra_clk_init_table common_init_table[] __initdata = {
        { TEGRA124_CLK_UARTB, TEGRA124_CLK_PLL_P, 408000000, 0 },
        { TEGRA124_CLK_UARTC, TEGRA124_CLK_PLL_P, 408000000, 0 },
        { TEGRA124_CLK_UARTD, TEGRA124_CLK_PLL_P, 408000000, 0 },
-       { TEGRA124_CLK_PLL_A, TEGRA124_CLK_CLK_MAX, 564480000, 0 },
+       { TEGRA124_CLK_PLL_A, TEGRA124_CLK_CLK_MAX, 282240000, 0 },
        { TEGRA124_CLK_PLL_A_OUT0, TEGRA124_CLK_CLK_MAX, 11289600, 0 },
        { TEGRA124_CLK_I2S0, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 },
        { TEGRA124_CLK_I2S1, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 },
index e001b9bcb6bf7d06ce4ea12a4e43f6d32fabcd37..7dc30dd6c8d552fa7d42c75b9c492ff489cdd8d5 100644 (file)
@@ -212,7 +212,7 @@ static const struct omap_clkctrl_reg_data am3_mpu_clkctrl_regs[] __initconst = {
 };
 
 static const struct omap_clkctrl_reg_data am3_l4_rtc_clkctrl_regs[] __initconst = {
-       { AM3_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk_32768_ck" },
+       { AM3_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk-24mhz-clkctrl:0000:0" },
        { 0 },
 };
 
index 062266034d84669a40455020c91374c59c492996..864c484bde1b4d61b3d8acb7d7120fd1fb4604b7 100644 (file)
@@ -255,24 +255,53 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
        return entry->clk;
 }
 
+/* Get clkctrl clock base name based on clkctrl_name or dts node */
+static const char * __init clkctrl_get_clock_name(struct device_node *np,
+                                                 const char *clkctrl_name,
+                                                 int offset, int index,
+                                                 bool legacy_naming)
+{
+       char *clock_name;
+
+       /* l4per-clkctrl:1234:0 style naming based on clkctrl_name */
+       if (clkctrl_name && !legacy_naming) {
+               clock_name = kasprintf(GFP_KERNEL, "%s-clkctrl:%04x:%d",
+                                      clkctrl_name, offset, index);
+               strreplace(clock_name, '_', '-');
+
+               return clock_name;
+       }
+
+       /* l4per:1234:0 old style naming based on clkctrl_name */
+       if (clkctrl_name)
+               return kasprintf(GFP_KERNEL, "%s_cm:clk:%04x:%d",
+                                clkctrl_name, offset, index);
+
+       /* l4per_cm:1234:0 old style naming based on parent node name */
+       if (legacy_naming)
+               return kasprintf(GFP_KERNEL, "%pOFn:clk:%04x:%d",
+                                np->parent, offset, index);
+
+       /* l4per-clkctrl:1234:0 style naming based on node name */
+       return kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", np, offset, index);
+}
+
 static int __init
 _ti_clkctrl_clk_register(struct omap_clkctrl_provider *provider,
                         struct device_node *node, struct clk_hw *clk_hw,
                         u16 offset, u8 bit, const char * const *parents,
-                        int num_parents, const struct clk_ops *ops)
+                        int num_parents, const struct clk_ops *ops,
+                        const char *clkctrl_name)
 {
        struct clk_init_data init = { NULL };
        struct clk *clk;
        struct omap_clkctrl_clk *clkctrl_clk;
        int ret = 0;
 
-       if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
-               init.name = kasprintf(GFP_KERNEL, "%pOFn:%pOFn:%04x:%d",
-                                     node->parent, node, offset,
-                                     bit);
-       else
-               init.name = kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", node,
-                                     offset, bit);
+       init.name = clkctrl_get_clock_name(node, clkctrl_name, offset, bit,
+                                          ti_clk_get_features()->flags &
+                                          TI_CLK_CLKCTRL_COMPAT);
+
        clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
        if (!init.name || !clkctrl_clk) {
                ret = -ENOMEM;
@@ -309,7 +338,7 @@ static void __init
 _ti_clkctrl_setup_gate(struct omap_clkctrl_provider *provider,
                       struct device_node *node, u16 offset,
                       const struct omap_clkctrl_bit_data *data,
-                      void __iomem *reg)
+                      void __iomem *reg, const char *clkctrl_name)
 {
        struct clk_hw_omap *clk_hw;
 
@@ -322,7 +351,7 @@ _ti_clkctrl_setup_gate(struct omap_clkctrl_provider *provider,
 
        if (_ti_clkctrl_clk_register(provider, node, &clk_hw->hw, offset,
                                     data->bit, data->parents, 1,
-                                    &omap_gate_clk_ops))
+                                    &omap_gate_clk_ops, clkctrl_name))
                kfree(clk_hw);
 }
 
@@ -330,7 +359,7 @@ static void __init
 _ti_clkctrl_setup_mux(struct omap_clkctrl_provider *provider,
                      struct device_node *node, u16 offset,
                      const struct omap_clkctrl_bit_data *data,
-                     void __iomem *reg)
+                     void __iomem *reg, const char *clkctrl_name)
 {
        struct clk_omap_mux *mux;
        int num_parents = 0;
@@ -357,7 +386,7 @@ _ti_clkctrl_setup_mux(struct omap_clkctrl_provider *provider,
 
        if (_ti_clkctrl_clk_register(provider, node, &mux->hw, offset,
                                     data->bit, data->parents, num_parents,
-                                    &ti_clk_mux_ops))
+                                    &ti_clk_mux_ops, clkctrl_name))
                kfree(mux);
 }
 
@@ -365,7 +394,7 @@ static void __init
 _ti_clkctrl_setup_div(struct omap_clkctrl_provider *provider,
                      struct device_node *node, u16 offset,
                      const struct omap_clkctrl_bit_data *data,
-                     void __iomem *reg)
+                     void __iomem *reg, const char *clkctrl_name)
 {
        struct clk_omap_divider *div;
        const struct omap_clkctrl_div_data *div_data = data->data;
@@ -393,7 +422,7 @@ _ti_clkctrl_setup_div(struct omap_clkctrl_provider *provider,
 
        if (_ti_clkctrl_clk_register(provider, node, &div->hw, offset,
                                     data->bit, data->parents, 1,
-                                    &ti_clk_divider_ops))
+                                    &ti_clk_divider_ops, clkctrl_name))
                kfree(div);
 }
 
@@ -401,7 +430,7 @@ static void __init
 _ti_clkctrl_setup_subclks(struct omap_clkctrl_provider *provider,
                          struct device_node *node,
                          const struct omap_clkctrl_reg_data *data,
-                         void __iomem *reg)
+                         void __iomem *reg, const char *clkctrl_name)
 {
        const struct omap_clkctrl_bit_data *bits = data->bit_data;
 
@@ -412,17 +441,17 @@ _ti_clkctrl_setup_subclks(struct omap_clkctrl_provider *provider,
                switch (bits->type) {
                case TI_CLK_GATE:
                        _ti_clkctrl_setup_gate(provider, node, data->offset,
-                                              bits, reg);
+                                              bits, reg, clkctrl_name);
                        break;
 
                case TI_CLK_DIVIDER:
                        _ti_clkctrl_setup_div(provider, node, data->offset,
-                                             bits, reg);
+                                             bits, reg, clkctrl_name);
                        break;
 
                case TI_CLK_MUX:
                        _ti_clkctrl_setup_mux(provider, node, data->offset,
-                                             bits, reg);
+                                             bits, reg, clkctrl_name);
                        break;
 
                default:
@@ -461,42 +490,10 @@ static char * __init clkctrl_get_name(struct device_node *np)
                        return name;
                }
        }
-       of_node_put(np);
 
        return NULL;
 }
 
-/* Get clkctrl clock base name based on clkctrl_name or dts node */
-static const char * __init clkctrl_get_clock_name(struct device_node *np,
-                                                 const char *clkctrl_name,
-                                                 int offset, int index,
-                                                 bool legacy_naming)
-{
-       char *clock_name;
-
-       /* l4per-clkctrl:1234:0 style naming based on clkctrl_name */
-       if (clkctrl_name && !legacy_naming) {
-               clock_name = kasprintf(GFP_KERNEL, "%s-clkctrl:%04x:%d",
-                                      clkctrl_name, offset, index);
-               strreplace(clock_name, '_', '-');
-
-               return clock_name;
-       }
-
-       /* l4per:1234:0 old style naming based on clkctrl_name */
-       if (clkctrl_name)
-               return kasprintf(GFP_KERNEL, "%s_cm:clk:%04x:%d",
-                                clkctrl_name, offset, index);
-
-       /* l4per_cm:1234:0 old style naming based on parent node name */
-       if (legacy_naming)
-               return kasprintf(GFP_KERNEL, "%pOFn:clk:%04x:%d",
-                                np->parent, offset, index);
-
-       /* l4per-clkctrl:1234:0 style naming based on node name */
-       return kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", np, offset, index);
-}
-
 static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
 {
        struct omap_clkctrl_provider *provider;
@@ -664,7 +661,7 @@ clkdm_found:
                hw->enable_reg.ptr = provider->base + reg_data->offset;
 
                _ti_clkctrl_setup_subclks(provider, node, reg_data,
-                                         hw->enable_reg.ptr);
+                                         hw->enable_reg.ptr, clkctrl_name);
 
                if (reg_data->flags & CLKF_SW_SUP)
                        hw->enable_bit = MODULEMODE_SWCTRL;
index b05da8516d4c94d96d4d3fdc045721856f595072..f9f4babe3ca6da96646e99dd12648c1a2b335013 100644 (file)
@@ -206,6 +206,7 @@ static int integrator_impd1_clk_spawn(struct device *dev,
                return -ENODEV;
        }
 
+       of_property_read_string(np, "clock-output-names", &name);
        parent_name = of_clk_get_parent_name(np, 0);
        clk = icst_clk_setup(NULL, desc, name, parent_name, map,
                             ICST_INTEGRATOR_IM_PD1);
index 4d1e25d1ced18e878859346b35dc81d2e864f95f..4d3429b2058fcc5f16582b53c516922c263d56ae 100644 (file)
@@ -1059,7 +1059,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
 
        update_turbo_state();
        if (global.turbo_disabled) {
-               pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
+               pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
                mutex_unlock(&intel_pstate_limits_lock);
                mutex_unlock(&intel_pstate_driver_lock);
                return -EPERM;
index b7bb7c30adeb823d92fd5f233d40e0ba644f0afa..b2f9882bc010f1619b0e17eb2204caaecf41895c 100644 (file)
@@ -963,10 +963,12 @@ static void aead_crypt_done(struct device *jrdev, u32 *desc, u32 err,
        struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
        struct aead_edesc *edesc;
        int ecode = 0;
+       bool has_bklog;
 
        dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 
        edesc = rctx->edesc;
+       has_bklog = edesc->bklog;
 
        if (err)
                ecode = caam_jr_strstatus(jrdev, err);
@@ -979,7 +981,7 @@ static void aead_crypt_done(struct device *jrdev, u32 *desc, u32 err,
         * If no backlog flag, the completion of the request is done
         * by CAAM, not crypto engine.
         */
-       if (!edesc->bklog)
+       if (!has_bklog)
                aead_request_complete(req, ecode);
        else
                crypto_finalize_aead_request(jrp->engine, req, ecode);
@@ -995,10 +997,12 @@ static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err,
        struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
        int ivsize = crypto_skcipher_ivsize(skcipher);
        int ecode = 0;
+       bool has_bklog;
 
        dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 
        edesc = rctx->edesc;
+       has_bklog = edesc->bklog;
        if (err)
                ecode = caam_jr_strstatus(jrdev, err);
 
@@ -1028,7 +1032,7 @@ static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err,
         * If no backlog flag, the completion of the request is done
         * by CAAM, not crypto engine.
         */
-       if (!edesc->bklog)
+       if (!has_bklog)
                skcipher_request_complete(req, ecode);
        else
                crypto_finalize_skcipher_request(jrp->engine, req, ecode);
@@ -1711,7 +1715,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 
        if (ivsize || mapped_dst_nents > 1)
                sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx +
-                                   mapped_dst_nents);
+                                   mapped_dst_nents - 1 + !!ivsize);
 
        if (sec4_sg_bytes) {
                edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
index 943bc0296267d3a751c5493358a2a291ff3b5af2..27ff4a3d037ee6927f5a44c1072bdb7f266c933b 100644 (file)
@@ -583,10 +583,12 @@ static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        int ecode = 0;
+       bool has_bklog;
 
        dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 
        edesc = state->edesc;
+       has_bklog = edesc->bklog;
 
        if (err)
                ecode = caam_jr_strstatus(jrdev, err);
@@ -603,7 +605,7 @@ static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
         * If no backlog flag, the completion of the request is done
         * by CAAM, not crypto engine.
         */
-       if (!edesc->bklog)
+       if (!has_bklog)
                req->base.complete(&req->base, ecode);
        else
                crypto_finalize_hash_request(jrp->engine, req, ecode);
@@ -632,10 +634,12 @@ static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
        struct caam_hash_state *state = ahash_request_ctx(req);
        int digestsize = crypto_ahash_digestsize(ahash);
        int ecode = 0;
+       bool has_bklog;
 
        dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 
        edesc = state->edesc;
+       has_bklog = edesc->bklog;
        if (err)
                ecode = caam_jr_strstatus(jrdev, err);
 
@@ -663,7 +667,7 @@ static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
         * If no backlog flag, the completion of the request is done
         * by CAAM, not crypto engine.
         */
-       if (!edesc->bklog)
+       if (!has_bklog)
                req->base.complete(&req->base, ecode);
        else
                crypto_finalize_hash_request(jrp->engine, req, ecode);
index 4fcae37a2e33c1943c99b2945ea7703233d155c2..2e44d685618fded647b016c6a4e250ae40971032 100644 (file)
@@ -121,11 +121,13 @@ static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
        struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
        struct rsa_edesc *edesc;
        int ecode = 0;
+       bool has_bklog;
 
        if (err)
                ecode = caam_jr_strstatus(dev, err);
 
        edesc = req_ctx->edesc;
+       has_bklog = edesc->bklog;
 
        rsa_pub_unmap(dev, edesc, req);
        rsa_io_unmap(dev, edesc, req);
@@ -135,7 +137,7 @@ static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
         * If no backlog flag, the completion of the request is done
         * by CAAM, not crypto engine.
         */
-       if (!edesc->bklog)
+       if (!has_bklog)
                akcipher_request_complete(req, ecode);
        else
                crypto_finalize_akcipher_request(jrp->engine, req, ecode);
@@ -152,11 +154,13 @@ static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
        struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
        struct rsa_edesc *edesc;
        int ecode = 0;
+       bool has_bklog;
 
        if (err)
                ecode = caam_jr_strstatus(dev, err);
 
        edesc = req_ctx->edesc;
+       has_bklog = edesc->bklog;
 
        switch (key->priv_form) {
        case FORM1:
@@ -176,7 +180,7 @@ static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
         * If no backlog flag, the completion of the request is done
         * by CAAM, not crypto engine.
         */
-       if (!edesc->bklog)
+       if (!has_bklog)
                akcipher_request_complete(req, ecode);
        else
                crypto_finalize_akcipher_request(jrp->engine, req, ecode);
index e92b352fb0ad6885f75b900e0d36e46e33c9b4dd..43d9e24201104b9c33d402ee60b42d573c62cc58 100644 (file)
@@ -673,41 +673,14 @@ int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
        return 0;
 }
 
-/*
- * chcr_write_cpl_set_tcb_ulp: update tcb values.
- * TCB is responsible to create tcp headers, so all the related values
- * should be correctly updated.
- * @tx_info - driver specific tls info.
- * @q - tx queue on which packet is going out.
- * @tid - TCB identifier.
- * @pos - current index where should we start writing.
- * @word - TCB word.
- * @mask - TCB word related mask.
- * @val - TCB word related value.
- * @reply - set 1 if looking for TP response.
- * return - next position to write.
- */
-static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
-                                       struct sge_eth_txq *q, u32 tid,
-                                       void *pos, u16 word, u64 mask,
+static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
+                                       u32 tid, void *pos, u16 word, u64 mask,
                                        u64 val, u32 reply)
 {
        struct cpl_set_tcb_field_core *cpl;
        struct ulptx_idata *idata;
        struct ulp_txpkt *txpkt;
-       void *save_pos = NULL;
-       u8 buf[48] = {0};
-       int left;
 
-       left = (void *)q->q.stat - pos;
-       if (unlikely(left < CHCR_SET_TCB_FIELD_LEN)) {
-               if (!left) {
-                       pos = q->q.desc;
-               } else {
-                       save_pos = pos;
-                       pos = buf;
-               }
-       }
        /* ULP_TXPKT */
        txpkt = pos;
        txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
@@ -732,18 +705,54 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
        idata = (struct ulptx_idata *)(cpl + 1);
        idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
        idata->len = htonl(0);
+       pos = idata + 1;
 
-       if (save_pos) {
-               pos = chcr_copy_to_txd(buf, &q->q, save_pos,
-                                      CHCR_SET_TCB_FIELD_LEN);
-       } else {
-               /* check again if we are at the end of the queue */
-               if (left == CHCR_SET_TCB_FIELD_LEN)
+       return pos;
+}
+
+
+/*
+ * chcr_write_cpl_set_tcb_ulp: update tcb values.
+ * TCB is responsible to create tcp headers, so all the related values
+ * should be correctly updated.
+ * @tx_info - driver specific tls info.
+ * @q - tx queue on which packet is going out.
+ * @tid - TCB identifier.
+ * @pos - current index where should we start writing.
+ * @word - TCB word.
+ * @mask - TCB word related mask.
+ * @val - TCB word related value.
+ * @reply - set 1 if looking for TP response.
+ * return - next position to write.
+ */
+static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
+                                       struct sge_eth_txq *q, u32 tid,
+                                       void *pos, u16 word, u64 mask,
+                                       u64 val, u32 reply)
+{
+       int left = (void *)q->q.stat - pos;
+
+       if (unlikely(left < CHCR_SET_TCB_FIELD_LEN)) {
+               if (!left) {
                        pos = q->q.desc;
-               else
-                       pos = idata + 1;
+               } else {
+                       u8 buf[48] = {0};
+
+                       __chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word,
+                                                    mask, val, reply);
+
+                       return chcr_copy_to_txd(buf, &q->q, pos,
+                                               CHCR_SET_TCB_FIELD_LEN);
+               }
        }
 
+       pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word,
+                                          mask, val, reply);
+
+       /* check again if we are at the end of the queue */
+       if (left == CHCR_SET_TCB_FIELD_LEN)
+               pos = q->q.desc;
+
        return pos;
 }
 
index dccef3a2908b391e772944d504d953a062001d9f..e1401d9cc756cea07f7ad310c17fed29e0f3e9db 100644 (file)
@@ -682,7 +682,7 @@ int chtls_push_frames(struct chtls_sock *csk, int comp)
                                make_tx_data_wr(sk, skb, immdlen, len,
                                                credits_needed, completion);
                        tp->snd_nxt += len;
-                       tp->lsndtime = tcp_time_stamp(tp);
+                       tp->lsndtime = tcp_jiffies32;
                        if (completion)
                                ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_NEED_HDR;
                } else {
index 3d0a7e702c94c97d9ef74a5ee158fc55d152fc70..1e678bdf5aed07870c0fc071846ac5ebd4744a81 100644 (file)
@@ -22,6 +22,7 @@ int dev_dax_kmem_probe(struct device *dev)
        resource_size_t kmem_size;
        resource_size_t kmem_end;
        struct resource *new_res;
+       const char *new_res_name;
        int numa_node;
        int rc;
 
@@ -48,11 +49,16 @@ int dev_dax_kmem_probe(struct device *dev)
        kmem_size &= ~(memory_block_size_bytes() - 1);
        kmem_end = kmem_start + kmem_size;
 
-       /* Region is permanently reserved.  Hot-remove not yet implemented. */
-       new_res = request_mem_region(kmem_start, kmem_size, dev_name(dev));
+       new_res_name = kstrdup(dev_name(dev), GFP_KERNEL);
+       if (!new_res_name)
+               return -ENOMEM;
+
+       /* Region is permanently reserved if hotremove fails. */
+       new_res = request_mem_region(kmem_start, kmem_size, new_res_name);
        if (!new_res) {
                dev_warn(dev, "could not reserve region [%pa-%pa]\n",
                         &kmem_start, &kmem_end);
+               kfree(new_res_name);
                return -EBUSY;
        }
 
@@ -63,12 +69,12 @@ int dev_dax_kmem_probe(struct device *dev)
         * unknown to us that will break add_memory() below.
         */
        new_res->flags = IORESOURCE_SYSTEM_RAM;
-       new_res->name = dev_name(dev);
 
        rc = add_memory(numa_node, new_res->start, resource_size(new_res));
        if (rc) {
                release_resource(new_res);
                kfree(new_res);
+               kfree(new_res_name);
                return rc;
        }
        dev_dax->dax_kmem_res = new_res;
@@ -83,6 +89,7 @@ static int dev_dax_kmem_remove(struct device *dev)
        struct resource *res = dev_dax->dax_kmem_res;
        resource_size_t kmem_start = res->start;
        resource_size_t kmem_size = resource_size(res);
+       const char *res_name = res->name;
        int rc;
 
        /*
@@ -102,6 +109,7 @@ static int dev_dax_kmem_remove(struct device *dev)
        /* Release and free dax resources */
        release_resource(res);
        kfree(res);
+       kfree(res_name);
        dev_dax->dax_kmem_res = NULL;
 
        return 0;
index ccc9eda1bc282851a6b4d2777297de271b03b029..07df88f2e3057e0c9def11f24b3d430823d30350 100644 (file)
@@ -388,7 +388,8 @@ static long dma_buf_ioctl(struct file *file,
 
                return ret;
 
-       case DMA_BUF_SET_NAME:
+       case DMA_BUF_SET_NAME_A:
+       case DMA_BUF_SET_NAME_B:
                return dma_buf_set_name(dmabuf, (const char __user *)arg);
 
        default:
@@ -655,8 +656,8 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
  * calls attach() of dma_buf_ops to allow device-specific attach functionality
  * @dmabuf:            [in]    buffer to attach device to.
  * @dev:               [in]    device to be attached.
- * @importer_ops       [in]    importer operations for the attachment
- * @importer_priv      [in]    importer private pointer for the attachment
+ * @importer_ops:      [in]    importer operations for the attachment
+ * @importer_priv:     [in]    importer private pointer for the attachment
  *
  * Returns struct dma_buf_attachment pointer for this attachment. Attachments
  * must be cleaned up by calling dma_buf_detach().
index 0924836443152fb52d7923f85cc87ca3655cb562..023db6883d0518521b8e416a01cc4f9de22d3e83 100644 (file)
@@ -241,7 +241,8 @@ config FSL_RAID
 
 config HISI_DMA
        tristate "HiSilicon DMA Engine support"
-       depends on ARM64 || (COMPILE_TEST && PCI_MSI)
+       depends on ARM64 || COMPILE_TEST
+       depends on PCI_MSI
        select DMA_ENGINE
        select DMA_VIRTUAL_CHANNELS
        help
index 4830ba658ce18c387f6e6f138519f73014c90872..d31076d9ef258899ef9db3bf633ec828f5d616fa 100644 (file)
@@ -232,10 +232,6 @@ static void chan_dev_release(struct device *dev)
        struct dma_chan_dev *chan_dev;
 
        chan_dev = container_of(dev, typeof(*chan_dev), device);
-       if (atomic_dec_and_test(chan_dev->idr_ref)) {
-               ida_free(&dma_ida, chan_dev->dev_id);
-               kfree(chan_dev->idr_ref);
-       }
        kfree(chan_dev);
 }
 
@@ -1043,27 +1039,9 @@ static int get_dma_id(struct dma_device *device)
 }
 
 static int __dma_async_device_channel_register(struct dma_device *device,
-                                              struct dma_chan *chan,
-                                              int chan_id)
+                                              struct dma_chan *chan)
 {
        int rc = 0;
-       int chancnt = device->chancnt;
-       atomic_t *idr_ref;
-       struct dma_chan *tchan;
-
-       tchan = list_first_entry_or_null(&device->channels,
-                                        struct dma_chan, device_node);
-       if (!tchan)
-               return -ENODEV;
-
-       if (tchan->dev) {
-               idr_ref = tchan->dev->idr_ref;
-       } else {
-               idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
-               if (!idr_ref)
-                       return -ENOMEM;
-               atomic_set(idr_ref, 0);
-       }
 
        chan->local = alloc_percpu(typeof(*chan->local));
        if (!chan->local)
@@ -1079,29 +1057,36 @@ static int __dma_async_device_channel_register(struct dma_device *device,
         * When the chan_id is a negative value, we are dynamically adding
         * the channel. Otherwise we are static enumerating.
         */
-       chan->chan_id = chan_id < 0 ? chancnt : chan_id;
+       mutex_lock(&device->chan_mutex);
+       chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
+       mutex_unlock(&device->chan_mutex);
+       if (chan->chan_id < 0) {
+               pr_err("%s: unable to alloc ida for chan: %d\n",
+                      __func__, chan->chan_id);
+               goto err_out;
+       }
+
        chan->dev->device.class = &dma_devclass;
        chan->dev->device.parent = device->dev;
        chan->dev->chan = chan;
-       chan->dev->idr_ref = idr_ref;
        chan->dev->dev_id = device->dev_id;
-       atomic_inc(idr_ref);
        dev_set_name(&chan->dev->device, "dma%dchan%d",
                     device->dev_id, chan->chan_id);
-
        rc = device_register(&chan->dev->device);
        if (rc)
-               goto err_out;
+               goto err_out_ida;
        chan->client_count = 0;
-       device->chancnt = chan->chan_id + 1;
+       device->chancnt++;
 
        return 0;
 
+ err_out_ida:
+       mutex_lock(&device->chan_mutex);
+       ida_free(&device->chan_ida, chan->chan_id);
+       mutex_unlock(&device->chan_mutex);
  err_out:
        free_percpu(chan->local);
        kfree(chan->dev);
-       if (atomic_dec_return(idr_ref) == 0)
-               kfree(idr_ref);
        return rc;
 }
 
@@ -1110,7 +1095,7 @@ int dma_async_device_channel_register(struct dma_device *device,
 {
        int rc;
 
-       rc = __dma_async_device_channel_register(device, chan, -1);
+       rc = __dma_async_device_channel_register(device, chan);
        if (rc < 0)
                return rc;
 
@@ -1130,6 +1115,9 @@ static void __dma_async_device_channel_unregister(struct dma_device *device,
        device->chancnt--;
        chan->dev->chan = NULL;
        mutex_unlock(&dma_list_mutex);
+       mutex_lock(&device->chan_mutex);
+       ida_free(&device->chan_ida, chan->chan_id);
+       mutex_unlock(&device->chan_mutex);
        device_unregister(&chan->dev->device);
        free_percpu(chan->local);
 }
@@ -1152,7 +1140,7 @@ EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
  */
 int dma_async_device_register(struct dma_device *device)
 {
-       int rc, i = 0;
+       int rc;
        struct dma_chan* chan;
 
        if (!device)
@@ -1257,9 +1245,12 @@ int dma_async_device_register(struct dma_device *device)
        if (rc != 0)
                return rc;
 
+       mutex_init(&device->chan_mutex);
+       ida_init(&device->chan_ida);
+
        /* represent channels in sysfs. Probably want devs too */
        list_for_each_entry(chan, &device->channels, device_node) {
-               rc = __dma_async_device_channel_register(device, chan, i++);
+               rc = __dma_async_device_channel_register(device, chan);
                if (rc < 0)
                        goto err_out;
        }
@@ -1334,6 +1325,7 @@ void dma_async_device_unregister(struct dma_device *device)
         */
        dma_cap_set(DMA_PRIVATE, device->cap_mask);
        dma_channel_rebalance();
+       ida_free(&dma_ida, device->dev_id);
        dma_device_put(device);
        mutex_unlock(&dma_list_mutex);
 }
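
Note: the hunks above replace the shared refcounted idr_ref scheme with a per-device IDA for channel numbering. A minimal sketch of the ida_alloc()/ida_free() pattern under the same mutex discipline; the names here are illustrative, not the driver's own:

    #include <linux/idr.h>
    #include <linux/mutex.h>
    #include <linux/gfp.h>

    static DEFINE_IDA(chan_ida);            /* per-device in the patch above */
    static DEFINE_MUTEX(chan_mutex);

    static int example_get_chan_id(void)
    {
            int id;

            mutex_lock(&chan_mutex);
            id = ida_alloc(&chan_ida, GFP_KERNEL);  /* lowest free ID, or -errno */
            mutex_unlock(&chan_mutex);
            return id;
    }

    static void example_put_chan_id(int id)
    {
            mutex_lock(&chan_mutex);
            ida_free(&chan_ida, id);                /* makes the ID reusable */
            mutex_unlock(&chan_mutex);
    }
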
index a2cadfa2e6d7848e8d4a163a0a6e5f387b26622c..0425984db118a9a88735b96b155e365a66fe3969 100644 (file)
@@ -240,7 +240,7 @@ static bool is_threaded_test_run(struct dmatest_info *info)
                struct dmatest_thread *thread;
 
                list_for_each_entry(thread, &dtc->threads, node) {
-                       if (!thread->done)
+                       if (!thread->done && !thread->pending)
                                return true;
                }
        }
@@ -662,8 +662,8 @@ static int dmatest_func(void *data)
                flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
 
        ktime = ktime_get();
-       while (!kthread_should_stop()
-              && !(params->iterations && total_tests >= params->iterations)) {
+       while (!(kthread_should_stop() ||
+              (params->iterations && total_tests >= params->iterations))) {
                struct dma_async_tx_descriptor *tx = NULL;
                struct dmaengine_unmap_data *um;
                dma_addr_t *dsts;
@@ -1166,10 +1166,11 @@ static int dmatest_run_set(const char *val, const struct kernel_param *kp)
                mutex_unlock(&info->lock);
                return ret;
        } else if (dmatest_run) {
-               if (is_threaded_test_pending(info))
-                       start_threaded_tests(info);
-               else
-                       pr_info("Could not start test, no channels configured\n");
+               if (!is_threaded_test_pending(info)) {
+                       pr_info("No channels configured, continue with any\n");
+                       add_threaded_test(info);
+               }
+               start_threaded_tests(info);
        } else {
                stop_threaded_test(info);
        }
index f6f49f0f6fae25a284ffccce7afdc8a2df5668c3..8d79a8787104d48b0463540e4c281be7f99f0cff 100644 (file)
@@ -62,6 +62,13 @@ int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
        perm.ignore = 0;
        iowrite32(perm.bits, idxd->reg_base + offset);
 
+       /*
+        * A readback from the device ensures that any previously generated
+        * completion record writes are visible to software based on PCI
+        * ordering rules.
+        */
+       perm.bits = ioread32(idxd->reg_base + offset);
+
        return 0;
 }
 
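Note: the readback added above relies on MMIO reads being non-posted: the read cannot complete until earlier writes on the same path (including the device's completion-record DMA writes, per PCI ordering) have completed. A two-line sketch of the idiom, with illustrative names:

    iowrite32(perm_bits, reg_base + offset);  /* MMIO write may be posted */
    (void)ioread32(reg_base + offset);        /* non-posted read flushes it */
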
index d6fcd2e601034323123255280923b60e616cab2a..6510791b9921b4dabc49a02e8a5423ce8b605a96 100644 (file)
@@ -173,6 +173,7 @@ static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
        struct llist_node *head;
        int queued = 0;
 
+       *processed = 0;
        head = llist_del_all(&irq_entry->pending_llist);
        if (!head)
                return 0;
@@ -197,6 +198,7 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
        struct list_head *node, *next;
        int queued = 0;
 
+       *processed = 0;
        if (list_empty(&irq_entry->work_list))
                return 0;
 
@@ -218,10 +220,9 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
        return queued;
 }
 
-irqreturn_t idxd_wq_thread(int irq, void *data)
+static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
 {
-       struct idxd_irq_entry *irq_entry = data;
-       int rc, processed = 0, retry = 0;
+       int rc, processed, total = 0;
 
        /*
         * There are two lists we are processing. The pending_llist is where
@@ -244,15 +245,26 @@ irqreturn_t idxd_wq_thread(int irq, void *data)
         */
        do {
                rc = irq_process_work_list(irq_entry, &processed);
-               if (rc != 0) {
-                       retry++;
+               total += processed;
+               if (rc != 0)
                        continue;
-               }
 
                rc = irq_process_pending_llist(irq_entry, &processed);
-       } while (rc != 0 && retry != 10);
+               total += processed;
+       } while (rc != 0);
+
+       return total;
+}
+
+irqreturn_t idxd_wq_thread(int irq, void *data)
+{
+       struct idxd_irq_entry *irq_entry = data;
+       int processed;
 
+       processed = idxd_desc_process(irq_entry);
        idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id);
+       /* catch anything unprocessed after unmasking */
+       processed += idxd_desc_process(irq_entry);
 
        if (processed == 0)
                return IRQ_NONE;
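
Note: the refactored thread handler above drains completions a second time after the unmask, closing the window where a completion lands after the first drain but before the vector is re-enabled. The shape of the pattern, with hypothetical helper names:

    static irqreturn_t example_irq_thread(int irq, void *data)
    {
            struct example_ctx *ctx = data;
            int processed;

            processed = example_drain(ctx);     /* drain while still masked */
            example_unmask(ctx);                /* re-enable the MSI-X vector */
            processed += example_drain(ctx);    /* catch anything that raced in */

            return processed ? IRQ_HANDLED : IRQ_NONE;
    }
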
index 10117f271b12b2cfb540438b4cd979e92da3d265..d683232d7fea072f80d31a92ae553afe2c6ab236 100644 (file)
@@ -363,6 +363,8 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
                gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
                                size);
        tdmac->desc_arr = NULL;
+       if (tdmac->status == DMA_ERROR)
+               tdmac->status = DMA_COMPLETE;
 
        return;
 }
@@ -443,7 +445,8 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
        if (!desc)
                goto err_out;
 
-       mmp_tdma_config_write(chan, direction, &tdmac->slave_config);
+       if (mmp_tdma_config_write(chan, direction, &tdmac->slave_config))
+               goto err_out;
 
        while (buf < buf_len) {
                desc = &tdmac->desc_arr[i];
index c683051257fd66a19b2861107c80feffa4891930..66ef70b00ec0b34a19510c8668f2fc716a1829ba 100644 (file)
@@ -175,13 +175,11 @@ struct owl_dma_txd {
  * @id: physical index to this channel
  * @base: virtual memory base for the dma channel
  * @vchan: the virtual channel currently being served by this physical channel
- * @lock: a lock to use when altering an instance of this struct
  */
 struct owl_dma_pchan {
        u32                     id;
        void __iomem            *base;
        struct owl_dma_vchan    *vchan;
-       spinlock_t              lock;
 };
 
 /**
@@ -437,14 +435,14 @@ static struct owl_dma_pchan *owl_dma_get_pchan(struct owl_dma *od,
        for (i = 0; i < od->nr_pchans; i++) {
                pchan = &od->pchans[i];
 
-               spin_lock_irqsave(&pchan->lock, flags);
+               spin_lock_irqsave(&od->lock, flags);
                if (!pchan->vchan) {
                        pchan->vchan = vchan;
-                       spin_unlock_irqrestore(&pchan->lock, flags);
+                       spin_unlock_irqrestore(&od->lock, flags);
                        break;
                }
 
-               spin_unlock_irqrestore(&pchan->lock, flags);
+               spin_unlock_irqrestore(&od->lock, flags);
        }
 
        return pchan;
index 581e7a290d98e1856cb1d0428c53b4aaee6ce518..a3b0b4c56a190b55ec242762d3766dfae506dbb9 100644 (file)
@@ -865,6 +865,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
        }
 
        pci_set_master(pdev);
+       pd->dma.dev = &pdev->dev;
 
        err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
        if (err) {
@@ -880,7 +881,6 @@ static int pch_dma_probe(struct pci_dev *pdev,
                goto err_free_irq;
        }
 
-       pd->dma.dev = &pdev->dev;
 
        INIT_LIST_HEAD(&pd->dma.channels);
 
index f6a2f42ffc514414965f40dae2be5143c37c87cc..b9f0d9636620d22ae8ef89413f929e99dfbfee22 100644 (file)
@@ -816,6 +816,13 @@ static bool tegra_dma_eoc_interrupt_deasserted(struct tegra_dma_channel *tdc)
 static void tegra_dma_synchronize(struct dma_chan *dc)
 {
        struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+       int err;
+
+       err = pm_runtime_get_sync(tdc->tdma->dev);
+       if (err < 0) {
+               dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err);
+               return;
+       }
 
        /*
         * CPU, which handles interrupt, could be busy in
@@ -825,6 +832,8 @@ static void tegra_dma_synchronize(struct dma_chan *dc)
        wait_event(tdc->wq, tegra_dma_eoc_interrupt_deasserted(tdc));
 
        tasklet_kill(&tdc->tasklet);
+
+       pm_runtime_put(tdc->tdma->dev);
 }
 
 static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc,
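Note: the synchronize path above now brackets the wait with runtime PM so the hardware (and its EOC interrupt) stays powered while being waited on. The basic get/put bracket, sketched with a generic dev pointer:

    err = pm_runtime_get_sync(dev);     /* resume the device synchronously */
    if (err < 0)
            return;                     /* could not power up; nothing to wait for */
    /* ... wait for the interrupt to deassert, kill the tasklet ... */
    pm_runtime_put(dev);                /* drop the usage count again */
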
index c4ce5dfb149b1a17c5ca04d9f7b573e9e4276674..db58d7e4f9fec992b84a9db283d7894e502a952b 100644 (file)
@@ -900,7 +900,7 @@ static int tegra_adma_probe(struct platform_device *pdev)
        ret = dma_async_device_register(&tdma->dma_dev);
        if (ret < 0) {
                dev_err(&pdev->dev, "ADMA registration failed: %d\n", ret);
-               goto irq_dispose;
+               goto rpm_put;
        }
 
        ret = of_dma_controller_register(pdev->dev.of_node,
index d7b965049ccb1e63149bf8d636e95952bc5c9f52..fb7c8150b0d1d971ded5f66091c039864ee23215 100644 (file)
@@ -27,6 +27,7 @@ struct psil_endpoint_config *psil_get_ep_config(u32 thread_id)
                        soc_ep_map = &j721e_ep_map;
                } else {
                        pr_err("PSIL: No compatible machine found for map\n");
+                       mutex_unlock(&ep_map_mutex);
                        return ERR_PTR(-ENOTSUPP);
                }
                pr_debug("%s: Using map for %s\n", __func__, soc_ep_map->name);
index a9c0251adf1a45096b5acca28459570fe280af0f..a90e154b0ae0df1fc52513783dba950aef30235f 100644 (file)
@@ -2156,7 +2156,8 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
                d->residue += sg_dma_len(sgent);
        }
 
-       cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, CPPI5_TR_CSF_EOP);
+       cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
+                        CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
 
        return d;
 }
@@ -2733,7 +2734,8 @@ udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                tr_req[1].dicnt3 = 1;
        }
 
-       cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);
+       cppi5_tr_csf_set(&tr_req[num_tr - 1].flags,
+                        CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
 
        if (uc->config.metadata_size)
                d->vd.tx.metadata_ops = &metadata_ops;
index aecd5a35a29651dd68a5c7d9c680b97351b4a652..5429497d3560be6a54ace01afaf51ac3ad8a1427 100644 (file)
@@ -1230,16 +1230,16 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
                return ret;
 
        spin_lock_irqsave(&chan->lock, flags);
-
-       desc = list_last_entry(&chan->active_list,
-                              struct xilinx_dma_tx_descriptor, node);
-       /*
-        * VDMA and simple mode do not support residue reporting, so the
-        * residue field will always be 0.
-        */
-       if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
-               residue = xilinx_dma_get_residue(chan, desc);
-
+       if (!list_empty(&chan->active_list)) {
+               desc = list_last_entry(&chan->active_list,
+                                      struct xilinx_dma_tx_descriptor, node);
+               /*
+                * VDMA and simple mode do not support residue reporting, so the
+                * residue field will always be 0.
+                */
+               if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
+                       residue = xilinx_dma_get_residue(chan, desc);
+       }
        spin_unlock_irqrestore(&chan->lock, flags);
 
        dma_set_residue(txstate, residue);
index d47749a35863fa5f81b5ee42b1f096306d9a5297..ff253696d1833c1ea8357ff021a652fd341e23cb 100644 (file)
@@ -434,6 +434,7 @@ static void zynqmp_dma_free_descriptor(struct zynqmp_dma_chan *chan,
        struct zynqmp_dma_desc_sw *child, *next;
 
        chan->desc_free_cnt++;
+       list_del(&sdesc->node);
        list_add_tail(&sdesc->node, &chan->free_list);
        list_for_each_entry_safe(child, next, &sdesc->tx_list, node) {
                chan->desc_free_cnt++;
@@ -608,8 +609,6 @@ static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan)
                dma_async_tx_callback callback;
                void *callback_param;
 
-               list_del(&desc->node);
-
                callback = desc->async_tx.callback;
                callback_param = desc->async_tx.callback_param;
                if (callback) {
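
Note: the added list_del() above unlinks the descriptor from whichever list it still sits on before it is queued on the free list; the pair is equivalent to the single helper below, shown only as a reference point:

    /* same effect as list_del(&sdesc->node) + list_add_tail(...) */
    list_move_tail(&sdesc->node, &chan->free_list);
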
index 9d2512913d2587361b576893897969d73e24d029..f564e15fbc7e6a193af171ad816bff26b0da2173 100644 (file)
@@ -407,6 +407,58 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
        }
 }
 
+static const char * const fw_err_rec_type_strs[] = {
+       "IPF SAL Error Record",
+       "SOC Firmware Error Record Type1 (Legacy CrashLog Support)",
+       "SOC Firmware Error Record Type2",
+};
+
+static void cper_print_fw_err(const char *pfx,
+                             struct acpi_hest_generic_data *gdata,
+                             const struct cper_sec_fw_err_rec_ref *fw_err)
+{
+       void *buf = acpi_hest_get_payload(gdata);
+       u32 offset, length = gdata->error_data_length;
+
+       printk("%s""Firmware Error Record Type: %s\n", pfx,
+              fw_err->record_type < ARRAY_SIZE(fw_err_rec_type_strs) ?
+              fw_err_rec_type_strs[fw_err->record_type] : "unknown");
+       printk("%s""Revision: %d\n", pfx, fw_err->revision);
+
+       /* Record Type based on UEFI 2.7 */
+       if (fw_err->revision == 0) {
+               printk("%s""Record Identifier: %08llx\n", pfx,
+                      fw_err->record_identifier);
+       } else if (fw_err->revision == 2) {
+               printk("%s""Record Identifier: %pUl\n", pfx,
+                      &fw_err->record_identifier_guid);
+       }
+
+       /*
+        * The FW error record may contain trailing data beyond the
+        * structure defined by the specification. As the fields
+        * defined (and hence the offset of any trailing data) vary
+        * with the revision, set the offset to account for this
+        * variation.
+        */
+       if (fw_err->revision == 0) {
+               /* record_identifier_guid not defined */
+               offset = offsetof(struct cper_sec_fw_err_rec_ref,
+                                 record_identifier_guid);
+       } else if (fw_err->revision == 1) {
+               /* record_identifier not defined */
+               offset = offsetof(struct cper_sec_fw_err_rec_ref,
+                                 record_identifier);
+       } else {
+               offset = sizeof(*fw_err);
+       }
+
+       buf += offset;
+       length -= offset;
+
+       print_hex_dump(pfx, "", DUMP_PREFIX_OFFSET, 16, 4, buf, length, true);
+}
+
 static void cper_print_tstamp(const char *pfx,
                                   struct acpi_hest_generic_data_v300 *gdata)
 {
@@ -494,6 +546,16 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata
                else
                        goto err_section_too_small;
 #endif
+       } else if (guid_equal(sec_type, &CPER_SEC_FW_ERR_REC_REF)) {
+               struct cper_sec_fw_err_rec_ref *fw_err = acpi_hest_get_payload(gdata);
+
+               printk("%ssection_type: Firmware Error Record Reference\n",
+                      newpfx);
+               /* The minimal FW Error Record contains 16 bytes */
+               if (gdata->error_data_length >= SZ_16)
+                       cper_print_fw_err(newpfx, gdata, fw_err);
+               else
+                       goto err_section_too_small;
        } else {
                const void *err = acpi_hest_get_payload(gdata);
 
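Note: the revision-dependent offsets above follow from the record layout. Assuming the UEFI 2.7 Firmware Error Record Reference layout (1-byte type, 1-byte revision, 6 reserved bytes, 8-byte identifier, 16-byte GUID), a standalone check of the three cases:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct fw_err_rec_ref {             /* assumed UEFI 2.7 layout */
            uint8_t  record_type;
            uint8_t  revision;
            uint8_t  reserved[6];
            uint64_t record_identifier;
            uint8_t  record_identifier_guid[16];
    };

    int main(void)
    {
            /* rev 0: GUID not defined, record ends after the identifier -> 16 */
            printf("%zu\n", offsetof(struct fw_err_rec_ref, record_identifier_guid));
            /* rev 1: identifier not defined -> trailing data starts at 8 */
            printf("%zu\n", offsetof(struct fw_err_rec_ref, record_identifier));
            /* rev 2 and later: full structure -> 32 */
            printf("%zu\n", sizeof(struct fw_err_rec_ref));
            return 0;
    }
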
index 5d4f84781aa036bf83d7b842442d24e438111649..a52236e11e5f73ddea5bb1f42ca2ca7c42425dab 100644 (file)
@@ -114,14 +114,16 @@ static void efi_earlycon_write_char(u32 *dst, unsigned char c, unsigned int h)
        const u32 color_black = 0x00000000;
        const u32 color_white = 0x00ffffff;
        const u8 *src;
-       u8 s8;
-       int m;
+       int m, n, bytes;
+       u8 x;
 
-       src = font->data + c * font->height;
-       s8 = *(src + h);
+       bytes = BITS_TO_BYTES(font->width);
+       src = font->data + c * font->height * bytes + h * bytes;
 
-       for (m = 0; m < 8; m++) {
-               if ((s8 >> (7 - m)) & 1)
+       for (m = 0; m < font->width; m++) {
+               n = m % 8;
+               x = *(src + m / 8);
+               if ((x >> (7 - n)) & 1)
                        *dst = color_white;
                else
                        *dst = color_black;
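
Note: the rewritten loop above generalizes glyph rendering to fonts wider than 8 pixels: each row occupies BITS_TO_BYTES(width) bytes, and pixel m is bit (7 - m % 8) of byte m / 8. A standalone illustration of that indexing:

    #include <stdio.h>

    #define BITS_TO_BYTES(n) (((n) + 7) / 8)

    int main(void)
    {
            unsigned char row[2] = { 0xaa, 0xc0 };  /* one 10-pixel glyph row */
            int width = 10, m;

            printf("bytes per row: %d\n", BITS_TO_BYTES(width));    /* 2 */
            for (m = 0; m < width; m++)
                    printf("%d", (row[m / 8] >> (7 - m % 8)) & 1);
            printf("\n");                           /* prints 1010101011 */
            return 0;
    }
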
index 911a2bd0f6b7046096d204fb8b790c76480cd267..4e3055238f31922d7a59aa0cc2eaa47d3b2b4907 100644 (file)
@@ -130,11 +130,8 @@ static ssize_t systab_show(struct kobject *kobj,
        if (efi.smbios != EFI_INVALID_TABLE_ADDR)
                str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
 
-       if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86)) {
-               extern char *efi_systab_show_arch(char *str);
-
+       if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
                str = efi_systab_show_arch(str);
-       }
 
        return str - buf;
 }
index 99a5cde7c2d8fc52e914e4c7f2e38a8dc91c4f58..48161b1dd098f2694f8d5baf02403b6291928ea8 100644 (file)
@@ -60,7 +60,11 @@ static struct screen_info *setup_graphics(void)
                si = alloc_screen_info();
                if (!si)
                        return NULL;
-               efi_setup_gop(si, &gop_proto, size);
+               status = efi_setup_gop(si, &gop_proto, size);
+               if (status != EFI_SUCCESS) {
+                       free_screen_info(si);
+                       return NULL;
+               }
        }
        return si;
 }
index 67d26949fd26df4a90ed2a6793d55940b46cef6c..62943992f02fe5d30a5c7a36ad83b9667bf1b068 100644 (file)
@@ -92,6 +92,19 @@ extern __pure efi_system_table_t  *efi_system_table(void);
 #define EFI_LOCATE_BY_REGISTER_NOTIFY          1
 #define EFI_LOCATE_BY_PROTOCOL                 2
 
+/*
+ * An efi_boot_memmap is used by efi_get_memory_map() to return the
+ * EFI memory map in a dynamically allocated buffer.
+ *
+ * The buffer allocated for the EFI memory map includes extra room for
+ * a minimum of EFI_MMAP_NR_SLACK_SLOTS additional EFI memory descriptors.
+ * This facilitates the reuse of the EFI memory map buffer when a second
+ * call to ExitBootServices() is needed because of intervening changes to
+ * the EFI memory map. Other related structures, e.g. x86 e820ext, need
+ * to factor in this headroom requirement as well.
+ */
+#define EFI_MMAP_NR_SLACK_SLOTS        8
+
 struct efi_boot_memmap {
        efi_memory_desc_t       **map;
        unsigned long           *map_size;
index 869a79c8946f21f8556f50b132847ddd015c46b8..09f4fa01914eea7fd76bb4bcc73ea95fe1e22c5b 100644 (file)
@@ -5,8 +5,6 @@
 
 #include "efistub.h"
 
-#define EFI_MMAP_NR_SLACK_SLOTS        8
-
 static inline bool mmap_has_headroom(unsigned long buff_size,
                                     unsigned long map_size,
                                     unsigned long desc_size)
index 1d59e103a2e3a61aae5b29d99a5c3a25e7784a9b..e9a684637b703d09f591641bf3c056e0735abb24 100644 (file)
@@ -54,7 +54,7 @@ void efi_retrieve_tpm2_eventlog(void)
        efi_status_t status;
        efi_physical_addr_t log_location = 0, log_last_entry = 0;
        struct linux_efi_tpm_eventlog *log_tbl = NULL;
-       struct efi_tcg2_final_events_table *final_events_table;
+       struct efi_tcg2_final_events_table *final_events_table = NULL;
        unsigned long first_entry_addr, last_entry_addr;
        size_t log_size, last_entry_size;
        efi_bool_t truncated;
@@ -127,7 +127,8 @@ void efi_retrieve_tpm2_eventlog(void)
         * Figure out whether any events have already been logged to the
         * final events structure, and if so how much space they take up
         */
-       final_events_table = get_efi_config_table(LINUX_EFI_TPM_FINAL_LOG_GUID);
+       if (version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
+               final_events_table = get_efi_config_table(LINUX_EFI_TPM_FINAL_LOG_GUID);
        if (final_events_table && final_events_table->nr_events) {
                struct tcg_pcr_event2_head *header;
                int offset;
index 05ccb229fb45ffbb24df88bbb1f90ffb5ec73f77..f0339b5d3658dc8d282484a93e825e8ac962c896 100644 (file)
@@ -606,24 +606,18 @@ static efi_status_t allocate_e820(struct boot_params *params,
                                  struct setup_data **e820ext,
                                  u32 *e820ext_size)
 {
-       unsigned long map_size, desc_size, buff_size;
-       struct efi_boot_memmap boot_map;
-       efi_memory_desc_t *map;
+       unsigned long map_size, desc_size, map_key;
        efi_status_t status;
-       __u32 nr_desc;
+       __u32 nr_desc, desc_version;
 
-       boot_map.map            = &map;
-       boot_map.map_size       = &map_size;
-       boot_map.desc_size      = &desc_size;
-       boot_map.desc_ver       = NULL;
-       boot_map.key_ptr        = NULL;
-       boot_map.buff_size      = &buff_size;
+       /* Only need the size of the mem map and size of each mem descriptor */
+       map_size = 0;
+       status = efi_bs_call(get_memory_map, &map_size, NULL, &map_key,
+                            &desc_size, &desc_version);
+       if (status != EFI_BUFFER_TOO_SMALL)
+               return (status != EFI_SUCCESS) ? status : EFI_UNSUPPORTED;
 
-       status = efi_get_memory_map(&boot_map);
-       if (status != EFI_SUCCESS)
-               return status;
-
-       nr_desc = buff_size / desc_size;
+       nr_desc = map_size / desc_size + EFI_MMAP_NR_SLACK_SLOTS;
 
        if (nr_desc > ARRAY_SIZE(params->e820_table)) {
                u32 nr_e820ext = nr_desc - ARRAY_SIZE(params->e820_table);
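
Note: the rewritten sizing above asks the firmware only for map_size and desc_size, then adds the headroom described by EFI_MMAP_NR_SLACK_SLOTS. A worked example with assumed sizes:

    #define EFI_MMAP_NR_SLACK_SLOTS 8

    /* e.g. a 40-entry map with 48-byte descriptors (assumed values):
     *   map_size = 40 * 48 = 1920
     *   nr_desc  = 1920 / 48 + 8 = 48
     * so the e820 reservation keeps 8 spare slots for map growth before
     * ExitBootServices() has to be retried.
     */
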
index 31f9f0e369b977345dee40523d2f615aec402e84..c1955d320fecd6548cf99e9692562bac10f3998f 100644 (file)
@@ -16,7 +16,7 @@
 int efi_tpm_final_log_size;
 EXPORT_SYMBOL(efi_tpm_final_log_size);
 
-static int tpm2_calc_event_log_size(void *data, int count, void *size_info)
+static int __init tpm2_calc_event_log_size(void *data, int count, void *size_info)
 {
        struct tcg_pcr_event2_head *header;
        int event_size, size = 0;
@@ -62,8 +62,11 @@ int __init efi_tpm_eventlog_init(void)
        tbl_size = sizeof(*log_tbl) + log_tbl->size;
        memblock_reserve(efi.tpm_log, tbl_size);
 
-       if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR)
+       if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR ||
+           log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) {
+               pr_warn(FW_BUG "TPM Final Events table missing or invalid\n");
                goto out;
+       }
 
        final_tbl = early_memremap(efi.tpm_final_log, sizeof(*final_tbl));
 
index baee8c3f06ad864a9bcc6b24d36c2e414d97ff97..cf3687a7925ff0b01cf9a67190ca537f3cbf8880 100644 (file)
@@ -625,7 +625,7 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
 
        kona_gpio->reg_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(kona_gpio->reg_base)) {
-               ret = -ENXIO;
+               ret = PTR_ERR(kona_gpio->reg_base);
                goto err_irq_domain;
        }
 
index da1ef0b1c291bd331346d58ee382feeb86861a20..b1accfba017d1571f71c55ce19ffa435c0e79e9c 100644 (file)
@@ -148,8 +148,10 @@ static int gpio_exar_probe(struct platform_device *pdev)
        mutex_init(&exar_gpio->lock);
 
        index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
-       if (index < 0)
-               goto err_destroy;
+       if (index < 0) {
+               ret = index;
+               goto err_mutex_destroy;
+       }
 
        sprintf(exar_gpio->name, "exar_gpio%d", index);
        exar_gpio->gpio_chip.label = exar_gpio->name;
@@ -176,6 +178,7 @@ static int gpio_exar_probe(struct platform_device *pdev)
 
 err_destroy:
        ida_simple_remove(&ida_index, index);
+err_mutex_destroy:
        mutex_destroy(&exar_gpio->lock);
        return ret;
 }
index 7b70850502192d67df4e9d6dbe5a4f7369c075cb..da570e63589d09896a6bfb9d7c24811604b0a5d8 100644 (file)
@@ -127,8 +127,8 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs)
 {
        u32 arm_gpio_lock_val;
 
-       spin_lock(&gs->gc.bgpio_lock);
        mutex_lock(yu_arm_gpio_lock_param.lock);
+       spin_lock(&gs->gc.bgpio_lock);
 
        arm_gpio_lock_val = readl(yu_arm_gpio_lock_param.io);
 
@@ -136,8 +136,8 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs)
         * When lock active bit[31] is set, ModeX is write enabled
         */
        if (YU_LOCK_ACTIVE_BIT(arm_gpio_lock_val)) {
-               mutex_unlock(yu_arm_gpio_lock_param.lock);
                spin_unlock(&gs->gc.bgpio_lock);
+               mutex_unlock(yu_arm_gpio_lock_param.lock);
                return -EINVAL;
        }
 
@@ -152,8 +152,8 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs)
 static void mlxbf2_gpio_lock_release(struct mlxbf2_gpio_context *gs)
 {
        writel(YU_ARM_GPIO_LOCK_RELEASE, yu_arm_gpio_lock_param.io);
-       mutex_unlock(yu_arm_gpio_lock_param.lock);
        spin_unlock(&gs->gc.bgpio_lock);
+       mutex_unlock(yu_arm_gpio_lock_param.lock);
 }
 
 /*
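
Note: both hunks above establish one consistent order: the mutex (which may sleep) is taken before the spinlock and released after it, so the driver never sleeps inside the atomic section. The shape of the corrected nesting, with illustrative lock names:

    mutex_lock(&outer_mutex);   /* sleeping lock first */
    spin_lock(&inner_lock);     /* atomic section */
    /* ... registers poked here ... */
    spin_unlock(&inner_lock);
    mutex_unlock(&outer_mutex); /* release in reverse order */
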
index 3c9f4fb3d5a28c6fccac61b805af0e319cc0737c..bd65114eb17048c40ed43cc6bbe3e6157149c2b1 100644 (file)
@@ -782,6 +782,15 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
                                     "marvell,armada-370-gpio"))
                return 0;
 
+       /*
+        * There are only two sets of PWM configuration registers for
+        * all the GPIO lines on those SoCs which this driver reserves
+        * for the first two GPIO chips. So if the resource is missing
+        * we can't treat it as an error.
+        */
+       if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm"))
+               return 0;
+
        if (IS_ERR(mvchip->clk))
                return PTR_ERR(mvchip->clk);
 
@@ -804,12 +813,6 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
        mvchip->mvpwm = mvpwm;
        mvpwm->mvchip = mvchip;
 
-       /*
-        * There are only two sets of PWM configuration registers for
-        * all the GPIO lines on those SoCs which this driver reserves
-        * for the first two GPIO chips. So if the resource is missing
-        * we can't treat it as an error.
-        */
        mvpwm->membase = devm_platform_ioremap_resource_byname(pdev, "pwm");
        if (IS_ERR(mvpwm->membase))
                return PTR_ERR(mvpwm->membase);
index 5638b4e5355f1f04088273230f677f7709b1edb6..4269ea9a817e6e2e3eda31b766e4346a364e5044 100644 (file)
@@ -531,7 +531,7 @@ static int pca953x_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
 {
        struct pca953x_chip *chip = gpiochip_get_data(gc);
 
-       switch (config) {
+       switch (pinconf_to_config_param(config)) {
        case PIN_CONFIG_BIAS_PULL_UP:
        case PIN_CONFIG_BIAS_PULL_DOWN:
                return pca953x_gpio_set_pull_up_down(chip, offset, config);
index 1361270ecf8ce98370687a11eeec6659009ad6d5..0cb6600b8eeee88e4fc12c837d68c8c5d13a9c53 100644 (file)
@@ -660,8 +660,8 @@ static int pxa_gpio_probe(struct platform_device *pdev)
        pchip->irq1 = irq1;
 
        gpio_reg_base = devm_platform_ioremap_resource(pdev, 0);
-       if (!gpio_reg_base)
-               return -EINVAL;
+       if (IS_ERR(gpio_reg_base))
+               return PTR_ERR(gpio_reg_base);
 
        clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk)) {
index acb99eff99394f406ae013cc860f5048ac496df1..86568154cdb3d316ae8f8cd624486945800f7d19 100644 (file)
@@ -368,6 +368,7 @@ static void tegra_gpio_irq_shutdown(struct irq_data *d)
        struct tegra_gpio_info *tgi = bank->tgi;
        unsigned int gpio = d->hwirq;
 
+       tegra_gpio_irq_mask(d);
        gpiochip_unlock_as_irq(&tgi->gc, gpio);
 }
 
index 40f2d7f69be26f228f00bdcf88b5c36ace3e855d..c14f0784274ae8ab7780a69791ae1682480c3f2e 100644 (file)
@@ -729,6 +729,10 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
                        if (ret)
                                goto out_free_descs;
                }
+
+               atomic_notifier_call_chain(&desc->gdev->notifier,
+                                          GPIOLINE_CHANGED_REQUESTED, desc);
+
                dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
                        offset);
        }
@@ -1083,6 +1087,9 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
        if (ret)
                goto out_free_desc;
 
+       atomic_notifier_call_chain(&desc->gdev->notifier,
+                                  GPIOLINE_CHANGED_REQUESTED, desc);
+
        le->irq = gpiod_to_irq(desc);
        if (le->irq <= 0) {
                ret = -ENODEV;
@@ -1158,8 +1165,19 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
                                  struct gpioline_info *info)
 {
        struct gpio_chip *gc = desc->gdev->chip;
+       bool ok_for_pinctrl;
        unsigned long flags;
 
+       /*
+        * This function takes a mutex so we must check this before taking
+        * the spinlock.
+        *
+        * FIXME: find a non-racy way to retrieve this information. Maybe a
+        * lock common to both frameworks?
+        */
+       ok_for_pinctrl =
+               pinctrl_gpio_can_use_line(gc->base + info->line_offset);
+
        spin_lock_irqsave(&gpio_lock, flags);
 
        if (desc->name) {
@@ -1186,7 +1204,7 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
            test_bit(FLAG_USED_AS_IRQ, &desc->flags) ||
            test_bit(FLAG_EXPORT, &desc->flags) ||
            test_bit(FLAG_SYSFS, &desc->flags) ||
-           !pinctrl_gpio_can_use_line(gc->base + info->line_offset))
+           !ok_for_pinctrl)
                info->flags |= GPIOLINE_FLAG_KERNEL;
        if (test_bit(FLAG_IS_OUT, &desc->flags))
                info->flags |= GPIOLINE_FLAG_IS_OUT;
@@ -1227,6 +1245,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        void __user *ip = (void __user *)arg;
        struct gpio_desc *desc;
        __u32 offset;
+       int hwgpio;
 
        /* We fail any subsequent ioctl():s when the chip is gone */
        if (!gc)
@@ -1259,13 +1278,19 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                if (IS_ERR(desc))
                        return PTR_ERR(desc);
 
+               hwgpio = gpio_chip_hwgpio(desc);
+
+               if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL &&
+                   test_bit(hwgpio, priv->watched_lines))
+                       return -EBUSY;
+
                gpio_desc_to_lineinfo(desc, &lineinfo);
 
                if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
                        return -EFAULT;
 
                if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL)
-                       set_bit(gpio_chip_hwgpio(desc), priv->watched_lines);
+                       set_bit(hwgpio, priv->watched_lines);
 
                return 0;
        } else if (cmd == GPIO_GET_LINEHANDLE_IOCTL) {
@@ -1280,7 +1305,12 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                if (IS_ERR(desc))
                        return PTR_ERR(desc);
 
-               clear_bit(gpio_chip_hwgpio(desc), priv->watched_lines);
+               hwgpio = gpio_chip_hwgpio(desc);
+
+               if (!test_bit(hwgpio, priv->watched_lines))
+                       return -EBUSY;
+
+               clear_bit(hwgpio, priv->watched_lines);
                return 0;
        }
        return -EINVAL;
@@ -2975,8 +3005,6 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
        }
 done:
        spin_unlock_irqrestore(&gpio_lock, flags);
-       atomic_notifier_call_chain(&desc->gdev->notifier,
-                                  GPIOLINE_CHANGED_REQUESTED, desc);
        return ret;
 }
 
@@ -4192,7 +4220,9 @@ int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset)
                }
        }
 
-       if (test_bit(FLAG_IS_OUT, &desc->flags)) {
+       /* To be valid for IRQ the line needs to be input or open drain */
+       if (test_bit(FLAG_IS_OUT, &desc->flags) &&
+           !test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
                chip_err(gc,
                         "%s: tried to flag a GPIO set as output for IRQ\n",
                         __func__);
@@ -4255,7 +4285,12 @@ void gpiochip_enable_irq(struct gpio_chip *gc, unsigned int offset)
 
        if (!IS_ERR(desc) &&
            !WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags))) {
-               WARN_ON(test_bit(FLAG_IS_OUT, &desc->flags));
+               /*
+                * We must not be in output mode when the line is used as
+                * an IRQ, unless it is open drain.
+                */
+               WARN_ON(test_bit(FLAG_IS_OUT, &desc->flags) &&
+                       !test_bit(FLAG_OPEN_DRAIN, &desc->flags));
                set_bit(FLAG_IRQ_IS_ENABLED, &desc->flags);
        }
 }
@@ -4938,6 +4973,9 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
                return ERR_PTR(ret);
        }
 
+       atomic_notifier_call_chain(&desc->gdev->notifier,
+                                  GPIOLINE_CHANGED_REQUESTED, desc);
+
        return desc;
 }
 EXPORT_SYMBOL_GPL(gpiod_get_index);
@@ -5003,6 +5041,9 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
                return ERR_PTR(ret);
        }
 
+       atomic_notifier_call_chain(&desc->gdev->notifier,
+                                  GPIOLINE_CHANGED_REQUESTED, desc);
+
        return desc;
 }
 EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod);
@@ -5289,8 +5330,9 @@ static int __init gpiolib_dev_init(void)
        gpiolib_initialized = true;
        gpiochip_setup_devs();
 
-       if (IS_ENABLED(CONFIG_OF_DYNAMIC))
-               WARN_ON(of_reconfig_notifier_register(&gpio_of_notifier));
+#if IS_ENABLED(CONFIG_OF_DYNAMIC) && IS_ENABLED(CONFIG_OF_GPIO)
+       WARN_ON(of_reconfig_notifier_register(&gpio_of_notifier));
+#endif /* CONFIG_OF_DYNAMIC && CONFIG_OF_GPIO */
 
        return ret;
 }
index 2992a49ad4a57d0c34ef0c46addf54b681f3ad2e..8ac1581a6b53b21c0c9641455f1f4356915d6d23 100644 (file)
@@ -945,6 +945,7 @@ struct amdgpu_device {
 
        /* s3/s4 mask */
        bool                            in_suspend;
+       bool                            in_hibernate;
 
        /* record last mm index being written through WREG32*/
        unsigned long last_mm_index;
index 9dff792c929036701e78af5f19f24463e9f6448b..6a5b91d23fd9b8492c02996a5af0c60ee7d717bd 100644 (file)
@@ -1343,7 +1343,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
        }
 
        /* Free the BO*/
-       amdgpu_bo_unref(&mem->bo);
+       drm_gem_object_put_unlocked(&mem->bo->tbo.base);
        mutex_destroy(&mem->lock);
        kfree(mem);
 
@@ -1688,7 +1688,8 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
                | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
                | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
 
-       (*mem)->bo = amdgpu_bo_ref(bo);
+       drm_gem_object_get(&bo->tbo.base);
+       (*mem)->bo = bo;
        (*mem)->va = va;
        (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
                AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
index f84f9e35a73b3b617070df3f84b8f342eb5b7ee8..affde2de2a0dbd3f68f934c693ff7a906f9223a7 100644 (file)
@@ -3372,15 +3372,12 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
                }
        }
 
-       amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
-       amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
-
-       amdgpu_amdkfd_suspend(adev, !fbcon);
-
        amdgpu_ras_suspend(adev);
 
        r = amdgpu_device_ip_suspend_phase1(adev);
 
+       amdgpu_amdkfd_suspend(adev, !fbcon);
+
        /* evict vram memory */
        amdgpu_bo_evict_vram(adev);
 
index 8ea86ffdea0d8dfbcf860c67bc1cfc08d26885a8..a735d79a717be8444a1491408d8e25f65402c168 100644 (file)
  * - 3.34.0 - Non-DC can flip correctly between buffers with different pitches
  * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
  * - 3.36.0 - Allow reading more status registers on si/cik
+ * - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness
  */
 #define KMS_DRIVER_MAJOR       3
-#define KMS_DRIVER_MINOR       36
+#define KMS_DRIVER_MINOR       37
 #define KMS_DRIVER_PATCHLEVEL  0
 
 int amdgpu_vram_limit = 0;
@@ -1180,7 +1181,9 @@ static int amdgpu_pmops_freeze(struct device *dev)
        struct amdgpu_device *adev = drm_dev->dev_private;
        int r;
 
+       adev->in_hibernate = true;
        r = amdgpu_device_suspend(drm_dev, true);
+       adev->in_hibernate = false;
        if (r)
                return r;
        return amdgpu_asic_reset(adev);
index 9ae7b61f696a244c7182b06843330728ec455e47..25ddb482466a7f034196c6241e86d4ae16b232b4 100644 (file)
@@ -133,8 +133,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
        u32 cpp;
        u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                               AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS     |
-                              AMDGPU_GEM_CREATE_VRAM_CLEARED        |
-                              AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+                              AMDGPU_GEM_CREATE_VRAM_CLEARED;
 
        info = drm_get_format_info(adev->ddev, mode_cmd);
        cpp = info->cpp[0];
index f92c158d89a12ba4906014fcc6868969858c4abf..0e0daf0021b60aa9bf0b94ccecb40388f257133d 100644 (file)
@@ -4273,7 +4273,7 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
                /* ===  CGCG /CGLS for GFX 3D Only === */
                gfx_v10_0_update_3d_clock_gating(adev, enable);
                /* ===  MGCG + MGLS === */
-               /* gfx_v10_0_update_medium_grain_clock_gating(adev, enable); */
+               gfx_v10_0_update_medium_grain_clock_gating(adev, enable);
        }
 
        if (adev->cg_flags &
@@ -4353,11 +4353,7 @@ static int gfx_v10_0_set_powergating_state(void *handle,
        switch (adev->asic_type) {
        case CHIP_NAVI10:
        case CHIP_NAVI14:
-               if (!enable) {
-                       amdgpu_gfx_off_ctrl(adev, false);
-                       cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
-               } else
-                       amdgpu_gfx_off_ctrl(adev, true);
+               amdgpu_gfx_off_ctrl(adev, enable);
                break;
        default:
                break;
@@ -4918,6 +4914,19 @@ static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
                                                           ref, mask);
 }
 
+static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring,
+                                        unsigned vmid)
+{
+       struct amdgpu_device *adev = ring->adev;
+       uint32_t value = 0;
+
+       value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
+       value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
+       value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
+       value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
+       WREG32_SOC15(GC, 0, mmSQ_CMD, value);
+}
+
 static void
 gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
                                      uint32_t me, uint32_t pipe,
@@ -5309,6 +5318,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
        .emit_wreg = gfx_v10_0_ring_emit_wreg,
        .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
+       .soft_recovery = gfx_v10_0_ring_soft_recovery,
 };
 
 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
index 0c390485bc106455960cfe2120d2dec47d4b7bfd..d2d9dce68c2f186fc6544e37551008c451eed06a 100644 (file)
@@ -1236,6 +1236,8 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
        { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
        /* https://bugzilla.kernel.org/show_bug.cgi?id=207171 */
        { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
+       /* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
+       { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
        { 0, 0, 0, 0, 0 },
 };
 
@@ -5025,10 +5027,9 @@ static int gfx_v9_0_set_powergating_state(void *handle,
        switch (adev->asic_type) {
        case CHIP_RAVEN:
        case CHIP_RENOIR:
-               if (!enable) {
+               if (!enable)
                        amdgpu_gfx_off_ctrl(adev, false);
-                       cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
-               }
+
                if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
                        gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
                        gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
@@ -5052,12 +5053,7 @@ static int gfx_v9_0_set_powergating_state(void *handle,
                        amdgpu_gfx_off_ctrl(adev, true);
                break;
        case CHIP_VEGA12:
-               if (!enable) {
-                       amdgpu_gfx_off_ctrl(adev, false);
-                       cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
-               } else {
-                       amdgpu_gfx_off_ctrl(adev, true);
-               }
+               amdgpu_gfx_off_ctrl(adev, enable);
                break;
        default:
                break;
index 074a9a09c0a79e80b29296e22843ff962ae04d33..a5b60c9a24189d314f00b7ee1c09d8896ae6f2aa 100644 (file)
 #define SDMA_OP_AQL_COPY  0
 #define SDMA_OP_AQL_BARRIER_OR  0
 
+#define SDMA_GCR_RANGE_IS_PA           (1 << 18)
+#define SDMA_GCR_SEQ(x)                        (((x) & 0x3) << 16)
+#define SDMA_GCR_GL2_WB                        (1 << 15)
+#define SDMA_GCR_GL2_INV               (1 << 14)
+#define SDMA_GCR_GL2_DISCARD           (1 << 13)
+#define SDMA_GCR_GL2_RANGE(x)          (((x) & 0x3) << 11)
+#define SDMA_GCR_GL2_US                        (1 << 10)
+#define SDMA_GCR_GL1_INV               (1 << 9)
+#define SDMA_GCR_GLV_INV               (1 << 8)
+#define SDMA_GCR_GLK_INV               (1 << 7)
+#define SDMA_GCR_GLK_WB                        (1 << 6)
+#define SDMA_GCR_GLM_INV               (1 << 5)
+#define SDMA_GCR_GLM_WB                        (1 << 4)
+#define SDMA_GCR_GL1_RANGE(x)          (((x) & 0x3) << 2)
+#define SDMA_GCR_GLI_INV(x)            (((x) & 0x3) << 0)
+
 /*define for op field*/
 #define SDMA_PKT_HEADER_op_offset 0
 #define SDMA_PKT_HEADER_op_mask   0x000000FF
index ebfd2cdf4e651215bc5ee0bf6d54be8ec9f46762..d2840c2f62865c23229d599fecfa530be2be7ce7 100644 (file)
@@ -382,6 +382,18 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
        uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
 
+       /* Invalidate L2, because if we don't do it, we might get stale cache
+        * lines from previous IBs.
+        */
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, (SDMA_GCR_GL2_INV |
+                                SDMA_GCR_GL2_WB |
+                                SDMA_GCR_GLM_INV |
+                                SDMA_GCR_GLM_WB) << 16);
+       amdgpu_ring_write(ring, 0xffffff80);
+       amdgpu_ring_write(ring, 0xffff);
+
        /* An IB packet must end on an 8 DW boundary--the next dword
         * must be on an 8-dword boundary. Our IB packet below is 6
         * dwords long, thus add x number of NOPs, such that, in
@@ -1595,7 +1607,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
                10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
-       .emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */
+       .emit_ib_size = 5 + 7 + 6, /* sdma_v5_0_ring_emit_ib */
        .emit_ib = sdma_v5_0_ring_emit_ib,
        .emit_fence = sdma_v5_0_ring_emit_fence,
        .emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync,
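
Note: the GCR_REQ packet above is five dwords, which is where the extra 5 in emit_ib_size comes from, and the flag word packs the cache-op bits into the upper half of its dword. A standalone check of that encoding using the defines introduced earlier:

    #include <stdio.h>

    #define SDMA_GCR_GL2_WB   (1 << 15)
    #define SDMA_GCR_GL2_INV  (1 << 14)
    #define SDMA_GCR_GLM_INV  (1 << 5)
    #define SDMA_GCR_GLM_WB   (1 << 4)

    int main(void)
    {
            unsigned int flags = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB |
                                 SDMA_GCR_GLM_INV | SDMA_GCR_GLM_WB;

            printf("flags = 0x%04x, dword = 0x%08x\n", flags, flags << 16);
            /* prints: flags = 0xc030, dword = 0xc0300000 */
            return 0;
    }
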
index 4a3049841086d826ea8d798606a7af82dd44ffc1..c24cad3c64ed250ee280f68bae38152637b78fee 100644 (file)
@@ -1050,7 +1050,7 @@ void kfd_dec_compute_active(struct kfd_dev *dev);
 /* Check with device cgroup if @kfd device is accessible */
 static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd)
 {
-#if defined(CONFIG_CGROUP_DEVICE)
+#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
        struct drm_device *ddev = kfd->ddev;
 
        return devcgroup_check_permission(DEVCG_DEV_CHAR, ddev->driver->major,
index c5ba5d46a1485b4bee4eb7cf3b5b87f141c9dff4..7fc15b82fe48afad1d276e416b1d8271d3953631 100644 (file)
@@ -441,7 +441,7 @@ static void dm_vupdate_high_irq(void *interrupt_params)
 
 /**
  * dm_crtc_high_irq() - Handles CRTC interrupt
- * @interrupt_params: ignored
+ * @interrupt_params: used for determining the CRTC instance
  *
  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
  * event handler.
@@ -455,70 +455,6 @@ static void dm_crtc_high_irq(void *interrupt_params)
        unsigned long flags;
 
        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
-
-       if (acrtc) {
-               acrtc_state = to_dm_crtc_state(acrtc->base.state);
-
-               DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
-                             acrtc->crtc_id,
-                             amdgpu_dm_vrr_active(acrtc_state));
-
-               /* Core vblank handling at start of front-porch is only possible
-                * in non-vrr mode, as only there vblank timestamping will give
-                * valid results while done in front-porch. Otherwise defer it
-                * to dm_vupdate_high_irq after end of front-porch.
-                */
-               if (!amdgpu_dm_vrr_active(acrtc_state))
-                       drm_crtc_handle_vblank(&acrtc->base);
-
-               /* Following stuff must happen at start of vblank, for crc
-                * computation and below-the-range btr support in vrr mode.
-                */
-               amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
-
-               if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
-                   acrtc_state->vrr_params.supported &&
-                   acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
-                       spin_lock_irqsave(&adev->ddev->event_lock, flags);
-                       mod_freesync_handle_v_update(
-                               adev->dm.freesync_module,
-                               acrtc_state->stream,
-                               &acrtc_state->vrr_params);
-
-                       dc_stream_adjust_vmin_vmax(
-                               adev->dm.dc,
-                               acrtc_state->stream,
-                               &acrtc_state->vrr_params.adjust);
-                       spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
-               }
-       }
-}
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-/**
- * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
- * @interrupt params - interrupt parameters
- *
- * Notify DRM's vblank event handler at VSTARTUP
- *
- * Unlike DCE hardware, we trigger the handler at VSTARTUP. at which:
- * * We are close enough to VUPDATE - the point of no return for hw
- * * We are in the fixed portion of variable front porch when vrr is enabled
- * * We are before VUPDATE, where double-buffered vrr registers are swapped
- *
- * It is therefore the correct place to signal vblank, send user flip events,
- * and update VRR.
- */
-static void dm_dcn_crtc_high_irq(void *interrupt_params)
-{
-       struct common_irq_params *irq_params = interrupt_params;
-       struct amdgpu_device *adev = irq_params->adev;
-       struct amdgpu_crtc *acrtc;
-       struct dm_crtc_state *acrtc_state;
-       unsigned long flags;
-
-       acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
-
        if (!acrtc)
                return;
 
@@ -528,22 +464,35 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
                         amdgpu_dm_vrr_active(acrtc_state),
                         acrtc_state->active_planes);
 
+       /**
+        * Core vblank handling at start of front-porch is only possible
+        * in non-vrr mode, as only there vblank timestamping will give
+        * valid results while done in front-porch. Otherwise defer it
+        * to dm_vupdate_high_irq after end of front-porch.
+        */
+       if (!amdgpu_dm_vrr_active(acrtc_state))
+               drm_crtc_handle_vblank(&acrtc->base);
+
+       /**
+        * Following stuff must happen at start of vblank, for crc
+        * computation and below-the-range btr support in vrr mode.
+        */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
-       drm_crtc_handle_vblank(&acrtc->base);
+
+       /* BTR updates need to happen before VUPDATE on Vega and above. */
+       if (adev->family < AMDGPU_FAMILY_AI)
+               return;
 
        spin_lock_irqsave(&adev->ddev->event_lock, flags);
 
-       if (acrtc_state->vrr_params.supported &&
+       if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
            acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
-               mod_freesync_handle_v_update(
-               adev->dm.freesync_module,
-               acrtc_state->stream,
-               &acrtc_state->vrr_params);
+               mod_freesync_handle_v_update(adev->dm.freesync_module,
+                                            acrtc_state->stream,
+                                            &acrtc_state->vrr_params);
 
-               dc_stream_adjust_vmin_vmax(
-                       adev->dm.dc,
-                       acrtc_state->stream,
-                       &acrtc_state->vrr_params.adjust);
+               dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
+                                          &acrtc_state->vrr_params.adjust);
        }
 
        /*
@@ -556,7 +505,8 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
-       if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
+       if (adev->family >= AMDGPU_FAMILY_RV &&
+           acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc_state->active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
@@ -568,7 +518,6 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
 
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 }
-#endif
 
 static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
@@ -2008,17 +1957,22 @@ void amdgpu_dm_update_connector_after_detect(
                dc_sink_retain(aconnector->dc_sink);
                if (sink->dc_edid.length == 0) {
                        aconnector->edid = NULL;
-                       drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
+                       if (aconnector->dc_link->aux_mode) {
+                               drm_dp_cec_unset_edid(
+                                       &aconnector->dm_dp_aux.aux);
+                       }
                } else {
                        aconnector->edid =
-                               (struct edid *) sink->dc_edid.raw_edid;
-
+                               (struct edid *)sink->dc_edid.raw_edid;
 
                        drm_connector_update_edid_property(connector,
-                                       aconnector->edid);
-                       drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
-                                           aconnector->edid);
+                                                          aconnector->edid);
+
+                       if (aconnector->dc_link->aux_mode)
+                               drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
+                                                   aconnector->edid);
                }
+
                amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
                update_connector_ext_caps(aconnector);
        } else {
@@ -2440,8 +2394,36 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;
 
+               amdgpu_dm_irq_register_interrupt(
+                       adev, &int_params, dm_crtc_high_irq, c_irq_params);
+       }
+
+       /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
+        * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
+        * to trigger at end of each vblank, regardless of state of the lock,
+        * matching DCE behaviour.
+        */
+       for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
+            i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
+            i++) {
+               r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
+
+               if (r) {
+                       DRM_ERROR("Failed to add vupdate irq id!\n");
+                       return r;
+               }
+
+               int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+               int_params.irq_source =
+                       dc_interrupt_to_irq_source(dc, i, 0);
+
+               c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
+
+               c_irq_params->adev = adev;
+               c_irq_params->irq_src = int_params.irq_source;
+
                amdgpu_dm_irq_register_interrupt(adev, &int_params,
-                               dm_dcn_crtc_high_irq, c_irq_params);
+                               dm_vupdate_high_irq, c_irq_params);
        }
 
        /* Use GRPH_PFLIP interrupt */
@@ -3340,7 +3322,8 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev,
                          const union dc_tiling_info *tiling_info,
                          const uint64_t info,
                          struct dc_plane_dcc_param *dcc,
-                         struct dc_plane_address *address)
+                         struct dc_plane_address *address,
+                         bool force_disable_dcc)
 {
        struct dc *dc = adev->dm.dc;
        struct dc_dcc_surface_param input;
@@ -3352,6 +3335,9 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev,
        memset(&input, 0, sizeof(input));
        memset(&output, 0, sizeof(output));
 
+       if (force_disable_dcc)
+               return 0;
+
        if (!offset)
                return 0;
 
@@ -3401,7 +3387,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
                             union dc_tiling_info *tiling_info,
                             struct plane_size *plane_size,
                             struct dc_plane_dcc_param *dcc,
-                            struct dc_plane_address *address)
+                            struct dc_plane_address *address,
+                            bool force_disable_dcc)
 {
        const struct drm_framebuffer *fb = &afb->base;
        int ret;
@@ -3507,7 +3494,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
 
                ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
                                                plane_size, tiling_info,
-                                               tiling_flags, dcc, address);
+                                               tiling_flags, dcc, address,
+                                               force_disable_dcc);
                if (ret)
                        return ret;
        }
@@ -3599,7 +3587,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
                            const struct drm_plane_state *plane_state,
                            const uint64_t tiling_flags,
                            struct dc_plane_info *plane_info,
-                           struct dc_plane_address *address)
+                           struct dc_plane_address *address,
+                           bool force_disable_dcc)
 {
        const struct drm_framebuffer *fb = plane_state->fb;
        const struct amdgpu_framebuffer *afb =
@@ -3681,7 +3670,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
                                           plane_info->rotation, tiling_flags,
                                           &plane_info->tiling_info,
                                           &plane_info->plane_size,
-                                          &plane_info->dcc, address);
+                                          &plane_info->dcc, address,
+                                          force_disable_dcc);
        if (ret)
                return ret;
 
@@ -3704,6 +3694,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
        struct dc_plane_info plane_info;
        uint64_t tiling_flags;
        int ret;
+       bool force_disable_dcc = false;
 
        ret = fill_dc_scaling_info(plane_state, &scaling_info);
        if (ret)
@@ -3718,9 +3709,11 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
        if (ret)
                return ret;
 
+       force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
        ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
                                          &plane_info,
-                                         &dc_plane_state->address);
+                                         &dc_plane_state->address,
+                                         force_disable_dcc);
        if (ret)
                return ret;
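
The Raven-in-suspend predicate recurs below in dm_plane_helper_prepare_fb(); a hypothetical helper (not in the patch) states the condition once:

	/* Hypothetical helper, not part of the patch: DCC is forced off
	 * only on Raven ASICs while the device is suspending.
	 */
	static bool dm_force_disable_dcc(const struct amdgpu_device *adev)
	{
		return adev->asic_type == CHIP_RAVEN && adev->in_suspend;
	}
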
 
@@ -4437,10 +4430,6 @@ static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
        struct amdgpu_device *adev = crtc->dev->dev_private;
        int rc;
 
-       /* Do not set vupdate for DCN hardware */
-       if (adev->family > AMDGPU_FAMILY_AI)
-               return 0;
-
        irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
 
        rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
@@ -5342,6 +5331,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
        uint64_t tiling_flags;
        uint32_t domain;
        int r;
+       bool force_disable_dcc = false;
 
        dm_plane_state_old = to_dm_plane_state(plane->state);
        dm_plane_state_new = to_dm_plane_state(new_state);
@@ -5400,11 +5390,13 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
                        dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
                struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
 
+               force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
                fill_plane_buffer_attributes(
                        adev, afb, plane_state->format, plane_state->rotation,
                        tiling_flags, &plane_state->tiling_info,
                        &plane_state->plane_size, &plane_state->dcc,
-                       &plane_state->address);
+                       &plane_state->address,
+                       force_disable_dcc);
        }
 
        return 0;
@@ -6676,7 +6668,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                fill_dc_plane_info_and_addr(
                        dm->adev, new_plane_state, tiling_flags,
                        &bundle->plane_infos[planes_count],
-                       &bundle->flip_addrs[planes_count].address);
+                       &bundle->flip_addrs[planes_count].address,
+                       false);
+
+               DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
+                                new_plane_state->plane->index,
+                                bundle->plane_infos[planes_count].dcc.enable);
 
                bundle->surface_updates[planes_count].plane_info =
                        &bundle->plane_infos[planes_count];
@@ -7858,6 +7855,7 @@ static int dm_update_plane_state(struct dc *dc,
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
        struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
+       struct amdgpu_crtc *new_acrtc;
        bool needs_reset;
        int ret = 0;
 
@@ -7867,9 +7865,23 @@ static int dm_update_plane_state(struct dc *dc,
        dm_new_plane_state = to_dm_plane_state(new_plane_state);
        dm_old_plane_state = to_dm_plane_state(old_plane_state);
 
-       /*TODO Implement atomic check for cursor plane */
-       if (plane->type == DRM_PLANE_TYPE_CURSOR)
+       /* TODO: Implement better atomic check for cursor plane */
+       if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+               if (!enable || !new_plane_crtc ||
+                       drm_atomic_plane_disabling(plane->state, new_plane_state))
+                       return 0;
+
+               new_acrtc = to_amdgpu_crtc(new_plane_crtc);
+
+               if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
+                       (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
+                       DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
+                                                        new_plane_state->crtc_w, new_plane_state->crtc_h);
+                       return -EINVAL;
+               }
+
                return 0;
+       }
 
        needs_reset = should_reset_plane(state, plane, old_plane_state,
                                         new_plane_state);
@@ -8096,7 +8108,8 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
                                ret = fill_dc_plane_info_and_addr(
                                        dm->adev, new_plane_state, tiling_flags,
                                        plane_info,
-                                       &flip_addr->address);
+                                       &flip_addr->address,
+                                       false);
                                if (ret)
                                        goto cleanup;
 
index 78e1c11d4ae544d59516dc4e17a8732c14d38bf9..dcf84a61de37f3b1254da646ec3688c00de3ecbf 100644 (file)
@@ -398,15 +398,15 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
        struct mod_hdcp_display *display = &hdcp_work[link_index].display;
        struct mod_hdcp_link *link = &hdcp_work[link_index].link;
 
-       memset(display, 0, sizeof(*display));
-       memset(link, 0, sizeof(*link));
-
-       display->index = aconnector->base.index;
-
        if (config->dpms_off) {
                hdcp_remove_display(hdcp_work, link_index, aconnector);
                return;
        }
+
+       memset(display, 0, sizeof(*display));
+       memset(link, 0, sizeof(*link));
+
+       display->index = aconnector->base.index;
        display->state = MOD_HDCP_DISPLAY_ACTIVE;
 
        if (aconnector->dc_sink != NULL)
index 8489f1e56892c9b5dce662812887f0dba40e49b8..47431ca6986db7443ec7397678621e8985bdbcbb 100644 (file)
@@ -834,11 +834,10 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
 static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
 {
        int i;
-       int count = 0;
-       struct pipe_ctx *pipe;
        PERF_TRACE();
        for (i = 0; i < MAX_PIPES; i++) {
-               pipe = &context->res_ctx.pipe_ctx[i];
+               int count = 0;
+               struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 
                if (!pipe->plane_state)
                        continue;
index 7cbb1efb4f68eb245c33eb7dd525256a6ea0b9ed..caa090d0b6acc6549186f96a850edbf60ad46343 100644 (file)
@@ -220,6 +220,30 @@ static enum dpcd_training_patterns
        return dpcd_tr_pattern;
 }
 
+static uint8_t dc_dp_initialize_scrambling_data_symbols(
+       struct dc_link *link,
+       enum dc_dp_training_pattern pattern)
+{
+       uint8_t disable_scrambled_data_symbols = 0;
+
+       switch (pattern) {
+       case DP_TRAINING_PATTERN_SEQUENCE_1:
+       case DP_TRAINING_PATTERN_SEQUENCE_2:
+       case DP_TRAINING_PATTERN_SEQUENCE_3:
+               disable_scrambled_data_symbols = 1;
+               break;
+       case DP_TRAINING_PATTERN_SEQUENCE_4:
+               disable_scrambled_data_symbols = 0;
+               break;
+       default:
+               ASSERT(0);
+               DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n",
+                       __func__, pattern);
+               break;
+       }
+       return disable_scrambled_data_symbols;
+}
+
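
This encodes the DP link-training rules: TPS1 through TPS3 are transmitted with scrambling disabled, while TPS4 (added in DP 1.4) is transmitted scrambled. The same mapping in table form, as an illustrative sketch only (the switch above is what the patch uses, since it can also ASSERT on bad input):

	/* Equivalent lookup-table form, illustrative only: */
	static const uint8_t scrambling_disabled_for_tps[] = {
		[DP_TRAINING_PATTERN_SEQUENCE_1] = 1,
		[DP_TRAINING_PATTERN_SEQUENCE_2] = 1,
		[DP_TRAINING_PATTERN_SEQUENCE_3] = 1,
		[DP_TRAINING_PATTERN_SEQUENCE_4] = 0, /* TPS4 is scrambled by design */
	};
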
 static inline bool is_repeater(struct dc_link *link, uint32_t offset)
 {
        return (!link->is_lttpr_mode_transparent && offset != 0);
@@ -252,6 +276,9 @@ static void dpcd_set_lt_pattern_and_lane_settings(
        dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
                dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern);
 
+       dpcd_pattern.v1_4.SCRAMBLING_DISABLE =
+               dc_dp_initialize_scrambling_data_symbols(link, pattern);
+
        dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET]
                = dpcd_pattern.raw;
 
@@ -2908,6 +2935,12 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
                                        sizeof(hpd_irq_dpcd_data),
                                        "Status: ");
 
+               for (i = 0; i < MAX_PIPES; i++) {
+                       pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+                       if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
+                               link->dc->hwss.blank_stream(pipe_ctx);
+               }
+
                for (i = 0; i < MAX_PIPES; i++) {
                        pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
                        if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
@@ -2927,6 +2960,12 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
                if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
                        dc_link_reallocate_mst_payload(link);
 
+               for (i = 0; i < MAX_PIPES; i++) {
+                       pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+                       if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
+                               link->dc->hwss.unblank_stream(pipe_ctx, &previous_link_settings);
+               }
+
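
The two new loops bracket the recovery path: every pipe driving this link is blanked before the link is retrained and unblanked with the previous link settings afterwards, so the user sees a momentary blank rather than corruption. The shape of the pattern, condensed into hypothetical helpers:

	/* Illustrative only; the real code walks res_ctx.pipe_ctx as above. */
	blank_streams_on_link(link);               /* hypothetical */
	recover_link(link);                        /* hypothetical: elided retrain path */
	unblank_streams_on_link(link, &previous_link_settings); /* hypothetical */
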
                status = false;
                if (out_link_loss)
                        *out_link_loss = true;
@@ -4227,6 +4266,21 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
 void dpcd_set_source_specific_data(struct dc_link *link)
 {
        const uint32_t post_oui_delay = 30; // 30ms
+       uint8_t dspc = 0;
+       enum dc_status ret = DC_ERROR_UNEXPECTED;
+
+       ret = core_link_read_dpcd(link, DP_DOWN_STREAM_PORT_COUNT, &dspc,
+                                 sizeof(dspc));
+
+       if (ret != DC_OK) {
+               DC_LOG_ERROR("Error in DP aux read transaction, not writing source specific data\n");
+               return;
+       }
+
+       /* Return if OUI unsupported */
+       if (!(dspc & DP_OUI_SUPPORT))
+               return;
 
        if (!link->dc->vendor_signature.is_valid) {
                struct dpcd_amd_signature amd_signature;
index 6ddbb00ed37a5aa5f0b53a525995bb327bc882b7..4f0e7203dba4f41fd6fa4419b11f99aefc7d97c1 100644 (file)
@@ -231,34 +231,6 @@ struct dc_stream_status *dc_stream_get_status(
        return dc_stream_get_status_from_state(dc->current_state, stream);
 }
 
-static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)
-{
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-       unsigned int vupdate_line;
-       unsigned int lines_to_vupdate, us_to_vupdate, vpos, nvpos;
-       struct dc_stream_state *stream = pipe_ctx->stream;
-       unsigned int us_per_line;
-
-       if (stream->ctx->asic_id.chip_family == FAMILY_RV &&
-                       ASICREV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) {
-
-               vupdate_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
-               if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos))
-                       return;
-
-               if (vpos >= vupdate_line)
-                       return;
-
-               us_per_line = stream->timing.h_total * 10000 / stream->timing.pix_clk_100hz;
-               lines_to_vupdate = vupdate_line - vpos;
-               us_to_vupdate = lines_to_vupdate * us_per_line;
-
-               /* 70 us is a conservative estimate of cursor update time*/
-               if (us_to_vupdate < 70)
-                       udelay(us_to_vupdate);
-       }
-#endif
-}
 
 /**
  * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
@@ -298,9 +270,7 @@ bool dc_stream_set_cursor_attributes(
 
                if (!pipe_to_program) {
                        pipe_to_program = pipe_ctx;
-
-                       delay_cursor_until_vupdate(pipe_ctx, dc);
-                       dc->hwss.pipe_control_lock(dc, pipe_to_program, true);
+                       dc->hwss.cursor_lock(dc, pipe_to_program, true);
                }
 
                dc->hwss.set_cursor_attribute(pipe_ctx);
@@ -309,7 +279,7 @@ bool dc_stream_set_cursor_attributes(
        }
 
        if (pipe_to_program)
-               dc->hwss.pipe_control_lock(dc, pipe_to_program, false);
+               dc->hwss.cursor_lock(dc, pipe_to_program, false);
 
        return true;
 }
@@ -349,16 +319,14 @@ bool dc_stream_set_cursor_position(
 
                if (!pipe_to_program) {
                        pipe_to_program = pipe_ctx;
-
-                       delay_cursor_until_vupdate(pipe_ctx, dc);
-                       dc->hwss.pipe_control_lock(dc, pipe_to_program, true);
+                       dc->hwss.cursor_lock(dc, pipe_to_program, true);
                }
 
                dc->hwss.set_cursor_position(pipe_ctx);
        }
 
        if (pipe_to_program)
-               dc->hwss.pipe_control_lock(dc, pipe_to_program, false);
+               dc->hwss.cursor_lock(dc, pipe_to_program, false);
 
        return true;
 }
index c279982947e10ae7cb020485ae850f531a81a1dd..10527593868cc91031c48031c9d5532f4f0d5dd0 100644 (file)
@@ -2757,6 +2757,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
        .disable_plane = dce110_power_down_fe,
        .pipe_control_lock = dce_pipe_control_lock,
        .interdependent_update_lock = NULL,
+       .cursor_lock = dce_pipe_control_lock,
        .prepare_bandwidth = dce110_prepare_bandwidth,
        .optimize_bandwidth = dce110_optimize_bandwidth,
        .set_drr = set_drr,
index b0357546471b22cf24d40a31480c1a8e09e7f84b..416afb99529d1864031c7d9c5d809aaf2e8c776d 100644 (file)
@@ -1625,6 +1625,85 @@ void dcn10_pipe_control_lock(
                hws->funcs.verify_allow_pstate_change_high(dc);
 }
 
+/**
+ * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
+ *
+ * Software keepout workaround to prevent cursor update locking from stalling
+ * out cursor updates indefinitely, and to prevent stale values from being
+ * retained when the viewport changes in the same frame as the cursor.
+ *
+ * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
+ * too close to VUPDATE, then stall out until VUPDATE finishes.
+ *
+ * TODO: Optimize cursor programming to be once per frame before VUPDATE
+ *       to avoid the need for this workaround.
+ */
+static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+       struct dc_stream_state *stream = pipe_ctx->stream;
+       struct crtc_position position;
+       uint32_t vupdate_start, vupdate_end;
+       unsigned int lines_to_vupdate, us_to_vupdate, vpos;
+       unsigned int us_per_line, us_vupdate;
+
+       if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
+               return;
+
+       if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
+               return;
+
+       dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
+                                      &vupdate_end);
+
+       dc->hwss.get_position(&pipe_ctx, 1, &position);
+       vpos = position.vertical_count;
+
+       /* Avoid wraparound calculation issues */
+       vupdate_start += stream->timing.v_total;
+       vupdate_end += stream->timing.v_total;
+       vpos += stream->timing.v_total;
+
+       if (vpos <= vupdate_start) {
+               /* VPOS is in VACTIVE or back porch. */
+               lines_to_vupdate = vupdate_start - vpos;
+       } else if (vpos > vupdate_end) {
+               /* VPOS is in the front porch. */
+               return;
+       } else {
+               /* VPOS is in VUPDATE. */
+               lines_to_vupdate = 0;
+       }
+
+       /* Calculate time until VUPDATE in microseconds. */
+       us_per_line =
+               stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
+       us_to_vupdate = lines_to_vupdate * us_per_line;
+
+       /* 70 us is a conservative estimate of cursor update time */
+       if (us_to_vupdate > 70)
+               return;
+
+       /* Stall out until the cursor update completes. */
+       if (vupdate_end < vupdate_start)
+               vupdate_end += stream->timing.v_total;
+       us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
+       udelay(us_to_vupdate + us_vupdate);
+}
+
+void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
+{
+       /* cursor lock is per MPCC tree, so we only need to lock one pipe per stream */
+       if (!pipe || pipe->top_pipe)
+               return;
+
+       /* Prevent cursor lock from stalling out cursor updates. */
+       if (lock)
+               delay_cursor_until_vupdate(dc, pipe);
+
+       dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
+                       pipe->stream_res.opp->inst, lock);
+}
+
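
Two details above deserve a note. First, biasing vupdate_start, vupdate_end, and vpos by v_total before comparing keeps the unsigned arithmetic monotonic when the VUPDATE window straddles the frame boundary. Second, the microsecond conversion works because pix_clk_100hz is in units of 100 Hz: us_per_line = h_total * 1e6 / (pix_clk_100hz * 100) = h_total * 10000 / pix_clk_100hz; for a 1080p timing with h_total = 2200 and a 148.5 MHz pixel clock (pix_clk_100hz = 1485000), that is 2200 * 10000 / 1485000, roughly 14 us per line. The wraparound-safe distance calculation in isolation, as a hypothetical helper (not in the patch):

	/* Hypothetical helper, not part of the patch: scanlines from the
	 * current position to the start of a window, well-defined even
	 * when the window wraps past v_total.
	 */
	static unsigned int lines_until(unsigned int vpos,
					unsigned int win_start,
					unsigned int v_total)
	{
		vpos += v_total;
		win_start += v_total;

		return vpos <= win_start ? win_start - vpos : 0;
	}
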
 static bool wait_for_reset_trigger_to_occur(
        struct dc_context *dc_ctx,
        struct timing_generator *tg)
@@ -3226,7 +3305,7 @@ int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
        return vertical_line_start;
 }
 
-static void dcn10_calc_vupdate_position(
+void dcn10_calc_vupdate_position(
                struct dc *dc,
                struct pipe_ctx *pipe_ctx,
                uint32_t *start_line,
index 16a50e05ffbfaa13ba23121fe5da7443632241f1..42b6e016d71e8d0582acf3a8c8191bf7b9a0077a 100644 (file)
@@ -34,6 +34,11 @@ struct dc;
 void dcn10_hw_sequencer_construct(struct dc *dc);
 
 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx);
+void dcn10_calc_vupdate_position(
+               struct dc *dc,
+               struct pipe_ctx *pipe_ctx,
+               uint32_t *start_line,
+               uint32_t *end_line);
 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx);
 enum dc_status dcn10_enable_stream_timing(
                struct pipe_ctx *pipe_ctx,
@@ -49,6 +54,7 @@ void dcn10_pipe_control_lock(
        struct dc *dc,
        struct pipe_ctx *pipe,
        bool lock);
+void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock);
 void dcn10_blank_pixel_data(
                struct dc *dc,
                struct pipe_ctx *pipe_ctx,
index dd02d3983695aba3f6b91c82637edda27a76f1e2..9e8e32629e4782a8f1e636197d727b14e55003da 100644 (file)
@@ -50,6 +50,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
        .disable_audio_stream = dce110_disable_audio_stream,
        .disable_plane = dcn10_disable_plane,
        .pipe_control_lock = dcn10_pipe_control_lock,
+       .cursor_lock = dcn10_cursor_lock,
        .interdependent_update_lock = dcn10_lock_all_pipes,
        .prepare_bandwidth = dcn10_prepare_bandwidth,
        .optimize_bandwidth = dcn10_optimize_bandwidth,
@@ -71,6 +72,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
        .set_clock = dcn10_set_clock,
        .get_clock = dcn10_get_clock,
        .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+       .calc_vupdate_position = dcn10_calc_vupdate_position,
 };
 
 static const struct hwseq_private_funcs dcn10_private_funcs = {
index 04f863499cfb75352ef0a2b6a69fe165f645dabd..3fcd408e9103250933044ff3721de622c7d5247c 100644 (file)
@@ -223,6 +223,9 @@ struct mpcc *mpc1_insert_plane(
        REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, dpp_id);
        REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, tree->opp_id);
 
+       /* Configure VUPDATE lock set for this MPCC to map to the OPP */
+       REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, tree->opp_id);
+
        /* update mpc tree mux setting */
        if (tree->opp_list == insert_above_mpcc) {
                /* insert the toppest mpcc */
@@ -318,6 +321,7 @@ void mpc1_remove_mpcc(
                REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
                REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
                REG_SET(MPCC_OPP_ID[mpcc_id],  0, MPCC_OPP_ID,  0xf);
+               REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
 
                /* mark this mpcc as not in use */
                mpc10->mpcc_in_use_mask &= ~(1 << mpcc_id);
@@ -328,6 +332,7 @@ void mpc1_remove_mpcc(
                REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
                REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
                REG_SET(MPCC_OPP_ID[mpcc_id],  0, MPCC_OPP_ID,  0xf);
+               REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
        }
 }
 
@@ -361,6 +366,7 @@ void mpc1_mpc_init(struct mpc *mpc)
                REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
                REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
                REG_SET(MPCC_OPP_ID[mpcc_id],  0, MPCC_OPP_ID,  0xf);
+               REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
 
                mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id);
        }
@@ -381,6 +387,7 @@ void mpc1_mpc_init_single_inst(struct mpc *mpc, unsigned int mpcc_id)
        REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
        REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
        REG_SET(MPCC_OPP_ID[mpcc_id],  0, MPCC_OPP_ID,  0xf);
+       REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
 
        mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id);
 
@@ -453,6 +460,13 @@ void mpc1_read_mpcc_state(
                        MPCC_BUSY, &s->busy);
 }
 
+void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock)
+{
+       struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+
+       REG_SET(CUR[opp_id], 0, CUR_VUPDATE_LOCK_SET, lock ? 1 : 0);
+}
+
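
Paired with the MPCC_UPDATE_LOCK_SEL programming above, this yields a per-OPP cursor lock: every MPCC in an OPP's tree belongs to the same lock group, and CUR_VUPDATE_LOCK_SET holds cursor register updates until the next VUPDATE. The expected call shape from the hw-sequencer layer, as a sketch (compare dcn10_cursor_lock earlier in this patch):

	/* Illustrative only: lock, program, release to latch at VUPDATE. */
	mpc->funcs->cursor_lock(mpc, opp_id, true);
	dc->hwss.set_cursor_position(pipe_ctx);    /* updates held */
	mpc->funcs->cursor_lock(mpc, opp_id, false);
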
 static const struct mpc_funcs dcn10_mpc_funcs = {
        .read_mpcc_state = mpc1_read_mpcc_state,
        .insert_plane = mpc1_insert_plane,
@@ -464,6 +478,7 @@ static const struct mpc_funcs dcn10_mpc_funcs = {
        .assert_mpcc_idle_before_connect = mpc1_assert_mpcc_idle_before_connect,
        .init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw,
        .update_blending = mpc1_update_blending,
+       .cursor_lock = mpc1_cursor_lock,
        .set_denorm = NULL,
        .set_denorm_clamp = NULL,
        .set_output_csc = NULL,
index 962a68e322ee24b5015e3f37ceb9beec600440ab..66a4719c22a0c15a4fe4f2c9696ae16af5fe29d6 100644 (file)
        SRII(MPCC_BG_G_Y, MPCC, inst),\
        SRII(MPCC_BG_R_CR, MPCC, inst),\
        SRII(MPCC_BG_B_CB, MPCC, inst),\
-       SRII(MPCC_BG_B_CB, MPCC, inst),\
-       SRII(MPCC_SM_CONTROL, MPCC, inst)
+       SRII(MPCC_SM_CONTROL, MPCC, inst),\
+       SRII(MPCC_UPDATE_LOCK_SEL, MPCC, inst)
 
 #define MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(inst) \
-       SRII(MUX, MPC_OUT, inst)
+       SRII(MUX, MPC_OUT, inst),\
+       VUPDATE_SRII(CUR, VUPDATE_LOCK_SET, inst)
 
 #define MPC_COMMON_REG_VARIABLE_LIST \
        uint32_t MPCC_TOP_SEL[MAX_MPCC]; \
@@ -55,7 +56,9 @@
        uint32_t MPCC_BG_R_CR[MAX_MPCC]; \
        uint32_t MPCC_BG_B_CB[MAX_MPCC]; \
        uint32_t MPCC_SM_CONTROL[MAX_MPCC]; \
-       uint32_t MUX[MAX_OPP];
+       uint32_t MUX[MAX_OPP]; \
+       uint32_t MPCC_UPDATE_LOCK_SEL[MAX_MPCC]; \
+       uint32_t CUR[MAX_OPP];
 
 #define MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
        SF(MPCC0_MPCC_TOP_SEL, MPCC_TOP_SEL, mask_sh),\
@@ -78,7 +81,8 @@
        SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FIELD_ALT, mask_sh),\
        SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_FRAME_POL, mask_sh),\
        SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_TOP_POL, mask_sh),\
-       SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh)
+       SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh),\
+       SF(MPCC0_MPCC_UPDATE_LOCK_SEL, MPCC_UPDATE_LOCK_SEL, mask_sh)
 
 #define MPC_REG_FIELD_LIST(type) \
        type MPCC_TOP_SEL;\
        type MPCC_SM_FIELD_ALT;\
        type MPCC_SM_FORCE_NEXT_FRAME_POL;\
        type MPCC_SM_FORCE_NEXT_TOP_POL;\
-       type MPC_OUT_MUX;
+       type MPC_OUT_MUX;\
+       type MPCC_UPDATE_LOCK_SEL;\
+       type CUR_VUPDATE_LOCK_SET;
 
 struct dcn_mpc_registers {
        MPC_COMMON_REG_VARIABLE_LIST
@@ -192,4 +198,6 @@ void mpc1_read_mpcc_state(
                int mpcc_inst,
                struct mpcc_state *s);
 
+void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock);
+
 #endif
index 07265ca7d28cc083eebaaa2381cf3413be78be3e..ba849aa31e6e77346fc6c25f028cf5251929e6a8 100644 (file)
@@ -181,6 +181,14 @@ enum dcn10_clk_src_array_id {
        .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
                                        mm ## block ## id ## _ ## reg_name
 
+#define VUPDATE_SRII(reg_name, block, id)\
+       .reg_name[id] = BASE(mm ## reg_name ## 0 ## _ ## block ## id ## _BASE_IDX) + \
+                                       mm ## reg_name ## 0 ## _ ## block ## id
+
+/* set field/register/bitfield name */
+#define SFRB(field_name, reg_name, bitfield, post_fix)\
+       .field_name = reg_name ## __ ## bitfield ## post_fix
+
 /* NBIO */
 #define NBIO_BASE_INNER(seg) \
        NBIF_BASE__INST0_SEG ## seg
@@ -419,11 +427,13 @@ static const struct dcn_mpc_registers mpc_regs = {
 };
 
 static const struct dcn_mpc_shift mpc_shift = {
-       MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
+       MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT),
+       SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, __SHIFT)
 };
 
 static const struct dcn_mpc_mask mpc_mask = {
-       MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),
+       MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),
+       SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, _MASK)
 };
 
 #define tg_regs(id)\
index 22f421e82733b04fa91ca859d5d8ab00646e4f4c..a023a4d59f412ed76cb1fc8308c358c8925f8711 100644 (file)
@@ -2294,7 +2294,8 @@ void dcn20_fpga_init_hw(struct dc *dc)
 
        REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
        REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
-       REG_WRITE(REFCLK_CNTL, 0);
+       if (REG(REFCLK_CNTL))
+               REG_WRITE(REFCLK_CNTL, 0);
        //
 
 
index 1e73357eda340acace5542a64e6045d6fb65b791..8334bbd6eabbe1e7e1f485e7ea853d368d9437c8 100644 (file)
@@ -52,6 +52,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
        .disable_plane = dcn20_disable_plane,
        .pipe_control_lock = dcn20_pipe_control_lock,
        .interdependent_update_lock = dcn10_lock_all_pipes,
+       .cursor_lock = dcn10_cursor_lock,
        .prepare_bandwidth = dcn20_prepare_bandwidth,
        .optimize_bandwidth = dcn20_optimize_bandwidth,
        .update_bandwidth = dcn20_update_bandwidth,
@@ -82,6 +83,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
        .init_vm_ctx = dcn20_init_vm_ctx,
        .set_flip_control_gsl = dcn20_set_flip_control_gsl,
        .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+       .calc_vupdate_position = dcn10_calc_vupdate_position,
 };
 
 static const struct hwseq_private_funcs dcn20_private_funcs = {
index de9c857ab3e97abf9bd8ca0e53896a391b46e55f..570dfd9a243f732e878fc4b7e3365d9fa3fc28e5 100644 (file)
@@ -545,6 +545,7 @@ const struct mpc_funcs dcn20_mpc_funcs = {
        .mpc_init = mpc1_mpc_init,
        .mpc_init_single_inst = mpc1_mpc_init_single_inst,
        .update_blending = mpc2_update_blending,
+       .cursor_lock = mpc1_cursor_lock,
        .get_mpcc_for_dpp = mpc2_get_mpcc_for_dpp,
        .wait_for_idle = mpc2_assert_idle_mpcc,
        .assert_mpcc_idle_before_connect = mpc2_assert_mpcc_idle_before_connect,
index c78fd5123497b6f14a4b043aba3c01aec9ba4297..496658f420dbdb6cb167059bc3a84f48023b262c 100644 (file)
        SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MAX_G_Y, mask_sh),\
        SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MIN_G_Y, mask_sh),\
        SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\
-       SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh)
+       SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh),\
+       SF(CUR_VUPDATE_LOCK_SET0, CUR_VUPDATE_LOCK_SET, mask_sh)
 
 /*
  *     DCN2 MPC_OCSC debug status register:
index 5cdbba0cd87316a7df2611b434f885412e7f4fd4..e4348e3b638985a9d68ef14b7b15a1f9c36696c7 100644 (file)
@@ -508,6 +508,10 @@ enum dcn20_clk_src_array_id {
        .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
                                        mm ## block ## id ## _ ## reg_name
 
+#define VUPDATE_SRII(reg_name, block, id)\
+       .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+                                       mm ## reg_name ## _ ## block ## id
+
 /* NBIO */
 #define NBIO_BASE_INNER(seg) \
        NBIO_BASE__INST0_SEG ## seg
@@ -3064,25 +3068,32 @@ validate_out:
        return out;
 }
 
-
-bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
-               bool fast_validate)
+/*
+ * This must be noinline to ensure that anything dealing with FP registers
+ * is contained within this call; previously, compiling with hard-float
+ * could emit FP instructions outside the boundaries of the DC_FP_START/END
+ * macros, which makes sense: the compiler has no idea which statements are
+ * wrapped and which are not.
+ *
+ * This is largely a workaround to avoid breakage introduced with 5.6;
+ * ideally, all FP-using code should be moved into its own file, only that
+ * file should be compiled with hard-float, and everything exported from it
+ * should be strictly wrapped with DC_FP_START/END.
+ */
+static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc,
+               struct dc_state *context, bool fast_validate)
 {
        bool voltage_supported = false;
        bool full_pstate_supported = false;
        bool dummy_pstate_supported = false;
        double p_state_latency_us;
 
-       DC_FP_START();
        p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us;
        context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support =
                dc->debug.disable_dram_clock_change_vactive_support;
 
        if (fast_validate) {
-               voltage_supported = dcn20_validate_bandwidth_internal(dc, context, true);
-
-               DC_FP_END();
-               return voltage_supported;
+               return dcn20_validate_bandwidth_internal(dc, context, true);
        }
 
        // Best case, we support full UCLK switch latency
@@ -3111,7 +3122,15 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
 
 restore_dml_state:
        context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us;
+       return voltage_supported;
+}
 
+bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
+               bool fast_validate)
+{
+       bool voltage_supported = false;
+       DC_FP_START();
+       voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate);
        DC_FP_END();
        return voltage_supported;
 }
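
The fix generalizes to a simple pattern: confine every FP-touching statement to a noinline helper, and let the caller do nothing between DC_FP_START() and DC_FP_END() except invoke it. A minimal sketch, assuming the DC_FP_* macros and not tied to this particular function:

	/* Illustrative pattern, not part of the patch. The noinline
	 * boundary stops the compiler from hoisting FP instructions out
	 * of the kernel-FPU region opened by DC_FP_START().
	 */
	static noinline bool compute_with_fp(struct dc *dc,
					     struct dc_state *context)
	{
		/* all floating-point math confined here */
		return true;
	}

	bool compute(struct dc *dc, struct dc_state *context)
	{
		bool ok;

		DC_FP_START();
		ok = compute_with_fp(dc, context);
		DC_FP_END();

		return ok;
	}
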
index b9ff9767e08fd4680f5f8fb512b81dc5fb7936d8..4dd634118df2effa75d328eadd7f520dfd02dae7 100644 (file)
@@ -53,6 +53,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
        .disable_plane = dcn20_disable_plane,
        .pipe_control_lock = dcn20_pipe_control_lock,
        .interdependent_update_lock = dcn10_lock_all_pipes,
+       .cursor_lock = dcn10_cursor_lock,
        .prepare_bandwidth = dcn20_prepare_bandwidth,
        .optimize_bandwidth = dcn20_optimize_bandwidth,
        .update_bandwidth = dcn20_update_bandwidth,
@@ -85,6 +86,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
        .optimize_pwr_state = dcn21_optimize_pwr_state,
        .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
        .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+       .calc_vupdate_position = dcn10_calc_vupdate_position,
        .set_cursor_position = dcn10_set_cursor_position,
        .set_cursor_attribute = dcn10_set_cursor_attribute,
        .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
index b25484aa8222f440f0c0910e7e52139adf10e90d..a721bb401ef0822cfdedbe384e4c08745bc7f3e1 100644 (file)
@@ -284,7 +284,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
        .dram_channel_width_bytes = 4,
        .fabric_datapath_to_dcn_data_return_bytes = 32,
        .dcn_downspread_percent = 0.5,
-       .downspread_percent = 0.5,
+       .downspread_percent = 0.38,
        .dram_page_open_time_ns = 50.0,
        .dram_rw_turnaround_time_ns = 17.5,
        .dram_return_buffer_per_channel_bytes = 8192,
@@ -340,6 +340,10 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
        .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
                                        mm ## block ## id ## _ ## reg_name
 
+#define VUPDATE_SRII(reg_name, block, id)\
+       .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+                                       mm ## reg_name ## _ ## block ## id
+
 /* NBIO */
 #define NBIO_BASE_INNER(seg) \
        NBIF0_BASE__INST0_SEG ## seg
@@ -1374,64 +1378,49 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
 {
        struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
        struct clk_limit_table *clk_table = &bw_params->clk_table;
-       unsigned int i, j, k;
-       int closest_clk_lvl;
+       struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
+       unsigned int i, j, closest_clk_lvl;
 
        // Default clock levels are used for diags, which may lead to overclocking.
-       if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && !IS_DIAG_DC(dc->ctx->dce_environment)) {
+       if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
                dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
                dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
                dcn2_1_soc.num_chans = bw_params->num_channels;
 
-               /* Vmin: leave lowest DCN clocks, override with dcfclk, fclk, memclk from fuse */
-               dcn2_1_soc.clock_limits[0].state = 0;
-               dcn2_1_soc.clock_limits[0].dcfclk_mhz = clk_table->entries[0].dcfclk_mhz;
-               dcn2_1_soc.clock_limits[0].fabricclk_mhz = clk_table->entries[0].fclk_mhz;
-               dcn2_1_soc.clock_limits[0].socclk_mhz = clk_table->entries[0].socclk_mhz;
-               dcn2_1_soc.clock_limits[0].dram_speed_mts = clk_table->entries[0].memclk_mhz * 2;
-
-               /*
-                * Other levels: find closest DCN clocks that fit the given clock limit using dcfclk
-                * as indicator
-                */
-
-               closest_clk_lvl = -1;
-               /* index currently being filled */
-               k = 1;
-               for (i = 1; i < clk_table->num_entries; i++) {
-                       /* loop backwards, skip duplicate state*/
-                       for (j = dcn2_1_soc.num_states - 1; j >= k; j--) {
+               ASSERT(clk_table->num_entries);
+               for (i = 0; i < clk_table->num_entries; i++) {
+                       /* loop backwards */
+                       for (closest_clk_lvl = 0, j = dcn2_1_soc.num_states - 1; j >= 0; j--) {
                                if ((unsigned int) dcn2_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
                                        closest_clk_lvl = j;
                                        break;
                                }
                        }
 
-                       /* if found a lvl that fits, use the DCN clks from it, if not, go to next clk limit*/
-                       if (closest_clk_lvl != -1) {
-                               dcn2_1_soc.clock_limits[k].state = i;
-                               dcn2_1_soc.clock_limits[k].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
-                               dcn2_1_soc.clock_limits[k].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
-                               dcn2_1_soc.clock_limits[k].socclk_mhz = clk_table->entries[i].socclk_mhz;
-                               dcn2_1_soc.clock_limits[k].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
-
-                               dcn2_1_soc.clock_limits[k].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
-                               dcn2_1_soc.clock_limits[k].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
-                               dcn2_1_soc.clock_limits[k].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
-                               dcn2_1_soc.clock_limits[k].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
-                               dcn2_1_soc.clock_limits[k].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
-                               dcn2_1_soc.clock_limits[k].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
-                               dcn2_1_soc.clock_limits[k].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
-                               k++;
-                       }
+                       clock_limits[i].state = i;
+                       clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+                       clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+                       clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+                       clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+
+                       clock_limits[i].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+                       clock_limits[i].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+                       clock_limits[i].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+                       clock_limits[i].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+                       clock_limits[i].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+                       clock_limits[i].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+                       clock_limits[i].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+               }
+               for (i = 0; i < clk_table->num_entries; i++)
+                       dcn2_1_soc.clock_limits[i] = clock_limits[i];
+               if (clk_table->num_entries) {
+                       dcn2_1_soc.num_states = clk_table->num_entries;
+                       /* duplicate last level */
+                       dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1];
+                       dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states;
                }
-               dcn2_1_soc.num_states = k;
        }
 
-       /* duplicate last level */
-       dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1];
-       dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states;
-
        dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
 }
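
Two idioms in the rewrite are worth noting: the new table is staged in a local clock_limits[] and committed to dcn2_1_soc only once complete, and the top level is duplicated so DML lookups just past the highest fused clock still resolve. The padding step in isolation (illustrative; n stands for the new num_states):

	/* Illustrative only: make index n valid by copying level n - 1. */
	dcn2_1_soc.clock_limits[n] = dcn2_1_soc.clock_limits[n - 1];
	dcn2_1_soc.clock_limits[n].state = n;
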
 
index 7ee8b8460a9ba4c60ab01492216bc81bb0f05161..e34c3376efc1bf5822104bb20f0064e94ac73740 100644 (file)
@@ -63,10 +63,8 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags)
 endif
 CFLAGS_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dml_common_defs.o := $(dml_ccflags)
 
-DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \
-       dml_common_defs.o
+DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o
 
 ifdef CONFIG_DRM_AMD_DC_DCN
 DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o
index 8c86b63ddf077901ab6f935f97fdca216453dc6d..1e557ddcb63858520b694121f1f9896c26fc670e 100644 (file)
@@ -26,7 +26,6 @@
 #ifndef __DML20_DISPLAY_RQ_DLG_CALC_H__
 #define __DML20_DISPLAY_RQ_DLG_CALC_H__
 
-#include "../dml_common_defs.h"
 #include "../display_rq_dlg_helpers.h"
 
 struct display_mode_lib;
index 0378406bf7e7739ffeb102d9cb7b303fb10abeac..0d53e871a9d1e18e6a7086fb9f55fea9ec5a47a7 100644 (file)
@@ -26,7 +26,6 @@
 #ifndef __DML20V2_DISPLAY_RQ_DLG_CALC_H__
 #define __DML20V2_DISPLAY_RQ_DLG_CALC_H__
 
-#include "../dml_common_defs.h"
 #include "../display_rq_dlg_helpers.h"
 
 struct display_mode_lib;
index a38baa73d4841af90d8ea5381ba4fc9ef7d22861..b8ec08e3b7a36da30e6ae22db50153bb27a3c4b7 100644 (file)
@@ -1200,7 +1200,7 @@ static void dml_rq_dlg_get_dlg_params(
        min_hratio_fact_l = 1.0;
        min_hratio_fact_c = 1.0;
 
-       if (htaps_l <= 1)
+       if (hratio_l <= 1)
                min_hratio_fact_l = 2.0;
        else if (htaps_l <= 6) {
                if ((hratio_l * 2.0) > 4.0)
@@ -1216,7 +1216,7 @@ static void dml_rq_dlg_get_dlg_params(
 
        hscale_pixel_rate_l = min_hratio_fact_l * dppclk_freq_in_mhz;
 
-       if (htaps_c <= 1)
+       if (hratio_c <= 1)
                min_hratio_fact_c = 2.0;
        else if (htaps_c <= 6) {
                if ((hratio_c * 2.0) > 4.0)
@@ -1522,8 +1522,8 @@ static void dml_rq_dlg_get_dlg_params(
 
        disp_dlg_regs->refcyc_per_vm_group_vblank   = get_refcyc_per_vm_group_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
        disp_dlg_regs->refcyc_per_vm_group_flip     = get_refcyc_per_vm_group_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
-       disp_dlg_regs->refcyc_per_vm_req_vblank     = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
-       disp_dlg_regs->refcyc_per_vm_req_flip       = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
+       disp_dlg_regs->refcyc_per_vm_req_vblank     = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10);
+       disp_dlg_regs->refcyc_per_vm_req_flip       = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10);
 
        // Clamp to max for now
        if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int)dml_pow(2, 23))
index 83e95f8cbff223c8823ca9735bac06560f7a9ad1..e8f7785e3fc639188dd78224846811f029d1fca4 100644 (file)
@@ -26,7 +26,7 @@
 #ifndef __DML21_DISPLAY_RQ_DLG_CALC_H__
 #define __DML21_DISPLAY_RQ_DLG_CALC_H__
 
-#include "../dml_common_defs.h"
+#include "dm_services.h"
 #include "../display_rq_dlg_helpers.h"
 
 struct display_mode_lib;
index cf2758ca5b02f0608bf9ecc45588f323efe867e5..c77c3d827e4afe61c1bccbc735aaba0906d58efb 100644 (file)
 #ifndef __DISPLAY_MODE_LIB_H__
 #define __DISPLAY_MODE_LIB_H__
 
-
-#include "dml_common_defs.h"
+#include "dm_services.h"
+#include "dc_features.h"
+#include "display_mode_structs.h"
+#include "display_mode_enums.h"
 #include "display_mode_vba.h"
 
 enum dml_project {
index 5d82fc5a7ed7258d4c34217272eb2a7d3cf27728..3a734171f083407f105dab8a924f188a24952b7d 100644 (file)
@@ -27,8 +27,6 @@
 #ifndef __DML2_DISPLAY_MODE_VBA_H__
 #define __DML2_DISPLAY_MODE_VBA_H__
 
-#include "dml_common_defs.h"
-
 struct display_mode_lib;
 
 void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib);
index 1f24db830737c4e6e8686a604b1f8f83f18261c3..2555ef0358c25e8a6ba620f52bf6c2a7e2ae2b10 100644 (file)
@@ -26,7 +26,6 @@
 #ifndef __DISPLAY_RQ_DLG_HELPERS_H__
 #define __DISPLAY_RQ_DLG_HELPERS_H__
 
-#include "dml_common_defs.h"
 #include "display_mode_lib.h"
 
 /* Function: Printer functions
index 304164986bd8d6655564910b4bd7ee5b049a8500..9c06913ad767668d51433a3788348581632e067a 100644 (file)
@@ -26,8 +26,6 @@
 #ifndef __DISPLAY_RQ_DLG_CALC_H__
 #define __DISPLAY_RQ_DLG_CALC_H__
 
-#include "dml_common_defs.h"
-
 struct display_mode_lib;
 
 #include "display_rq_dlg_helpers.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c
deleted file mode 100644 (file)
index 723af0b..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#include "dml_common_defs.h"
-#include "dcn_calc_math.h"
-
-#include "dml_inline_defs.h"
-
-double dml_round(double a)
-{
-       double round_pt = 0.5;
-       double ceil = dml_ceil(a, 1);
-       double floor = dml_floor(a, 1);
-
-       if (a - floor >= round_pt)
-               return ceil;
-       else
-               return floor;
-}
-
-
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h
deleted file mode 100644 (file)
index f78cbae..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef __DC_COMMON_DEFS_H__
-#define __DC_COMMON_DEFS_H__
-
-#include "dm_services.h"
-#include "dc_features.h"
-#include "display_mode_structs.h"
-#include "display_mode_enums.h"
-
-
-double dml_round(double a);
-
-#endif /* __DC_COMMON_DEFS_H__ */
index ded71ea82413df3ef8b537099eb0860e270a8485..02e06c9b32302efe09c1f3288501ebb5d8989a97 100644 (file)
@@ -26,7 +26,6 @@
 #ifndef __DML_INLINE_DEFS_H__
 #define __DML_INLINE_DEFS_H__
 
-#include "dml_common_defs.h"
 #include "dcn_calc_math.h"
 #include "dml_logger.h"
 
@@ -75,6 +74,18 @@ static inline double dml_floor(double a, double granularity)
        return (double) dcn_bw_floor2(a, granularity);
 }
 
+static inline double dml_round(double a)
+{
+       double round_pt = 0.5;
+       double ceil = dml_ceil(a, 1);
+       double floor = dml_floor(a, 1);
+
+       if (a - floor >= round_pt)
+               return ceil;
+       else
+               return floor;
+}
+
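
With dml_round() now sitting next to dml_ceil()/dml_floor(), its half-up behaviour is easy to spot-check (illustrative values, not part of the patch):

	ASSERT(dml_round(2.49) == 2.0); /* 2.49 - 2.0 < 0.5, take the floor */
	ASSERT(dml_round(2.50) == 3.0); /* 2.50 - 2.0 >= 0.5, take the ceil */
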
 static inline int dml_log2(double x)
 {
        return dml_round((double)dcn_bw_log(x, 2));
@@ -112,7 +123,7 @@ static inline double dml_log(double x, double base)
 
 static inline unsigned int dml_round_to_multiple(unsigned int num,
                                                 unsigned int multiple,
-                                                bool up)
+                                                unsigned char up)
 {
        unsigned int remainder;
 
index 094afc4c817310332dd78521ee1ebe56e79094b1..50ee8aa7ec3b3317ababfea194699af9d9aaef48 100644 (file)
@@ -210,6 +210,22 @@ struct mpc_funcs {
                struct mpcc_blnd_cfg *blnd_cfg,
                int mpcc_id);
 
+       /*
+        * Lock cursor updates for the specified OPP.
+        * The OPP defines the set of MPCCs that are locked together for cursor.
+        *
+        * Parameters:
+        * [in] mpc    - MPC context.
+        * [in] opp_id - The OPP to lock cursor updates on.
+        * [in] lock   - Lock (true) or unlock (false) the OPP.
+        *
+        * Return: void
+        */
+       void (*cursor_lock)(
+                       struct mpc *mpc,
+                       int opp_id,
+                       bool lock);
+
        struct mpcc* (*get_mpcc_for_dpp)(
                        struct mpc_tree *tree,
                        int dpp_id);
index d4c1fb242c6378a0b8439a7075eba0f288a89fbc..08307f3796e3f78dac581f54d9d89157905a7c39 100644 (file)
@@ -86,11 +86,17 @@ struct hw_sequencer_funcs {
                        struct dc_state *context, bool lock);
        void (*set_flip_control_gsl)(struct pipe_ctx *pipe_ctx,
                        bool flip_immediate);
+       void (*cursor_lock)(struct dc *dc, struct pipe_ctx *pipe, bool lock);
 
        /* Timing Related */
        void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes,
                        struct crtc_position *position);
        int (*get_vupdate_offset_from_vsync)(struct pipe_ctx *pipe_ctx);
+       void (*calc_vupdate_position)(
+                       struct dc *dc,
+                       struct pipe_ctx *pipe_ctx,
+                       uint32_t *start_line,
+                       uint32_t *end_line);
        void (*enable_per_frame_crtc_position_reset)(struct dc *dc,
                        int group_size, struct pipe_ctx *grouped_pipes[]);
        void (*enable_timing_synchronization)(struct dc *dc,
index c34eba19860a32a9952f26ab094dd7266552c953..6d7bca562eec00cb2093edbf6d09e833a6382206 100644 (file)
 #define ASSERT(expr) ASSERT_CRITICAL(expr)
 
 #else
-#define ASSERT(expr) WARN_ON(!(expr))
+#define ASSERT(expr) WARN_ON_ONCE(!(expr))
 #endif
 
 #define BREAK_TO_DEBUGGER() ASSERT(0)
index 2a12614a12c224d9c7f9c3d532d91a20f8024e4e..8e2acb4df860b800474efd8f5fce1cdc5a1f7ca4 100644 (file)
@@ -319,12 +319,12 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
                if (*level & profile_mode_mask) {
                        hwmgr->saved_dpm_level = hwmgr->dpm_level;
                        hwmgr->en_umd_pstate = true;
-                       amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
-                                               AMD_IP_BLOCK_TYPE_GFX,
-                                               AMD_CG_STATE_UNGATE);
                        amdgpu_device_ip_set_powergating_state(hwmgr->adev,
                                        AMD_IP_BLOCK_TYPE_GFX,
                                        AMD_PG_STATE_UNGATE);
+                       amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
+                                               AMD_IP_BLOCK_TYPE_GFX,
+                                               AMD_CG_STATE_UNGATE);
                }
        } else {
                /* exit umd pstate, restore level, enable gfx cg*/
@@ -1435,7 +1435,8 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
        if (!hwmgr)
                return -EINVAL;
 
-       if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_capability)
+       if (!(hwmgr->not_vf && amdgpu_dpm) ||
+               !hwmgr->hwmgr_func->get_asic_baco_capability)
                return 0;
 
        mutex_lock(&hwmgr->smu_lock);
@@ -1452,8 +1453,7 @@ static int pp_get_asic_baco_state(void *handle, int *state)
        if (!hwmgr)
                return -EINVAL;
 
-       if (!(hwmgr->not_vf && amdgpu_dpm) ||
-               !hwmgr->hwmgr_func->get_asic_baco_state)
+       if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
                return 0;
 
        mutex_lock(&hwmgr->smu_lock);
@@ -1470,7 +1470,8 @@ static int pp_set_asic_baco_state(void *handle, int state)
        if (!hwmgr)
                return -EINVAL;
 
-       if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_asic_baco_state)
+       if (!(hwmgr->not_vf && amdgpu_dpm) ||
+               !hwmgr->hwmgr_func->set_asic_baco_state)
                return 0;
 
        mutex_lock(&hwmgr->smu_lock);
index e8b27fab6aa1d3eb66aa470942910c807e1fa22f..e77046931e4c6beb3c908fe2040496d73f3abba1 100644 (file)
@@ -1476,7 +1476,7 @@ static int smu_disable_dpm(struct smu_context *smu)
        bool use_baco = !smu->is_apu &&
                ((adev->in_gpu_reset &&
                  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
-                (adev->in_runpm && amdgpu_asic_supports_baco(adev)));
+                ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));
 
        ret = smu_get_smc_version(smu, NULL, &smu_version);
        if (ret) {
@@ -1744,12 +1744,12 @@ static int smu_enable_umd_pstate(void *handle,
                if (*level & profile_mode_mask) {
                        smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
                        smu_dpm_ctx->enable_umd_pstate = true;
-                       amdgpu_device_ip_set_clockgating_state(smu->adev,
-                                                              AMD_IP_BLOCK_TYPE_GFX,
-                                                              AMD_CG_STATE_UNGATE);
                        amdgpu_device_ip_set_powergating_state(smu->adev,
                                                               AMD_IP_BLOCK_TYPE_GFX,
                                                               AMD_PG_STATE_UNGATE);
+                       amdgpu_device_ip_set_clockgating_state(smu->adev,
+                                                              AMD_IP_BLOCK_TYPE_GFX,
+                                                              AMD_CG_STATE_UNGATE);
                }
        } else {
                /* exit umd pstate, restore level, enable gfx cg*/
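
Both powerplay hunks above make the same swap when entering the UMD stable pstate: GFX powergating is now ungated before clockgating instead of after. A minimal sketch of the invariant, assuming the reason is that the block must be fully powered before its clock gating is changed (the helpers below are hypothetical stand-ins for the amdgpu_device_ip_set_*gating_state() calls):

    #include <stdio.h>

    enum gate_state { STATE_GATE, STATE_UNGATE };

    /* Hypothetical stand-ins for amdgpu_device_ip_set_powergating_state()
     * and amdgpu_device_ip_set_clockgating_state() on the GFX block. */
    static void set_powergating(enum gate_state s)
    {
        printf("PG -> %s\n", s == STATE_UNGATE ? "ungate" : "gate");
    }

    static void set_clockgating(enum gate_state s)
    {
        printf("CG -> %s\n", s == STATE_UNGATE ? "ungate" : "gate");
    }

    /* Entering the UMD pstate: ungate PG first, then CG, the order both
     * hunks establish; leaving would use the reverse. */
    static void enter_umd_pstate(void)
    {
        set_powergating(STATE_UNGATE);
        set_clockgating(STATE_UNGATE);
    }

    int main(void)
    {
        enter_umd_pstate();
        return 0;
    }
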
index 283615e448386606183f75983b14a49ca3cc86ae..9d89ebf3a749d12cd3543971502e357462a55a47 100644 (file)
@@ -3442,8 +3442,12 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
        drm_dp_queue_down_tx(mgr, txmsg);
 
        ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
-       if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
-               ret = -EIO;
+       if (ret > 0) {
+               if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
+                       ret = -EIO;
+               else
+                       ret = size;
+       }
 
        kfree(txmsg);
 fail_put:
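
With the change above, a successful sideband DPCD write reports the number of bytes written rather than the transaction's raw positive return, so MST callers see the same convention as drm_dp_dpcd_write(). A sketch of that convention; do_sideband_write() is a hypothetical stand-in for the queue-and-wait pair:

    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical transport: returns >0 on a completed transaction and
     * sets *nak when the sink refused the request. */
    static int do_sideband_write(const void *buf, size_t size, int *nak)
    {
        (void)buf; (void)size;
        *nak = 0;
        return 1;
    }

    static int dpcd_write(const void *buf, size_t size)
    {
        int nak;
        int ret = do_sideband_write(buf, size, &nak);

        if (ret > 0) {
            if (nak)
                ret = -EIO;      /* sink NAKed the write */
            else
                ret = (int)size; /* bytes written, not the raw reply */
        }
        return ret;
    }

    int main(void)
    {
        char data[4] = { 0 };

        /* A caller can now test ret == sizeof(data) for full success. */
        printf("wrote %d bytes\n", dpcd_write(data, sizeof(data)));
        return 0;
    }
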
index 116451101426d40e7acf0c8b002ed4bba823378e..d96e3ce3e5359ef5d96438cfdd74500246fff621 100644 (file)
@@ -191,10 +191,11 @@ static const struct edid_quirk {
        { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
        { "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP },
 
-       /* Oculus Rift DK1, DK2, and CV1 VR Headsets */
+       /* Oculus Rift DK1, DK2, CV1 and Rift S VR Headsets */
        { "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP },
        { "OVR", 0x0003, EDID_QUIRK_NON_DESKTOP },
        { "OVR", 0x0004, EDID_QUIRK_NON_DESKTOP },
+       { "OVR", 0x0012, EDID_QUIRK_NON_DESKTOP },
 
        /* Windows Mixed Reality Headsets */
        { "ACR", 0x7fce, EDID_QUIRK_NON_DESKTOP },
@@ -5111,7 +5112,7 @@ static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *d
        struct drm_display_mode *mode;
        unsigned pixel_clock = (timings->pixel_clock[0] |
                                (timings->pixel_clock[1] << 8) |
-                               (timings->pixel_clock[2] << 16));
+                               (timings->pixel_clock[2] << 16)) + 1;
        unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1;
        unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1;
        unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1;
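
The added +1 restores the DisplayID rule that detailed-timing fields are stored as value minus one: the hactive/hblank/hsync decodes alongside it already add 1, but the 3-byte pixel clock did not. A stand-alone decode under that assumption (the struct layout here is a simplified stand-in, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    struct displayid_timing_raw {
        uint8_t pixel_clock[3];   /* stored as (clock / 10 kHz) - 1, little-endian */
        uint8_t hactive[2];       /* stored as hactive - 1 */
    };

    static unsigned decode_pixel_clock_10khz(const struct displayid_timing_raw *t)
    {
        return (t->pixel_clock[0] |
                (t->pixel_clock[1] << 8) |
                (t->pixel_clock[2] << 16)) + 1;  /* the +1 the fix restores */
    }

    int main(void)
    {
        /* 148.50 MHz = 14850 * 10 kHz, stored as 14849 = 0x003a01 */
        struct displayid_timing_raw t = { { 0x01, 0x3a, 0x00 }, { 0, 0 } };

        printf("%u * 10 kHz\n", decode_pixel_clock_10khz(&t));  /* 14850 */
        return 0;
    }
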
index 7f386adcf87256ed70c73a43eae3908188dff5af..910108ccaae1dbc8d8b0aeee6249098ff12665e9 100644 (file)
@@ -241,8 +241,12 @@ static int drm_hdcp_request_srm(struct drm_device *drm_dev,
 
        ret = request_firmware_direct(&fw, (const char *)fw_name,
                                      drm_dev->dev);
-       if (ret < 0)
+       if (ret < 0) {
+               *revoked_ksv_cnt = 0;
+               *revoked_ksv_list = NULL;
+               ret = 0;
                goto exit;
+       }
 
        if (fw->size && fw->data)
                ret = drm_hdcp_srm_update(fw->data, fw->size, revoked_ksv_list,
@@ -287,6 +291,8 @@ int drm_hdcp_check_ksvs_revoked(struct drm_device *drm_dev, u8 *ksvs,
 
        ret = drm_hdcp_request_srm(drm_dev, &revoked_ksv_list,
                                   &revoked_ksv_cnt);
+       if (ret)
+               return ret;
 
        /* revoked_ksv_cnt will be zero when above function failed */
        for (i = 0; i < revoked_ksv_cnt; i++)
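
Taken together, the two hdcp hunks make a missing SRM firmware blob non-fatal: the revocation list comes back empty and HDCP enablement proceeds, while a genuine failure now propagates to the caller instead of being silently ignored. A sketch of that fallback, with load_srm_blob() as a hypothetical stand-in for request_firmware_direct():

    #include <errno.h>
    #include <stddef.h>

    /* Hypothetical loader: returns -ENOENT when no SRM blob is installed. */
    static int load_srm_blob(const unsigned char **data, size_t *size)
    {
        *data = NULL;
        *size = 0;
        return -ENOENT;
    }

    /* On a missing blob, report "no revoked KSVs" and succeed; only a
     * malformed blob is an error, mirroring the fixed control flow. */
    static int get_revoked_ksvs(unsigned char **list, unsigned *count)
    {
        const unsigned char *data;
        size_t size;
        int ret = load_srm_blob(&data, &size);

        if (ret < 0) {
            *list = NULL;
            *count = 0;
            return 0;           /* no SRM installed: nothing is revoked */
        }

        /* parse_srm(data, size, list, count) would go here */
        return ret;
    }

    int main(void)
    {
        unsigned char *list;
        unsigned count;

        return get_revoked_ksvs(&list, &count);  /* 0: HDCP may proceed */
    }
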
index 3b0afa156d92994ad21aef0d910e95b6f0a45240..54def341c1dbe7c83df0e4b57fbf57f7c784031f 100644 (file)
@@ -238,8 +238,10 @@ static int submit_pin_objects(struct etnaviv_gem_submit *submit)
                }
 
                if ((submit->flags & ETNA_SUBMIT_SOFTPIN) &&
-                    submit->bos[i].va != mapping->iova)
+                    submit->bos[i].va != mapping->iova) {
+                       etnaviv_gem_mapping_unreference(mapping);
                        return -EINVAL;
+               }
 
                atomic_inc(&etnaviv_obj->gpu_active);
 
index e6795bafcbb9768e84035be679103f4a9709948f..75f9db8f7becc0c2dcc1bd9735a8b997eec9c9dc 100644 (file)
@@ -453,7 +453,7 @@ static const struct etnaviv_pm_domain *pm_domain(const struct etnaviv_gpu *gpu,
                if (!(gpu->identity.features & meta->feature))
                        continue;
 
-               if (meta->nr_domains < (index - offset)) {
+               if (index - offset >= meta->nr_domains) {
                        offset += meta->nr_domains;
                        continue;
                }
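
The corrected comparison is the usual bounds test for indexing into a concatenation of feature-gated tables; the old `meta->nr_domains < (index - offset)` was both inverted and off by one, so `index - offset == nr_domains` fell through into an out-of-bounds read. A self-contained sketch of the fixed walk:

    #include <stddef.h>

    struct domain_meta {
        unsigned feature;
        size_t nr;
        const char **names;
    };

    /* Return the index'th name across all tables whose feature bit is set,
     * or NULL when index is out of range. The bounds test matches the
     * corrected "index - offset >= nr" form. */
    static const char *pick_domain(const struct domain_meta *tabs, size_t ntabs,
                                   unsigned features, size_t index)
    {
        size_t offset = 0;

        for (size_t i = 0; i < ntabs; i++) {
            const struct domain_meta *m = &tabs[i];

            if (!(features & m->feature))
                continue;

            if (index - offset >= m->nr) {  /* not in this table: skip it */
                offset += m->nr;
                continue;
            }
            return m->names[index - offset];
        }
        return NULL;
    }

    int main(void)
    {
        static const char *hw[] = { "HI", "PE" };
        static const char *mc[] = { "MC" };
        struct domain_meta tabs[] = {
            { .feature = 1u << 0, .nr = 2, .names = hw },
            { .feature = 1u << 1, .nr = 1, .names = mc },
        };

        /* index 2 correctly lands in the second table; the old comparison
         * would have returned hw[2], one past the end. */
        return pick_domain(tabs, 2, 0x3, 2) == mc[0] ? 0 : 1;
    }
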
index 2e5d835a9eaa866e0a6c8c94b35ff00cbcf04db7..c125ca9ab9b3a93fa4c27da86f29140e4618e9e2 100644 (file)
@@ -485,8 +485,7 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
        if (!ret)
                goto err_llb;
        else if (ret > 1) {
-               DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
-
+               DRM_INFO_ONCE("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
        }
 
        fbc->threshold = ret;
index 0cc40e77bbd2fcb2bb85a01768993a57a08e9a88..4f96c8788a2ec1b396b7b11a35dc377e787260ec 100644 (file)
@@ -368,7 +368,6 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_vma *vma;
 
-       GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
        if (!atomic_read(&obj->bind_count))
                return;
 
@@ -400,12 +399,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 void
 i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
 {
-       struct drm_i915_gem_object *obj = vma->obj;
-
-       assert_object_held(obj);
-
        /* Bump the LRU to try and avoid premature eviction whilst flipping  */
-       i915_gem_object_bump_inactive_ggtt(obj);
+       i915_gem_object_bump_inactive_ggtt(vma->obj);
 
        i915_vma_unpin(vma);
 }
index 37f77aee121212cac2e155fe13f161c74ebbeee6..0158e49bf9bb73cb903c68d9e2ff5e87561bc43d 100644 (file)
@@ -182,21 +182,35 @@ i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
                              int tiling_mode, unsigned int stride)
 {
        struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
-       struct i915_vma *vma;
+       struct i915_vma *vma, *vn;
+       LIST_HEAD(unbind);
        int ret = 0;
 
        if (tiling_mode == I915_TILING_NONE)
                return 0;
 
        mutex_lock(&ggtt->vm.mutex);
+
+       spin_lock(&obj->vma.lock);
        for_each_ggtt_vma(vma, obj) {
+               GEM_BUG_ON(vma->vm != &ggtt->vm);
+
                if (i915_vma_fence_prepare(vma, tiling_mode, stride))
                        continue;
 
+               list_move(&vma->vm_link, &unbind);
+       }
+       spin_unlock(&obj->vma.lock);
+
+       list_for_each_entry_safe(vma, vn, &unbind, vm_link) {
                ret = __i915_vma_unbind(vma);
-               if (ret)
+               if (ret) {
+                       /* Restore the remaining vma on an error */
+                       list_splice(&unbind, &ggtt->vm.bound_list);
                        break;
+               }
        }
+
        mutex_unlock(&ggtt->vm.mutex);
 
        return ret;
@@ -268,6 +282,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
        }
        mutex_unlock(&obj->mm.lock);
 
+       spin_lock(&obj->vma.lock);
        for_each_ggtt_vma(vma, obj) {
                vma->fence_size =
                        i915_gem_fence_size(i915, vma->size, tiling, stride);
@@ -278,6 +293,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
                if (vma->fence)
                        vma->fence->dirty = true;
        }
+       spin_unlock(&obj->vma.lock);
 
        obj->tiling_and_stride = tiling | stride;
        i915_gem_object_unlock(obj);
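
Two things change in the tiling paths above: the for_each_ggtt_vma() walks are now bracketed by obj->vma.lock, and fence_prepare first moves its candidates onto a private list under that lock so the blocking unbinds can run without it, splicing the remainder back on failure. A simplified user-space sketch of that collect-then-process shape (a pthread mutex stands in for the spinlock, and every entry is treated as a candidate):

    #include <pthread.h>
    #include <stddef.h>

    struct vma {
        struct vma *next;
        int bound;
    };

    static pthread_mutex_t vma_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for __i915_vma_unbind(); it may block, so it must not run
     * with vma_lock held. */
    static int unbind_one(struct vma *v)
    {
        v->bound = 0;
        return 0;
    }

    /* Detach the candidates under the lock, unbind with the lock dropped,
     * and splice the untouched remainder back on failure, the shape of
     * the fence_prepare hunk. */
    static int unbind_all(struct vma **bound_list)
    {
        struct vma *todo, *v;
        int ret = 0;

        pthread_mutex_lock(&vma_lock);
        todo = *bound_list;
        *bound_list = NULL;
        pthread_mutex_unlock(&vma_lock);

        while ((v = todo) != NULL) {
            todo = v->next;
            ret = unbind_one(v);
            if (ret) {
                pthread_mutex_lock(&vma_lock);
                v->next = todo;        /* v and the rest go back */
                *bound_list = v;
                pthread_mutex_unlock(&vma_lock);
                break;
            }
        }
        return ret;
    }

    int main(void)
    {
        struct vma b = { NULL, 1 }, a = { &b, 1 };
        struct vma *list = &a;

        return unbind_all(&list);
    }
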
index 2d0fd50c53124428f8fc19fb4e27f9e55987687e..d4f94ca9ae0dd5fe6258b6cc354f636650f4b8b2 100644 (file)
@@ -1477,8 +1477,10 @@ static int igt_ppgtt_pin_update(void *arg)
                unsigned int page_size = BIT(first);
 
                obj = i915_gem_object_create_internal(dev_priv, page_size);
-               if (IS_ERR(obj))
-                       return PTR_ERR(obj);
+               if (IS_ERR(obj)) {
+                       err = PTR_ERR(obj);
+                       goto out_vm;
+               }
 
                vma = i915_vma_instance(obj, vm, NULL);
                if (IS_ERR(vma)) {
@@ -1531,8 +1533,10 @@ static int igt_ppgtt_pin_update(void *arg)
        }
 
        obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE);
-       if (IS_ERR(obj))
-               return PTR_ERR(obj);
+       if (IS_ERR(obj)) {
+               err = PTR_ERR(obj);
+               goto out_vm;
+       }
 
        vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma)) {
index 07cb83a0d0171a48fbedee19944bb0064b142416..ca0d4f4f3615b6d7ab78392bc3e002818938d63f 100644 (file)
@@ -69,7 +69,13 @@ struct intel_context {
 #define CONTEXT_NOPREEMPT              7
 
        u32 *lrc_reg_state;
-       u64 lrc_desc;
+       union {
+               struct {
+                       u32 lrca;
+                       u32 ccid;
+               };
+               u64 desc;
+       } lrc;
        u32 tag; /* cookie passed to HW to track this context on submission */
 
        /* Time on GPU as tracked by the hw. */
index b469de0dd9b656c25ae3e76dcf086836fd1f624c..a1aa0d3e8be1d5c51badcc555dbe94c92aa78953 100644 (file)
@@ -333,13 +333,4 @@ intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
        return intel_engine_has_preemption(engine);
 }
 
-static inline bool
-intel_engine_has_timeslices(const struct intel_engine_cs *engine)
-{
-       if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
-               return false;
-
-       return intel_engine_has_semaphores(engine);
-}
-
 #endif /* _INTEL_RINGBUFFER_H_ */
index 3aa8a652c16dbdd092dba754fad8df057d82ba5b..883a9b7fe88d5a6a0b4398d3a3793b727b015343 100644 (file)
@@ -1295,6 +1295,12 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 
        if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
                drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
+       if (HAS_EXECLISTS(dev_priv)) {
+               drm_printf(m, "\tEL_STAT_HI: 0x%08x\n",
+                          ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
+               drm_printf(m, "\tEL_STAT_LO: 0x%08x\n",
+                          ENGINE_READ(engine, RING_EXECLIST_STATUS_LO));
+       }
        drm_printf(m, "\tRING_START: 0x%08x\n",
                   ENGINE_READ(engine, RING_START));
        drm_printf(m, "\tRING_HEAD:  0x%08x\n",
index 80cdde7128420c21e2b5cbfb9f363d421ac997d5..0be674ae1cf63ff4a2adf665696dca8f8b9b74a1 100644 (file)
@@ -156,6 +156,20 @@ struct intel_engine_execlists {
         */
        struct i915_priolist default_priolist;
 
+       /**
+        * @ccid: identifier for contexts submitted to this engine
+        */
+       u32 ccid;
+
+       /**
+        * @yield: CCID at the time of the last semaphore-wait interrupt.
+        *
+        * Instead of leaving a semaphore busy-spinning on an engine, we would
+        * like to switch to another ready context, i.e. yielding the semaphore
+        * timeslice.
+        */
+       u32 yield;
+
        /**
         * @error_interrupt: CS Master EIR
         *
@@ -295,8 +309,7 @@ struct intel_engine_cs {
        u32 context_size;
        u32 mmio_base;
 
-       unsigned int context_tag;
-#define NUM_CONTEXT_TAG roundup_pow_of_two(2 * EXECLIST_MAX_PORTS)
+       unsigned long context_tag;
 
        struct rb_node uabi_node;
 
@@ -483,10 +496,11 @@ struct intel_engine_cs {
 #define I915_ENGINE_SUPPORTS_STATS   BIT(1)
 #define I915_ENGINE_HAS_PREEMPTION   BIT(2)
 #define I915_ENGINE_HAS_SEMAPHORES   BIT(3)
-#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
-#define I915_ENGINE_IS_VIRTUAL       BIT(5)
-#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(6)
-#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7)
+#define I915_ENGINE_HAS_TIMESLICES   BIT(4)
+#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(5)
+#define I915_ENGINE_IS_VIRTUAL       BIT(6)
+#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(7)
+#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(8)
        unsigned int flags;
 
        /*
@@ -584,6 +598,15 @@ intel_engine_has_semaphores(const struct intel_engine_cs *engine)
        return engine->flags & I915_ENGINE_HAS_SEMAPHORES;
 }
 
+static inline bool
+intel_engine_has_timeslices(const struct intel_engine_cs *engine)
+{
+       if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+               return false;
+
+       return engine->flags & I915_ENGINE_HAS_TIMESLICES;
+}
+
 static inline bool
 intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine)
 {
index f0e7fd95165a7d15058237e578cbb253c76c27d9..0cc7dd54f4f963ae6dc29698c3839a82e1667a0d 100644 (file)
@@ -39,6 +39,15 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
                }
        }
 
+       if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) {
+               WRITE_ONCE(engine->execlists.yield,
+                          ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI));
+               ENGINE_TRACE(engine, "semaphore yield: %08x\n",
+                            engine->execlists.yield);
+               if (del_timer(&engine->execlists.timer))
+                       tasklet = true;
+       }
+
        if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
                tasklet = true;
 
@@ -228,7 +237,8 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt)
        const u32 irqs =
                GT_CS_MASTER_ERROR_INTERRUPT |
                GT_RENDER_USER_INTERRUPT |
-               GT_CONTEXT_SWITCH_INTERRUPT;
+               GT_CONTEXT_SWITCH_INTERRUPT |
+               GT_WAIT_SEMAPHORE_INTERRUPT;
        struct intel_uncore *uncore = gt->uncore;
        const u32 dmask = irqs << 16 | irqs;
        const u32 smask = irqs << 16;
@@ -366,7 +376,8 @@ void gen8_gt_irq_postinstall(struct intel_gt *gt)
        const u32 irqs =
                GT_CS_MASTER_ERROR_INTERRUPT |
                GT_RENDER_USER_INTERRUPT |
-               GT_CONTEXT_SWITCH_INTERRUPT;
+               GT_CONTEXT_SWITCH_INTERRUPT |
+               GT_WAIT_SEMAPHORE_INTERRUPT;
        const u32 gt_interrupts[] = {
                irqs << GEN8_RCS_IRQ_SHIFT | irqs << GEN8_BCS_IRQ_SHIFT,
                irqs << GEN8_VCS0_IRQ_SHIFT | irqs << GEN8_VCS1_IRQ_SHIFT,
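
The new GT_WAIT_SEMAPHORE_INTERRUPT plumbing records which context was caught busy-waiting so the scheduler can treat it as having exhausted its timeslice. A compact sketch of the two halves, mirroring execlists.yield and the timeslice_expired() check added later in this series (the field names are simplified stand-ins):

    #include <stdint.h>

    /* Simplified per-engine state mirroring execlists.ccid/yield. */
    struct engine {
        uint32_t active_ccid;   /* CCID of the context now on the engine */
        uint32_t yield;         /* CCID captured at the wait-semaphore irq */
        int      timer_expired; /* the normal timeslice timer */
    };

    /* Interrupt half: remember who was caught busy-waiting. The kernel
     * reads this from RING_EXECLIST_STATUS_HI and kicks the tasklet. */
    static void on_wait_semaphore_irq(struct engine *e, uint32_t status_ccid)
    {
        e->yield = status_ccid;
    }

    /* Scheduler half: a recorded yield counts as an expired timeslice,
     * so the spinning context is switched out at the next dequeue. */
    static int timeslice_expired(const struct engine *e)
    {
        return e->timer_expired || e->active_ccid == e->yield;
    }

    int main(void)
    {
        struct engine e = { .active_ccid = 0x10, .yield = ~0u };

        on_wait_semaphore_irq(&e, 0x10);
        return timeslice_expired(&e) ? 0 : 1;
    }
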
index 683014e7bc51dfff4e68f807e2668e592059fc2d..2dfaddb8811edf863a1cec02bdefcebfd8941dd0 100644 (file)
@@ -456,10 +456,10 @@ assert_priority_queue(const struct i915_request *prev,
  * engine info, SW context ID and SW counter need to form a unique number
  * (Context ID) per lrc.
  */
-static u64
+static u32
 lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
 {
-       u64 desc;
+       u32 desc;
 
        desc = INTEL_LEGACY_32B_CONTEXT;
        if (i915_vm_is_4lvl(ce->vm))
@@ -470,21 +470,7 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
        if (IS_GEN(engine->i915, 8))
                desc |= GEN8_CTX_L3LLC_COHERENT;
 
-       desc |= i915_ggtt_offset(ce->state); /* bits 12-31 */
-       /*
-        * The following 32bits are copied into the OA reports (dword 2).
-        * Consider updating oa_get_render_ctx_id in i915_perf.c when changing
-        * anything below.
-        */
-       if (INTEL_GEN(engine->i915) >= 11) {
-               desc |= (u64)engine->instance << GEN11_ENGINE_INSTANCE_SHIFT;
-                                                               /* bits 48-53 */
-
-               desc |= (u64)engine->class << GEN11_ENGINE_CLASS_SHIFT;
-                                                               /* bits 61-63 */
-       }
-
-       return desc;
+       return i915_ggtt_offset(ce->state) | desc;
 }
 
 static inline unsigned int dword_in_page(void *addr)
@@ -1192,7 +1178,7 @@ static void reset_active(struct i915_request *rq,
        __execlists_update_reg_state(ce, engine, head);
 
        /* We've switched away, so this should be a no-op, but intent matters */
-       ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
+       ce->lrc.desc |= CTX_DESC_FORCE_RESTORE;
 }
 
 static u32 intel_context_get_runtime(const struct intel_context *ce)
@@ -1251,18 +1237,23 @@ __execlists_schedule_in(struct i915_request *rq)
        if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
                execlists_check_context(ce, engine);
 
-       ce->lrc_desc &= ~GENMASK_ULL(47, 37);
        if (ce->tag) {
                /* Use a fixed tag for OA and friends */
-               ce->lrc_desc |= (u64)ce->tag << 32;
+               GEM_BUG_ON(ce->tag <= BITS_PER_LONG);
+               ce->lrc.ccid = ce->tag;
        } else {
                /* We don't need a strict matching tag, just different values */
-               ce->lrc_desc |=
-                       (u64)(++engine->context_tag % NUM_CONTEXT_TAG) <<
-                       GEN11_SW_CTX_ID_SHIFT;
-               BUILD_BUG_ON(NUM_CONTEXT_TAG > GEN12_MAX_CONTEXT_HW_ID);
+               unsigned int tag = ffs(engine->context_tag);
+
+               GEM_BUG_ON(tag == 0 || tag >= BITS_PER_LONG);
+               clear_bit(tag - 1, &engine->context_tag);
+               ce->lrc.ccid = tag << (GEN11_SW_CTX_ID_SHIFT - 32);
+
+               BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID);
        }
 
+       ce->lrc.ccid |= engine->execlists.ccid;
+
        __intel_gt_pm_get(engine->gt);
        execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
        intel_engine_context_in(engine);
@@ -1302,7 +1293,8 @@ static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
 
 static inline void
 __execlists_schedule_out(struct i915_request *rq,
-                        struct intel_engine_cs * const engine)
+                        struct intel_engine_cs * const engine,
+                        unsigned int ccid)
 {
        struct intel_context * const ce = rq->context;
 
@@ -1320,6 +1312,14 @@ __execlists_schedule_out(struct i915_request *rq,
            i915_request_completed(rq))
                intel_engine_add_retire(engine, ce->timeline);
 
+       ccid >>= GEN11_SW_CTX_ID_SHIFT - 32;
+       ccid &= GEN12_MAX_CONTEXT_HW_ID;
+       if (ccid < BITS_PER_LONG) {
+               GEM_BUG_ON(ccid == 0);
+               GEM_BUG_ON(test_bit(ccid - 1, &engine->context_tag));
+               set_bit(ccid - 1, &engine->context_tag);
+       }
+
        intel_context_update_runtime(ce);
        intel_engine_context_out(engine);
        execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
@@ -1345,15 +1345,17 @@ execlists_schedule_out(struct i915_request *rq)
 {
        struct intel_context * const ce = rq->context;
        struct intel_engine_cs *cur, *old;
+       u32 ccid;
 
        trace_i915_request_out(rq);
 
+       ccid = rq->context->lrc.ccid;
        old = READ_ONCE(ce->inflight);
        do
                cur = ptr_unmask_bits(old, 2) ? ptr_dec(old) : NULL;
        while (!try_cmpxchg(&ce->inflight, &old, cur));
        if (!cur)
-               __execlists_schedule_out(rq, old);
+               __execlists_schedule_out(rq, old, ccid);
 
        i915_request_put(rq);
 }
@@ -1361,7 +1363,7 @@ execlists_schedule_out(struct i915_request *rq)
 static u64 execlists_update_context(struct i915_request *rq)
 {
        struct intel_context *ce = rq->context;
-       u64 desc = ce->lrc_desc;
+       u64 desc = ce->lrc.desc;
        u32 tail, prev;
 
        /*
@@ -1400,7 +1402,7 @@ static u64 execlists_update_context(struct i915_request *rq)
         */
        wmb();
 
-       ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE;
+       ce->lrc.desc &= ~CTX_DESC_FORCE_RESTORE;
        return desc;
 }
 
@@ -1719,6 +1721,9 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl)
                        struct i915_request *w =
                                container_of(p->waiter, typeof(*w), sched);
 
+                       if (p->flags & I915_DEPENDENCY_WEAK)
+                               continue;
+
                        /* Leave semaphores spinning on the other engines */
                        if (w->engine != rq->engine)
                                continue;
@@ -1754,7 +1759,8 @@ static void defer_active(struct intel_engine_cs *engine)
 }
 
 static bool
-need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
+need_timeslice(const struct intel_engine_cs *engine,
+              const struct i915_request *rq)
 {
        int hint;
 
@@ -1768,6 +1774,32 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
        return hint >= effective_prio(rq);
 }
 
+static bool
+timeslice_yield(const struct intel_engine_execlists *el,
+               const struct i915_request *rq)
+{
+       /*
+        * Once bitten, forever smitten!
+        *
+        * If the active context ever busy-waited on a semaphore,
+        * it will be treated as a hog until the end of its timeslice (i.e.
+        * until it is scheduled out and replaced by a new submission,
+        * possibly even its own lite-restore). The HW only sends an interrupt
+        * on the first miss, and we do not know whether that semaphore has been
+        * signaled, or even if it is now stuck on another semaphore. Play
+        * safe, yield if it might be stuck -- it will be given a fresh
+        * timeslice in the near future.
+        */
+       return rq->context->lrc.ccid == READ_ONCE(el->yield);
+}
+
+static bool
+timeslice_expired(const struct intel_engine_execlists *el,
+                 const struct i915_request *rq)
+{
+       return timer_expired(&el->timer) || timeslice_yield(el, rq);
+}
+
 static int
 switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
 {
@@ -1783,8 +1815,7 @@ timeslice(const struct intel_engine_cs *engine)
        return READ_ONCE(engine->props.timeslice_duration_ms);
 }
 
-static unsigned long
-active_timeslice(const struct intel_engine_cs *engine)
+static unsigned long active_timeslice(const struct intel_engine_cs *engine)
 {
        const struct intel_engine_execlists *execlists = &engine->execlists;
        const struct i915_request *rq = *execlists->active;
@@ -1946,13 +1977,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 
                        last = NULL;
                } else if (need_timeslice(engine, last) &&
-                          timer_expired(&engine->execlists.timer)) {
+                          timeslice_expired(execlists, last)) {
                        ENGINE_TRACE(engine,
-                                    "expired last=%llx:%lld, prio=%d, hint=%d\n",
+                                    "expired last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
                                     last->fence.context,
                                     last->fence.seqno,
                                     last->sched.attr.priority,
-                                    execlists->queue_priority_hint);
+                                    execlists->queue_priority_hint,
+                                    yesno(timeslice_yield(execlists, last)));
 
                        ring_set_paused(engine, 1);
                        defer_active(engine);
@@ -2213,6 +2245,7 @@ done:
                }
                clear_ports(port + 1, last_port - port);
 
+               WRITE_ONCE(execlists->yield, -1);
                execlists_submit_ports(engine);
                set_preempt_timeout(engine, *active);
        } else {
@@ -3043,7 +3076,7 @@ __execlists_context_pin(struct intel_context *ce,
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);
 
-       ce->lrc_desc = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE;
+       ce->lrc.lrca = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE;
        ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
        __execlists_update_reg_state(ce, engine, ce->ring->tail);
 
@@ -3072,7 +3105,7 @@ static void execlists_context_reset(struct intel_context *ce)
                                 ce, ce->engine, ce->ring, true);
        __execlists_update_reg_state(ce, ce->engine, ce->ring->tail);
 
-       ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
+       ce->lrc.desc |= CTX_DESC_FORCE_RESTORE;
 }
 
 static const struct intel_context_ops execlists_context_ops = {
@@ -3541,7 +3574,7 @@ static void enable_execlists(struct intel_engine_cs *engine)
 
        enable_error_interrupt(engine);
 
-       engine->context_tag = 0;
+       engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
 }
 
 static bool unexpected_starting_state(struct intel_engine_cs *engine)
@@ -3753,7 +3786,7 @@ out_replay:
                     head, ce->ring->tail);
        __execlists_reset_reg_state(ce, engine);
        __execlists_update_reg_state(ce, engine, head);
-       ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */
+       ce->lrc.desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */
 
 unwind:
        /* Push back any incomplete requests for replay after the reset. */
@@ -4369,8 +4402,11 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
        engine->flags |= I915_ENGINE_SUPPORTS_STATS;
        if (!intel_vgpu_active(engine->i915)) {
                engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
-               if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
+               if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
                        engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+                       if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+                               engine->flags |= I915_ENGINE_HAS_TIMESLICES;
+               }
        }
 
        if (INTEL_GEN(engine->i915) >= 12)
@@ -4449,6 +4485,7 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
        engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
        engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
        engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift;
+       engine->irq_keep_mask |= GT_WAIT_SEMAPHORE_INTERRUPT << shift;
 }
 
 static void rcs_submission_override(struct intel_engine_cs *engine)
@@ -4516,6 +4553,11 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
        else
                execlists->csb_size = GEN11_CSB_ENTRIES;
 
+       if (INTEL_GEN(engine->i915) >= 11) {
+               execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
+               execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
+       }
+
        reset_csb_pointers(engine);
 
        /* Finally, take ownership and responsibility for cleanup! */
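
The lrc changes replace the wrapping context_tag counter with a free bitmap: enable_execlists() seeds it with GENMASK(BITS_PER_LONG - 2, 0), schedule-in claims the lowest set bit via ffs()/clear_bit(), and schedule-out returns the bit once the CCID is decoded back out of the EXECLIST status. A single-threaded sketch of the same allocator (the kernel uses atomic bitops; the reserved top bit mirrors the GENMASK seed):

    #include <assert.h>

    /* Bits 0 .. BITS-2 free; the top tag stays reserved, as in
     * GENMASK(BITS_PER_LONG - 2, 0). */
    static unsigned long tag_pool = ~0UL >> 1;

    static unsigned tag_alloc(void)
    {
        int tag = __builtin_ffsl((long)tag_pool); /* 1-based lowest set bit */

        assert(tag != 0);                 /* 0 would mean the pool is empty */
        tag_pool &= ~(1UL << (tag - 1));  /* clear_bit(tag - 1, &pool) */
        return (unsigned)tag;
    }

    static void tag_free(unsigned tag)
    {
        assert(!(tag_pool & (1UL << (tag - 1)))); /* must be in use */
        tag_pool |= 1UL << (tag - 1);             /* set_bit(tag - 1, &pool) */
    }

    int main(void)
    {
        unsigned a = tag_alloc();   /* 1 */
        unsigned b = tag_alloc();   /* 2 */

        tag_free(a);
        assert(tag_alloc() == a);   /* the lowest freed bit is reused first */
        tag_free(b);
        return 0;
    }
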
index 91debbc97c9ad8baa6790749a797d83a7731d284..08b56d7ab4f45054754373b5b47a541755f4c01b 100644 (file)
@@ -521,6 +521,8 @@ int intel_timeline_read_hwsp(struct i915_request *from,
 
        rcu_read_lock();
        cl = rcu_dereference(from->hwsp_cacheline);
+       if (i915_request_completed(from)) /* confirm cacheline is valid */
+               goto unlock;
        if (unlikely(!i915_active_acquire_if_busy(&cl->active)))
                goto unlock; /* seqno wrapped and completed! */
        if (unlikely(i915_request_completed(from)))
index 6f06ba750a0a8e2518d8ce458ad49355955cefda..f95ae15ce865c0fbad7fd7f4a85e655ba9b0c17e 100644 (file)
@@ -929,7 +929,7 @@ create_rewinder(struct intel_context *ce,
                        goto err;
        }
 
-       cs = intel_ring_begin(rq, 10);
+       cs = intel_ring_begin(rq, 14);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err;
@@ -941,8 +941,8 @@ create_rewinder(struct intel_context *ce,
        *cs++ = MI_SEMAPHORE_WAIT |
                MI_SEMAPHORE_GLOBAL_GTT |
                MI_SEMAPHORE_POLL |
-               MI_SEMAPHORE_SAD_NEQ_SDD;
-       *cs++ = 0;
+               MI_SEMAPHORE_SAD_GTE_SDD;
+       *cs++ = idx;
        *cs++ = offset;
        *cs++ = 0;
 
@@ -951,6 +951,11 @@ create_rewinder(struct intel_context *ce,
        *cs++ = offset + idx * sizeof(u32);
        *cs++ = 0;
 
+       *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+       *cs++ = offset;
+       *cs++ = 0;
+       *cs++ = idx + 1;
+
        intel_ring_advance(rq, cs);
 
        rq->sched.attr.priority = I915_PRIORITY_MASK;
@@ -984,7 +989,7 @@ static int live_timeslice_rewind(void *arg)
 
        for_each_engine(engine, gt, id) {
                enum { A1, A2, B1 };
-               enum { X = 1, Y, Z };
+               enum { X = 1, Z, Y };
                struct i915_request *rq[3] = {};
                struct intel_context *ce;
                unsigned long heartbeat;
@@ -1017,13 +1022,13 @@ static int live_timeslice_rewind(void *arg)
                        goto err;
                }
 
-               rq[0] = create_rewinder(ce, NULL, slot, 1);
+               rq[0] = create_rewinder(ce, NULL, slot, X);
                if (IS_ERR(rq[0])) {
                        intel_context_put(ce);
                        goto err;
                }
 
-               rq[1] = create_rewinder(ce, NULL, slot, 2);
+               rq[1] = create_rewinder(ce, NULL, slot, Y);
                intel_context_put(ce);
                if (IS_ERR(rq[1]))
                        goto err;
@@ -1041,7 +1046,7 @@ static int live_timeslice_rewind(void *arg)
                        goto err;
                }
 
-               rq[2] = create_rewinder(ce, rq[0], slot, 3);
+               rq[2] = create_rewinder(ce, rq[0], slot, Z);
                intel_context_put(ce);
                if (IS_ERR(rq[2]))
                        goto err;
@@ -1055,15 +1060,12 @@ static int live_timeslice_rewind(void *arg)
                GEM_BUG_ON(!timer_pending(&engine->execlists.timer));
 
                /* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */
-               GEM_BUG_ON(!i915_request_is_active(rq[A1]));
-               GEM_BUG_ON(!i915_request_is_active(rq[A2]));
-               GEM_BUG_ON(!i915_request_is_active(rq[B1]));
-
-               /* Wait for the timeslice to kick in */
-               del_timer(&engine->execlists.timer);
-               tasklet_hi_schedule(&engine->execlists.tasklet);
-               intel_engine_flush_submission(engine);
-
+               if (i915_request_is_active(rq[A2])) { /* semaphore yielded! */
+                       /* Wait for the timeslice to kick in */
+                       del_timer(&engine->execlists.timer);
+                       tasklet_hi_schedule(&engine->execlists.tasklet);
+                       intel_engine_flush_submission(engine);
+               }
                /* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */
                GEM_BUG_ON(!i915_request_is_active(rq[A1]));
                GEM_BUG_ON(!i915_request_is_active(rq[B1]));
index fe7778c28d2d7846c308da40668bf86dd445765c..aa6d56e25a10ae3fa6b3a5d26a16e0d45bd69917 100644 (file)
@@ -217,7 +217,7 @@ static void guc_wq_item_append(struct intel_guc *guc,
 static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
 {
        struct intel_engine_cs *engine = rq->engine;
-       u32 ctx_desc = lower_32_bits(rq->context->lrc_desc);
+       u32 ctx_desc = rq->context->lrc.ccid;
        u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
 
        guc_wq_item_append(guc, engine->guc_id, ctx_desc,
index a83df2f84eb99c04a7d9aaa748cdfb9ee607b05f..a1696e9ce4b6c07c69d20b4dc65e140923bd443e 100644 (file)
@@ -208,14 +208,41 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                                SKL_FUSE_PG_DIST_STATUS(SKL_PG0) |
                                SKL_FUSE_PG_DIST_STATUS(SKL_PG1) |
                                SKL_FUSE_PG_DIST_STATUS(SKL_PG2);
-               vgpu_vreg_t(vgpu, LCPLL1_CTL) |=
-                               LCPLL_PLL_ENABLE |
-                               LCPLL_PLL_LOCK;
-               vgpu_vreg_t(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE;
-
+               /*
+                * Only one pipe is enabled in the current vGPU display, and
+                * PIPE_A is tied to TRANSCODER_A in HW, so it's safe to assume
+                * PIPE_A/TRANSCODER_A can be enabled. PORT_x depends on the
+                * input of setup_virtual_dp_monitor; since DPLL0 can be bound
+                * to any PORT_x, we fix on DPLL0 here.
+                * Set up DPLL0: DP link clk 1620 MHz, non-SSC, DP mode.
+                */
+               vgpu_vreg_t(vgpu, DPLL_CTRL1) =
+                       DPLL_CTRL1_OVERRIDE(DPLL_ID_SKL_DPLL0);
+               vgpu_vreg_t(vgpu, DPLL_CTRL1) |=
+                       DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, DPLL_ID_SKL_DPLL0);
+               vgpu_vreg_t(vgpu, LCPLL1_CTL) =
+                       LCPLL_PLL_ENABLE | LCPLL_PLL_LOCK;
+               vgpu_vreg_t(vgpu, DPLL_STATUS) = DPLL_LOCK(DPLL_ID_SKL_DPLL0);
+               /*
+                * Golden M/N are calculated based on:
+                *   24 bpp, 4 lanes, 154000 pixel clk (from virtual EDID),
+                *   DP link clk 1620 MHz and non-constant_n.
+                * TODO: calculate DP link symbol clk and stream clk m/n.
+                */
+               vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = 63 << TU_SIZE_SHIFT;
+               vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) |= 0x5b425e;
+               vgpu_vreg_t(vgpu, PIPE_DATA_N1(TRANSCODER_A)) = 0x800000;
+               vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A)) = 0x3cd6e;
+               vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A)) = 0x80000;
        }
 
        if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+               vgpu_vreg_t(vgpu, DPLL_CTRL2) &=
+                       ~DPLL_CTRL2_DDI_CLK_OFF(PORT_B);
+               vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+                       DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_B);
+               vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+                       DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_B);
                vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
                vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
                        ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
@@ -236,6 +263,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
        }
 
        if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
+               vgpu_vreg_t(vgpu, DPLL_CTRL2) &=
+                       ~DPLL_CTRL2_DDI_CLK_OFF(PORT_C);
+               vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+                       DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_C);
+               vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+                       DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_C);
                vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
                vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
                        ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
@@ -256,6 +289,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
        }
 
        if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
+               vgpu_vreg_t(vgpu, DPLL_CTRL2) &=
+                       ~DPLL_CTRL2_DDI_CLK_OFF(PORT_D);
+               vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+                       DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_D);
+               vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+                       DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_D);
                vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
                vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
                        ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
index cb11c31840857293ff2d3b7a0d7688e4ccf02e35..e92ed96c9b239422776f18b7132f818fa04162fd 100644 (file)
@@ -290,7 +290,7 @@ static void
 shadow_context_descriptor_update(struct intel_context *ce,
                                 struct intel_vgpu_workload *workload)
 {
-       u64 desc = ce->lrc_desc;
+       u64 desc = ce->lrc.desc;
 
        /*
         * Update bits 0-11 of the context descriptor which includes flags
@@ -300,7 +300,7 @@ shadow_context_descriptor_update(struct intel_context *ce,
        desc |= (u64)workload->ctx_desc.addressing_mode <<
                GEN8_CTX_ADDRESSING_MODE_SHIFT;
 
-       ce->lrc_desc = desc;
+       ce->lrc.desc = desc;
 }
 
 static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
@@ -379,7 +379,11 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
                for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
                        struct i915_page_directory * const pd =
                                i915_pd_entry(ppgtt->pd, i);
-
+                       /* Skip now, as the current i915 ppgtt alloc won't
+                        * allocate a top-level pdp for a non-4-level table;
+                        * this won't impact the shadow ppgtt. */
+                       if (!pd)
+                               break;
                        px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
                }
        }
index 4518b9b35c3d4f43aafe3f9ac004e7932bc51f1b..02ad1acd117c4dea7d9ca371996de84aee89105d 100644 (file)
@@ -128,6 +128,13 @@ search_again:
        active = NULL;
        INIT_LIST_HEAD(&eviction_list);
        list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
+               if (vma == active) { /* now seen this vma twice */
+                       if (flags & PIN_NONBLOCK)
+                               break;
+
+                       active = ERR_PTR(-EAGAIN);
+               }
+
                /*
                 * We keep this list in a rough least-recently scanned order
                 * of active elements (inactive elements are cheap to reap).
@@ -143,21 +150,12 @@ search_again:
                 * To notice when we complete one full cycle, we record the
                 * first active element seen, before moving it to the tail.
                 */
-               if (i915_vma_is_active(vma)) {
-                       if (vma == active) {
-                               if (flags & PIN_NONBLOCK)
-                                       break;
-
-                               active = ERR_PTR(-EAGAIN);
-                       }
-
-                       if (active != ERR_PTR(-EAGAIN)) {
-                               if (!active)
-                                       active = vma;
+               if (active != ERR_PTR(-EAGAIN) && i915_vma_is_active(vma)) {
+                       if (!active)
+                               active = vma;
 
-                               list_move_tail(&vma->vm_link, &vm->bound_list);
-                               continue;
-                       }
+                       list_move_tail(&vma->vm_link, &vm->bound_list);
+                       continue;
                }
 
                if (mark_free(&scan, vma, flags, &eviction_list))
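
The eviction hunk hoists the "seen the first active vma twice" test to the top of the loop: active entries are rotated to the tail, so meeting the remembered first one again means a full pass completed without reclaiming anything, at which point the scan bails under PIN_NONBLOCK or arms the -EAGAIN sentinel. A simplified array-based sketch of that cycle detector:

    #include <stddef.h>

    struct vma {
        int active;
        int pinned;
    };

    /* Scan @v round-robin from @start. Remember the first active entry
     * seen; meeting it again means one full cycle over the list with
     * nothing reclaimed, so stop. */
    static struct vma *find_evictable(struct vma *v, size_t n, size_t start)
    {
        struct vma *first_active = NULL;

        for (size_t k = 0; k < 2 * n; k++) {
            struct vma *cur = &v[(start + k) % n];

            if (cur == first_active)   /* seen twice: a full cycle done */
                break;

            if (cur->active) {
                if (!first_active)
                    first_active = cur;
                continue;              /* rotated to the tail, try later */
            }

            if (!cur->pinned)
                return cur;
        }
        return NULL;
    }

    int main(void)
    {
        struct vma v[3] = { { 1, 0 }, { 1, 0 }, { 0, 1 } };

        /* Everything is active or pinned: the scan terminates instead of
         * spinning forever around the rotated list. */
        return find_evictable(v, 3, 0) == NULL ? 0 : 1;
    }
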
index 2a4cd0ba546459a622536a7bed16ed4b6e014680..5c8e51d2ba5b3e139581f14f8713b5f555bb64b7 100644 (file)
@@ -1207,8 +1207,6 @@ static void engine_record_registers(struct intel_engine_coredump *ee)
 static void record_request(const struct i915_request *request,
                           struct i915_request_coredump *erq)
 {
-       const struct i915_gem_context *ctx;
-
        erq->flags = request->fence.flags;
        erq->context = request->fence.context;
        erq->seqno = request->fence.seqno;
@@ -1219,9 +1217,13 @@ static void record_request(const struct i915_request *request,
 
        erq->pid = 0;
        rcu_read_lock();
-       ctx = rcu_dereference(request->context->gem_context);
-       if (ctx)
-               erq->pid = pid_nr(ctx->pid);
+       if (!intel_context_is_closed(request->context)) {
+               const struct i915_gem_context *ctx;
+
+               ctx = rcu_dereference(request->context->gem_context);
+               if (ctx)
+                       erq->pid = pid_nr(ctx->pid);
+       }
        rcu_read_unlock();
 }
 
index 9f0653cf05102d896099118c69ed911044ba17cc..8a2b83807ffcddf9190d726bee60aea57413181c 100644 (file)
@@ -3358,9 +3358,10 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 {
        struct intel_uncore *uncore = &dev_priv->uncore;
 
-       u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
+       u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
+               GEN8_PIPE_CDCLK_CRC_DONE;
        u32 de_pipe_enables;
-       u32 de_port_masked = GEN8_AUX_CHANNEL_A;
+       u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
        u32 de_port_enables;
        u32 de_misc_masked = GEN8_DE_EDP_PSR;
        enum pipe pipe;
@@ -3368,21 +3369,8 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
        if (INTEL_GEN(dev_priv) <= 10)
                de_misc_masked |= GEN8_DE_MISC_GSE;
 
-       if (INTEL_GEN(dev_priv) >= 9) {
-               de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
-               de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
-                                 GEN9_AUX_CHANNEL_D;
-               if (IS_GEN9_LP(dev_priv))
-                       de_port_masked |= BXT_DE_PORT_GMBUS;
-       } else {
-               de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
-       }
-
-       if (INTEL_GEN(dev_priv) >= 11)
-               de_port_masked |= ICL_AUX_CHANNEL_E;
-
-       if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
-               de_port_masked |= CNL_AUX_CHANNEL_F;
+       if (IS_GEN9_LP(dev_priv))
+               de_port_masked |= BXT_DE_PORT_GMBUS;
 
        de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
                                           GEN8_PIPE_FIFO_UNDERRUN;
index 66a46e41d5ef90046b9570e624244659482f9ec4..cf2c01f17da837235dd8f79e1614cf896e814299 100644 (file)
@@ -1310,8 +1310,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
                         * dropped by GuC. They won't be part of the context
                         * ID in the OA reports, so squash those lower bits.
                         */
-                       stream->specific_ctx_id =
-                               lower_32_bits(ce->lrc_desc) >> 12;
+                       stream->specific_ctx_id = ce->lrc.lrca >> 12;
 
                        /*
                         * GuC uses the top bit to signal proxy submission, so
@@ -1328,11 +1327,10 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
                        ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
                /*
                 * Pick an unused context id
-                * 0 - (NUM_CONTEXT_TAG - 1) are used by other contexts
+                * 0 - BITS_PER_LONG are used by other contexts
                 * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by idle context
                 */
                stream->specific_ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
-               BUILD_BUG_ON((GEN12_MAX_CONTEXT_HW_ID - 1) < NUM_CONTEXT_TAG);
                break;
        }
 
index e0c6021fdaf989c96e207ec5c946a9115d144b5b..6e12000c4b6b5cd7cb181c8bd2954a8da7d2b574 100644 (file)
@@ -3094,6 +3094,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define GT_BSD_CS_ERROR_INTERRUPT              (1 << 15)
 #define GT_BSD_USER_INTERRUPT                  (1 << 12)
 #define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11) /* hsw+; rsvd on snb, ivb, vlv */
+#define GT_WAIT_SEMAPHORE_INTERRUPT            REG_BIT(11) /* bdw+ */
 #define GT_CONTEXT_SWITCH_INTERRUPT            (1 <<  8)
 #define GT_RENDER_L3_PARITY_ERROR_INTERRUPT    (1 <<  5) /* !snb */
 #define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT     (1 <<  4)
index c0df71d7d0ff1499f53d9b98a27f7641fce87a29..e2b78db685eaecb9599dbd5b2c28f60a104e51e0 100644 (file)
@@ -1017,11 +1017,15 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
        GEM_BUG_ON(to == from);
        GEM_BUG_ON(to->timeline == from->timeline);
 
-       if (i915_request_completed(from))
+       if (i915_request_completed(from)) {
+               i915_sw_fence_set_error_once(&to->submit, from->fence.error);
                return 0;
+       }
 
        if (to->engine->schedule) {
-               ret = i915_sched_node_add_dependency(&to->sched, &from->sched);
+               ret = i915_sched_node_add_dependency(&to->sched,
+                                                    &from->sched,
+                                                    I915_DEPENDENCY_EXTERNAL);
                if (ret < 0)
                        return ret;
        }
@@ -1183,7 +1187,9 @@ __i915_request_await_execution(struct i915_request *to,
 
        /* Couple the dependency tree for PI on this exposed to->fence */
        if (to->engine->schedule) {
-               err = i915_sched_node_add_dependency(&to->sched, &from->sched);
+               err = i915_sched_node_add_dependency(&to->sched,
+                                                    &from->sched,
+                                                    I915_DEPENDENCY_WEAK);
                if (err < 0)
                        return err;
        }
index 68b06a7ba667da3dfd74421aff77f5935eaf0f80..f0a9e8958ca0d64fb73fb1219e86e8607c779b12 100644 (file)
@@ -456,7 +456,8 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
 }
 
 int i915_sched_node_add_dependency(struct i915_sched_node *node,
-                                  struct i915_sched_node *signal)
+                                  struct i915_sched_node *signal,
+                                  unsigned long flags)
 {
        struct i915_dependency *dep;
 
@@ -465,8 +466,7 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node,
                return -ENOMEM;
 
        if (!__i915_sched_node_add_dependency(node, signal, dep,
-                                             I915_DEPENDENCY_EXTERNAL |
-                                             I915_DEPENDENCY_ALLOC))
+                                             flags | I915_DEPENDENCY_ALLOC))
                i915_dependency_free(dep);
 
        return 0;
index d1dc4efef77b562f3b044050ddab332ed64ab2fe..6f0bf00fc5690d361800454029bbea703b393790 100644 (file)
@@ -34,7 +34,8 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
                                      unsigned long flags);
 
 int i915_sched_node_add_dependency(struct i915_sched_node *node,
-                                  struct i915_sched_node *signal);
+                                  struct i915_sched_node *signal,
+                                  unsigned long flags);
 
 void i915_sched_node_fini(struct i915_sched_node *node);
 
index d18e7055005424e67516e59f6a8a214a6978abf0..7186875088a0a285c1f05a13df5846db478f4162 100644 (file)
@@ -78,6 +78,7 @@ struct i915_dependency {
        unsigned long flags;
 #define I915_DEPENDENCY_ALLOC          BIT(0)
 #define I915_DEPENDENCY_EXTERNAL       BIT(1)
+#define I915_DEPENDENCY_WEAK           BIT(2)
 };
 
 #endif /* _I915_SCHEDULER_TYPES_H_ */
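
The new I915_DEPENDENCY_WEAK bit marks the edges __i915_request_await_execution() creates purely to couple parallel submissions; defer_request() earlier in this series skips them, so deferring a timeslice hog no longer drags along requests that merely share an execution fence. A sketch of flag-filtered edge traversal (the types are simplified stand-ins):

    #include <stddef.h>

    #define DEP_EXTERNAL (1u << 1)
    #define DEP_WEAK     (1u << 2)

    struct node;

    struct dep {
        struct dep *next;
        struct node *waiter;
        unsigned flags;
    };

    struct node {
        struct dep *waiters;
        int deferred;
    };

    /* Defer @n and every hard waiter behind it; weak edges exist only
     * for fence bookkeeping and must not propagate the deferral. */
    static void defer(struct node *n)
    {
        n->deferred = 1;
        for (struct dep *d = n->waiters; d; d = d->next) {
            if (d->flags & DEP_WEAK)
                continue;       /* coupled, not dependent: leave it */
            if (!d->waiter->deferred)
                defer(d->waiter);
        }
    }

    int main(void)
    {
        struct node a = { 0 }, b = { 0 };
        struct dep weak = { NULL, &b, DEP_WEAK };

        a.waiters = &weak;
        defer(&a);
        return b.deferred ? 1 : 0;   /* b stays untouched */
    }
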
index 08699fa069aa6f5e8b1f4b34b98b87f12fe1cfe8..2cd7a7e87c0a750ff87db98f9bfce927af88dda6 100644 (file)
@@ -158,16 +158,18 @@ vma_create(struct drm_i915_gem_object *obj,
 
        GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
 
+       spin_lock(&obj->vma.lock);
+
        if (i915_is_ggtt(vm)) {
                if (unlikely(overflows_type(vma->size, u32)))
-                       goto err_vma;
+                       goto err_unlock;
 
                vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
                                                      i915_gem_object_get_tiling(obj),
                                                      i915_gem_object_get_stride(obj));
                if (unlikely(vma->fence_size < vma->size || /* overflow */
                             vma->fence_size > vm->total))
-                       goto err_vma;
+                       goto err_unlock;
 
                GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
 
@@ -179,8 +181,6 @@ vma_create(struct drm_i915_gem_object *obj,
                __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
        }
 
-       spin_lock(&obj->vma.lock);
-
        rb = NULL;
        p = &obj->vma.tree.rb_node;
        while (*p) {
@@ -225,6 +225,8 @@ vma_create(struct drm_i915_gem_object *obj,
 
        return vma;
 
+err_unlock:
+       spin_unlock(&obj->vma.lock);
 err_vma:
        i915_vma_free(vma);
        return ERR_PTR(-E2BIG);
@@ -1226,18 +1228,6 @@ int __i915_vma_unbind(struct i915_vma *vma)
 
        lockdep_assert_held(&vma->vm->mutex);
 
-       /*
-        * First wait upon any activity as retiring the request may
-        * have side-effects such as unpinning or even unbinding this vma.
-        *
-        * XXX Actually waiting under the vm->mutex is a hinderance and
-        * should be pipelined wherever possible. In cases where that is
-        * unavoidable, we should lift the wait to before the mutex.
-        */
-       ret = i915_vma_sync(vma);
-       if (ret)
-               return ret;
-
        if (i915_vma_is_pinned(vma)) {
                vma_print_allocator(vma, "is pinned");
                return -EAGAIN;
@@ -1311,15 +1301,20 @@ int i915_vma_unbind(struct i915_vma *vma)
        if (!drm_mm_node_allocated(&vma->node))
                return 0;
 
-       if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
-               /* XXX not always required: nop_clear_range */
-               wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
-
        /* Optimistic wait before taking the mutex */
        err = i915_vma_sync(vma);
        if (err)
                goto out_rpm;
 
+       if (i915_vma_is_pinned(vma)) {
+               vma_print_allocator(vma, "is pinned");
+               return -EAGAIN;
+       }
+
+       if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
+               /* XXX not always required: nop_clear_range */
+               wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
+
        err = mutex_lock_interruptible(&vm->mutex);
        if (err)
                goto out_rpm;
index 8375054ba27d92b72f6b03a0e15f2703b5befdb5..a52986a9e7a68245f670746e1e3ba32ad90db042 100644 (file)
@@ -4992,7 +4992,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
         * WaIncreaseLatencyIPCEnabled: kbl,cfl
         * Display WA #1141: kbl,cfl
         */
-       if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) ||
+       if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
            dev_priv->ipc_enabled)
                latency += 4;
 
index 58b5f40a07dd6be16eedab36d8c52d9ff9e2e914..af89c7fc8f59340bad9d38628b0c6f751caed521 100644 (file)
@@ -173,7 +173,7 @@ static int igt_vma_create(void *arg)
                }
 
                nc = 0;
-               for_each_prime_number(num_ctx, 2 * NUM_CONTEXT_TAG) {
+               for_each_prime_number(num_ctx, 2 * BITS_PER_LONG) {
                        for (; nc < num_ctx; nc++) {
                                ctx = mock_context(i915, "mock");
                                if (!ctx)
index 9dfe7cb530e1184c5d158c12622bcd5242dd1fa7..548cc25ea4abed9565f1eebed9af9aa92b908a58 100644 (file)
@@ -328,8 +328,8 @@ static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc,
        if (!drm_atomic_crtc_needs_modeset(state))
                return 0;
 
-       if (state->mode.hdisplay > priv->soc_info->max_height ||
-           state->mode.vdisplay > priv->soc_info->max_width)
+       if (state->mode.hdisplay > priv->soc_info->max_width ||
+           state->mode.vdisplay > priv->soc_info->max_height)
                return -EINVAL;
 
        rate = clk_round_rate(priv->pix_clk,
@@ -474,7 +474,7 @@ static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder,
 
 static irqreturn_t ingenic_drm_irq_handler(int irq, void *arg)
 {
-       struct ingenic_drm *priv = arg;
+       struct ingenic_drm *priv = drm_device_get_priv(arg);
        unsigned int state;
 
        regmap_read(priv->map, JZ_REG_LCD_STATE, &state);
@@ -843,6 +843,7 @@ static const struct of_device_id ingenic_drm_of_match[] = {
        { .compatible = "ingenic,jz4770-lcd", .data = &jz4770_soc_info },
        { /* sentinel */ },
 };
+MODULE_DEVICE_TABLE(of, ingenic_drm_of_match);
 
 static struct platform_driver ingenic_drm_driver = {
        .driver = {
index b5f5eb7b4bb904b311f5994750e5830026a8ae1b..8c2e1b47e81a59c582b4556ed7cde3f7c4df2452 100644 (file)
@@ -412,9 +412,7 @@ static int __maybe_unused meson_drv_pm_resume(struct device *dev)
        if (priv->afbcd.ops)
                priv->afbcd.ops->init(priv);
 
-       drm_mode_config_helper_resume(priv->drm);
-
-       return 0;
+       return drm_mode_config_helper_resume(priv->drm);
 }
 
 static int compare_of(struct device *dev, void *data)
index d1086b2a6892ceb1a15dfc90936fe500d5fae72a..05863b253d6889d47f274b63ad5243c9467a40f0 100644 (file)
@@ -480,9 +480,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
                return ret;
 
        ret = qxl_release_reserve_list(release, true);
-       if (ret)
+       if (ret) {
+               qxl_release_free(qdev, release);
                return ret;
-
+       }
        cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
        cmd->type = QXL_SURFACE_CMD_CREATE;
        cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
@@ -499,8 +500,8 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
        /* no need to add a release to the fence for this surface bo,
           since it is only released when we ask to destroy the surface
           and it would never signal otherwise */
-       qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
        qxl_release_fence_buffer_objects(release);
+       qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
 
        surf->hw_surf_alloc = true;
        spin_lock(&qdev->surf_id_idr_lock);
@@ -542,9 +543,8 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
        cmd->surface_id = id;
        qxl_release_unmap(qdev, release, &cmd->release_info);
 
-       qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
-
        qxl_release_fence_buffer_objects(release);
+       qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
 
        return 0;
 }
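
Every qxl hunk in this series applies one ordering rule: fence the release's buffer objects before pushing the command, because the device may process the ring entry and drop the release as soon as it becomes visible, so fencing afterwards can touch freed state. A minimal sketch of that rule with hypothetical helpers:

    #include <stdio.h>

    struct release { int id; };

    /* Publish the fence while the release is still guaranteed alive. */
    static void fence_buffer_objects(struct release *r)
    {
        printf("fenced release %d\n", r->id);
    }

    /* After this call the device owns @r and may free it at any time. */
    static void push_to_ring(struct release *r)
    {
        printf("pushed release %d\n", r->id);
    }

    static void submit(struct release *r)
    {
        fence_buffer_objects(r);  /* first: while r is still ours */
        push_to_ring(r);          /* last thing we ever do with r */
    }

    int main(void)
    {
        struct release r = { .id = 1 };

        submit(&r);
        return 0;
    }
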
index 09583a08e1414381e759f91cb1c70d0ee26251f8..91f398d51cfadf4e69fd60125147d2d35eb905d1 100644 (file)
@@ -510,8 +510,8 @@ static int qxl_primary_apply_cursor(struct drm_plane *plane)
        cmd->u.set.visible = 1;
        qxl_release_unmap(qdev, release, &cmd->release_info);
 
-       qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
        qxl_release_fence_buffer_objects(release);
+       qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
 
        return ret;
 
@@ -652,8 +652,8 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
        cmd->u.position.y = plane->state->crtc_y + fb->hot_y;
 
        qxl_release_unmap(qdev, release, &cmd->release_info);
-       qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
        qxl_release_fence_buffer_objects(release);
+       qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
 
        if (old_cursor_bo != NULL)
                qxl_bo_unpin(old_cursor_bo);
@@ -700,8 +700,8 @@ static void qxl_cursor_atomic_disable(struct drm_plane *plane,
        cmd->type = QXL_CURSOR_HIDE;
        qxl_release_unmap(qdev, release, &cmd->release_info);
 
-       qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
        qxl_release_fence_buffer_objects(release);
+       qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
 }
 
 static void qxl_update_dumb_head(struct qxl_device *qdev,
index 5bebf1ea1c5d0c5b59b1cb3579dc62f93dba0cef..3599db096973e9fd2fb9d0b9e8e102053c31835b 100644 (file)
@@ -209,9 +209,10 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
                goto out_release_backoff;
 
        rects = drawable_set_clipping(qdev, num_clips, clips_bo);
-       if (!rects)
+       if (!rects) {
+               ret = -EINVAL;
                goto out_release_backoff;
-
+       }
        drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
 
        drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
@@ -242,8 +243,8 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
        }
        qxl_bo_kunmap(clips_bo);
 
-       qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
        qxl_release_fence_buffer_objects(release);
+       qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
 
 out_release_backoff:
        if (ret)
index 43688ecdd8a04d9d4bc3286ae03259c9892626e8..60ab7151b84dce9a2d2d4aa9837345fde2951c5a 100644 (file)
@@ -212,7 +212,8 @@ qxl_image_init_helper(struct qxl_device *qdev,
                break;
        default:
                DRM_ERROR("unsupported image bit depth\n");
-               return -EINVAL; /* TODO: cleanup */
+               qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
+               return -EINVAL;
        }
        image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
        image->u.bitmap.x = width;
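
The hunk above stops leaking the atomic kmap taken earlier in qxl_image_init_helper() when an unsupported bit depth is hit. A common way to keep such exits balanced is goto-based unwinding; a sketch of that shape (format_supported() and fill_image() are made-up placeholders):

    static int init_image_sketch(struct qxl_device *qdev, struct qxl_bo *bo)
    {
            void *ptr;
            int ret = 0;

            ptr = qxl_bo_kmap_atomic_page(qdev, bo, 0);     /* acquire */

            if (!format_supported()) {          /* hypothetical depth check */
                    ret = -EINVAL;
                    goto unmap;                 /* error path unwinds too */
            }

            fill_image(ptr);                    /* hypothetical happy path */

    unmap:
            qxl_bo_kunmap_atomic_page(qdev, bo, ptr);       /* all paths */
            return ret;
    }
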
index 8117a45b36102192b3acd09e269faedfb06dc146..72f3f1bbb40c1de0cf3f6a77047530fa2e47ef85 100644 (file)
@@ -261,11 +261,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
                        apply_surf_reloc(qdev, &reloc_info[i]);
        }
 
+       qxl_release_fence_buffer_objects(release);
        ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
-       if (ret)
-               qxl_release_backoff_reserve_list(release);
-       else
-               qxl_release_fence_buffer_objects(release);
 
 out_free_bos:
 out_free_release:
index 059939789730d893d089634e37daadb866c60387..3eb89f1eb0e1ad44bfde8cb12359111ef5ad68be 100644 (file)
@@ -717,7 +717,7 @@ static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder)
        struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
        struct sun6i_dsi *dsi = encoder_to_sun6i_dsi(encoder);
        struct mipi_dsi_device *device = dsi->device;
-       union phy_configure_opts opts = { };
+       union phy_configure_opts opts = { };
        struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
        u16 delay;
        int err;
index bd268028fb3d625c9c637b642a540d0e008dcfab..583cd6e0ae27faa454f32243de79b7a5d703ac8f 100644 (file)
@@ -1039,6 +1039,7 @@ void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
 
 static bool host1x_drm_wants_iommu(struct host1x_device *dev)
 {
+       struct host1x *host1x = dev_get_drvdata(dev->dev.parent);
        struct iommu_domain *domain;
 
        /*
@@ -1076,7 +1077,7 @@ static bool host1x_drm_wants_iommu(struct host1x_device *dev)
         * sufficient and whether or not the host1x is attached to an IOMMU
         * doesn't matter.
         */
-       if (!domain && dma_get_mask(dev->dev.parent) <= DMA_BIT_MASK(32))
+       if (!domain && host1x_get_dma_mask(host1x) <= DMA_BIT_MASK(32))
                return true;
 
        return domain != NULL;
index c1824bdf2418218793b04c0e11ab0e7fefdbc3fa..7879ff58236f1472afb38d55de3ec1782e41fd7b 100644 (file)
@@ -221,6 +221,7 @@ struct virtio_gpu_fpriv {
 /* virtio_ioctl.c */
 #define DRM_VIRTIO_NUM_IOCTLS 10
 extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
+void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file);
 
 /* virtio_kms.c */
 int virtio_gpu_init(struct drm_device *dev);
index 0d6152c99a27190f538be52eb777d7d4ce973cf8..f0d5a897467752aedbc1ba0ee8dbae6380da3bd2 100644 (file)
@@ -39,6 +39,9 @@ int virtio_gpu_gem_create(struct drm_file *file,
        int ret;
        u32 handle;
 
+       if (vgdev->has_virgl_3d)
+               virtio_gpu_create_context(dev, file);
+
        ret = virtio_gpu_object_create(vgdev, params, &obj, NULL);
        if (ret < 0)
                return ret;
index 3f60bf2fe05aba4922717453a1bd674b18dfa4f3..512daff920387e5103e37743f2f441641a762dd9 100644 (file)
@@ -34,8 +34,7 @@
 
 #include "virtgpu_drv.h"
 
-static void virtio_gpu_create_context(struct drm_device *dev,
-                                     struct drm_file *file)
+void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
 {
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
index f4ea4cef5e23f7d2c1092853d47388c9e2668779..0a5c8cf409fb854f65b09a2d04e44b39c0fe79a4 100644 (file)
@@ -53,14 +53,6 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
                      events_clear, &events_clear);
 }
 
-static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
-                                     uint32_t ctx_id)
-{
-       virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
-       virtio_gpu_notify(vgdev);
-       ida_free(&vgdev->ctx_id_ida, ctx_id - 1);
-}
-
 static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
                               void (*work_func)(struct work_struct *work))
 {
@@ -275,14 +267,17 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
 void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
 {
        struct virtio_gpu_device *vgdev = dev->dev_private;
-       struct virtio_gpu_fpriv *vfpriv;
+       struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
 
        if (!vgdev->has_virgl_3d)
                return;
 
-       vfpriv = file->driver_priv;
+       if (vfpriv->context_created) {
+               virtio_gpu_cmd_context_destroy(vgdev, vfpriv->ctx_id);
+               virtio_gpu_notify(vgdev);
+       }
 
-       virtio_gpu_context_destroy(vgdev, vfpriv->ctx_id);
+       ida_free(&vgdev->ctx_id_ida, vfpriv->ctx_id - 1);
        mutex_destroy(&vfpriv->context_lock);
        kfree(vfpriv);
        file->driver_priv = NULL;
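
Taken together, the virtio-gpu hunks defer context creation from open() to first use and make postclose() destroy only contexts that were actually created, keyed off vfpriv->context_created. A sketch of the lazy, idempotent shape (issue_create_context_cmd() is a made-up stand-in for the low-level command; in the real driver the locking lives inside virtio_gpu_create_context()):

    static void create_context_once(struct virtio_gpu_fpriv *vfpriv,
                                    struct drm_device *dev,
                                    struct drm_file *file)
    {
            mutex_lock(&vfpriv->context_lock);
            if (!vfpriv->context_created) {
                    issue_create_context_cmd(dev, file);    /* hypothetical */
                    vfpriv->context_created = true;
            }
            mutex_unlock(&vfpriv->context_lock);
    }
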
index 8cdcd6e5f9e1e2877ce9a7afe4d322771248e230..3596f3923ea3527e1f7033b61c45d7ba9c58d53e 100644 (file)
@@ -850,7 +850,7 @@ extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
 extern int vmw_bo_init(struct vmw_private *dev_priv,
                       struct vmw_buffer_object *vmw_bo,
                       size_t size, struct ttm_placement *placement,
-                      bool interuptable,
+                      bool interruptible,
                       void (*bo_free)(struct ttm_buffer_object *bo));
 extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
                                     struct ttm_object_file *tfile);
index 178a6cd1a06fe77535c470d96c736848bd3131ca..0f8d293971576a4f6d2faf78c6e24e8825457a58 100644 (file)
@@ -515,7 +515,7 @@ bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
        struct vmw_fence_manager *fman = fman_from_fence(fence);
 
        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
-               return 1;
+               return true;
 
        vmw_fences_update(fman);
 
index 7ef51fa84b018cba77805197a47a6afb2a250067..126f93c0b0b89b5898853ab9329902a9cec9af3b 100644 (file)
@@ -1651,7 +1651,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
        struct vmw_surface_metadata *metadata;
        struct ttm_base_object *base;
        uint32_t backup_handle;
-       int ret = -EINVAL;
+       int ret;
 
        ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
                                           req->handle_type, &base);
index 388bcc2889aaf2e763a702e3e4c4aafd8434f080..d24344e919227251e5ab48f95662ae1dbbb38a6a 100644 (file)
@@ -192,17 +192,55 @@ static void host1x_setup_sid_table(struct host1x *host)
        }
 }
 
+static bool host1x_wants_iommu(struct host1x *host1x)
+{
+       /*
+        * If we support addressing a maximum of 32 bits of physical memory
+        * and if the host1x firewall is enabled, there's no need to enable
+        * IOMMU support. This can happen for example on Tegra20, Tegra30
+        * and Tegra114.
+        *
+        * Tegra124 and later can address up to 34 bits of physical memory and
+        * many platforms come equipped with more than 2 GiB of system memory,
+        * which requires crossing the 4 GiB boundary. But there's a catch: on
+        * SoCs before Tegra186 (i.e. Tegra124 and Tegra210), the host1x can
+        * only address up to 32 bits of memory in GATHER opcodes, which means
+        * that command buffers need to either be in the first 2 GiB of system
+        * memory (which could quickly lead to memory exhaustion), or command
+        * buffers need to be treated differently from other buffers (which is
+        * not possible with the current ABI).
+        *
+        * A third option is to use the IOMMU in these cases to make sure all
+        * buffers will be mapped into a 32-bit IOVA space that host1x can
+        * address. This allows all of the system memory to be used and works
+        * within the limitations of the host1x on these SoCs.
+        *
+        * In summary, default to enable IOMMU on Tegra124 and later. For any
+        * of the earlier SoCs, only use the IOMMU for additional safety when
+        * the host1x firewall is disabled.
+        */
+       if (host1x->info->dma_mask <= DMA_BIT_MASK(32)) {
+               if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
+                       return false;
+       }
+
+       return true;
+}
+
 static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
 {
        struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
        int err;
 
        /*
-        * If the host1x firewall is enabled, there's no need to enable IOMMU
-        * support. Similarly, if host1x is already attached to an IOMMU (via
-        * the DMA API), don't try to attach again.
+        * We may not always want to enable IOMMU support (for example if the
+        * host1x firewall is already enabled and we don't support addressing
+        * more than 32 bits of physical memory), so check for that first.
+        *
+        * Similarly, if host1x is already attached to an IOMMU (via the DMA
+        * API), don't try to attach again.
         */
-       if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) || domain)
+       if (!host1x_wants_iommu(host) || domain)
                return domain;
 
        host->group = iommu_group_get(host->dev);
@@ -502,6 +540,19 @@ static void __exit tegra_host1x_exit(void)
 }
 module_exit(tegra_host1x_exit);
 
+/**
+ * host1x_get_dma_mask() - query the supported DMA mask for host1x
+ * @host1x: host1x instance
+ *
+ * Note that this returns the supported DMA mask for host1x, which can be
+ * different from the applicable DMA mask under certain circumstances.
+ */
+u64 host1x_get_dma_mask(struct host1x *host1x)
+{
+       return host1x->info->dma_mask;
+}
+EXPORT_SYMBOL(host1x_get_dma_mask);
+
 MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
 MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
 MODULE_DESCRIPTION("Host1x driver for Tegra products");
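
host1x_get_dma_mask() is exported precisely so that clients, like the Tegra DRM change earlier in this series, can base decisions on host1x's own addressing limit rather than the parent device's DMA mask. A sketch mirroring host1x_drm_wants_iommu():

    static bool client_wants_iommu(struct host1x_device *dev)
    {
            struct host1x *host1x = dev_get_drvdata(dev->dev.parent);
            struct iommu_domain *domain =
                    iommu_get_domain_for_dev(dev->dev.parent);

            /* no DMA-API domain and a 32-bit-limited host1x: bring our own */
            if (!domain && host1x_get_dma_mask(host1x) <= DMA_BIT_MASK(32))
                    return true;

            return domain != NULL;
    }
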
index 7c89edbd6c5a8ae224eeca99b7b7e3440b238e11..34f07371716de2da9a25678aa99959099697a3dc 100644 (file)
@@ -1155,6 +1155,7 @@ config HID_ALPS
 config HID_MCP2221
        tristate "Microchip MCP2221 HID USB-to-I2C/SMbus host support"
        depends on USB_HID && I2C
+       depends on GPIOLIB
        ---help---
        Provides I2C and SMBUS host adapter functionality over USB-HID
        through MCP2221 device.
index fa704153cb00d5f7fa7d0185acd1f953fdd37754..b2ad319a74b9ac228183779324371cc3fd5b7cb7 100644 (file)
@@ -802,6 +802,7 @@ static int alps_probe(struct hid_device *hdev, const struct hid_device_id *id)
                break;
        case HID_DEVICE_ID_ALPS_U1_DUAL:
        case HID_DEVICE_ID_ALPS_U1:
+       case HID_DEVICE_ID_ALPS_U1_UNICORN_LEGACY:
                data->dev_type = U1;
                break;
        default:
index b18b13147a6fe5405399ef4f47b58931058eb3ae..1c71a1aa76b22b4f132004169839e2f91659e3a0 100644 (file)
 #define HID_DEVICE_ID_ALPS_U1_DUAL_PTP 0x121F
 #define HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP    0x1220
 #define HID_DEVICE_ID_ALPS_U1          0x1215
+#define HID_DEVICE_ID_ALPS_U1_UNICORN_LEGACY         0x121E
 #define HID_DEVICE_ID_ALPS_T4_BTNLESS  0x120C
 #define HID_DEVICE_ID_ALPS_1222                0x1222
 
-
 #define USB_VENDOR_ID_AMI              0x046b
 #define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE      0xff10
 
 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7349      0x7349
 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7      0x73f7
 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001      0xa001
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002      0xc002
 
 #define USB_VENDOR_ID_ELAN             0x04f3
 #define USB_DEVICE_ID_TOSHIBA_CLICK_L9W        0x0401
 #define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2      0xc218
 #define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2    0xc219
 #define USB_DEVICE_ID_LOGITECH_G15_LCD         0xc222
+#define USB_DEVICE_ID_LOGITECH_G11             0xc225
 #define USB_DEVICE_ID_LOGITECH_G15_V2_LCD      0xc227
 #define USB_DEVICE_ID_LOGITECH_G510            0xc22d
 #define USB_DEVICE_ID_LOGITECH_G510_USB_AUDIO  0xc22e
 #define USB_DEVICE_ID_SYMBOL_SCANNER_2 0x1300
 #define USB_DEVICE_ID_SYMBOL_SCANNER_3 0x1200
 
+#define I2C_VENDOR_ID_SYNAPTICS     0x06cb
+#define I2C_PRODUCT_ID_SYNAPTICS_SYNA2393   0x7a13
+
 #define USB_VENDOR_ID_SYNAPTICS                0x06cb
 #define USB_DEVICE_ID_SYNAPTICS_TP     0x0001
 #define USB_DEVICE_ID_SYNAPTICS_INT_TP 0x0002
 #define USB_DEVICE_ID_SYNAPTICS_LTS2   0x1d10
 #define USB_DEVICE_ID_SYNAPTICS_HD     0x0ac3
 #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD        0x1ac3
+#define USB_DEVICE_ID_SYNAPTICS_DELL_K12A      0x2819
 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012       0x2968
 #define USB_DEVICE_ID_SYNAPTICS_TP_V103        0x5710
 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5   0x81a7
index ad4b5412a9f493f858134f33b7165b70bec92211..ef0cbcd7540d564e91e8f70c03aa947a410ce1c9 100644 (file)
@@ -872,6 +872,10 @@ error_hw_stop:
 }
 
 static const struct hid_device_id lg_g15_devices[] = {
+       /* The G11 is a G15 without the LCD, treat it as a G15 */
+       { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+               USB_DEVICE_ID_LOGITECH_G11),
+               .driver_data = LG_G15 },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
                         USB_DEVICE_ID_LOGITECH_G15_LCD),
                .driver_data = LG_G15 },
index 362805ddf377736927fa95493186698a8d5fad0f..03c720b47306320c6ed32df1e0da97f2cd61e403 100644 (file)
@@ -1922,6 +1922,9 @@ static const struct hid_device_id mt_devices[] = {
        { .driver_data = MT_CLS_EGALAX_SERIAL,
                MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
                        USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
+       { .driver_data = MT_CLS_EGALAX,
+               MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
+                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002) },
 
        /* Elitegroup panel */
        { .driver_data = MT_CLS_SERIAL,
index ebec818344aff5ca50164cef91f785e81d47b6b1..e4cb543de0cdc3524a33ac068536858e328a2bcc 100644 (file)
@@ -163,6 +163,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103), HID_QUIRK_NO_INIT_REPORTS },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DELL_K12A), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD), HID_QUIRK_BADPAD },
        { HID_USB_DEVICE(USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882), HID_QUIRK_NOGET },
index 009000c5d55cddfaad101f1b33d5b37497e0f176..294c84e136d72686842516c9422ece66739b724e 100644 (file)
@@ -177,6 +177,8 @@ static const struct i2c_hid_quirks {
                 I2C_HID_QUIRK_BOGUS_IRQ },
        { USB_VENDOR_ID_ALPS_JP, HID_ANY_ID,
                 I2C_HID_QUIRK_RESET_ON_RESUME },
+       { I2C_VENDOR_ID_SYNAPTICS, I2C_PRODUCT_ID_SYNAPTICS_SYNA2393,
+                I2C_HID_QUIRK_RESET_ON_RESUME },
        { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
                I2C_HID_QUIRK_BAD_INPUT_SIZE },
        { 0, 0 }
index c7bc9db5b192e201d437f3adc1959224a1e11bee..17a638f150824a14ca837cac5fa298c52caf0c73 100644 (file)
@@ -682,16 +682,21 @@ static int usbhid_open(struct hid_device *hid)
        struct usbhid_device *usbhid = hid->driver_data;
        int res;
 
+       mutex_lock(&usbhid->mutex);
+
        set_bit(HID_OPENED, &usbhid->iofl);
 
-       if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
-               return 0;
+       if (hid->quirks & HID_QUIRK_ALWAYS_POLL) {
+               res = 0;
+               goto Done;
+       }
 
        res = usb_autopm_get_interface(usbhid->intf);
        /* the device must be awake to reliably request remote wakeup */
        if (res < 0) {
                clear_bit(HID_OPENED, &usbhid->iofl);
-               return -EIO;
+               res = -EIO;
+               goto Done;
        }
 
        usbhid->intf->needs_remote_wakeup = 1;
@@ -725,6 +730,9 @@ static int usbhid_open(struct hid_device *hid)
                msleep(50);
 
        clear_bit(HID_RESUME_RUNNING, &usbhid->iofl);
+
+ Done:
+       mutex_unlock(&usbhid->mutex);
        return res;
 }
 
@@ -732,6 +740,8 @@ static void usbhid_close(struct hid_device *hid)
 {
        struct usbhid_device *usbhid = hid->driver_data;
 
+       mutex_lock(&usbhid->mutex);
+
        /*
         * Make sure we don't restart data acquisition due to
         * a resumption we no longer care about by avoiding racing
@@ -743,12 +753,13 @@ static void usbhid_close(struct hid_device *hid)
                clear_bit(HID_IN_POLLING, &usbhid->iofl);
        spin_unlock_irq(&usbhid->lock);
 
-       if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
-               return;
+       if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) {
+               hid_cancel_delayed_stuff(usbhid);
+               usb_kill_urb(usbhid->urbin);
+               usbhid->intf->needs_remote_wakeup = 0;
+       }
 
-       hid_cancel_delayed_stuff(usbhid);
-       usb_kill_urb(usbhid->urbin);
-       usbhid->intf->needs_remote_wakeup = 0;
+       mutex_unlock(&usbhid->mutex);
 }
 
 /*
@@ -1057,6 +1068,8 @@ static int usbhid_start(struct hid_device *hid)
        unsigned int n, insize = 0;
        int ret;
 
+       mutex_lock(&usbhid->mutex);
+
        clear_bit(HID_DISCONNECTED, &usbhid->iofl);
 
        usbhid->bufsize = HID_MIN_BUFFER_SIZE;
@@ -1177,6 +1190,8 @@ static int usbhid_start(struct hid_device *hid)
                usbhid_set_leds(hid);
                device_set_wakeup_enable(&dev->dev, 1);
        }
+
+       mutex_unlock(&usbhid->mutex);
        return 0;
 
 fail:
@@ -1187,6 +1202,7 @@ fail:
        usbhid->urbout = NULL;
        usbhid->urbctrl = NULL;
        hid_free_buffers(dev, hid);
+       mutex_unlock(&usbhid->mutex);
        return ret;
 }
 
@@ -1202,6 +1218,8 @@ static void usbhid_stop(struct hid_device *hid)
                usbhid->intf->needs_remote_wakeup = 0;
        }
 
+       mutex_lock(&usbhid->mutex);
+
        clear_bit(HID_STARTED, &usbhid->iofl);
        spin_lock_irq(&usbhid->lock);   /* Sync with error and led handlers */
        set_bit(HID_DISCONNECTED, &usbhid->iofl);
@@ -1222,6 +1240,8 @@ static void usbhid_stop(struct hid_device *hid)
        usbhid->urbout = NULL;
 
        hid_free_buffers(hid_to_usb_dev(hid), hid);
+
+       mutex_unlock(&usbhid->mutex);
 }
 
 static int usbhid_power(struct hid_device *hid, int lvl)
@@ -1382,6 +1402,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
        INIT_WORK(&usbhid->reset_work, hid_reset);
        timer_setup(&usbhid->io_retry, hid_retry_timeout, 0);
        spin_lock_init(&usbhid->lock);
+       mutex_init(&usbhid->mutex);
 
        ret = hid_add_device(hid);
        if (ret) {
index 8620408bd7afc8e2fe047f36b0420acd4db5d76d..75fe85d3d27a0a25d6f5aad7123bcf527ae13261 100644 (file)
@@ -80,6 +80,7 @@ struct usbhid_device {
        dma_addr_t outbuf_dma;                                          /* Output buffer dma */
        unsigned long last_out;                                                 /* record of last output for timeouts */
 
+       struct mutex mutex;                                             /* start/stop/open/close */
        spinlock_t lock;                                                /* fifo spinlock */
        unsigned long iofl;                                             /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */
        struct timer_list io_retry;                                     /* Retry timer */
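
The new usbhid->mutex makes start/stop/open/close mutually exclusive, so HID core callers can no longer interleave them mid-teardown; note how the former early returns become goto-unlock exits. A condensed sketch of that shape (start_io() is a hypothetical stand-in for the URB setup):

    static int open_serialized(struct hid_device *hid)
    {
            struct usbhid_device *usbhid = hid->driver_data;
            int res = 0;

            mutex_lock(&usbhid->mutex);

            set_bit(HID_OPENED, &usbhid->iofl);

            if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
                    goto done;                  /* early exit still unlocks */

            res = start_io(usbhid);             /* hypothetical */
            if (res < 0)
                    clear_bit(HID_OPENED, &usbhid->iofl);

    done:
            mutex_unlock(&usbhid->mutex);
            return res;
    }
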
index 5ded94b7bf684b87359549444c6f1cdddc17bbf2..cd71e713394464632e4c2fa6e11de57eaccab455 100644 (file)
@@ -319,9 +319,11 @@ static void wacom_feature_mapping(struct hid_device *hdev,
                        data[0] = field->report->id;
                        ret = wacom_get_report(hdev, HID_FEATURE_REPORT,
                                               data, n, WAC_CMD_RETRIES);
-                       if (ret == n) {
+                       if (ret == n && features->type == HID_GENERIC) {
                                ret = hid_report_raw_event(hdev,
                                        HID_FEATURE_REPORT, data, n, 0);
+                       } else if (ret == 2 && features->type != HID_GENERIC) {
+                               features->touch_max = data[1];
                        } else {
                                features->touch_max = 16;
                                hid_warn(hdev, "wacom_feature_mapping: "
index d99a9d407671c88ea96decdd0e561c6f8784685a..1c96809b51c909431255eb912d04156d7f5fd8aa 100644 (file)
@@ -1427,11 +1427,13 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
 {
        struct input_dev *pad_input = wacom->pad_input;
        unsigned char *data = wacom->data;
+       int nbuttons = wacom->features.numbered_buttons;
 
-       int buttons = data[282] | ((data[281] & 0x40) << 2);
+       int expresskeys = data[282];
+       int center = (data[281] & 0x40) >> 6;
        int ring = data[285] & 0x7F;
        bool ringstatus = data[285] & 0x80;
-       bool prox = buttons || ringstatus;
+       bool prox = expresskeys || center || ringstatus;
 
        /* Fix touchring data: userspace expects 0 at left and increasing clockwise */
        ring = 71 - ring;
@@ -1439,7 +1441,8 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
        if (ring > 71)
                ring -= 72;
 
-       wacom_report_numbered_buttons(pad_input, 9, buttons);
+       wacom_report_numbered_buttons(pad_input, nbuttons,
+                                      expresskeys | (center << (nbuttons - 1)));
 
        input_report_abs(pad_input, ABS_WHEEL, ringstatus ? ring : 0);
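
The pad fix splits the raw report into the ExpressKeys byte (data[282]) and the center button (bit 6 of data[281]), then repacks the center button as the highest-numbered button instead of hard-coding nine buttons. A standalone example of the repacking arithmetic (the raw byte values are made up):

    #include <stdio.h>

    int main(void)
    {
            unsigned char data281 = 0x40, data282 = 0x05;   /* sample bytes */
            int nbuttons = 9;                       /* numbered_buttons */

            int expresskeys = data282;              /* keys in low bits */
            int center = (data281 & 0x40) >> 6;     /* center button */
            int buttons = expresskeys | (center << (nbuttons - 1));

            printf("button mask = 0x%03x\n", buttons);      /* 0x105 */
            return 0;
    }
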
 
@@ -2637,9 +2640,25 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
                        case HID_DG_TIPSWITCH:
                                hid_data->last_slot_field = equivalent_usage;
                                break;
+                       case HID_DG_CONTACTCOUNT:
+                               hid_data->cc_report = report->id;
+                               hid_data->cc_index = i;
+                               hid_data->cc_value_index = j;
+                               break;
                        }
                }
        }
+
+       if (hid_data->cc_report != 0 &&
+           hid_data->cc_index >= 0) {
+               struct hid_field *field = report->field[hid_data->cc_index];
+               int value = field->value[hid_data->cc_value_index];
+               if (value)
+                       hid_data->num_expected = value;
+       }
+       else {
+               hid_data->num_expected = wacom_wac->features.touch_max;
+       }
 }
 
 static void wacom_wac_finger_report(struct hid_device *hdev,
@@ -2649,7 +2668,6 @@ static void wacom_wac_finger_report(struct hid_device *hdev,
        struct wacom_wac *wacom_wac = &wacom->wacom_wac;
        struct input_dev *input = wacom_wac->touch_input;
        unsigned touch_max = wacom_wac->features.touch_max;
-       struct hid_data *hid_data = &wacom_wac->hid_data;
 
        /* If more packets of data are expected, give us a chance to
         * process them rather than immediately syncing a partial
@@ -2663,7 +2681,6 @@ static void wacom_wac_finger_report(struct hid_device *hdev,
 
        input_sync(input);
        wacom_wac->hid_data.num_received = 0;
-       hid_data->num_expected = 0;
 
        /* keep touch state for pen event */
        wacom_wac->shared->touch_down = wacom_wac_finger_count_touches(wacom_wac);
@@ -2738,73 +2755,12 @@ static void wacom_report_events(struct hid_device *hdev,
        }
 }
 
-static void wacom_set_num_expected(struct hid_device *hdev,
-                                  struct hid_report *report,
-                                  int collection_index,
-                                  struct hid_field *field,
-                                  int field_index)
-{
-       struct wacom *wacom = hid_get_drvdata(hdev);
-       struct wacom_wac *wacom_wac = &wacom->wacom_wac;
-       struct hid_data *hid_data = &wacom_wac->hid_data;
-       unsigned int original_collection_level =
-               hdev->collection[collection_index].level;
-       bool end_collection = false;
-       int i;
-
-       if (hid_data->num_expected)
-               return;
-
-       // find the contact count value for this segment
-       for (i = field_index; i < report->maxfield && !end_collection; i++) {
-               struct hid_field *field = report->field[i];
-               unsigned int field_level =
-                       hdev->collection[field->usage[0].collection_index].level;
-               unsigned int j;
-
-               if (field_level != original_collection_level)
-                       continue;
-
-               for (j = 0; j < field->maxusage; j++) {
-                       struct hid_usage *usage = &field->usage[j];
-
-                       if (usage->collection_index != collection_index) {
-                               end_collection = true;
-                               break;
-                       }
-                       if (wacom_equivalent_usage(usage->hid) == HID_DG_CONTACTCOUNT) {
-                               hid_data->cc_report = report->id;
-                               hid_data->cc_index = i;
-                               hid_data->cc_value_index = j;
-
-                               if (hid_data->cc_report != 0 &&
-                                   hid_data->cc_index >= 0) {
-
-                                       struct hid_field *field =
-                                               report->field[hid_data->cc_index];
-                                       int value =
-                                               field->value[hid_data->cc_value_index];
-
-                                       if (value)
-                                               hid_data->num_expected = value;
-                               }
-                       }
-               }
-       }
-
-       if (hid_data->cc_report == 0 || hid_data->cc_index < 0)
-               hid_data->num_expected = wacom_wac->features.touch_max;
-}
-
 static int wacom_wac_collection(struct hid_device *hdev, struct hid_report *report,
                         int collection_index, struct hid_field *field,
                         int field_index)
 {
        struct wacom *wacom = hid_get_drvdata(hdev);
 
-       if (WACOM_FINGER_FIELD(field))
-               wacom_set_num_expected(hdev, report, collection_index, field,
-                                      field_index);
        wacom_report_events(hdev, report, collection_index, field_index);
 
        /*
index 6098e0cbdb4b0ad36f22fd214d2f0f59d08fec31..533c8b82b344d1b035a2cc78ff80476beab87701 100644 (file)
@@ -184,11 +184,7 @@ void hv_synic_enable_regs(unsigned int cpu)
 
        shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
        shared_sint.masked = false;
-       if (ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED)
-               shared_sint.auto_eoi = false;
-       else
-               shared_sint.auto_eoi = true;
-
+       shared_sint.auto_eoi = hv_recommend_using_aeoi();
        hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
 
        /* Enable the global synic bit */
index e70783e33680f1ec2ffa6b2bf46b266093907e57..f9d14db980cb4d2b65158bdd6ece3c2f5e02516e 100644 (file)
@@ -286,8 +286,8 @@ TRACE_EVENT(vmbus_send_tl_connect_request,
                    __field(int, ret)
                    ),
            TP_fast_assign(
-                   memcpy(__entry->guest_id, &msg->guest_endpoint_id.b, 16);
-                   memcpy(__entry->host_id, &msg->host_service_id.b, 16);
+                   export_guid(__entry->guest_id, &msg->guest_endpoint_id);
+                   export_guid(__entry->host_id, &msg->host_service_id);
                    __entry->ret = ret;
                    ),
            TP_printk("sending guest_endpoint_id %pUl, host_service_id %pUl, "
index a68bce4d0ddbe925beabc2ca6a3499c6e25ea202..e06c6b9555cfb6800bf67910730b79f5870359ed 100644 (file)
@@ -978,6 +978,9 @@ static int vmbus_resume(struct device *child_device)
 
        return drv->resume(dev);
 }
+#else
+#define vmbus_suspend NULL
+#define vmbus_resume NULL
 #endif /* CONFIG_PM_SLEEP */
 
 /*
@@ -997,11 +1000,22 @@ static void vmbus_device_release(struct device *device)
 }
 
 /*
- * Note: we must use SET_NOIRQ_SYSTEM_SLEEP_PM_OPS rather than
- * SET_SYSTEM_SLEEP_PM_OPS: see the comment before vmbus_bus_pm.
+ * Note: we must use the "noirq" ops: see the comment before vmbus_bus_pm.
+ *
+ * suspend_noirq/resume_noirq are set to NULL to support Suspend-to-Idle: we
+ * shouldn't suspend the vmbus devices upon Suspend-to-Idle, otherwise there
+ * is no way to wake up a Generation-2 VM.
+ *
+ * The other 4 ops are for hibernation.
  */
+
 static const struct dev_pm_ops vmbus_pm = {
-       SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(vmbus_suspend, vmbus_resume)
+       .suspend_noirq  = NULL,
+       .resume_noirq   = NULL,
+       .freeze_noirq   = vmbus_suspend,
+       .thaw_noirq     = vmbus_resume,
+       .poweroff_noirq = vmbus_suspend,
+       .restore_noirq  = vmbus_resume,
 };
 
 /* The one and only one */
@@ -2281,6 +2295,9 @@ static int vmbus_bus_resume(struct device *dev)
 
        return 0;
 }
+#else
+#define vmbus_bus_suspend NULL
+#define vmbus_bus_resume NULL
 #endif /* CONFIG_PM_SLEEP */
 
 static const struct acpi_device_id vmbus_acpi_device_ids[] = {
@@ -2291,16 +2308,24 @@ static const struct acpi_device_id vmbus_acpi_device_ids[] = {
 MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);
 
 /*
- * Note: we must use SET_NOIRQ_SYSTEM_SLEEP_PM_OPS rather than
- * SET_SYSTEM_SLEEP_PM_OPS, otherwise NIC SR-IOV can not work, because the
- * "pci_dev_pm_ops" uses the "noirq" callbacks: in the resume path, the
- * pci "noirq" restore callback runs before "non-noirq" callbacks (see
+ * Note: we must use the "noirq" ops, otherwise hibernation can not work with
+ * PCI device assignment, because "pci_dev_pm_ops" uses the "noirq" ops: in
+ * the resume path, the pci "noirq" restore op runs before "non-noirq" op (see
  * resume_target_kernel() -> dpm_resume_start(), and hibernation_restore() ->
  * dpm_resume_end()). This means vmbus_bus_resume() and the pci-hyperv's
- * resume callback must also run via the "noirq" callbacks.
+ * resume callback must also run via the "noirq" ops.
+ *
+ * Set suspend_noirq/resume_noirq to NULL for Suspend-to-Idle: see the comment
+ * earlier in this file before vmbus_pm.
  */
+
 static const struct dev_pm_ops vmbus_bus_pm = {
-       SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(vmbus_bus_suspend, vmbus_bus_resume)
+       .suspend_noirq  = NULL,
+       .resume_noirq   = NULL,
+       .freeze_noirq   = vmbus_bus_suspend,
+       .thaw_noirq     = vmbus_bus_resume,
+       .poweroff_noirq = vmbus_bus_suspend,
+       .restore_noirq  = vmbus_bus_resume
 };
 
 static struct acpi_driver vmbus_acpi_driver = {
index 53b517dbe7e6ecaffab8c2a85053f82bc5caaf55..4af2fc309c28689ff4214f186fc3b27a38dad2c8 100644 (file)
@@ -244,9 +244,9 @@ static ssize_t da9052_tsi_show(struct device *dev,
        int channel = to_sensor_dev_attr(devattr)->index;
        int ret;
 
-       mutex_lock(&hwmon->hwmon_lock);
+       mutex_lock(&hwmon->da9052->auxadc_lock);
        ret = __da9052_read_tsi(dev, channel);
-       mutex_unlock(&hwmon->hwmon_lock);
+       mutex_unlock(&hwmon->da9052->auxadc_lock);
 
        if (ret < 0)
                return ret;
index 9179460c2d9d586e1358859a485234126aa7bc8c..0d4f3d97ffc61f8af1d4d4dff6c269353fcd5c9a 100644 (file)
@@ -346,7 +346,7 @@ static int drivetemp_identify_sata(struct drivetemp_data *st)
        st->have_temp_highest = temp_is_valid(buf[SCT_STATUS_TEMP_HIGHEST]);
 
        if (!have_sct_data_table)
-               goto skip_sct;
+               goto skip_sct_data;
 
        /* Request and read temperature history table */
        memset(buf, '\0', sizeof(st->smartdata));
index 1f5743d68984cfb93ef9ea25084f3db6e1fcbf8a..a7eb10d2a0539029bb301fb936491618e4177128 100644 (file)
@@ -41,6 +41,7 @@
 #define FANCTL_MAX             4       /* Counted from 1 */
 #define TCPU_MAX               8       /* Counted from 1 */
 #define TEMP_MAX               4       /* Counted from 1 */
+#define SMI_STS_MAX            10      /* Counted from 1 */
 
 #define VT_ADC_CTRL0_REG       0x20    /* Bank 0 */
 #define VT_ADC_CTRL1_REG       0x21    /* Bank 0 */
@@ -361,6 +362,7 @@ static int nct7904_read_temp(struct device *dev, u32 attr, int channel,
        struct nct7904_data *data = dev_get_drvdata(dev);
        int ret, temp;
        unsigned int reg1, reg2, reg3;
+       s8 temps;
 
        switch (attr) {
        case hwmon_temp_input:
@@ -466,7 +468,8 @@ static int nct7904_read_temp(struct device *dev, u32 attr, int channel,
 
        if (ret < 0)
                return ret;
-       *val = ret * 1000;
+       temps = ret;
+       *val = temps * 1000;
        return 0;
 }
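
The nct7904 temperature register is an 8-bit two's-complement value, so scaling the unsigned regmap result directly loses the sign; routing it through an s8 first restores it. A standalone illustration of the difference:

    #include <stdio.h>

    int main(void)
    {
            int ret = 0xf6;         /* raw 8-bit register value: -10 degC */
            signed char temps;      /* the s8 added by the fix */

            printf("unsigned: %d\n", ret * 1000);   /* 246000, sign lost */

            temps = (signed char)ret;               /* sign-extend via s8 */
            printf("signed:   %d\n", temps * 1000); /* -10000 mdegC */
            return 0;
    }
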
 
@@ -1009,6 +1012,13 @@ static int nct7904_probe(struct i2c_client *client,
                data->fan_mode[i] = ret;
        }
 
+       /* Read all of SMI status register to clear alarms */
+       for (i = 0; i < SMI_STS_MAX; i++) {
+               ret = nct7904_read_reg(data, BANK_0, SMI_STS1_REG + i);
+               if (ret < 0)
+                       return ret;
+       }
+
        hwmon_dev =
                devm_hwmon_device_register_with_info(dev, client->name, data,
                                                     &nct7904_chip_info, NULL);
index b44d83142b620a76882ac8028db0a707c4896087..2fdaeec80ee5b701c1df24d0109ad0ba13c78220 100644 (file)
@@ -120,7 +120,7 @@ static int cti_plat_create_v8_etm_connection(struct device *dev,
 
        /* Can optionally have an etm node - return if not  */
        cs_fwnode = fwnode_find_reference(root_fwnode, CTI_DT_CSDEV_ASSOC, 0);
-       if (IS_ERR_OR_NULL(cs_fwnode))
+       if (IS_ERR(cs_fwnode))
                return 0;
 
        /* allocate memory */
@@ -393,7 +393,7 @@ static int cti_plat_create_connection(struct device *dev,
                /* associated device ? */
                cs_fwnode = fwnode_find_reference(fwnode,
                                                  CTI_DT_CSDEV_ASSOC, 0);
-               if (!IS_ERR_OR_NULL(cs_fwnode)) {
+               if (!IS_ERR(cs_fwnode)) {
                        assoc_name = cti_plat_get_csdev_or_node_name(cs_fwnode,
                                                                     &csdev);
                        fwnode_handle_put(cs_fwnode);
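
Both CTI hunks encode the same API fact: fwnode_find_reference() returns either a valid handle or an ERR_PTR-encoded error, never NULL, so IS_ERR() is the right test and IS_ERR_OR_NULL() only obscures that. A sketch (the reference name is illustrative):

    static int handle_optional_ref(struct fwnode_handle *fwnode)
    {
            /* valid handle or ERR_PTR, never NULL */
            struct fwnode_handle *cs_fwnode =
                    fwnode_find_reference(fwnode, "illustrative-ref", 0);

            if (IS_ERR(cs_fwnode))
                    return 0;               /* the reference is optional */

            /* ... use cs_fwnode ... */
            fwnode_handle_put(cs_fwnode);
            return 0;
    }
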
index dff4e178c732f4f3c77591da08716b80e0d9e854..7f10312d1b88f55ff605fb454d580c36cb5ecb7e 100644 (file)
@@ -542,7 +542,7 @@ int i2c_pca_add_numbered_bus(struct i2c_adapter *adap)
 EXPORT_SYMBOL(i2c_pca_add_numbered_bus);
 
 MODULE_AUTHOR("Ian Campbell <icampbell@arcom.com>, "
-       "Wolfram Sang <w.sang@pengutronix.de>");
+       "Wolfram Sang <kernel@pengutronix.de>");
 MODULE_DESCRIPTION("I2C-Bus PCA9564/PCA9665 algorithm");
 MODULE_LICENSE("GPL");
 
index f5c00f903df3833a08b4bc90b8456d7cbcf52d6d..16ddc26c00e65698bf4f9986e8e02667326cf58c 100644 (file)
@@ -70,6 +70,7 @@
  * @isr_mask: cached copy of local ISR enables.
  * @isr_status: cached copy of local ISR status.
  * @lock: spinlock for IRQ synchronization.
+ * @isr_mutex: mutex for IRQ thread.
  */
 struct altr_i2c_dev {
        void __iomem *base;
@@ -86,6 +87,7 @@ struct altr_i2c_dev {
        u32 isr_mask;
        u32 isr_status;
        spinlock_t lock;        /* IRQ synchronization */
+       struct mutex isr_mutex;
 };
 
 static void
@@ -245,10 +247,11 @@ static irqreturn_t altr_i2c_isr(int irq, void *_dev)
        struct altr_i2c_dev *idev = _dev;
        u32 status = idev->isr_status;
 
+       mutex_lock(&idev->isr_mutex);
        if (!idev->msg) {
                dev_warn(idev->dev, "unexpected interrupt\n");
                altr_i2c_int_clear(idev, ALTR_I2C_ALL_IRQ);
-               return IRQ_HANDLED;
+               goto out;
        }
        read = (idev->msg->flags & I2C_M_RD) != 0;
 
@@ -301,6 +304,8 @@ static irqreturn_t altr_i2c_isr(int irq, void *_dev)
                complete(&idev->msg_complete);
                dev_dbg(idev->dev, "Message Complete\n");
        }
+out:
+       mutex_unlock(&idev->isr_mutex);
 
        return IRQ_HANDLED;
 }
@@ -312,6 +317,7 @@ static int altr_i2c_xfer_msg(struct altr_i2c_dev *idev, struct i2c_msg *msg)
        u32 value;
        u8 addr = i2c_8bit_addr_from_msg(msg);
 
+       mutex_lock(&idev->isr_mutex);
        idev->msg = msg;
        idev->msg_len = msg->len;
        idev->buf = msg->buf;
@@ -336,6 +342,7 @@ static int altr_i2c_xfer_msg(struct altr_i2c_dev *idev, struct i2c_msg *msg)
                altr_i2c_int_enable(idev, imask, true);
                altr_i2c_fill_tx_fifo(idev);
        }
+       mutex_unlock(&idev->isr_mutex);
 
        time_left = wait_for_completion_timeout(&idev->msg_complete,
                                                ALTR_I2C_XFER_TIMEOUT);
@@ -409,6 +416,7 @@ static int altr_i2c_probe(struct platform_device *pdev)
        idev->dev = &pdev->dev;
        init_completion(&idev->msg_complete);
        spin_lock_init(&idev->lock);
+       mutex_init(&idev->isr_mutex);
 
        ret = device_property_read_u32(idev->dev, "fifo-size",
                                       &idev->fifo_size);
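
A mutex inside an interrupt handler is only legal here because altr_i2c_isr() is the threaded half of the interrupt, where sleeping is allowed; the same mutex then serializes it against altr_i2c_xfer_msg() publishing idev->msg. A sketch of the registration this relies on (both handler names are hypothetical):

    static int register_irq_sketch(struct device *dev, int irq,
                                   struct altr_i2c_dev *idev)
    {
            /* quick_isr: hard-IRQ context, must not sleep;
             * thread_isr: kernel thread, may take idev->isr_mutex */
            return devm_request_threaded_irq(dev, irq, quick_isr, thread_isr,
                                             IRQF_ONESHOT, "altera-i2c", idev);
    }
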
index 5e4800d72e00bc5a4c7a5b98294d22f99a093168..cd3fd5ee5f65d28815b9f2e22981f9f44b50b389 100644 (file)
@@ -349,12 +349,12 @@ static int amd_mp2_pci_probe(struct pci_dev *pci_dev,
        if (!privdata)
                return -ENOMEM;
 
+       privdata->pci_dev = pci_dev;
        rc = amd_mp2_pci_init(privdata, pci_dev);
        if (rc)
                return rc;
 
        mutex_init(&privdata->c2p_lock);
-       privdata->pci_dev = pci_dev;
 
        pm_runtime_set_autosuspend_delay(&pci_dev->dev, 1000);
        pm_runtime_use_autosuspend(&pci_dev->dev);
index 07c1993274c50217da6c1ead198fe3bf857f539f..f51702d86a90e708f11f1d27b89f9a98b49ed1cc 100644 (file)
@@ -603,6 +603,7 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
        /* Ack all interrupts except for Rx done */
        writel(irq_received & ~ASPEED_I2CD_INTR_RX_DONE,
               bus->base + ASPEED_I2C_INTR_STS_REG);
+       readl(bus->base + ASPEED_I2C_INTR_STS_REG);
        irq_remaining = irq_received;
 
 #if IS_ENABLED(CONFIG_I2C_SLAVE)
@@ -645,9 +646,11 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
                        irq_received, irq_handled);
 
        /* Ack Rx done */
-       if (irq_received & ASPEED_I2CD_INTR_RX_DONE)
+       if (irq_received & ASPEED_I2CD_INTR_RX_DONE) {
                writel(ASPEED_I2CD_INTR_RX_DONE,
                       bus->base + ASPEED_I2C_INTR_STS_REG);
+               readl(bus->base + ASPEED_I2C_INTR_STS_REG);
+       }
        spin_unlock(&bus->lock);
        return irq_remaining ? IRQ_NONE : IRQ_HANDLED;
 }
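
The readl() added after each status writel() is a posted-write flush: reading any register on the same device forces earlier MMIO writes to reach the hardware before the handler moves on. The pattern in isolation:

    static void ack_status_sketch(void __iomem *base, u32 mask)
    {
            writel(mask, base + ASPEED_I2C_INTR_STS_REG);   /* may be posted */
            readl(base + ASPEED_I2C_INTR_STS_REG);          /* flush it now */
    }
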
index 0aba51a7df327a70b19a6a160c8492dbdf13814b..37b96ac9dfaeec9fb84f1b2d5368e063ffa5bd2c 100644 (file)
@@ -845,6 +845,18 @@ static int at91_init_twi_recovery_info(struct platform_device *pdev,
                                                         PINCTRL_STATE_DEFAULT);
        dev->pinctrl_pins_gpio = pinctrl_lookup_state(dev->pinctrl,
                                                      "gpio");
+       if (IS_ERR(dev->pinctrl_pins_default) ||
+           IS_ERR(dev->pinctrl_pins_gpio)) {
+               dev_info(&pdev->dev, "pinctrl states incomplete for recovery\n");
+               return -EINVAL;
+       }
+
+       /*
+        * pins will be taken as GPIO, so we might as well inform pinctrl about
+        * this and move the state to GPIO
+        */
+       pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_gpio);
+
        rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN);
        if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER)
                return -EPROBE_DEFER;
@@ -855,9 +867,7 @@ static int at91_init_twi_recovery_info(struct platform_device *pdev,
                return -EPROBE_DEFER;
 
        if (IS_ERR(rinfo->sda_gpiod) ||
-           IS_ERR(rinfo->scl_gpiod) ||
-           IS_ERR(dev->pinctrl_pins_default) ||
-           IS_ERR(dev->pinctrl_pins_gpio)) {
+           IS_ERR(rinfo->scl_gpiod)) {
                dev_info(&pdev->dev, "recovery information incomplete\n");
                if (!IS_ERR(rinfo->sda_gpiod)) {
                        gpiod_put(rinfo->sda_gpiod);
@@ -867,9 +877,13 @@ static int at91_init_twi_recovery_info(struct platform_device *pdev,
                        gpiod_put(rinfo->scl_gpiod);
                        rinfo->scl_gpiod = NULL;
                }
+               pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_default);
                return -EINVAL;
        }
 
+       /* change the state of the pins back to their default state */
+       pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_default);
+
        dev_info(&pdev->dev, "using scl, sda for recovery\n");
 
        rinfo->prepare_recovery = at91_prepare_twi_recovery;
index 44be0926b566395798340dc23e59278cbe2310f0..d091a12596ad2c66367d59f6a412fa3b3fdaba7c 100644 (file)
@@ -360,6 +360,9 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
                        value = (u8)((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK);
                        i2c_slave_event(iproc_i2c->slave,
                                        I2C_SLAVE_WRITE_RECEIVED, &value);
+                       if (rx_status == I2C_SLAVE_RX_END)
+                               i2c_slave_event(iproc_i2c->slave,
+                                               I2C_SLAVE_STOP, &value);
                }
        } else if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
                /* Master read other than start */
index 8280ac7cc1b7df4fc2b902542027559405e4d55f..4c4d17ddc96b964125f89b49c13dd1f44f8bc457 100644 (file)
@@ -996,13 +996,14 @@ tegra_i2c_poll_completion_timeout(struct tegra_i2c_dev *i2c_dev,
        do {
                u32 status = i2c_readl(i2c_dev, I2C_INT_STATUS);
 
-               if (status)
+               if (status) {
                        tegra_i2c_isr(i2c_dev->irq, i2c_dev);
 
-               if (completion_done(complete)) {
-                       s64 delta = ktime_ms_delta(ktimeout, ktime);
+                       if (completion_done(complete)) {
+                               s64 delta = ktime_ms_delta(ktimeout, ktime);
 
-                       return msecs_to_jiffies(delta) ?: 1;
+                               return msecs_to_jiffies(delta) ?: 1;
+                       }
                }
 
                ktime = ktime_get();
@@ -1029,18 +1030,14 @@ tegra_i2c_wait_completion_timeout(struct tegra_i2c_dev *i2c_dev,
                disable_irq(i2c_dev->irq);
 
                /*
-                * Under some rare circumstances (like running KASAN +
-                * NFS root) CPU, which handles interrupt, may stuck in
-                * uninterruptible state for a significant time.  In this
-                * case we will get timeout if I2C transfer is running on
-                * a sibling CPU, despite of IRQ being raised.
-                *
-                * In order to handle this rare condition, the IRQ status
-                * needs to be checked after timeout.
+                * There is a chance that completion may happen after IRQ
+                * synchronization, which is done by disable_irq().
                 */
-               if (ret == 0)
-                       ret = tegra_i2c_poll_completion_timeout(i2c_dev,
-                                                               complete, 0);
+               if (ret == 0 && completion_done(complete)) {
+                       dev_warn(i2c_dev->dev,
+                                "completion done after timeout\n");
+                       ret = 1;
+               }
        }
 
        return ret;
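
After a timed-out wait, disable_irq() synchronizes with any handler instance still running, so a final completion_done() check cleanly separates "the IRQ fired just after the timeout" from a genuine timeout. The pattern in isolation (a sketch; names follow the driver):

    static long wait_or_flag_race(struct tegra_i2c_dev *i2c_dev,
                                  struct completion *complete,
                                  unsigned long timeout)
    {
            long ret = wait_for_completion_timeout(complete, timeout);

            /* quiesce: after this no handler instance is still running */
            disable_irq(i2c_dev->irq);

            /* the IRQ may have completed us right as the timeout hit */
            if (ret == 0 && completion_done(complete))
                    ret = 1;

            return ret;
    }
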
@@ -1219,15 +1216,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
                time_left = tegra_i2c_wait_completion_timeout(
                                i2c_dev, &i2c_dev->dma_complete, xfer_time);
 
-               /*
-                * Synchronize DMA first, since dmaengine_terminate_sync()
-                * performs synchronization after the transfer's termination
-                * and we want to get a completion if transfer succeeded.
-                */
-               dmaengine_synchronize(i2c_dev->msg_read ?
-                                     i2c_dev->rx_dma_chan :
-                                     i2c_dev->tx_dma_chan);
-
                dmaengine_terminate_sync(i2c_dev->msg_read ?
                                         i2c_dev->rx_dma_chan :
                                         i2c_dev->tx_dma_chan);
index a6691278206474374c68a5b8786d4cf48f92bd34..1f1442dfcad7b0c478e261932a1b4330cd208cd6 100644 (file)
@@ -7,7 +7,7 @@
  *   Mux support by Rodolfo Giometti <giometti@enneenne.com> and
  *   Michael Lawnick <michael.lawnick.ext@nsn.com>
  *
- * Copyright (C) 2013-2017 Wolfram Sang <wsa@the-dreams.de>
+ * Copyright (C) 2013-2017 Wolfram Sang <wsa@kernel.org>
  */
 
 #define pr_fmt(fmt) "i2c-core: " fmt
@@ -338,8 +338,10 @@ static int i2c_device_probe(struct device *dev)
                } else if (ACPI_COMPANION(dev)) {
                        irq = i2c_acpi_get_irq(client);
                }
-               if (irq == -EPROBE_DEFER)
-                       return irq;
+               if (irq == -EPROBE_DEFER) {
+                       status = irq;
+                       goto put_sync_adapter;
+               }
 
                if (irq < 0)
                        irq = 0;
@@ -353,15 +355,19 @@ static int i2c_device_probe(struct device *dev)
         */
        if (!driver->id_table &&
            !i2c_acpi_match_device(dev->driver->acpi_match_table, client) &&
-           !i2c_of_match_device(dev->driver->of_match_table, client))
-               return -ENODEV;
+           !i2c_of_match_device(dev->driver->of_match_table, client)) {
+               status = -ENODEV;
+               goto put_sync_adapter;
+       }
 
        if (client->flags & I2C_CLIENT_WAKE) {
                int wakeirq;
 
                wakeirq = of_irq_get_byname(dev->of_node, "wakeup");
-               if (wakeirq == -EPROBE_DEFER)
-                       return wakeirq;
+               if (wakeirq == -EPROBE_DEFER) {
+                       status = wakeirq;
+                       goto put_sync_adapter;
+               }
 
                device_init_wakeup(&client->dev, true);
 
@@ -408,6 +414,10 @@ err_detach_pm_domain:
 err_clear_wakeup_irq:
        dev_pm_clear_wake_irq(&client->dev);
        device_init_wakeup(&client->dev, false);
+put_sync_adapter:
+       if (client->flags & I2C_CLIENT_HOST_NOTIFY)
+               pm_runtime_put_sync(&client->adapter->dev);
+
        return status;
 }
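
The probe fix matters because I2C_CLIENT_HOST_NOTIFY clients take a runtime-PM reference on the adapter early in probe, and each early return after that point used to leak it. A condensed sketch of the balanced shape (do_probe_steps() is a placeholder):

    static int probe_with_pm_sketch(struct i2c_client *client)
    {
            int status;

            if (client->flags & I2C_CLIENT_HOST_NOTIFY)
                    pm_runtime_get_sync(&client->adapter->dev); /* take ref */

            status = do_probe_steps(client);    /* hypothetical body */
            if (status == 0)
                    return 0;                   /* ref kept while bound */

            /* every failure path must drop the reference again */
            if (client->flags & I2C_CLIENT_HOST_NOTIFY)
                    pm_runtime_put_sync(&client->adapter->dev);

            return status;
    }
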
 
index 6787c1f7148353732b1ed5ee144271d4f5b91f9a..3ed74aa4b44bb835a9296945af14ca1b98f30ea8 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright (C) 2008 Jochen Friedrich <jochen@scram.de>
  * based on a previous patch from Jon Smirl <jonsmirl@gmail.com>
  *
- * Copyright (C) 2013, 2018 Wolfram Sang <wsa@the-dreams.de>
+ * Copyright (C) 2013, 2018 Wolfram Sang <wsa@kernel.org>
  */
 
 #include <dt-bindings/i2c/i2c.h>
index 0e16490eb3a10bcaddc79c4e62c721e7cadf0b3d..5365199a31f419b9db442ba3fa555e3a3ed044fc 100644 (file)
@@ -272,6 +272,7 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
 err_rollback_available:
        device_remove_file(&pdev->dev, &dev_attr_available_masters);
 err_rollback:
+       i2c_demux_deactivate_master(priv);
        for (j = 0; j < i; j++) {
                of_node_put(priv->chan[j].parent_np);
                of_changeset_destroy(&priv->chan[j].chgset);
index 66d768d971e1e6212219b219673399f9fe4eded0..6e429072e44a489b6ccd11a2085580d80449c533 100644 (file)
@@ -980,7 +980,7 @@ static int sca3000_read_data(struct sca3000_state *st,
        st->tx[0] = SCA3000_READ_REG(reg_address_high);
        ret = spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
        if (ret) {
-               dev_err(get_device(&st->us->dev), "problem reading register");
+               dev_err(&st->us->dev, "problem reading register\n");
                return ret;
        }
 
index ae622ee6d08cbb7d77ec68b9f01d077de6ddee8a..dfc3a306c6677fe3540df532d31fc6b52c07730c 100644 (file)
@@ -1812,18 +1812,18 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
        return 0;
 }
 
-static int stm32_adc_dma_request(struct iio_dev *indio_dev)
+static int stm32_adc_dma_request(struct device *dev, struct iio_dev *indio_dev)
 {
        struct stm32_adc *adc = iio_priv(indio_dev);
        struct dma_slave_config config;
        int ret;
 
-       adc->dma_chan = dma_request_chan(&indio_dev->dev, "rx");
+       adc->dma_chan = dma_request_chan(dev, "rx");
        if (IS_ERR(adc->dma_chan)) {
                ret = PTR_ERR(adc->dma_chan);
                if (ret != -ENODEV) {
                        if (ret != -EPROBE_DEFER)
-                               dev_err(&indio_dev->dev,
+                               dev_err(dev,
                                        "DMA channel request failed with %d\n",
                                        ret);
                        return ret;
@@ -1930,7 +1930,7 @@ static int stm32_adc_probe(struct platform_device *pdev)
        if (ret < 0)
                return ret;
 
-       ret = stm32_adc_dma_request(indio_dev);
+       ret = stm32_adc_dma_request(dev, indio_dev);
        if (ret < 0)
                return ret;
 
index 76a60d93fe23f148791adb6e78b663a8b7363156..506bf519f64ca43fb6d2e6b8278945de862051a2 100644 (file)
@@ -62,7 +62,7 @@ enum sd_converter_type {
 
 struct stm32_dfsdm_dev_data {
        int type;
-       int (*init)(struct iio_dev *indio_dev);
+       int (*init)(struct device *dev, struct iio_dev *indio_dev);
        unsigned int num_channels;
        const struct regmap_config *regmap_cfg;
 };
@@ -1365,11 +1365,12 @@ static void stm32_dfsdm_dma_release(struct iio_dev *indio_dev)
        }
 }
 
-static int stm32_dfsdm_dma_request(struct iio_dev *indio_dev)
+static int stm32_dfsdm_dma_request(struct device *dev,
+                                  struct iio_dev *indio_dev)
 {
        struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
 
-       adc->dma_chan = dma_request_chan(&indio_dev->dev, "rx");
+       adc->dma_chan = dma_request_chan(dev, "rx");
        if (IS_ERR(adc->dma_chan)) {
                int ret = PTR_ERR(adc->dma_chan);
 
@@ -1425,7 +1426,7 @@ static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,
                                          &adc->dfsdm->ch_list[ch->channel]);
 }
 
-static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev)
+static int stm32_dfsdm_audio_init(struct device *dev, struct iio_dev *indio_dev)
 {
        struct iio_chan_spec *ch;
        struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
@@ -1452,10 +1453,10 @@ static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev)
        indio_dev->num_channels = 1;
        indio_dev->channels = ch;
 
-       return stm32_dfsdm_dma_request(indio_dev);
+       return stm32_dfsdm_dma_request(dev, indio_dev);
 }
 
-static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
+static int stm32_dfsdm_adc_init(struct device *dev, struct iio_dev *indio_dev)
 {
        struct iio_chan_spec *ch;
        struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
@@ -1499,17 +1500,17 @@ static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
        init_completion(&adc->completion);
 
        /* Optionally request DMA */
-       ret = stm32_dfsdm_dma_request(indio_dev);
+       ret = stm32_dfsdm_dma_request(dev, indio_dev);
        if (ret) {
                if (ret != -ENODEV) {
                        if (ret != -EPROBE_DEFER)
-                               dev_err(&indio_dev->dev,
+                               dev_err(dev,
                                        "DMA channel request failed with %d\n",
                                        ret);
                        return ret;
                }
 
-               dev_dbg(&indio_dev->dev, "No DMA support\n");
+               dev_dbg(dev, "No DMA support\n");
                return 0;
        }
 
@@ -1622,7 +1623,7 @@ static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
                adc->dfsdm->fl_list[adc->fl_id].sync_mode = val;
 
        adc->dev_data = dev_data;
-       ret = dev_data->init(iio);
+       ret = dev_data->init(dev, iio);
        if (ret < 0)
                return ret;
 
index abe4b56c847c7db3c362f0c1d5fea0159dacf74b..8a8792010c20b3dfe77e4dc79bb333cd9d038d30 100644 (file)
@@ -32,16 +32,17 @@ struct ads8344 {
        u8 rx_buf[3];
 };
 
-#define ADS8344_VOLTAGE_CHANNEL(chan, si)                              \
+#define ADS8344_VOLTAGE_CHANNEL(chan, addr)                            \
        {                                                               \
                .type = IIO_VOLTAGE,                                    \
                .indexed = 1,                                           \
                .channel = chan,                                        \
                .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),           \
                .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),   \
+               .address = addr,                                        \
        }
 
-#define ADS8344_VOLTAGE_CHANNEL_DIFF(chan1, chan2, si)                 \
+#define ADS8344_VOLTAGE_CHANNEL_DIFF(chan1, chan2, addr)               \
        {                                                               \
                .type = IIO_VOLTAGE,                                    \
                .indexed = 1,                                           \
@@ -50,6 +51,7 @@ struct ads8344 {
                .differential = 1,                                      \
                .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),           \
                .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),   \
+               .address = addr,                                        \
        }
 
 static const struct iio_chan_spec ads8344_channels[] = {
@@ -105,7 +107,7 @@ static int ads8344_read_raw(struct iio_dev *iio,
        switch (mask) {
        case IIO_CHAN_INFO_RAW:
                mutex_lock(&adc->lock);
-               *value = ads8344_adc_conversion(adc, channel->scan_index,
+               *value = ads8344_adc_conversion(adc, channel->address,
                                                channel->differential);
                mutex_unlock(&adc->lock);
                if (*value < 0)
index 82d470561ad3c6359f6aa37bb497f61457375230..7b199ce16ecf3555ed170315addbf61ec5ada165 100644 (file)
@@ -194,7 +194,19 @@ static const struct iio_chan_spec atlas_orp_channels[] = {
 };
 
 static const struct iio_chan_spec atlas_do_channels[] = {
-       ATLAS_CONCENTRATION_CHANNEL(0, ATLAS_REG_DO_DATA),
+       {
+               .type = IIO_CONCENTRATION,
+               .address = ATLAS_REG_DO_DATA,
+               .info_mask_separate =
+                       BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+               .scan_index = 0,
+               .scan_type = {
+                       .sign = 'u',
+                       .realbits = 32,
+                       .storagebits = 32,
+                       .endianness = IIO_BE,
+               },
+       },
        IIO_CHAN_SOFT_TIMESTAMP(1),
        {
                .type = IIO_TEMP,
index 71f8a5c471c43c1947d2a5ab3ec932e7c9513ff9..7f1e9317c3f3b44b2c9267f62b9d28d1b07ec204 100644 (file)
@@ -223,6 +223,7 @@ static int vf610_dac_probe(struct platform_device *pdev)
        return 0;
 
 error_iio_device_register:
+       vf610_dac_exit(info);
        clk_disable_unprepare(info->clk);
 
        return ret;
index 64ef07a307263a8335fe990c093f6bf4237f7aa8..1cf98195f84dd0ecd470e48a8fadb9c2ae9db9ef 100644 (file)
@@ -544,8 +544,10 @@ st_lsm6dsx_shub_write_raw(struct iio_dev *iio_dev,
 
                        ref_sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]);
                        odr = st_lsm6dsx_check_odr(ref_sensor, val, &odr_val);
-                       if (odr < 0)
-                               return odr;
+                       if (odr < 0) {
+                               err = odr;
+                               goto release;
+                       }
 
                        sensor->ext_info.slv_odr = val;
                        sensor->odr = odr;
@@ -557,6 +559,7 @@ st_lsm6dsx_shub_write_raw(struct iio_dev *iio_dev,
                break;
        }
 
+release:
        iio_device_release_direct_mode(iio_dev);
 
        return err;
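
The st_lsm6dsx hunks convert an early return into goto release so that the direct-mode claim taken at the top of the function is always dropped. A minimal sketch of the claim/release pairing, with do_configuration() as a hypothetical stand-in for the function body:

static int demo_write_raw(struct iio_dev *iio_dev)
{
	int err;

	err = iio_device_claim_direct_mode(iio_dev);
	if (err)
		return err;

	/* every failure from here on must fall through to the release */
	err = do_configuration(iio_dev);

	iio_device_release_direct_mode(iio_dev);
	return err;
}
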
index 717b798cddad4f7f7e5bceeadb46476565a9543f..a670209bbce6064135b499d567095f64caf47e01 100644 (file)
@@ -1553,8 +1553,11 @@ int ib_cache_setup_one(struct ib_device *device)
        if (err)
                return err;
 
-       rdma_for_each_port (device, p)
-               ib_cache_update(device, p, true);
+       rdma_for_each_port (device, p) {
+               err = ib_cache_update(device, p, true);
+               if (err)
+                       return err;
+       }
 
        return 0;
 }
index 4794113ecd596c3f551ed7dc08c0e9171191f82b..17f14e0eafe4d484e5657ad138cc89145b925621 100644 (file)
@@ -862,7 +862,7 @@ static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,
 
        ret = xa_alloc_cyclic_irq(&cm.local_id_table, &id, NULL, xa_limit_32b,
                                  &cm.local_id_next, GFP_KERNEL);
-       if (ret)
+       if (ret < 0)
                goto error;
        cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
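
The check above changes because xa_alloc_cyclic_irq(), like xa_alloc_cyclic(), returns 1 rather than 0 when the allocation succeeds after the cyclic counter wraps; only negative returns are errors, so "if (ret)" would begin failing once the 32-bit ID space had wrapped. A sketch of the intended pattern (using the non-IRQ variant for brevity):

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(demo_xa);
static u32 demo_next;

static int demo_alloc_id(void *entry, u32 *id)
{
	int ret = xa_alloc_cyclic(&demo_xa, id, entry, xa_limit_32b,
				  &demo_next, GFP_KERNEL);

	if (ret < 0)		/* -ENOMEM, -EBUSY, ... */
		return ret;
	/* ret == 1 only means the counter wrapped; the allocation succeeded */
	return 0;
}
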
 
@@ -1828,11 +1828,9 @@ static void cm_format_mra(struct cm_mra_msg *mra_msg,
 
 static void cm_format_rej(struct cm_rej_msg *rej_msg,
                          struct cm_id_private *cm_id_priv,
-                         enum ib_cm_rej_reason reason,
-                         void *ari,
-                         u8 ari_length,
-                         const void *private_data,
-                         u8 private_data_len)
+                         enum ib_cm_rej_reason reason, void *ari,
+                         u8 ari_length, const void *private_data,
+                         u8 private_data_len, enum ib_cm_state state)
 {
        lockdep_assert_held(&cm_id_priv->lock);
 
@@ -1840,7 +1838,7 @@ static void cm_format_rej(struct cm_rej_msg *rej_msg,
        IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
                be32_to_cpu(cm_id_priv->id.remote_id));
 
-       switch(cm_id_priv->id.state) {
+       switch (state) {
        case IB_CM_REQ_RCVD:
                IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0));
                IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
@@ -1905,8 +1903,9 @@ static void cm_dup_req_handler(struct cm_work *work,
                              cm_id_priv->private_data_len);
                break;
        case IB_CM_TIMEWAIT:
-               cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
-                             IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
+               cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv,
+                             IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0,
+                             IB_CM_TIMEWAIT);
                break;
        default:
                goto unlock;
@@ -2904,6 +2903,7 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
                              u8 ari_length, const void *private_data,
                              u8 private_data_len)
 {
+       enum ib_cm_state state = cm_id_priv->id.state;
        struct ib_mad_send_buf *msg;
        int ret;
 
@@ -2913,7 +2913,7 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
            (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
                return -EINVAL;
 
-       switch (cm_id_priv->id.state) {
+       switch (state) {
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
        case IB_CM_REQ_RCVD:
@@ -2925,7 +2925,8 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
                if (ret)
                        return ret;
                cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
-                             ari, ari_length, private_data, private_data_len);
+                             ari, ari_length, private_data, private_data_len,
+                             state);
                break;
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
@@ -2934,7 +2935,8 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
                if (ret)
                        return ret;
                cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
-                             ari, ari_length, private_data, private_data_len);
+                             ari, ari_length, private_data, private_data_len,
+                             state);
                break;
        default:
                pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
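
The common thread in the cm.c hunks above: cm_send_rej_locked() now snapshots the connection state once, under the lock, and threads it through to cm_format_rej() instead of letting the formatter re-read cm_id_priv->id.state. Plausibly this guards against helpers invoked between the snapshot and the formatting moving the id into another state, so the REJ is built against the state in which the decision was made. The pattern, sketched:

	enum ib_cm_state state = cm_id_priv->id.state;	/* read once, under lock */

	/* ...calls here may transition cm_id_priv->id.state... */

	cm_format_rej(rej_msg, cm_id_priv, reason, ari, ari_length,
		      private_data, private_data_len, state);
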
index 9eec26d10d7b1884bf0d15aa7f2d6fa1fed40a77..e16105be2eb238934655aa186693317e54b1375c 100644 (file)
@@ -1292,11 +1292,10 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
        has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);
 
        ret = fill_func(msg, has_cap_net_admin, res, port);
-
-       rdma_restrack_put(res);
        if (ret)
                goto err_free;
 
+       rdma_restrack_put(res);
        nlmsg_end(msg, nlh);
        ib_device_put(device);
        return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
index 5128cb16bb4856f83f10c7a2dcb758d935418b36..e0a5e897e4b1df04425421994e6a3673271d6121 100644 (file)
@@ -153,9 +153,9 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj,
        uobj->context = NULL;
 
        /*
-        * For DESTROY the usecnt is held write locked, the caller is expected
-        * to put it unlock and put the object when done with it. Only DESTROY
-        * can remove the IDR handle.
+        * For DESTROY the usecnt is not changed, the caller is expected to
+        * manage it via uobj_put_destroy(). Only DESTROY can remove the IDR
+        * handle.
         */
        if (reason != RDMA_REMOVE_DESTROY)
                atomic_set(&uobj->usecnt, 0);
@@ -187,7 +187,7 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj,
 /*
  * This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY
  * sequence. It should only be used from command callbacks. On success the
- * caller must pair this with rdma_lookup_put_uobject(LOOKUP_WRITE). This
+ * caller must pair this with uobj_put_destroy(). This
  * version requires the caller to have already obtained an
  * LOOKUP_DESTROY uobject kref.
  */
@@ -198,6 +198,13 @@ int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs)
 
        down_read(&ufile->hw_destroy_rwsem);
 
+       /*
+        * Once the uobject is destroyed by RDMA_REMOVE_DESTROY then it is left
+        * write locked as the callers put it back with UVERBS_LOOKUP_DESTROY.
+        * This is because any other concurrent thread can still see the object
+        * in the xarray due to RCU. Leaving it locked ensures nothing else will
+        * touch it.
+        */
        ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE);
        if (ret)
                goto out_unlock;
@@ -216,7 +223,7 @@ out_unlock:
 /*
  * uobj_get_destroy destroys the HW object and returns a handle to the uobj
  * with a NULL object pointer. The caller must pair this with
- * uverbs_put_destroy.
+ * uobj_put_destroy().
  */
 struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,
                                      u32 id, struct uverbs_attr_bundle *attrs)
@@ -250,8 +257,7 @@ int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id,
        uobj = __uobj_get_destroy(obj, id, attrs);
        if (IS_ERR(uobj))
                return PTR_ERR(uobj);
-
-       rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
+       uobj_put_destroy(uobj);
        return 0;
 }
 
@@ -360,7 +366,7 @@ lookup_get_fd_uobject(const struct uverbs_api_object *obj,
         * uverbs_uobject_fd_release(), and the caller is expected to ensure
         * that release is never done while a call to lookup is possible.
         */
-       if (f->f_op != fd_type->fops) {
+       if (f->f_op != fd_type->fops || uobject->ufile != ufile) {
                fput(f);
                return ERR_PTR(-EBADF);
        }
@@ -459,7 +465,8 @@ alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
        struct ib_uobject *uobj;
        struct file *filp;
 
-       if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release))
+       if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release &&
+                   fd_type->fops->release != &uverbs_async_event_release))
                return ERR_PTR(-EINVAL);
 
        new_fd = get_unused_fd_flags(O_CLOEXEC);
@@ -474,16 +481,15 @@ alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
        filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL,
                                  fd_type->flags);
        if (IS_ERR(filp)) {
+               uverbs_uobject_put(uobj);
                uobj = ERR_CAST(filp);
-               goto err_uobj;
+               goto err_fd;
        }
        uobj->object = filp;
 
        uobj->id = new_fd;
        return uobj;
 
-err_uobj:
-       uverbs_uobject_put(uobj);
 err_fd:
        put_unused_fd(new_fd);
        return uobj;
@@ -679,7 +685,6 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj,
                             enum rdma_lookup_mode mode)
 {
        assert_uverbs_usecnt(uobj, mode);
-       uobj->uapi_object->type_class->lookup_put(uobj, mode);
        /*
         * In order to unlock an object, either decrease its usecnt for
         * read access or zero it in case of exclusive access. See
@@ -696,6 +701,7 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj,
                break;
        }
 
+       uobj->uapi_object->type_class->lookup_put(uobj, mode);
        /* Pairs with the kref obtained by type->lookup_get */
        uverbs_uobject_put(uobj);
 }
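
The rdma_lookup_put_uobject() reorder makes the kref drop the very last touch of the object: the usecnt is unlocked and the class-specific lookup_put callback runs first, while the lookup reference still pins the memory. A generic sketch of that invariant, with demo names:

#include <linux/kref.h>
#include <linux/slab.h>

struct demo_obj {
	struct kref ref;
};

static void demo_release(struct kref *ref)
{
	kfree(container_of(ref, struct demo_obj, ref));
}

static void demo_unlock_and_put(struct demo_obj *obj)
{
	/* drop locks and run teardown callbacks while the kref still pins obj */

	kref_put(&obj->ref, demo_release);	/* may free obj; nothing may
						 * dereference it afterwards */
}
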
index 7df71983212d6f9dfecb6ac8bc3455f29818e7db..3d189c7ee59e6663dd73c7387b6ef6b33e6b3edd 100644 (file)
@@ -219,6 +219,7 @@ void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue);
 void ib_uverbs_init_async_event_file(struct ib_uverbs_async_event_file *ev_file);
 void ib_uverbs_free_event_queue(struct ib_uverbs_event_queue *event_queue);
 void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res);
+int uverbs_async_event_release(struct inode *inode, struct file *filp);
 
 int ib_alloc_ucontext(struct uverbs_attr_bundle *attrs);
 int ib_init_ucontext(struct uverbs_attr_bundle *attrs);
@@ -227,6 +228,9 @@ void ib_uverbs_release_ucq(struct ib_uverbs_completion_event_file *ev_file,
                           struct ib_ucq_object *uobj);
 void ib_uverbs_release_uevent(struct ib_uevent_object *uobj);
 void ib_uverbs_release_file(struct kref *ref);
+void ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
+                            __u64 element, __u64 event,
+                            struct list_head *obj_list, u32 *counter);
 
 void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context);
 void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr);
index 2d4083bf4a048767775eea73680debe4351f20df..1bab8de14757416aef3af4254abfaea653811861 100644 (file)
@@ -346,7 +346,7 @@ const struct file_operations uverbs_async_event_fops = {
        .owner   = THIS_MODULE,
        .read    = ib_uverbs_async_event_read,
        .poll    = ib_uverbs_async_event_poll,
-       .release = uverbs_uobject_fd_release,
+       .release = uverbs_async_event_release,
        .fasync  = ib_uverbs_async_event_fasync,
        .llseek  = no_llseek,
 };
@@ -386,10 +386,9 @@ void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
        kill_fasync(&ev_queue->async_queue, SIGIO, POLL_IN);
 }
 
-static void
-ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
-                       __u64 element, __u64 event, struct list_head *obj_list,
-                       u32 *counter)
+void ib_uverbs_async_handler(struct ib_uverbs_async_event_file *async_file,
+                            __u64 element, __u64 event,
+                            struct list_head *obj_list, u32 *counter)
 {
        struct ib_uverbs_event *entry;
        unsigned long flags;
@@ -820,6 +819,10 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
                        ret = mmget_not_zero(mm);
                        if (!ret) {
                                list_del_init(&priv->list);
+                               if (priv->entry) {
+                                       rdma_user_mmap_entry_put(priv->entry);
+                                       priv->entry = NULL;
+                               }
                                mm = NULL;
                                continue;
                        }
@@ -1183,9 +1186,6 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
                 */
                mutex_unlock(&uverbs_dev->lists_mutex);
 
-               ib_uverbs_async_handler(READ_ONCE(file->async_file), 0,
-                                       IB_EVENT_DEVICE_FATAL, NULL, NULL);
-
                uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE);
                kref_put(&file->ref, ib_uverbs_release_file);
 
index 82ec0806b34bd61b6ebf4eb248b03f6e9fbbdd8b..61899eaf1f91ff6a12ae5220f6d1e887293fc05c 100644 (file)
@@ -26,10 +26,38 @@ static int uverbs_async_event_destroy_uobj(struct ib_uobject *uobj,
                container_of(uobj, struct ib_uverbs_async_event_file, uobj);
 
        ib_unregister_event_handler(&event_file->event_handler);
-       ib_uverbs_free_event_queue(&event_file->ev_queue);
+
+       if (why == RDMA_REMOVE_DRIVER_REMOVE)
+               ib_uverbs_async_handler(event_file, 0, IB_EVENT_DEVICE_FATAL,
+                                       NULL, NULL);
        return 0;
 }
 
+int uverbs_async_event_release(struct inode *inode, struct file *filp)
+{
+       struct ib_uverbs_async_event_file *event_file;
+       struct ib_uobject *uobj = filp->private_data;
+       int ret;
+
+       if (!uobj)
+               return uverbs_uobject_fd_release(inode, filp);
+
+       event_file =
+               container_of(uobj, struct ib_uverbs_async_event_file, uobj);
+
+       /*
+        * The async event FD has to deliver IB_EVENT_DEVICE_FATAL even after
+        * disassociation, so cleaning the event list must only happen after
+        * release. The user knows it has reached the end of the event stream
+        * when it sees IB_EVENT_DEVICE_FATAL.
+        */
+       uverbs_uobject_get(uobj);
+       ret = uverbs_uobject_fd_release(inode, filp);
+       ib_uverbs_free_event_queue(&event_file->ev_queue);
+       uverbs_uobject_put(uobj);
+       return ret;
+}
+
 DECLARE_UVERBS_NAMED_METHOD(
        UVERBS_METHOD_ASYNC_EVENT_ALLOC,
        UVERBS_ATTR_FD(UVERBS_ATTR_ASYNC_EVENT_ALLOC_FD_HANDLE,
index d69dece3b1d541ad3b834cc9ea128de7c9f20168..30e08bcc9afb53dfb45d83345939a2124c0b6eb6 100644 (file)
@@ -2891,8 +2891,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
                        srqidx = ABORT_RSS_SRQIDX_G(
                                        be32_to_cpu(req->srqidx_status));
                        if (srqidx) {
-                               complete_cached_srq_buffers(ep,
-                                                           req->srqidx_status);
+                               complete_cached_srq_buffers(ep, srqidx);
                        } else {
                                /* Hold ep ref until finish_peer_abort() */
                                c4iw_get_ep(&ep->com);
@@ -3878,8 +3877,8 @@ static int read_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
                return 0;
        }
 
-       ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_W,
-                       TCB_RQ_START_S);
+       ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_M,
+                                         TCB_RQ_START_S);
 cleanup:
        pr_debug("ep %p tid %u %016x\n", ep, ep->hwtid, ep->srqe_idx);
 
index 13e4203497b33770cd9407df8b327d43f2fffd99..a92346e88628bd0a9d214df2b58e0aaf4e891ec9 100644 (file)
@@ -589,10 +589,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
 
        set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
        pq->state = SDMA_PKT_Q_ACTIVE;
-       /* Send the first N packets in the request to buy us some time */
-       ret = user_sdma_send_pkts(req, pcount);
-       if (unlikely(ret < 0 && ret != -EBUSY))
-               goto free_req;
 
        /*
         * This is a somewhat blocking send implementation.
index bb78d3280accdc7bd54a923db5e9a312217ceb12..fa7a5ff498c73bfd851030db4bfda3b010aca81b 100644 (file)
@@ -1987,7 +1987,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
        struct rtable *rt;
        struct neighbour *neigh;
        int rc = arpindex;
-       struct net_device *netdev = iwdev->netdev;
        __be32 dst_ipaddr = htonl(dst_ip);
        __be32 src_ipaddr = htonl(src_ip);
 
@@ -1997,9 +1996,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
                return rc;
        }
 
-       if (netif_is_bond_slave(netdev))
-               netdev = netdev_master_upper_dev_get(netdev);
-
        neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
 
        rcu_read_lock();
@@ -2065,7 +2061,6 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
 {
        struct neighbour *neigh;
        int rc = arpindex;
-       struct net_device *netdev = iwdev->netdev;
        struct dst_entry *dst;
        struct sockaddr_in6 dst_addr;
        struct sockaddr_in6 src_addr;
@@ -2086,9 +2081,6 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
                return rc;
        }
 
-       if (netif_is_bond_slave(netdev))
-               netdev = netdev_master_upper_dev_get(netdev);
-
        neigh = dst_neigh_lookup(dst, dst_addr.sin6_addr.in6_u.u6_addr32);
 
        rcu_read_lock();
index e8b4b3743661da0c6cfb1681efbe174875f216d4..688f196672215ee4b6c5384443218bb50cc48286 100644 (file)
@@ -1046,7 +1046,7 @@ i40iw_sc_query_rdma_features(struct i40iw_sc_cqp *cqp,
        u64 header;
 
        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
-       if (wqe)
+       if (!wqe)
                return I40IW_ERR_RING_FULL;
 
        set_64bit_val(wqe, 32, feat_mem->pa);
index 55a1fbf0e670c7fa1a775d94e35dd77ee07e0cba..ae8b97c3066575388c6d3381b0d76f03b5eb8910 100644 (file)
@@ -534,7 +534,7 @@ void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
        int arp_index;
 
        arp_index = i40iw_arp_table(iwdev, ip_addr, ipv4, mac_addr, action);
-       if (arp_index == -1)
+       if (arp_index < 0)
                return;
        cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
        if (!cqp_request)
index a66518a5c93868ac2ae3579c89736e1327417f1c..275722cec8c675690c573a000c452abc81f7d0f0 100644 (file)
@@ -1499,8 +1499,9 @@ static int __mlx4_ib_create_default_rules(
        int i;
 
        for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
+               union ib_flow_spec ib_spec = {};
                int ret;
-               union ib_flow_spec ib_spec;
+
                switch (pdefault_rules->rules_create_list[i]) {
                case 0:
                        /* no rule */
index 2f9f78912267d7eed77b387dd7d2835cd00d2b77..cf51e3cbd96919421927d4b5395a03fd5980082b 100644 (file)
@@ -2891,6 +2891,7 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
        int send_size;
        int header_size;
        int spc;
+       int err;
        int i;
 
        if (wr->wr.opcode != IB_WR_SEND)
@@ -2925,7 +2926,9 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
 
        sqp->ud_header.lrh.virtual_lane    = 0;
        sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
-       ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
+       err = ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
+       if (err)
+               return err;
        sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
        if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
                sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
@@ -3212,9 +3215,14 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr,
        }
        sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
        if (!sqp->qp.ibqp.qp_num)
-               ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
+               err = ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index,
+                                        &pkey);
        else
-               ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey);
+               err = ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index,
+                                        &pkey);
+       if (err)
+               return err;
+
        sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
        sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
        sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
index a401931189b75b13e3a37ea5d739f36134bc9a09..44683073be0c4a7988d6f3eb917936b444470de1 100644 (file)
@@ -1439,6 +1439,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
        if (is_odp_mr(mr)) {
                to_ib_umem_odp(mr->umem)->private = mr;
+               init_waitqueue_head(&mr->q_deferred_work);
                atomic_set(&mr->num_deferred_work, 0);
                err = xa_err(xa_store(&dev->odp_mkeys,
                                      mlx5_base_mkey(mr->mmkey.key), &mr->mmkey,
index 1456db4b6295907c47e0b7f2d083819e58b49dc3..2210759843ba88e3e9033d0dd2572a2ab31c1839 100644 (file)
@@ -5558,7 +5558,9 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
        rdma_ah_set_path_bits(ah_attr, path->grh_mlid & 0x7f);
        rdma_ah_set_static_rate(ah_attr,
                                path->static_rate ? path->static_rate - 5 : 0);
-       if (path->grh_mlid & (1 << 7)) {
+
+       if (path->grh_mlid & (1 << 7) ||
+           ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
                u32 tc_fl = be32_to_cpu(path->tclass_flowlabel);
 
                rdma_ah_set_grh(ah_attr, NULL,
index 568b21eb6ea158508207275f2626e1c46cbbd892..021df0654ba757e1df63d88d89a92035f072f3ab 100644 (file)
@@ -760,7 +760,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
                qib_dev_err(dd,
                        "Skipping linkcontrol sysfs info, (err %d) port %u\n",
                        ret, port_num);
-               goto bail;
+               goto bail_link;
        }
        kobject_uevent(&ppd->pport_kobj, KOBJ_ADD);
 
@@ -770,7 +770,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
                qib_dev_err(dd,
                        "Skipping sl2vl sysfs info, (err %d) port %u\n",
                        ret, port_num);
-               goto bail_link;
+               goto bail_sl;
        }
        kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD);
 
@@ -780,7 +780,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
                qib_dev_err(dd,
                        "Skipping diag_counters sysfs info, (err %d) port %u\n",
                        ret, port_num);
-               goto bail_sl;
+               goto bail_diagc;
        }
        kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD);
 
@@ -793,7 +793,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
                qib_dev_err(dd,
                 "Skipping Congestion Control sysfs info, (err %d) port %u\n",
                 ret, port_num);
-               goto bail_diagc;
+               goto bail_cc;
        }
 
        kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
@@ -854,6 +854,7 @@ void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
                                &cc_table_bin_attr);
                        kobject_put(&ppd->pport_cc_kobj);
                }
+               kobject_put(&ppd->diagc_kobj);
                kobject_put(&ppd->sl2vl_kobj);
                kobject_put(&ppd->pport_kobj);
        }
index e580ae9cc55a52143362f512e369d7717beaffe3..780fd2dfc07eb03200fede1ad914ab3900af2c87 100644 (file)
@@ -829,7 +829,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
            !(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
                dev_err(&pdev->dev, "PCI BAR region not MMIO\n");
                ret = -ENOMEM;
-               goto err_free_device;
+               goto err_disable_pdev;
        }
 
        ret = pci_request_regions(pdev, DRV_NAME);
index 5724cbbe38b1f2432a3309f3139db548998a6181..04d2e72017fedaefe23ddb25900f583240cac8ca 100644 (file)
@@ -248,8 +248,8 @@ int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                cq->ip = rvt_create_mmap_info(rdi, sz, udata, u_wc);
-               if (!cq->ip) {
-                       err = -ENOMEM;
+               if (IS_ERR(cq->ip)) {
+                       err = PTR_ERR(cq->ip);
                        goto bail_wc;
                }
 
index 652f4a7efc1bed223d5052bf04eb46d296160888..37853aa3bcf7788eee80c219dc8f42e89b1eae20 100644 (file)
@@ -154,7 +154,7 @@ done:
  * @udata: user data (must be valid!)
  * @obj: opaque pointer to a cq, wq etc
  *
- * Return: rvt_mmap struct on success
+ * Return: rvt_mmap struct on success, ERR_PTR on failure
  */
 struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size,
                                           struct ib_udata *udata, void *obj)
@@ -166,7 +166,7 @@ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size,
 
        ip = kmalloc_node(sizeof(*ip), GFP_KERNEL, rdi->dparms.node);
        if (!ip)
-               return ip;
+               return ERR_PTR(-ENOMEM);
 
        size = PAGE_ALIGN(size);
 
index 0e1b291d2cec8cacaf5a5b0b71113d626572c77d..500a7ee04c44e2afd0ce165b0018a1b46fbb3c2a 100644 (file)
@@ -1244,8 +1244,8 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 
                        qp->ip = rvt_create_mmap_info(rdi, s, udata,
                                                      qp->r_rq.wq);
-                       if (!qp->ip) {
-                               ret = ERR_PTR(-ENOMEM);
+                       if (IS_ERR(qp->ip)) {
+                               ret = ERR_CAST(qp->ip);
                                goto bail_qpn;
                        }
 
index 24fef021d51dbb825135d01c6840339ed6a805ee..f547c115af0369883b099699aa9981aa4f394d7f 100644 (file)
@@ -111,8 +111,8 @@ int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr,
                u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;
 
                srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq);
-               if (!srq->ip) {
-                       ret = -ENOMEM;
+               if (IS_ERR(srq->ip)) {
+                       ret = PTR_ERR(srq->ip);
                        goto bail_wq;
                }
 
index 48f48122ddcb8f8dc41d2692ab45e009514ef0e3..6a413d73b95dd84097a9fac9b2cdb3c29708a6d7 100644 (file)
@@ -151,7 +151,7 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size,
 
        ip = kmalloc(sizeof(*ip), GFP_KERNEL);
        if (!ip)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        size = PAGE_ALIGN(size);
 
index ff92704de32ff2a8fd98a2d01b0973a9aa217059..245040c3a35d0e03112ce1f407cf3962e94d1637 100644 (file)
@@ -45,12 +45,15 @@ int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
 
        if (outbuf) {
                ip = rxe_create_mmap_info(rxe, buf_size, udata, buf);
-               if (!ip)
+               if (IS_ERR(ip)) {
+                       err = PTR_ERR(ip);
                        goto err1;
+               }
 
-               err = copy_to_user(outbuf, &ip->info, sizeof(ip->info));
-               if (err)
+               if (copy_to_user(outbuf, &ip->info, sizeof(ip->info))) {
+                       err = -EFAULT;
                        goto err2;
+               }
 
                spin_lock_bh(&rxe->pending_lock);
                list_add(&ip->pending_mmaps, &rxe->pending_mmaps);
@@ -64,7 +67,7 @@ int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
 err2:
        kfree(ip);
 err1:
-       return -EINVAL;
+       return err;
 }
 
 inline void rxe_queue_reset(struct rxe_queue *q)
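
The do_mmap_info() hunk also fixes a classic misuse: copy_to_user() returns the number of bytes it failed to copy, not an errno, so storing that in err and returning it would hand a positive byte count to the caller. Any nonzero remainder should map to -EFAULT. The idiom, sketched (simplified; the real code unwinds through its error labels first):

	if (copy_to_user(outbuf, &ip->info, sizeof(ip->info)))
		return -EFAULT;	/* nonzero means some bytes were left uncopied */
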
index ae92c8080967c5bc66919c32db45390d091ac507..9f53aa4feb8782c0880120da0e26fe545e24f46e 100644 (file)
@@ -920,20 +920,27 @@ static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
 {
        struct ib_mr *base_mr = (struct ib_mr *)(uintptr_t)sqe->base_mr;
        struct siw_device *sdev = to_siw_dev(pd->device);
-       struct siw_mem *mem = siw_mem_id2obj(sdev, sqe->rkey  >> 8);
+       struct siw_mem *mem;
        int rv = 0;
 
        siw_dbg_pd(pd, "STag 0x%08x\n", sqe->rkey);
 
-       if (unlikely(!mem || !base_mr)) {
+       if (unlikely(!base_mr)) {
                pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey);
                return -EINVAL;
        }
+
        if (unlikely(base_mr->rkey >> 8 != sqe->rkey  >> 8)) {
                pr_warn("siw: fastreg: STag 0x%08x: bad MR\n", sqe->rkey);
-               rv = -EINVAL;
-               goto out;
+               return -EINVAL;
        }
+
+       mem = siw_mem_id2obj(sdev, sqe->rkey  >> 8);
+       if (unlikely(!mem)) {
+               pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey);
+               return -EINVAL;
+       }
+
        if (unlikely(mem->pd != pd)) {
                pr_warn("siw: fastreg: PD mismatch\n");
                rv = -EINVAL;
index e188a95984b5c3c38800dc8b061494be874758c8..9a3379c49541fbe51a08e49db7757cc170bdd1bf 100644 (file)
@@ -377,8 +377,12 @@ struct ipoib_dev_priv {
        struct ipoib_rx_buf *rx_ring;
 
        struct ipoib_tx_buf *tx_ring;
+       /* cyclic ring variables for managing tx_ring, for UD only */
        unsigned int         tx_head;
        unsigned int         tx_tail;
+       /* cyclic ring variables for counting overall outstanding send WRs */
+       unsigned int         global_tx_head;
+       unsigned int         global_tx_tail;
        struct ib_sge        tx_sge[MAX_SKB_FRAGS + 1];
        struct ib_ud_wr      tx_wr;
        struct ib_wc         send_wc[MAX_SEND_CQE];
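
global_tx_head and global_tx_tail are free-running unsigned counters covering outstanding send WRs across both the UD and connected-mode paths. Because they are never reset, unsigned subtraction gives the occupancy correctly even after the counters wrap, which is what the queue-stop and queue-wake tests in the following hunks rely on. Reduced to helpers:

static bool ring_full(unsigned int head, unsigned int tail, unsigned int size)
{
	/* head - tail is wrap-safe for free-running unsigned counters */
	return head - tail == size - 1;
}

static bool ring_below_half(unsigned int head, unsigned int tail,
			    unsigned int size)
{
	return head - tail <= size >> 1;
}
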
index c59e00a0881f19e6efae40145332743de8407ca7..9bf0fa30df28c9f76480bfcdf2b20846fe409864 100644 (file)
@@ -756,7 +756,8 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
                return;
        }
 
-       if ((priv->tx_head - priv->tx_tail) == ipoib_sendq_size - 1) {
+       if ((priv->global_tx_head - priv->global_tx_tail) ==
+           ipoib_sendq_size - 1) {
                ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
                          tx->qp->qp_num);
                netif_stop_queue(dev);
@@ -786,7 +787,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
        } else {
                netif_trans_update(dev);
                ++tx->tx_head;
-               ++priv->tx_head;
+               ++priv->global_tx_head;
        }
 }
 
@@ -820,10 +821,11 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
        netif_tx_lock(dev);
 
        ++tx->tx_tail;
-       ++priv->tx_tail;
+       ++priv->global_tx_tail;
 
        if (unlikely(netif_queue_stopped(dev) &&
-                    (priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1 &&
+                    ((priv->global_tx_head - priv->global_tx_tail) <=
+                     ipoib_sendq_size >> 1) &&
                     test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
                netif_wake_queue(dev);
 
@@ -1232,8 +1234,9 @@ timeout:
                dev_kfree_skb_any(tx_req->skb);
                netif_tx_lock_bh(p->dev);
                ++p->tx_tail;
-               ++priv->tx_tail;
-               if (unlikely(priv->tx_head - priv->tx_tail == ipoib_sendq_size >> 1) &&
+               ++priv->global_tx_tail;
+               if (unlikely((priv->global_tx_head - priv->global_tx_tail) <=
+                            ipoib_sendq_size >> 1) &&
                    netif_queue_stopped(p->dev) &&
                    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                        netif_wake_queue(p->dev);
index c332b47618160327966b493a7493e7006833d345..da3c5315bbb515649c7f265fe263f38cca5f9d6e 100644 (file)
@@ -407,9 +407,11 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
        dev_kfree_skb_any(tx_req->skb);
 
        ++priv->tx_tail;
+       ++priv->global_tx_tail;
 
        if (unlikely(netif_queue_stopped(dev) &&
-                    ((priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1) &&
+                    ((priv->global_tx_head - priv->global_tx_tail) <=
+                     ipoib_sendq_size >> 1) &&
                     test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
                netif_wake_queue(dev);
 
@@ -634,7 +636,8 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
        else
                priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
        /* increase the tx_head after send success, but use it for queue state */
-       if (priv->tx_head - priv->tx_tail == ipoib_sendq_size - 1) {
+       if ((priv->global_tx_head - priv->global_tx_tail) ==
+           ipoib_sendq_size - 1) {
                ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
                netif_stop_queue(dev);
        }
@@ -662,6 +665,7 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
 
                rc = priv->tx_head;
                ++priv->tx_head;
+               ++priv->global_tx_head;
        }
        return rc;
 }
@@ -807,6 +811,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
                                ipoib_dma_unmap_tx(priv, tx_req);
                                dev_kfree_skb_any(tx_req->skb);
                                ++priv->tx_tail;
+                               ++priv->global_tx_tail;
                        }
 
                        for (i = 0; i < ipoib_recvq_size; ++i) {
index 81b8227214f1cf11ee622d16d3876e0eae7ed4d9..ceec24d451858adaac8c8503c5ccccb79c6a373c 100644 (file)
@@ -1184,9 +1184,11 @@ static void ipoib_timeout(struct net_device *dev, unsigned int txqueue)
 
        ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
                   jiffies_to_msecs(jiffies - dev_trans_start(dev)));
-       ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
-                  netif_queue_stopped(dev),
-                  priv->tx_head, priv->tx_tail);
+       ipoib_warn(priv,
+                  "queue stopped %d, tx_head %u, tx_tail %u, global_tx_head %u, global_tx_tail %u\n",
+                  netif_queue_stopped(dev), priv->tx_head, priv->tx_tail,
+                  priv->global_tx_head, priv->global_tx_tail);
+
        /* XXX reset QP, etc. */
 }
 
@@ -1701,7 +1703,7 @@ static int ipoib_dev_init_default(struct net_device *dev)
                goto out_rx_ring_cleanup;
        }
 
-       /* priv->tx_head, tx_tail & tx_outstanding are already 0 */
+       /* priv->tx_head, tx_tail and global_tx_tail/head are already 0 */
 
        if (ipoib_transport_dev_init(dev, priv->ca)) {
                pr_warn("%s: ipoib_transport_dev_init failed\n",
index cb6e3a5f509c8a06ad03fc5bdf26d137189cbb9b..0d57e51b8ba1f8640063b043a4d1007e6376ab68 100644 (file)
@@ -326,20 +326,6 @@ static int evdev_fasync(int fd, struct file *file, int on)
        return fasync_helper(fd, file, on, &client->fasync);
 }
 
-static int evdev_flush(struct file *file, fl_owner_t id)
-{
-       struct evdev_client *client = file->private_data;
-       struct evdev *evdev = client->evdev;
-
-       mutex_lock(&evdev->mutex);
-
-       if (evdev->exist && !client->revoked)
-               input_flush_device(&evdev->handle, file);
-
-       mutex_unlock(&evdev->mutex);
-       return 0;
-}
-
 static void evdev_free(struct device *dev)
 {
        struct evdev *evdev = container_of(dev, struct evdev, dev);
@@ -453,6 +439,10 @@ static int evdev_release(struct inode *inode, struct file *file)
        unsigned int i;
 
        mutex_lock(&evdev->mutex);
+
+       if (evdev->exist && !client->revoked)
+               input_flush_device(&evdev->handle, file);
+
        evdev_ungrab(evdev, client);
        mutex_unlock(&evdev->mutex);
 
@@ -1310,7 +1300,6 @@ static const struct file_operations evdev_fops = {
        .compat_ioctl   = evdev_ioctl_compat,
 #endif
        .fasync         = evdev_fasync,
-       .flush          = evdev_flush,
        .llseek         = no_llseek,
 };
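
The evdev change leans on VFS semantics: the flush handler runs on every close() of every descriptor referring to the file, including dup()ed and fork-inherited ones, while the release handler runs exactly once, when the last reference to the struct file is dropped. Doing the device flush in release therefore flushes once, at the right time. The two hooks, sketched:

static int demo_flush(struct file *file, fl_owner_t id)
{
	/* may run many times: once per close() of any fd for this file */
	return 0;
}

static int demo_release(struct inode *inode, struct file *file)
{
	/* runs once, after the final fput() */
	return 0;
}
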
 
index 6b40a1c68f9fe40eeb12a62ac40406885bcbc6ff..c77cdb3b62b5b4c7ed5ab4905bcdcf59ca761dec 100644 (file)
@@ -458,6 +458,16 @@ static const u8 xboxone_fw2015_init[] = {
        0x05, 0x20, 0x00, 0x01, 0x00
 };
 
+/*
+ * This packet is required for Xbox One S (0x045e:0x02ea)
+ * and Xbox One Elite Series 2 (0x045e:0x0b00) pads to
+ * initialize the controller that was previously used in
+ * Bluetooth mode.
+ */
+static const u8 xboxone_s_init[] = {
+       0x05, 0x20, 0x00, 0x0f, 0x06
+};
+
 /*
  * This packet is required for the Titanfall 2 Xbox One pads
  * (0x0e6f:0x0165) to finish initialization and for Hori pads
@@ -516,6 +526,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
        XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
        XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
        XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
+       XBOXONE_INIT_PKT(0x045e, 0x02ea, xboxone_s_init),
+       XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init),
        XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init1),
        XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init2),
        XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
index d38398526965dbfdeedcffdd58c016c123fa53b1..14362ebab9a9d8bcc1b6be958b1c02b3bcb8a46d 100644 (file)
@@ -186,7 +186,7 @@ struct touchpad_protocol {
        u8                      number_of_fingers;
        u8                      clicked2;
        u8                      unknown3[16];
-       struct tp_finger        fingers[0];
+       struct tp_finger        fingers[];
 };
 
 /**
index 2b71c5a51f907e338e1a785833967e0e5d24b969..fc1793ca2f1747511c04d40b79e2e3790fe39221 100644 (file)
@@ -347,18 +347,14 @@ static int cros_ec_keyb_info(struct cros_ec_device *ec_dev,
        params->info_type = info_type;
        params->event_type = event_type;
 
-       ret = cros_ec_cmd_xfer(ec_dev, msg);
-       if (ret < 0) {
-               dev_warn(ec_dev->dev, "Transfer error %d/%d: %d\n",
-                        (int)info_type, (int)event_type, ret);
-       } else if (msg->result == EC_RES_INVALID_VERSION) {
+       ret = cros_ec_cmd_xfer_status(ec_dev, msg);
+       if (ret == -ENOTSUPP) {
                /* With older ECs we just return 0 for everything */
                memset(result, 0, result_size);
                ret = 0;
-       } else if (msg->result != EC_RES_SUCCESS) {
-               dev_warn(ec_dev->dev, "Error getting info %d/%d: %d\n",
-                        (int)info_type, (int)event_type, msg->result);
-               ret = -EPROTO;
+       } else if (ret < 0) {
+               dev_warn(ec_dev->dev, "Transfer error %d/%d: %d\n",
+                        (int)info_type, (int)event_type, ret);
        } else if (ret != result_size) {
                dev_warn(ec_dev->dev, "Wrong size %d/%d: %d != %zu\n",
                         (int)info_type, (int)event_type,
index b0ead7199c405a0e702ee478bfde94f0c6f433b0..a69dcc3bd30c79673f08e1763c7485bd34b80cfc 100644 (file)
@@ -143,7 +143,7 @@ MODULE_DEVICE_TABLE(of, dir685_tk_of_match);
 
 static struct i2c_driver dir685_tk_i2c_driver = {
        .driver = {
-               .name   = "dlin-dir685-touchkeys",
+               .name   = "dlink-dir685-touchkeys",
                .of_match_table = of_match_ptr(dir685_tk_of_match),
        },
        .probe          = dir685_tk_probe,
index c8f87df93a50e3dae6673921baf4707daf1eb240..9c6386b2af331b1696def8535d6adb6b82b98c27 100644 (file)
@@ -205,8 +205,11 @@ ATTRIBUTE_GROUPS(axp20x);
 
 static irqreturn_t axp20x_pek_irq(int irq, void *pwr)
 {
-       struct input_dev *idev = pwr;
-       struct axp20x_pek *axp20x_pek = input_get_drvdata(idev);
+       struct axp20x_pek *axp20x_pek = pwr;
+       struct input_dev *idev = axp20x_pek->input;
+
+       if (!idev)
+               return IRQ_HANDLED;
 
        /*
         * The power-button is connected to ground so a falling edge (dbf)
@@ -225,22 +228,9 @@ static irqreturn_t axp20x_pek_irq(int irq, void *pwr)
 static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
                                         struct platform_device *pdev)
 {
-       struct axp20x_dev *axp20x = axp20x_pek->axp20x;
        struct input_dev *idev;
        int error;
 
-       axp20x_pek->irq_dbr = platform_get_irq_byname(pdev, "PEK_DBR");
-       if (axp20x_pek->irq_dbr < 0)
-               return axp20x_pek->irq_dbr;
-       axp20x_pek->irq_dbr = regmap_irq_get_virq(axp20x->regmap_irqc,
-                                                 axp20x_pek->irq_dbr);
-
-       axp20x_pek->irq_dbf = platform_get_irq_byname(pdev, "PEK_DBF");
-       if (axp20x_pek->irq_dbf < 0)
-               return axp20x_pek->irq_dbf;
-       axp20x_pek->irq_dbf = regmap_irq_get_virq(axp20x->regmap_irqc,
-                                                 axp20x_pek->irq_dbf);
-
        axp20x_pek->input = devm_input_allocate_device(&pdev->dev);
        if (!axp20x_pek->input)
                return -ENOMEM;
@@ -255,24 +245,6 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
 
        input_set_drvdata(idev, axp20x_pek);
 
-       error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbr,
-                                            axp20x_pek_irq, 0,
-                                            "axp20x-pek-dbr", idev);
-       if (error < 0) {
-               dev_err(&pdev->dev, "Failed to request dbr IRQ#%d: %d\n",
-                       axp20x_pek->irq_dbr, error);
-               return error;
-       }
-
-       error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbf,
-                                         axp20x_pek_irq, 0,
-                                         "axp20x-pek-dbf", idev);
-       if (error < 0) {
-               dev_err(&pdev->dev, "Failed to request dbf IRQ#%d: %d\n",
-                       axp20x_pek->irq_dbf, error);
-               return error;
-       }
-
        error = input_register_device(idev);
        if (error) {
                dev_err(&pdev->dev, "Can't register input device: %d\n",
@@ -280,8 +252,6 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
                return error;
        }
 
-       device_init_wakeup(&pdev->dev, true);
-
        return 0;
 }
 
@@ -339,6 +309,18 @@ static int axp20x_pek_probe(struct platform_device *pdev)
 
        axp20x_pek->axp20x = dev_get_drvdata(pdev->dev.parent);
 
+       axp20x_pek->irq_dbr = platform_get_irq_byname(pdev, "PEK_DBR");
+       if (axp20x_pek->irq_dbr < 0)
+               return axp20x_pek->irq_dbr;
+       axp20x_pek->irq_dbr = regmap_irq_get_virq(
+                       axp20x_pek->axp20x->regmap_irqc, axp20x_pek->irq_dbr);
+
+       axp20x_pek->irq_dbf = platform_get_irq_byname(pdev, "PEK_DBF");
+       if (axp20x_pek->irq_dbf < 0)
+               return axp20x_pek->irq_dbf;
+       axp20x_pek->irq_dbf = regmap_irq_get_virq(
+                       axp20x_pek->axp20x->regmap_irqc, axp20x_pek->irq_dbf);
+
        if (axp20x_pek_should_register_input(axp20x_pek, pdev)) {
                error = axp20x_pek_probe_input_device(axp20x_pek, pdev);
                if (error)
@@ -347,6 +329,26 @@ static int axp20x_pek_probe(struct platform_device *pdev)
 
        axp20x_pek->info = (struct axp20x_info *)match->driver_data;
 
+       error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbr,
+                                            axp20x_pek_irq, 0,
+                                            "axp20x-pek-dbr", axp20x_pek);
+       if (error < 0) {
+               dev_err(&pdev->dev, "Failed to request dbr IRQ#%d: %d\n",
+                       axp20x_pek->irq_dbr, error);
+               return error;
+       }
+
+       error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbf,
+                                         axp20x_pek_irq, 0,
+                                         "axp20x-pek-dbf", axp20x_pek);
+       if (error < 0) {
+               dev_err(&pdev->dev, "Failed to request dbf IRQ#%d: %d\n",
+                       axp20x_pek->irq_dbf, error);
+               return error;
+       }
+
+       device_init_wakeup(&pdev->dev, true);
+
        platform_set_drvdata(pdev, axp20x_pek);
 
        return 0;
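
The axp20x-pek restructure moves the IRQ lookup into probe and delays requesting the IRQs until after the (optional) input device is registered, handing the handler the driver data instead of the input device; together with the NULL check added to axp20x_pek_irq(), an early power-button interrupt can no longer dereference an input device that was never created. The general rule, sketched with hypothetical names:

	/* request the IRQ only after everything the handler touches exists */
	error = input_register_device(idev);
	if (error)
		return error;

	error = devm_request_any_context_irq(&pdev->dev, irq, demo_irq_handler,
					     0, "demo-pek", drvdata);
	if (error < 0)
		return error;
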
index 4d2036209b45d9391828daeeeea5d583729a821e..758dae8d650066006189ef429c589ddedf87bf27 100644 (file)
@@ -170,6 +170,7 @@ static const char * const smbus_pnp_ids[] = {
        "LEN005b", /* P50 */
        "LEN005e", /* T560 */
        "LEN006c", /* T470s */
+       "LEN007a", /* T470s */
        "LEN0071", /* T480 */
        "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
        "LEN0073", /* X1 Carbon G5 (Elantech) */
index 190b9974526bb9e78f8b3200283b680690db2079..258d5fe3d395c4670088aa0d736cac69c7d24550 100644 (file)
@@ -205,7 +205,7 @@ static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
 
        if (count) {
                kfree(attn_data.data);
-               attn_data.data = NULL;
+               drvdata->attn_data.data = NULL;
        }
 
        if (!kfifo_is_empty(&drvdata->attn_fifo))
@@ -1210,7 +1210,8 @@ static int rmi_driver_probe(struct device *dev)
        if (data->input) {
                rmi_driver_set_input_name(rmi_dev, data->input);
                if (!rmi_dev->xport->input) {
-                       if (input_register_device(data->input)) {
+                       retval = input_register_device(data->input);
+                       if (retval) {
                                dev_err(dev, "%s: Failed to register input device.\n",
                                        __func__);
                                goto err_destroy_functions;
index 08e919dbeb5d1f29d572b3970ab53f876623acdf..7e048b55746241de9e9d53021a87624282081735 100644 (file)
@@ -662,6 +662,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
                },
        },
+       {
+               /* Lenovo ThinkPad Twist S230u */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"),
+               },
+       },
        { }
 };
 
index 14c577c16b169ff15f686fa8fd5c49e4007e73cd..2289f9638116c3beb93be701069561331852a800 100644 (file)
@@ -19,6 +19,7 @@
  */
 
 
+#include <linux/bits.h>
 #include <linux/module.h>
 #include <linux/input.h>
 #include <linux/interrupt.h>
@@ -73,6 +74,7 @@
 #define FW_POS_STATE           1
 #define FW_POS_TOTAL           2
 #define FW_POS_XY              3
+#define FW_POS_TOOL_TYPE       33
 #define FW_POS_CHECKSUM                34
 #define FW_POS_WIDTH           35
 #define FW_POS_PRESSURE                45
@@ -842,6 +844,7 @@ static void elants_i2c_mt_event(struct elants_data *ts, u8 *buf)
 {
        struct input_dev *input = ts->input;
        unsigned int n_fingers;
+       unsigned int tool_type;
        u16 finger_state;
        int i;
 
@@ -852,6 +855,10 @@ static void elants_i2c_mt_event(struct elants_data *ts, u8 *buf)
        dev_dbg(&ts->client->dev,
                "n_fingers: %u, state: %04x\n",  n_fingers, finger_state);
 
+       /* Note: all fingers have the same tool type */
+       tool_type = buf[FW_POS_TOOL_TYPE] & BIT(0) ?
+                       MT_TOOL_FINGER : MT_TOOL_PALM;
+
        for (i = 0; i < MAX_CONTACT_NUM && n_fingers; i++) {
                if (finger_state & 1) {
                        unsigned int x, y, p, w;
@@ -867,7 +874,7 @@ static void elants_i2c_mt_event(struct elants_data *ts, u8 *buf)
                                i, x, y, p, w);
 
                        input_mt_slot(input, i);
-                       input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+                       input_mt_report_slot_state(input, tool_type, true);
                        input_event(input, EV_ABS, ABS_MT_POSITION_X, x);
                        input_event(input, EV_ABS, ABS_MT_POSITION_Y, y);
                        input_event(input, EV_ABS, ABS_MT_PRESSURE, p);
@@ -1307,6 +1314,8 @@ static int elants_i2c_probe(struct i2c_client *client,
        input_set_abs_params(ts->input, ABS_MT_POSITION_Y, 0, ts->y_max, 0, 0);
        input_set_abs_params(ts->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
        input_set_abs_params(ts->input, ABS_MT_PRESSURE, 0, 255, 0, 0);
+       input_set_abs_params(ts->input, ABS_MT_TOOL_TYPE,
+                            0, MT_TOOL_PALM, 0, 0);
        input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->x_res);
        input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res);
        input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, 1);
index 69c6d559eeb0056aef5afc4df6473760b6036428..2ef1adaed9afb65201dcc2cdfe05103474bcbc8e 100644 (file)
@@ -91,15 +91,15 @@ static int __mms114_read_reg(struct mms114_data *data, unsigned int reg,
        if (reg <= MMS114_MODE_CONTROL && reg + len > MMS114_MODE_CONTROL)
                BUG();
 
-       /* Write register: use repeated start */
+       /* Write register */
        xfer[0].addr = client->addr;
-       xfer[0].flags = I2C_M_TEN | I2C_M_NOSTART;
+       xfer[0].flags = client->flags & I2C_M_TEN;
        xfer[0].len = 1;
        xfer[0].buf = &buf;
 
        /* Read data */
        xfer[1].addr = client->addr;
-       xfer[1].flags = I2C_M_RD;
+       xfer[1].flags = (client->flags & I2C_M_TEN) | I2C_M_RD;
        xfer[1].len = len;
        xfer[1].buf = val;
 
@@ -428,10 +428,8 @@ static int mms114_probe(struct i2c_client *client,
        const void *match_data;
        int error;
 
-       if (!i2c_check_functionality(client->adapter,
-                               I2C_FUNC_PROTOCOL_MANGLING)) {
-               dev_err(&client->dev,
-                       "Need i2c bus that supports protocol mangling\n");
+       if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+               dev_err(&client->dev, "Unsupported I2C adapter\n");
                return -ENODEV;
        }
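
The old mms114 code set I2C_M_TEN | I2C_M_NOSTART on the register write: I2C_M_TEN requests 10-bit addressing, which this 7-bit device does not use, and I2C_M_NOSTART suppresses the repeated start; both require an adapter with I2C_FUNC_PROTOCOL_MANGLING. The rewrite issues a plain write-then-read transfer, so any adapter with I2C_FUNC_I2C works. A self-contained sketch of that idiom (demo_read_reg is illustrative, not the driver's helper):

#include <linux/i2c.h>
#include <linux/kernel.h>

static int demo_read_reg(struct i2c_client *client, u8 reg, u8 *val, u16 len)
{
	struct i2c_msg xfer[2] = {
		{
			.addr	= client->addr,
			.flags	= 0,		/* plain write; repeated start follows */
			.len	= 1,
			.buf	= &reg,
		},
		{
			.addr	= client->addr,
			.flags	= I2C_M_RD,
			.len	= len,
			.buf	= val,
		},
	};
	int ret;

	/* returns the number of messages transferred, or a negative errno */
	ret = i2c_transfer(client->adapter, xfer, ARRAY_SIZE(xfer));
	if (ret < 0)
		return ret;
	return ret == ARRAY_SIZE(xfer) ? 0 : -EIO;
}
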
 
index 16d70201de4a3334fc4dd49dc3d205ce4d96bc52..397cb1d3f481baf16e1f298dae7653d1c83b7c54 100644 (file)
@@ -182,6 +182,7 @@ static const struct usb_device_id usbtouch_devices[] = {
 #endif
 
 #ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH
+       {USB_DEVICE(0x255e, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
        {USB_DEVICE(0x595a, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
        {USB_DEVICE(0x6615, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
        {USB_DEVICE(0x6615, 0x0012), .driver_info = DEVTYPE_IRTOUCH_HIRES},
index a03c6d6833dfc34cf58601b6bda8ebbbc2ee8f73..96fb9ff5ff2e85c58cd140f28b1553a85e2e729f 100644 (file)
@@ -78,7 +78,7 @@ static struct qcom_icc_node *sdm845_osm_l3_nodes[] = {
        [SLAVE_OSM_L3] = &sdm845_osm_l3,
 };
 
-const static struct qcom_icc_desc sdm845_icc_osm_l3 = {
+static const struct qcom_icc_desc sdm845_icc_osm_l3 = {
        .nodes = sdm845_osm_l3_nodes,
        .num_nodes = ARRAY_SIZE(sdm845_osm_l3_nodes),
 };
@@ -91,7 +91,7 @@ static struct qcom_icc_node *sc7180_osm_l3_nodes[] = {
        [SLAVE_OSM_L3] = &sc7180_osm_l3,
 };
 
-const static struct qcom_icc_desc sc7180_icc_osm_l3 = {
+static const struct qcom_icc_desc sc7180_icc_osm_l3 = {
        .nodes = sc7180_osm_l3_nodes,
        .num_nodes = ARRAY_SIZE(sc7180_osm_l3_nodes),
 };
index b013b80caa45286b59c6ce28f58559531cbd042a..f6c7b969520d0a0434461e35c81f6e6cf00a493b 100644 (file)
@@ -192,7 +192,7 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = {
        [SLAVE_ANOC_PCIE_A1NOC_SNOC] = &qns_pcie_a1noc_snoc,
 };
 
-const static struct qcom_icc_desc sdm845_aggre1_noc = {
+static const struct qcom_icc_desc sdm845_aggre1_noc = {
        .nodes = aggre1_noc_nodes,
        .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
        .bcms = aggre1_noc_bcms,
@@ -220,7 +220,7 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = {
        [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
 };
 
-const static struct qcom_icc_desc sdm845_aggre2_noc = {
+static const struct qcom_icc_desc sdm845_aggre2_noc = {
        .nodes = aggre2_noc_nodes,
        .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
        .bcms = aggre2_noc_bcms,
@@ -281,7 +281,7 @@ static struct qcom_icc_node *config_noc_nodes[] = {
        [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
 };
 
-const static struct qcom_icc_desc sdm845_config_noc = {
+static const struct qcom_icc_desc sdm845_config_noc = {
        .nodes = config_noc_nodes,
        .num_nodes = ARRAY_SIZE(config_noc_nodes),
        .bcms = config_noc_bcms,
@@ -297,7 +297,7 @@ static struct qcom_icc_node *dc_noc_nodes[] = {
        [SLAVE_MEM_NOC_CFG] = &qhs_memnoc,
 };
 
-const static struct qcom_icc_desc sdm845_dc_noc = {
+static const struct qcom_icc_desc sdm845_dc_noc = {
        .nodes = dc_noc_nodes,
        .num_nodes = ARRAY_SIZE(dc_noc_nodes),
        .bcms = dc_noc_bcms,
@@ -315,7 +315,7 @@ static struct qcom_icc_node *gladiator_noc_nodes[] = {
        [SLAVE_SERVICE_GNOC] = &srvc_gnoc,
 };
 
-const static struct qcom_icc_desc sdm845_gladiator_noc = {
+static const struct qcom_icc_desc sdm845_gladiator_noc = {
        .nodes = gladiator_noc_nodes,
        .num_nodes = ARRAY_SIZE(gladiator_noc_nodes),
        .bcms = gladiator_noc_bcms,
@@ -350,7 +350,7 @@ static struct qcom_icc_node *mem_noc_nodes[] = {
        [SLAVE_EBI1] = &ebi,
 };
 
-const static struct qcom_icc_desc sdm845_mem_noc = {
+static const struct qcom_icc_desc sdm845_mem_noc = {
        .nodes = mem_noc_nodes,
        .num_nodes = ARRAY_SIZE(mem_noc_nodes),
        .bcms = mem_noc_bcms,
@@ -384,7 +384,7 @@ static struct qcom_icc_node *mmss_noc_nodes[] = {
        [SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
 };
 
-const static struct qcom_icc_desc sdm845_mmss_noc = {
+static const struct qcom_icc_desc sdm845_mmss_noc = {
        .nodes = mmss_noc_nodes,
        .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
        .bcms = mmss_noc_bcms,
@@ -430,7 +430,7 @@ static struct qcom_icc_node *system_noc_nodes[] = {
        [SLAVE_TCU] = &xs_sys_tcu_cfg,
 };
 
-const static struct qcom_icc_desc sdm845_system_noc = {
+static const struct qcom_icc_desc sdm845_system_noc = {
        .nodes = system_noc_nodes,
        .num_nodes = ARRAY_SIZE(system_noc_nodes),
        .bcms = system_noc_bcms,
index 58b4a4dbfc78b9a59f5f26a6683c1aaf03dafebd..aca76383f201a468f9f74675e3811d0e275be6d4 100644 (file)
@@ -303,6 +303,15 @@ config ROCKCHIP_IOMMU
          Say Y here if you are using a Rockchip SoC that includes an IOMMU
          device.
 
+config SUN50I_IOMMU
+       bool "Allwinner H6 IOMMU Support"
+       depends on ARCH_SUNXI || COMPILE_TEST
+       select ARM_DMA_USE_IOMMU
+       select IOMMU_API
+       select IOMMU_DMA
+       help
+         Support for the IOMMU introduced in the Allwinner H6 SoCs.
+
 config TEGRA_IOMMU_GART
        bool "Tegra GART IOMMU Support"
        depends on ARCH_TEGRA_2x_SOC
@@ -362,7 +371,7 @@ config IPMMU_VMSA
 
 config SPAPR_TCE_IOMMU
        bool "sPAPR TCE IOMMU Support"
-       depends on PPC_POWERNV || PPC_PSERIES || (PPC && COMPILE_TEST)
+       depends on PPC_POWERNV || PPC_PSERIES
        select IOMMU_API
        help
          Enables bits of IOMMU API required by VFIO. The iommu_ops
@@ -457,7 +466,7 @@ config S390_AP_IOMMU
 
 config MTK_IOMMU
        bool "MTK IOMMU Support"
-       depends on ARM || ARM64 || COMPILE_TEST
+       depends on HAS_DMA
        depends on ARCH_MEDIATEK || COMPILE_TEST
        select ARM_DMA_USE_IOMMU
        select IOMMU_API
index 9f33fdb3bb0516feb086741a2b4ac89a9e6331dc..57cf4ba5e27cb30689d846a376bd74ff72e6c4e3 100644 (file)
@@ -29,6 +29,7 @@ obj-$(CONFIG_MTK_IOMMU_V1) += mtk_iommu_v1.o
 obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
 obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
 obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o
+obj-$(CONFIG_SUN50I_IOMMU) += sun50i-iommu.o
 obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
 obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
 obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
index c303674136832a490fa749432ddf47fca783e352..311ef7105c6d0c3721110d5111708de64729fd99 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/dma-direct.h>
 #include <linux/dma-iommu.h>
 #include <linux/iommu-helper.h>
-#include <linux/iommu.h>
 #include <linux/delay.h>
 #include <linux/amd-iommu.h>
 #include <linux/notifier.h>
@@ -43,8 +42,7 @@
 #include <asm/gart.h>
 #include <asm/dma.h>
 
-#include "amd_iommu_proto.h"
-#include "amd_iommu_types.h"
+#include "amd_iommu.h"
 #include "irq_remapping.h"
 
 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
@@ -71,6 +69,8 @@
  */
 #define AMD_IOMMU_PGSIZES      ((~0xFFFUL) & ~(2ULL << 38))
 
+#define DEFAULT_PGTABLE_LEVEL  PAGE_MODE_3_LEVEL
+
 static DEFINE_SPINLOCK(pd_bitmap_lock);
 
 /* List of all available dev_data structures */
@@ -99,8 +99,9 @@ struct iommu_cmd {
 struct kmem_cache *amd_iommu_irq_cache;
 
 static void update_domain(struct protection_domain *domain);
-static int protection_domain_init(struct protection_domain *domain);
 static void detach_device(struct device *dev);
+static void update_and_flush_device_table(struct protection_domain *domain,
+                                         struct domain_pgtable *pgtable);
 
 /****************************************************************************
  *
@@ -125,7 +126,8 @@ static inline int get_acpihid_device_id(struct device *dev,
                return -ENODEV;
 
        list_for_each_entry(p, &acpihid_map, list) {
-               if (acpi_dev_hid_uid_match(adev, p->hid, p->uid)) {
+               if (acpi_dev_hid_uid_match(adev, p->hid,
+                                          p->uid[0] ? p->uid : NULL)) {
                        if (entry)
                                *entry = p;
                        return p->devid;
@@ -151,6 +153,26 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
        return container_of(dom, struct protection_domain, domain);
 }
 
+static void amd_iommu_domain_get_pgtable(struct protection_domain *domain,
+                                        struct domain_pgtable *pgtable)
+{
+       u64 pt_root = atomic64_read(&domain->pt_root);
+
+       pgtable->root = (u64 *)(pt_root & PAGE_MASK);
+       pgtable->mode = pt_root & 7; /* lowest 3 bits encode pgtable mode */
+}
+
+static u64 amd_iommu_domain_encode_pgtable(u64 *root, int mode)
+{
+       u64 pt_root;
+
+       /* lowest 3 bits encode pgtable mode */
+       pt_root = mode & 7;
+       pt_root |= (u64)root;
+
+       return pt_root;
+}
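These two helpers are the heart of the pt_root rework: a page-aligned root pointer and a 3-bit paging mode are packed into one 64-bit value so that both can be read and published with a single atomic64 access. This works because get_zeroed_page() returns page-aligned memory, so the low 12 bits of the root are always zero and three of them can carry the mode. A stand-alone sketch of the round-trip, with names of my own rather than the kernel's:

	#include <assert.h>
	#include <stdint.h>

	#define PG_MASK (~0xfffULL)	/* page-aligned: low 12 bits are zero */

	static uint64_t pack(uint64_t *root, int mode)
	{
		return ((uint64_t)(uintptr_t)root & PG_MASK) | (uint64_t)(mode & 7);
	}

	static uint64_t *unpack(uint64_t v, int *mode)
	{
		*mode = (int)(v & 7);
		return (uint64_t *)(uintptr_t)(v & PG_MASK);
	}

	int main(void)
	{
		static _Alignas(4096) uint64_t table[512];
		int mode;

		assert(unpack(pack(table, 3), &mode) == table && mode == 3);
		return 0;
	}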
+
 static struct iommu_dev_data *alloc_dev_data(u16 devid)
 {
        struct iommu_dev_data *dev_data;
@@ -257,12 +279,6 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
        return dev_data;
 }
 
-struct iommu_dev_data *get_dev_data(struct device *dev)
-{
-       return dev->archdata.iommu;
-}
-EXPORT_SYMBOL(get_dev_data);
-
 /*
 * Find or create an IOMMU group for an acpihid device.
 */
@@ -291,16 +307,15 @@ static struct iommu_group *acpihid_device_group(struct device *dev)
 static bool pci_iommuv2_capable(struct pci_dev *pdev)
 {
        static const int caps[] = {
-               PCI_EXT_CAP_ID_ATS,
                PCI_EXT_CAP_ID_PRI,
                PCI_EXT_CAP_ID_PASID,
        };
        int i, pos;
 
-       if (pci_ats_disabled())
+       if (!pci_ats_supported(pdev))
                return false;
 
-       for (i = 0; i < 3; ++i) {
+       for (i = 0; i < 2; ++i) {
                pos = pci_find_ext_capability(pdev, caps[i]);
                if (pos == 0)
                        return false;
@@ -313,7 +328,7 @@ static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
 {
        struct iommu_dev_data *dev_data;
 
-       dev_data = get_dev_data(&pdev->dev);
+       dev_data = dev_iommu_priv_get(&pdev->dev);
 
        return dev_data->errata & (1 << erratum) ? true : false;
 }
@@ -348,7 +363,7 @@ static int iommu_init_device(struct device *dev)
        struct iommu_dev_data *dev_data;
        int devid;
 
-       if (dev->archdata.iommu)
+       if (dev_iommu_priv_get(dev))
                return 0;
 
        devid = get_device_id(dev);
@@ -375,7 +390,7 @@ static int iommu_init_device(struct device *dev)
                dev_data->iommu_v2 = iommu->is_iommu_v2;
        }
 
-       dev->archdata.iommu = dev_data;
+       dev_iommu_priv_set(dev, dev_data);
 
        return 0;
 }
@@ -397,22 +412,16 @@ static void iommu_ignore_device(struct device *dev)
 static void amd_iommu_uninit_device(struct device *dev)
 {
        struct iommu_dev_data *dev_data;
-       struct amd_iommu *iommu;
-       int devid;
-
-       devid = get_device_id(dev);
-       if (devid < 0)
-               return;
-
-       iommu = amd_iommu_rlookup_table[devid];
 
-       dev_data = search_dev_data(devid);
+       dev_data = dev_iommu_priv_get(dev);
        if (!dev_data)
                return;
 
        if (dev_data->domain)
                detach_device(dev);
 
+       dev_iommu_priv_set(dev, NULL);
+
        /*
         * We keep dev_data around for unplugged devices and reuse it when the
         * device is re-plugged - not doing so would introduce a ton of races.
@@ -475,7 +484,7 @@ static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
        pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
                                           devid & 0xff);
        if (pdev)
-               dev_data = get_dev_data(&pdev->dev);
+               dev_data = dev_iommu_priv_get(&pdev->dev);
 
        if (dev_data && __ratelimit(&dev_data->rs)) {
                pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n",
@@ -1372,15 +1381,19 @@ static struct page *free_sub_pt(unsigned long root, int mode,
        return freelist;
 }
 
-static void free_pagetable(struct protection_domain *domain)
+static void free_pagetable(struct domain_pgtable *pgtable)
 {
-       unsigned long root = (unsigned long)domain->pt_root;
        struct page *freelist = NULL;
+       unsigned long root;
 
-       BUG_ON(domain->mode < PAGE_MODE_NONE ||
-              domain->mode > PAGE_MODE_6_LEVEL);
+       if (pgtable->mode == PAGE_MODE_NONE)
+               return;
+
+       BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
+              pgtable->mode > PAGE_MODE_6_LEVEL);
 
-       freelist = free_sub_pt(root, domain->mode, freelist);
+       root = (unsigned long)pgtable->root;
+       freelist = free_sub_pt(root, pgtable->mode, freelist);
 
        free_page_list(freelist);
 }
@@ -1394,24 +1407,39 @@ static bool increase_address_space(struct protection_domain *domain,
                                   unsigned long address,
                                   gfp_t gfp)
 {
+       struct domain_pgtable pgtable;
        unsigned long flags;
-       bool ret = false;
-       u64 *pte;
+       bool ret = true;
+       u64 *pte, root;
 
        spin_lock_irqsave(&domain->lock, flags);
 
-       if (address <= PM_LEVEL_SIZE(domain->mode) ||
-           WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
+       amd_iommu_domain_get_pgtable(domain, &pgtable);
+
+       if (address <= PM_LEVEL_SIZE(pgtable.mode))
+               goto out;
+
+       ret = false;
+       if (WARN_ON_ONCE(pgtable.mode == PAGE_MODE_6_LEVEL))
                goto out;
 
        pte = (void *)get_zeroed_page(gfp);
        if (!pte)
                goto out;
 
-       *pte             = PM_LEVEL_PDE(domain->mode,
-                                       iommu_virt_to_phys(domain->pt_root));
-       domain->pt_root  = pte;
-       domain->mode    += 1;
+       *pte = PM_LEVEL_PDE(pgtable.mode, iommu_virt_to_phys(pgtable.root));
+
+       pgtable.root  = pte;
+       pgtable.mode += 1;
+       update_and_flush_device_table(domain, &pgtable);
+       domain_flush_complete(domain);
+
+       /*
+        * Device Table needs to be updated and flushed before the new root can
+        * be published.
+        */
+       root = amd_iommu_domain_encode_pgtable(pte, pgtable.mode);
+       atomic64_set(&domain->pt_root, root);
 
        ret = true;
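The ordering captured in the comment above is the crux of this hunk: the device table is updated and flushed for the grown page-table first, and only then is the new (root, mode) pair published via atomic64_set(), so a lock-free reader decoding pt_root never sees a root the hardware has not been told about; readers such as alloc_pte() in the next hunk re-read the pgtable after every resize attempt. A compressed sketch of that publish/re-read pattern in portable C11 atomics (the kernel's own primitives differ):

	#include <stdatomic.h>
	#include <stdint.h>

	static _Atomic uint64_t pt_root;	/* packed (root | mode), as above */

	static void publish(uint64_t *new_root, int new_mode)
	{
		/* 1. update and flush dependent state (the device table) ... */
		/* 2. ... and only then publish the new root atomically: */
		atomic_store(&pt_root,
			     ((uint64_t)(uintptr_t)new_root & ~0xfffULL) |
			     (uint64_t)(new_mode & 7));
	}

	static int snapshot_mode(void)
	{
		return (int)(atomic_load(&pt_root) & 7);	/* one consistent read */
	}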
 
@@ -1428,16 +1456,29 @@ static u64 *alloc_pte(struct protection_domain *domain,
                      gfp_t gfp,
                      bool *updated)
 {
+       struct domain_pgtable pgtable;
        int level, end_lvl;
        u64 *pte, *page;
 
        BUG_ON(!is_power_of_2(page_size));
 
-       while (address > PM_LEVEL_SIZE(domain->mode))
-               *updated = increase_address_space(domain, address, gfp) || *updated;
+       amd_iommu_domain_get_pgtable(domain, &pgtable);
 
-       level   = domain->mode - 1;
-       pte     = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
+       while (address > PM_LEVEL_SIZE(pgtable.mode)) {
+               /*
+                * Return an error if there is no memory to update the
+                * page-table.
+                */
+               if (!increase_address_space(domain, address, gfp))
+                       return NULL;
+
+               /* Read new values to check if update was successful */
+               amd_iommu_domain_get_pgtable(domain, &pgtable);
+       }
+
+       level   = pgtable.mode - 1;
+       pte     = &pgtable.root[PM_LEVEL_INDEX(level, address)];
        address = PAGE_SIZE_ALIGN(address, page_size);
        end_lvl = PAGE_SIZE_LEVEL(page_size);
 
@@ -1513,16 +1554,19 @@ static u64 *fetch_pte(struct protection_domain *domain,
                      unsigned long address,
                      unsigned long *page_size)
 {
+       struct domain_pgtable pgtable;
        int level;
        u64 *pte;
 
        *page_size = 0;
 
-       if (address > PM_LEVEL_SIZE(domain->mode))
+       amd_iommu_domain_get_pgtable(domain, &pgtable);
+
+       if (address > PM_LEVEL_SIZE(pgtable.mode))
                return NULL;
 
-       level      =  domain->mode - 1;
-       pte        = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
+       level      =  pgtable.mode - 1;
+       pte        = &pgtable.root[PM_LEVEL_INDEX(level, address)];
        *page_size =  PTE_LEVEL_PAGE_SIZE(level);
 
        while (level > 0) {
@@ -1637,7 +1681,13 @@ out:
                unsigned long flags;
 
                spin_lock_irqsave(&dom->lock, flags);
-               update_domain(dom);
+               /*
+                * Flush domain TLB(s) and wait for completion. Any Device-Table
+                * Updates and flushing already happened in
+                * increase_address_space().
+                */
+               domain_flush_tlb_pde(dom);
+               domain_flush_complete(dom);
                spin_unlock_irqrestore(&dom->lock, flags);
        }
 
@@ -1756,78 +1806,18 @@ static void free_gcr3_table(struct protection_domain *domain)
        free_page((unsigned long)domain->gcr3_tbl);
 }
 
-/*
- * Free a domain, only used if something went wrong in the
- * allocation path and we need to free an already allocated page table
- */
-static void dma_ops_domain_free(struct protection_domain *domain)
-{
-       if (!domain)
-               return;
-
-       iommu_put_dma_cookie(&domain->domain);
-
-       free_pagetable(domain);
-
-       if (domain->id)
-               domain_id_free(domain->id);
-
-       kfree(domain);
-}
-
-/*
- * Allocates a new protection domain usable for the dma_ops functions.
- * It also initializes the page table and the address allocator data
- * structures required for the dma_ops interface
- */
-static struct protection_domain *dma_ops_domain_alloc(void)
-{
-       struct protection_domain *domain;
-
-       domain = kzalloc(sizeof(struct protection_domain), GFP_KERNEL);
-       if (!domain)
-               return NULL;
-
-       if (protection_domain_init(domain))
-               goto free_domain;
-
-       domain->mode = PAGE_MODE_3_LEVEL;
-       domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
-       domain->flags = PD_DMA_OPS_MASK;
-       if (!domain->pt_root)
-               goto free_domain;
-
-       if (iommu_get_dma_cookie(&domain->domain) == -ENOMEM)
-               goto free_domain;
-
-       return domain;
-
-free_domain:
-       dma_ops_domain_free(domain);
-
-       return NULL;
-}
-
-/*
- * little helper function to check whether a given protection domain is a
- * dma_ops domain
- */
-static bool dma_ops_domain(struct protection_domain *domain)
-{
-       return domain->flags & PD_DMA_OPS_MASK;
-}
-
 static void set_dte_entry(u16 devid, struct protection_domain *domain,
+                         struct domain_pgtable *pgtable,
                          bool ats, bool ppr)
 {
        u64 pte_root = 0;
        u64 flags = 0;
        u32 old_domid;
 
-       if (domain->mode != PAGE_MODE_NONE)
-               pte_root = iommu_virt_to_phys(domain->pt_root);
+       if (pgtable->mode != PAGE_MODE_NONE)
+               pte_root = iommu_virt_to_phys(pgtable->root);
 
-       pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
+       pte_root |= (pgtable->mode & DEV_ENTRY_MODE_MASK)
                    << DEV_ENTRY_MODE_SHIFT;
        pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V | DTE_FLAG_TV;
 
@@ -1900,6 +1890,7 @@ static void clear_dte_entry(u16 devid)
 static void do_attach(struct iommu_dev_data *dev_data,
                      struct protection_domain *domain)
 {
+       struct domain_pgtable pgtable;
        struct amd_iommu *iommu;
        bool ats;
 
@@ -1915,7 +1906,9 @@ static void do_attach(struct iommu_dev_data *dev_data,
        domain->dev_cnt                 += 1;
 
        /* Update device table */
-       set_dte_entry(dev_data->devid, domain, ats, dev_data->iommu_v2);
+       amd_iommu_domain_get_pgtable(domain, &pgtable);
+       set_dte_entry(dev_data->devid, domain, &pgtable,
+                     ats, dev_data->iommu_v2);
        clone_aliases(dev_data->pdev);
 
        device_flush_dte(dev_data);
@@ -2031,7 +2024,7 @@ static int attach_device(struct device *dev,
 
        spin_lock_irqsave(&domain->lock, flags);
 
-       dev_data = get_dev_data(dev);
+       dev_data = dev_iommu_priv_get(dev);
 
        spin_lock(&dev_data->lock);
 
@@ -2095,7 +2088,7 @@ static void detach_device(struct device *dev)
        struct iommu_dev_data *dev_data;
        unsigned long flags;
 
-       dev_data = get_dev_data(dev);
+       dev_data = dev_iommu_priv_get(dev);
        domain   = dev_data->domain;
 
        spin_lock_irqsave(&domain->lock, flags);
@@ -2144,7 +2137,7 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
 
        iommu = amd_iommu_rlookup_table[devid];
 
-       if (get_dev_data(dev))
+       if (dev_iommu_priv_get(dev))
                return &iommu->iommu;
 
        ret = iommu_init_device(dev);
@@ -2174,16 +2167,12 @@ static void amd_iommu_probe_finalize(struct device *dev)
 
 static void amd_iommu_release_device(struct device *dev)
 {
+       int devid = get_device_id(dev);
        struct amd_iommu *iommu;
-       int devid;
 
        if (!check_device(dev))
                return;
 
-       devid = get_device_id(dev);
-       if (devid < 0)
-               return;
-
        iommu = amd_iommu_rlookup_table[devid];
 
        amd_iommu_uninit_device(dev);
@@ -2224,23 +2213,36 @@ static int amd_iommu_domain_get_attr(struct iommu_domain *domain,
  *
  *****************************************************************************/
 
-static void update_device_table(struct protection_domain *domain)
+static void update_device_table(struct protection_domain *domain,
+                               struct domain_pgtable *pgtable)
 {
        struct iommu_dev_data *dev_data;
 
        list_for_each_entry(dev_data, &domain->dev_list, list) {
-               set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled,
-                             dev_data->iommu_v2);
+               set_dte_entry(dev_data->devid, domain, pgtable,
+                             dev_data->ats.enabled, dev_data->iommu_v2);
                clone_aliases(dev_data->pdev);
        }
 }
 
+static void update_and_flush_device_table(struct protection_domain *domain,
+                                         struct domain_pgtable *pgtable)
+{
+       update_device_table(domain, pgtable);
+       domain_flush_devices(domain);
+}
+
 static void update_domain(struct protection_domain *domain)
 {
-       update_device_table(domain);
+       struct domain_pgtable pgtable;
 
-       domain_flush_devices(domain);
+       /* Update device table */
+       amd_iommu_domain_get_pgtable(domain, &pgtable);
+       update_and_flush_device_table(domain, &pgtable);
+
+       /* Flush domain TLB(s) and wait for completion */
        domain_flush_tlb_pde(domain);
+       domain_flush_complete(domain);
 }
 
 int __init amd_iommu_init_api(void)
@@ -2308,27 +2310,46 @@ static void cleanup_domain(struct protection_domain *domain)
 
 static void protection_domain_free(struct protection_domain *domain)
 {
+       struct domain_pgtable pgtable;
+
        if (!domain)
                return;
 
        if (domain->id)
                domain_id_free(domain->id);
 
+       amd_iommu_domain_get_pgtable(domain, &pgtable);
+       atomic64_set(&domain->pt_root, 0);
+       free_pagetable(&pgtable);
+
        kfree(domain);
 }
 
-static int protection_domain_init(struct protection_domain *domain)
+static int protection_domain_init(struct protection_domain *domain, int mode)
 {
+       u64 *pt_root = NULL, root;
+
+       BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);
+
        spin_lock_init(&domain->lock);
        domain->id = domain_id_alloc();
        if (!domain->id)
                return -ENOMEM;
        INIT_LIST_HEAD(&domain->dev_list);
 
+       if (mode != PAGE_MODE_NONE) {
+               pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+               if (!pt_root)
+                       return -ENOMEM;
+       }
+
+       root = amd_iommu_domain_encode_pgtable(pt_root, mode);
+       atomic64_set(&domain->pt_root, root);
+
        return 0;
 }
 
-static struct protection_domain *protection_domain_alloc(void)
+static struct protection_domain *protection_domain_alloc(int mode)
 {
        struct protection_domain *domain;
 
@@ -2336,7 +2357,7 @@ static struct protection_domain *protection_domain_alloc(void)
        if (!domain)
                return NULL;
 
-       if (protection_domain_init(domain))
+       if (protection_domain_init(domain, mode))
                goto out_err;
 
        return domain;
@@ -2349,45 +2370,30 @@ out_err:
 
 static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
 {
-       struct protection_domain *pdomain;
+       struct protection_domain *domain;
+       int mode = DEFAULT_PGTABLE_LEVEL;
 
-       switch (type) {
-       case IOMMU_DOMAIN_UNMANAGED:
-               pdomain = protection_domain_alloc();
-               if (!pdomain)
-                       return NULL;
+       if (type == IOMMU_DOMAIN_IDENTITY)
+               mode = PAGE_MODE_NONE;
 
-               pdomain->mode    = PAGE_MODE_3_LEVEL;
-               pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
-               if (!pdomain->pt_root) {
-                       protection_domain_free(pdomain);
-                       return NULL;
-               }
+       domain = protection_domain_alloc(mode);
+       if (!domain)
+               return NULL;
 
-               pdomain->domain.geometry.aperture_start = 0;
-               pdomain->domain.geometry.aperture_end   = ~0ULL;
-               pdomain->domain.geometry.force_aperture = true;
+       domain->domain.geometry.aperture_start = 0;
+       domain->domain.geometry.aperture_end   = ~0ULL;
+       domain->domain.geometry.force_aperture = true;
 
-               break;
-       case IOMMU_DOMAIN_DMA:
-               pdomain = dma_ops_domain_alloc();
-               if (!pdomain) {
-                       pr_err("Failed to allocate\n");
-                       return NULL;
-               }
-               break;
-       case IOMMU_DOMAIN_IDENTITY:
-               pdomain = protection_domain_alloc();
-               if (!pdomain)
-                       return NULL;
+       if (type == IOMMU_DOMAIN_DMA &&
+           iommu_get_dma_cookie(&domain->domain) == -ENOMEM)
+               goto free_domain;
 
-               pdomain->mode = PAGE_MODE_NONE;
-               break;
-       default:
-               return NULL;
-       }
+       return &domain->domain;
 
-       return &pdomain->domain;
+free_domain:
+       protection_domain_free(domain);
+
+       return NULL;
 }
 
 static void amd_iommu_domain_free(struct iommu_domain *dom)
@@ -2404,27 +2410,19 @@ static void amd_iommu_domain_free(struct iommu_domain *dom)
        if (!dom)
                return;
 
-       switch (dom->type) {
-       case IOMMU_DOMAIN_DMA:
-               /* Now release the domain */
-               dma_ops_domain_free(domain);
-               break;
-       default:
-               if (domain->mode != PAGE_MODE_NONE)
-                       free_pagetable(domain);
+       if (dom->type == IOMMU_DOMAIN_DMA)
+               iommu_put_dma_cookie(&domain->domain);
 
-               if (domain->flags & PD_IOMMUV2_MASK)
-                       free_gcr3_table(domain);
+       if (domain->flags & PD_IOMMUV2_MASK)
+               free_gcr3_table(domain);
 
-               protection_domain_free(domain);
-               break;
-       }
+       protection_domain_free(domain);
 }
 
 static void amd_iommu_detach_device(struct iommu_domain *dom,
                                    struct device *dev)
 {
-       struct iommu_dev_data *dev_data = dev->archdata.iommu;
+       struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
        struct amd_iommu *iommu;
        int devid;
 
@@ -2462,7 +2460,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
        if (!check_device(dev))
                return -EINVAL;
 
-       dev_data = dev->archdata.iommu;
+       dev_data = dev_iommu_priv_get(dev);
        dev_data->defer_attach = false;
 
        iommu = amd_iommu_rlookup_table[dev_data->devid];
@@ -2493,10 +2491,12 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
                         gfp_t gfp)
 {
        struct protection_domain *domain = to_pdomain(dom);
+       struct domain_pgtable pgtable;
        int prot = 0;
        int ret;
 
-       if (domain->mode == PAGE_MODE_NONE)
+       amd_iommu_domain_get_pgtable(domain, &pgtable);
+       if (pgtable.mode == PAGE_MODE_NONE)
                return -EINVAL;
 
        if (iommu_prot & IOMMU_READ)
@@ -2516,8 +2516,10 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
                              struct iommu_iotlb_gather *gather)
 {
        struct protection_domain *domain = to_pdomain(dom);
+       struct domain_pgtable pgtable;
 
-       if (domain->mode == PAGE_MODE_NONE)
+       amd_iommu_domain_get_pgtable(domain, &pgtable);
+       if (pgtable.mode == PAGE_MODE_NONE)
                return 0;
 
        return iommu_unmap_page(domain, iova, page_size);
@@ -2528,9 +2530,11 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
 {
        struct protection_domain *domain = to_pdomain(dom);
        unsigned long offset_mask, pte_pgsize;
+       struct domain_pgtable pgtable;
        u64 *pte, __pte;
 
-       if (domain->mode == PAGE_MODE_NONE)
+       amd_iommu_domain_get_pgtable(domain, &pgtable);
+       if (pgtable.mode == PAGE_MODE_NONE)
                return iova;
 
        pte = fetch_pte(domain, iova, &pte_pgsize);
@@ -2612,12 +2616,14 @@ static void amd_iommu_get_resv_regions(struct device *dev,
        list_add_tail(&region->list, head);
 }
 
-static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
-                                        struct device *dev)
+bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
+                                 struct device *dev)
 {
-       struct iommu_dev_data *dev_data = dev->archdata.iommu;
+       struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
+
        return dev_data->defer_attach;
 }
+EXPORT_SYMBOL_GPL(amd_iommu_is_attach_deferred);
 
 static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
 {
@@ -2640,7 +2646,7 @@ static int amd_iommu_def_domain_type(struct device *dev)
 {
        struct iommu_dev_data *dev_data;
 
-       dev_data = get_dev_data(dev);
+       dev_data = dev_iommu_priv_get(dev);
        if (!dev_data)
                return 0;
 
@@ -2699,18 +2705,22 @@ EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
 void amd_iommu_domain_direct_map(struct iommu_domain *dom)
 {
        struct protection_domain *domain = to_pdomain(dom);
+       struct domain_pgtable pgtable;
        unsigned long flags;
 
        spin_lock_irqsave(&domain->lock, flags);
 
+       /* First save the pgtable configuration */
+       amd_iommu_domain_get_pgtable(domain, &pgtable);
+
        /* Update data structure */
-       domain->mode    = PAGE_MODE_NONE;
+       atomic64_set(&domain->pt_root, 0);
 
        /* Make changes visible to IOMMUs */
        update_domain(domain);
 
        /* Page-table is not visible to IOMMU anymore, so free it */
-       free_pagetable(domain);
+       free_pagetable(&pgtable);
 
        spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -2899,9 +2909,11 @@ static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
 static int __set_gcr3(struct protection_domain *domain, int pasid,
                      unsigned long cr3)
 {
+       struct domain_pgtable pgtable;
        u64 *pte;
 
-       if (domain->mode != PAGE_MODE_NONE)
+       amd_iommu_domain_get_pgtable(domain, &pgtable);
+       if (pgtable.mode != PAGE_MODE_NONE)
                return -EINVAL;
 
        pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
@@ -2915,9 +2927,11 @@ static int __set_gcr3(struct protection_domain *domain, int pasid,
 
 static int __clear_gcr3(struct protection_domain *domain, int pasid)
 {
+       struct domain_pgtable pgtable;
        u64 *pte;
 
-       if (domain->mode != PAGE_MODE_NONE)
+       amd_iommu_domain_get_pgtable(domain, &pgtable);
+       if (pgtable.mode != PAGE_MODE_NONE)
                return -EINVAL;
 
        pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
@@ -2965,7 +2979,7 @@ int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
        struct amd_iommu *iommu;
        struct iommu_cmd cmd;
 
-       dev_data = get_dev_data(&pdev->dev);
+       dev_data = dev_iommu_priv_get(&pdev->dev);
        iommu    = amd_iommu_rlookup_table[dev_data->devid];
 
        build_complete_ppr(&cmd, dev_data->devid, pasid, status,
@@ -2978,23 +2992,27 @@ EXPORT_SYMBOL(amd_iommu_complete_ppr);
 struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
 {
        struct protection_domain *pdomain;
-       struct iommu_domain *io_domain;
+       struct iommu_dev_data *dev_data;
        struct device *dev = &pdev->dev;
+       struct iommu_domain *io_domain;
 
        if (!check_device(dev))
                return NULL;
 
-       pdomain = get_dev_data(dev)->domain;
-       if (pdomain == NULL && get_dev_data(dev)->defer_attach) {
-               get_dev_data(dev)->defer_attach = false;
-               io_domain = iommu_get_domain_for_dev(dev);
+       dev_data  = dev_iommu_priv_get(&pdev->dev);
+       pdomain   = dev_data->domain;
+       io_domain = iommu_get_domain_for_dev(dev);
+
+       if (pdomain == NULL && dev_data->defer_attach) {
+               dev_data->defer_attach = false;
                pdomain = to_pdomain(io_domain);
                attach_device(dev, pdomain);
        }
+
        if (pdomain == NULL)
                return NULL;
 
-       if (!dma_ops_domain(pdomain))
+       if (io_domain->type != IOMMU_DOMAIN_DMA)
                return NULL;
 
        /* Only return IOMMUv2 domains */
@@ -3012,7 +3030,7 @@ void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
        if (!amd_iommu_v2_supported())
                return;
 
-       dev_data = get_dev_data(&pdev->dev);
+       dev_data = dev_iommu_priv_get(&pdev->dev);
        dev_data->errata |= (1 << erratum);
 }
 EXPORT_SYMBOL(amd_iommu_enable_device_erratum);
@@ -3031,11 +3049,8 @@ int amd_iommu_device_info(struct pci_dev *pdev,
 
        memset(info, 0, sizeof(*info));
 
-       if (!pci_ats_disabled()) {
-               pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS);
-               if (pos)
-                       info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
-       }
+       if (pci_ats_supported(pdev))
+               info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
 
        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
        if (pos)
index 12d540d9b59b0a8e1941934d695b1bc7312a7b0f..f892992c8744dfe28d3216d41d68bbcfd553bfcd 100644 (file)
@@ -1,9 +1,103 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009-2010 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <jroedel@suse.de>
+ */
 
 #ifndef AMD_IOMMU_H
 #define AMD_IOMMU_H
 
-int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line);
+#include <linux/iommu.h>
+
+#include "amd_iommu_types.h"
+
+extern int amd_iommu_get_num_iommus(void);
+extern int amd_iommu_init_dma_ops(void);
+extern int amd_iommu_init_passthrough(void);
+extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
+extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
+extern void amd_iommu_apply_erratum_63(u16 devid);
+extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
+extern int amd_iommu_init_devices(void);
+extern void amd_iommu_uninit_devices(void);
+extern void amd_iommu_init_notifier(void);
+extern int amd_iommu_init_api(void);
+
+#ifdef CONFIG_AMD_IOMMU_DEBUGFS
+void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
+#else
+static inline void amd_iommu_debugfs_setup(struct amd_iommu *iommu) {}
+#endif
+
+/* Needed for interrupt remapping */
+extern int amd_iommu_prepare(void);
+extern int amd_iommu_enable(void);
+extern void amd_iommu_disable(void);
+extern int amd_iommu_reenable(int);
+extern int amd_iommu_enable_faulting(void);
+extern int amd_iommu_guest_ir;
+
+/* IOMMUv2 specific functions */
+struct iommu_domain;
+
+extern bool amd_iommu_v2_supported(void);
+extern int amd_iommu_register_ppr_notifier(struct notifier_block *nb);
+extern int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb);
+extern void amd_iommu_domain_direct_map(struct iommu_domain *dom);
+extern int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
+extern int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
+                               u64 address);
+extern int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid);
+extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
+                                    unsigned long cr3);
+extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid);
+extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev);
+
+#ifdef CONFIG_IRQ_REMAP
+extern int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
+#else
+static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
+{
+       return 0;
+}
+#endif
+
+#define PPR_SUCCESS                    0x0
+#define PPR_INVALID                    0x1
+#define PPR_FAILURE                    0xf
+
+extern int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
+                                 int status, int tag);
+
+static inline bool is_rd890_iommu(struct pci_dev *pdev)
+{
+       return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
+              (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
+}
+
+static inline bool iommu_feature(struct amd_iommu *iommu, u64 f)
+{
+       if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
+               return false;
+
+       return !!(iommu->features & f);
+}
+
+static inline u64 iommu_virt_to_phys(void *vaddr)
+{
+       return (u64)__sme_set(virt_to_phys(vaddr));
+}
+
+static inline void *iommu_phys_to_virt(unsigned long paddr)
+{
+       return phys_to_virt(__sme_clr(paddr));
+}
+
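iommu_virt_to_phys() and iommu_phys_to_virt() fold AMD's Secure Memory Encryption C-bit into the physical addresses carried by IOMMU structures. A hedged model of what __sme_set()/__sme_clr() do (the real helpers live in linux/mem_encrypt.h and the bit position comes from CPUID; bit 47 is only illustrative):

	#include <stdint.h>

	static uint64_t sme_me_mask;	/* 0 when SME is off; e.g. 1ULL << 47 */

	static uint64_t sme_set(uint64_t pa) { return pa | sme_me_mask; }
	static uint64_t sme_clr(uint64_t pa) { return pa & ~sme_me_mask; }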
+extern bool translation_pre_enabled(struct amd_iommu *iommu);
+extern bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
+                                        struct device *dev);
+extern int __init add_special_device(u8 type, u8 id, u16 *devid,
+                                    bool cmd_line);
 
 #ifdef CONFIG_DMI
 void amd_iommu_apply_ivrs_quirks(void);
index c6a5c737ef0947654cb95607b43c7f3e9ae854ad..545372fcc72f2ad71d4b295dce6b8b2178eecede 100644 (file)
@@ -8,10 +8,9 @@
  */
 
 #include <linux/debugfs.h>
-#include <linux/iommu.h>
 #include <linux/pci.h>
-#include "amd_iommu_proto.h"
-#include "amd_iommu_types.h"
+
+#include "amd_iommu.h"
 
 static struct dentry *amd_iommu_debugfs;
 static DEFINE_MUTEX(amd_iommu_debugfs_lock);
index 6be3853a5d978e09b8aeebb02daaec01e3a80cc9..3faff7f80fd25d990ebd62c3a401756450c0d277 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/msi.h>
 #include <linux/amd-iommu.h>
 #include <linux/export.h>
-#include <linux/iommu.h>
 #include <linux/kmemleak.h>
 #include <linux/mem_encrypt.h>
 #include <asm/pci-direct.h>
@@ -32,9 +31,8 @@
 #include <asm/irq_remapping.h>
 
 #include <linux/crash_dump.h>
+
 #include "amd_iommu.h"
-#include "amd_iommu_proto.h"
-#include "amd_iommu_types.h"
 #include "irq_remapping.h"
 
 /*
@@ -1329,8 +1327,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
                }
                case IVHD_DEV_ACPI_HID: {
                        u16 devid;
-                       u8 hid[ACPIHID_HID_LEN] = {0};
-                       u8 uid[ACPIHID_UID_LEN] = {0};
+                       u8 hid[ACPIHID_HID_LEN];
+                       u8 uid[ACPIHID_UID_LEN];
                        int ret;
 
                        if (h->type != 0x40) {
@@ -1347,6 +1345,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
                                break;
                        }
 
+                       uid[0] = '\0';
                        switch (e->uidf) {
                        case UID_NOT_PRESENT:
 
@@ -1361,8 +1360,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
                                break;
                        case UID_IS_CHARACTER:
 
-                               memcpy(uid, (u8 *)(&e->uid), ACPIHID_UID_LEN - 1);
-                               uid[ACPIHID_UID_LEN - 1] = '\0';
+                               memcpy(uid, &e->uid, e->uidl);
+                               uid[e->uidl] = '\0';
 
                                break;
                        default:
@@ -2936,7 +2935,7 @@ static int __init parse_amd_iommu_intr(char *str)
 {
        for (; *str; ++str) {
                if (strncmp(str, "legacy", 6) == 0) {
-                       amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
+                       amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
                        break;
                }
                if (strncmp(str, "vapic", 5) == 0) {
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
deleted file mode 100644 (file)
index 92c2ba6..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2009-2010 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <jroedel@suse.de>
- */
-
-#ifndef _ASM_X86_AMD_IOMMU_PROTO_H
-#define _ASM_X86_AMD_IOMMU_PROTO_H
-
-#include "amd_iommu_types.h"
-
-extern int amd_iommu_get_num_iommus(void);
-extern int amd_iommu_init_dma_ops(void);
-extern int amd_iommu_init_passthrough(void);
-extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
-extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
-extern void amd_iommu_apply_erratum_63(u16 devid);
-extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
-extern int amd_iommu_init_devices(void);
-extern void amd_iommu_uninit_devices(void);
-extern void amd_iommu_init_notifier(void);
-extern int amd_iommu_init_api(void);
-
-#ifdef CONFIG_AMD_IOMMU_DEBUGFS
-void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
-#else
-static inline void amd_iommu_debugfs_setup(struct amd_iommu *iommu) {}
-#endif
-
-/* Needed for interrupt remapping */
-extern int amd_iommu_prepare(void);
-extern int amd_iommu_enable(void);
-extern void amd_iommu_disable(void);
-extern int amd_iommu_reenable(int);
-extern int amd_iommu_enable_faulting(void);
-extern int amd_iommu_guest_ir;
-
-/* IOMMUv2 specific functions */
-struct iommu_domain;
-
-extern bool amd_iommu_v2_supported(void);
-extern int amd_iommu_register_ppr_notifier(struct notifier_block *nb);
-extern int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb);
-extern void amd_iommu_domain_direct_map(struct iommu_domain *dom);
-extern int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
-extern int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
-                               u64 address);
-extern int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid);
-extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
-                                    unsigned long cr3);
-extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid);
-extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev);
-
-#ifdef CONFIG_IRQ_REMAP
-extern int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
-#else
-static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
-{
-       return 0;
-}
-#endif
-
-#define PPR_SUCCESS                    0x0
-#define PPR_INVALID                    0x1
-#define PPR_FAILURE                    0xf
-
-extern int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
-                                 int status, int tag);
-
-static inline bool is_rd890_iommu(struct pci_dev *pdev)
-{
-       return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
-              (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
-}
-
-static inline bool iommu_feature(struct amd_iommu *iommu, u64 f)
-{
-       if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
-               return false;
-
-       return !!(iommu->features & f);
-}
-
-static inline u64 iommu_virt_to_phys(void *vaddr)
-{
-       return (u64)__sme_set(virt_to_phys(vaddr));
-}
-
-static inline void *iommu_phys_to_virt(unsigned long paddr)
-{
-       return phys_to_virt(__sme_clr(paddr));
-}
-
-extern bool translation_pre_enabled(struct amd_iommu *iommu);
-extern struct iommu_dev_data *get_dev_data(struct device *dev);
-#endif /* _ASM_X86_AMD_IOMMU_PROTO_H  */
index d0d7b6a0c3d88f7f1187ae3dabb9cf20d6a527ae..30a5d412255a4090fda29212b7e6c9b01343f46b 100644 (file)
 #define PD_IOMMUV2_MASK                (1UL << 3) /* domain has gcr3 table */
 
 extern bool amd_iommu_dump;
-#define DUMP_printk(format, arg...)                                    \
-       do {                                                            \
-               if (amd_iommu_dump)                                             \
-                       printk(KERN_INFO "AMD-Vi: " format, ## arg);    \
+#define DUMP_printk(format, arg...)                            \
+       do {                                                    \
+               if (amd_iommu_dump)                             \
+                       pr_info("AMD-Vi: " format, ## arg);     \
        } while(0);
 
 /* global flag if IOMMUs cache non-present entries */
@@ -468,8 +468,7 @@ struct protection_domain {
                                       iommu core code */
        spinlock_t lock;        /* mostly used to lock the page table*/
        u16 id;                 /* the domain id written to the device table */
-       int mode;               /* paging mode (0-6 levels) */
-       u64 *pt_root;           /* page table root pointer */
+       atomic64_t pt_root;     /* pgtable root and pgtable mode */
        int glx;                /* Number of levels for GCR3 table */
        u64 *gcr3_tbl;          /* Guest CR3 table */
        unsigned long flags;    /* flags to find out type of domain */
@@ -477,6 +476,12 @@ struct protection_domain {
        unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
 };
 
+/* For decoded pt_root */
+struct domain_pgtable {
+       int mode;
+       u64 *root;
+};
+
 /*
  * Structure where we save information about one hardware AMD IOMMU in the
  * system.
index d6d85debd01b0060205348d31dfdf4e4170bb059..c8a7b6b392221485c60a061e6a674ef159329a60 100644 (file)
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/sched/mm.h>
-#include <linux/iommu.h>
 #include <linux/wait.h>
 #include <linux/pci.h>
 #include <linux/gfp.h>
 
-#include "amd_iommu_types.h"
-#include "amd_iommu_proto.h"
+#include "amd_iommu.h"
 
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");
@@ -517,13 +515,12 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
        struct amd_iommu_fault *iommu_fault;
        struct pasid_state *pasid_state;
        struct device_state *dev_state;
+       struct pci_dev *pdev = NULL;
        unsigned long flags;
        struct fault *fault;
        bool finish;
        u16 tag, devid;
        int ret;
-       struct iommu_dev_data *dev_data;
-       struct pci_dev *pdev = NULL;
 
        iommu_fault = data;
        tag         = iommu_fault->tag & 0x1ff;
@@ -534,12 +531,11 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
                                           devid & 0xff);
        if (!pdev)
                return -ENODEV;
-       dev_data = get_dev_data(&pdev->dev);
 
-       /* In kdump kernel pci dev is not initialized yet -> send INVALID */
        ret = NOTIFY_DONE;
-       if (translation_pre_enabled(amd_iommu_rlookup_table[devid])
-               && dev_data->defer_attach) {
+
+       /* In kdump kernel pci dev is not initialized yet -> send INVALID */
+       if (amd_iommu_is_attach_deferred(NULL, &pdev->dev)) {
                amd_iommu_complete_ppr(pdev, iommu_fault->pasid,
                                       PPR_INVALID, tag);
                goto out;
index 8a908c50c306fb563bde30ad2183d6cdfff5230a..f578677a5c41437fd9ee367359838f1aea89a308 100644 (file)
@@ -2663,26 +2663,20 @@ static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
        }
 }
 
-#ifdef CONFIG_PCI_ATS
 static bool arm_smmu_ats_supported(struct arm_smmu_master *master)
 {
-       struct pci_dev *pdev;
+       struct device *dev = master->dev;
        struct arm_smmu_device *smmu = master->smmu;
-       struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);
+       struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 
-       if (!(smmu->features & ARM_SMMU_FEAT_ATS) || !dev_is_pci(master->dev) ||
-           !(fwspec->flags & IOMMU_FWSPEC_PCI_RC_ATS) || pci_ats_disabled())
+       if (!(smmu->features & ARM_SMMU_FEAT_ATS))
                return false;
 
-       pdev = to_pci_dev(master->dev);
-       return !pdev->untrusted && pdev->ats_cap;
-}
-#else
-static bool arm_smmu_ats_supported(struct arm_smmu_master *master)
-{
-       return false;
+       if (!(fwspec->flags & IOMMU_FWSPEC_PCI_RC_ATS))
+               return false;
+
+       return dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev));
 }
-#endif
 
 static void arm_smmu_enable_ats(struct arm_smmu_master *master)
 {
index f77dae7ba7d4089f9fbf441b70a3e121f1dd7deb..60a2970c37ff4f65fba98f5100b7ffd927ddfecc 100644 (file)
@@ -963,6 +963,7 @@ static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
                warn_invalid_dmar(phys_addr, " returns all ones");
                goto unmap;
        }
+       iommu->vccap = dmar_readq(iommu->reg + DMAR_VCCAP_REG);
 
        /* the registers might be more than one page */
        map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
@@ -1156,12 +1157,11 @@ static inline void reclaim_free_desc(struct q_inval *qi)
        }
 }
 
-static int qi_check_fault(struct intel_iommu *iommu, int index)
+static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
 {
        u32 fault;
        int head, tail;
        struct q_inval *qi = iommu->qi;
-       int wait_index = (index + 1) % QI_LENGTH;
        int shift = qi_shift(iommu);
 
        if (qi->desc_status[wait_index] == QI_ABORT)
@@ -1224,17 +1224,21 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
 }
 
 /*
- * Submit the queued invalidation descriptor to the remapping
- * hardware unit and wait for its completion.
+ * Submit invalidation descriptors of all types to the queued invalidation
+ * interface (QI). Multiple descriptors can be submitted at a time; a wait
+ * descriptor is appended to each submission so that hardware has completed
+ * the invalidation before this function returns. Wait descriptors can be
+ * part of the submission, but they will not be polled for completion.
  */
-int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
+int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
+                  unsigned int count, unsigned long options)
 {
-       int rc;
        struct q_inval *qi = iommu->qi;
-       int offset, shift, length;
        struct qi_desc wait_desc;
        int wait_index, index;
        unsigned long flags;
+       int offset, shift;
+       int rc, i;
 
        if (!qi)
                return 0;
@@ -1243,32 +1247,41 @@ restart:
        rc = 0;
 
        raw_spin_lock_irqsave(&qi->q_lock, flags);
-       while (qi->free_cnt < 3) {
+       /*
+        * Check if we have enough empty slots in the queue to submit;
+        * the calculation is based on:
+        * # of desc + 1 wait desc + 1 space between head and tail
+        */
+       while (qi->free_cnt < count + 2) {
                raw_spin_unlock_irqrestore(&qi->q_lock, flags);
                cpu_relax();
                raw_spin_lock_irqsave(&qi->q_lock, flags);
        }
 
        index = qi->free_head;
-       wait_index = (index + 1) % QI_LENGTH;
+       wait_index = (index + count) % QI_LENGTH;
        shift = qi_shift(iommu);
-       length = 1 << shift;
 
-       qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
+       for (i = 0; i < count; i++) {
+               offset = ((index + i) % QI_LENGTH) << shift;
+               memcpy(qi->desc + offset, &desc[i], 1 << shift);
+               qi->desc_status[(index + i) % QI_LENGTH] = QI_IN_USE;
+       }
+       qi->desc_status[wait_index] = QI_IN_USE;
 
-       offset = index << shift;
-       memcpy(qi->desc + offset, desc, length);
        wait_desc.qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
                        QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
+       if (options & QI_OPT_WAIT_DRAIN)
+               wait_desc.qw0 |= QI_IWD_PRQ_DRAIN;
        wait_desc.qw1 = virt_to_phys(&qi->desc_status[wait_index]);
        wait_desc.qw2 = 0;
        wait_desc.qw3 = 0;
 
        offset = wait_index << shift;
-       memcpy(qi->desc + offset, &wait_desc, length);
+       memcpy(qi->desc + offset, &wait_desc, 1 << shift);
 
-       qi->free_head = (qi->free_head + 2) % QI_LENGTH;
-       qi->free_cnt -= 2;
+       qi->free_head = (qi->free_head + count + 1) % QI_LENGTH;
+       qi->free_cnt -= count + 1;
 
        /*
         * update the HW tail register indicating the presence of
@@ -1284,7 +1297,7 @@ restart:
                 * a deadlock where the interrupt context can wait indefinitely
                 * for free slots in the queue.
                 */
-               rc = qi_check_fault(iommu, index);
+               rc = qi_check_fault(iommu, index, wait_index);
                if (rc)
                        break;
 
@@ -1293,7 +1306,8 @@ restart:
                raw_spin_lock(&qi->q_lock);
        }
 
-       qi->desc_status[index] = QI_DONE;
+       for (i = 0; i < count; i++)
+               qi->desc_status[(index + i) % QI_LENGTH] = QI_DONE;
 
        reclaim_free_desc(qi);
        raw_spin_unlock_irqrestore(&qi->q_lock, flags);
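Under the new signature, a caller can batch descriptors: count entries plus the appended wait descriptor go into the ring in one transaction, which is why the free-slot check above demands count + 2 (the descriptors, the wait descriptor, and one slot left empty so a full ring is distinguishable from an empty one). A hedged usage sketch; the qw field setup is elided and would use the usual QI_* encoding macros:

	struct qi_desc desc[2] = {};

	desc[0].qw0 = QI_IOTLB_TYPE;	/* IOTLB invalidation (did/granularity bits elided) */
	desc[1].qw0 = QI_DEIOTLB_TYPE;	/* device-IOTLB invalidation (sid/qdep elided) */

	/* Submit both; spin on the trailing wait descriptor. */
	qi_submit_sync(iommu, desc, 2, 0);

	/* Same, but have hardware drain page requests first. */
	qi_submit_sync(iommu, desc, 2, QI_OPT_WAIT_DRAIN);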
@@ -1317,7 +1331,7 @@ void qi_global_iec(struct intel_iommu *iommu)
        desc.qw3 = 0;
 
        /* should never fail */
-       qi_submit_sync(&desc, iommu);
+       qi_submit_sync(iommu, &desc, 1, 0);
 }
 
 void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
@@ -1331,7 +1345,7 @@ void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
        desc.qw2 = 0;
        desc.qw3 = 0;
 
-       qi_submit_sync(&desc, iommu);
+       qi_submit_sync(iommu, &desc, 1, 0);
 }
 
 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
@@ -1355,7 +1369,7 @@ void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
        desc.qw2 = 0;
        desc.qw3 = 0;
 
-       qi_submit_sync(&desc, iommu);
+       qi_submit_sync(iommu, &desc, 1, 0);
 }
 
 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
@@ -1377,7 +1391,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
        desc.qw2 = 0;
        desc.qw3 = 0;
 
-       qi_submit_sync(&desc, iommu);
+       qi_submit_sync(iommu, &desc, 1, 0);
 }
 
 /* PASID-based IOTLB invalidation */
@@ -1418,7 +1432,46 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
                                QI_EIOTLB_AM(mask);
        }
 
-       qi_submit_sync(&desc, iommu);
+       qi_submit_sync(iommu, &desc, 1, 0);
+}
+
+/* PASID-based device IOTLB Invalidate */
+void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+                             u32 pasid,  u16 qdep, u64 addr,
+                             unsigned int size_order, u64 granu)
+{
+       unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
+       struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
+
+       desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
+               QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
+               QI_DEV_IOTLB_PFSID(pfsid);
+       desc.qw1 = QI_DEV_EIOTLB_GLOB(granu);
+
+       /*
+        * If the S bit is 0, we only flush a single page. If the S bit is
+        * set, the least significant zero bit indicates the invalidation
+        * address range (VT-d spec 6.5.2.6), e.g. address bit 12[0]
+        * indicates 8KB and bit 13[0] indicates 16KB. A size order of 0
+        * means PAGE_SIZE (4KB).
+        * Max Invs Pending (MIP) is set to 0 for now until we have DIT in
+        * ECAP.
+        */
+       desc.qw1 |= addr & ~mask;
+       if (size_order)
+               desc.qw1 |= QI_DEV_EIOTLB_SIZE;
+
+       qi_submit_sync(iommu, &desc, 1, 0);
+}
+
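A worked instance of the encoding described above, assuming VTD_PAGE_SHIFT == 12 and taking QI_DEV_EIOTLB_SIZE to be the S bit (bit 11): with size_order == 1 the mask clears address bit 12, and S set with bit 12 zero denotes an 8KB, two-page range per VT-d 6.5.2.6. Illustrative stand-alone code, not the kernel's:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t addr = 0x12345000ULL;		/* page-aligned IOVA */
		unsigned int size_order = 1;		/* 2^1 pages = 8KB */
		uint64_t mask = 1ULL << (12 + size_order - 1);	/* bit 12 */
		uint64_t qw1 = addr & ~mask;	/* least significant zero bit: 12 */

		if (size_order)
			qw1 |= 1ULL << 11;	/* S bit (QI_DEV_EIOTLB_SIZE, assumed) */

		printf("qw1 address field = %#llx\n", (unsigned long long)qw1);
		return 0;
	}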
+void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did,
+                         u64 granu, int pasid)
+{
+       struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
+
+       desc.qw0 = QI_PC_PASID(pasid) | QI_PC_DID(did) |
+                       QI_PC_GRAN(granu) | QI_PC_TYPE;
+       qi_submit_sync(iommu, &desc, 1, 0);
 }
 
 /*
index a386b83e0e34b260b1054d41f1e56f710931189b..3c0c67a99c7b6407a589d927af15dd4be74e4a02 100644 (file)
@@ -131,7 +131,7 @@ static int hyperv_irq_remapping_activate(struct irq_domain *domain,
        return 0;
 }
 
-static struct irq_domain_ops hyperv_ir_domain_ops = {
+static const struct irq_domain_ops hyperv_ir_domain_ops = {
        .alloc = hyperv_irq_remapping_alloc,
        .free = hyperv_irq_remapping_free,
        .activate = hyperv_irq_remapping_activate,
index 3eb1fe240fb00981471c3bc2b40a87d9ca81e4bc..cf1ebb98e4183bf070adab53e843f756525a716f 100644 (file)
@@ -372,6 +372,66 @@ static int domain_translation_struct_show(struct seq_file *m, void *unused)
 }
 DEFINE_SHOW_ATTRIBUTE(domain_translation_struct);
 
+static void invalidation_queue_entry_show(struct seq_file *m,
+                                         struct intel_iommu *iommu)
+{
+       int index, shift = qi_shift(iommu);
+       struct qi_desc *desc;
+       int offset;
+
+       if (ecap_smts(iommu->ecap))
+               seq_puts(m, "Index\t\tqw0\t\t\tqw1\t\t\tqw2\t\t\tqw3\t\t\tstatus\n");
+       else
+               seq_puts(m, "Index\t\tqw0\t\t\tqw1\t\t\tstatus\n");
+
+       for (index = 0; index < QI_LENGTH; index++) {
+               offset = index << shift;
+               desc = iommu->qi->desc + offset;
+               if (ecap_smts(iommu->ecap))
+                       seq_printf(m, "%5d\t%016llx\t%016llx\t%016llx\t%016llx\t%016x\n",
+                                  index, desc->qw0, desc->qw1,
+                                  desc->qw2, desc->qw3,
+                                  iommu->qi->desc_status[index]);
+               else
+                       seq_printf(m, "%5d\t%016llx\t%016llx\t%016x\n",
+                                  index, desc->qw0, desc->qw1,
+                                  iommu->qi->desc_status[index]);
+       }
+}
+
+static int invalidation_queue_show(struct seq_file *m, void *unused)
+{
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
+       unsigned long flags;
+       struct q_inval *qi;
+       int shift;
+
+       rcu_read_lock();
+       for_each_active_iommu(iommu, drhd) {
+               qi = iommu->qi;
+               shift = qi_shift(iommu);
+
+               if (!qi || !ecap_qis(iommu->ecap))
+                       continue;
+
+               seq_printf(m, "Invalidation queue on IOMMU: %s\n", iommu->name);
+
+               raw_spin_lock_irqsave(&qi->q_lock, flags);
+               seq_printf(m, " Base: 0x%llx\tHead: %lld\tTail: %lld\n",
+                          (u64)virt_to_phys(qi->desc),
+                          dmar_readq(iommu->reg + DMAR_IQH_REG) >> shift,
+                          dmar_readq(iommu->reg + DMAR_IQT_REG) >> shift);
+               invalidation_queue_entry_show(m, iommu);
+               raw_spin_unlock_irqrestore(&qi->q_lock, flags);
+               seq_putc(m, '\n');
+       }
+       rcu_read_unlock();
+
+       return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(invalidation_queue);
+
 #ifdef CONFIG_IRQ_REMAP
 static void ir_tbl_remap_entry_show(struct seq_file *m,
                                    struct intel_iommu *iommu)
@@ -490,6 +550,8 @@ void __init intel_iommu_debugfs_init(void)
        debugfs_create_file("domain_translation_struct", 0444,
                            intel_iommu_debug, NULL,
                            &domain_translation_struct_fops);
+       debugfs_create_file("invalidation_queue", 0444, intel_iommu_debug,
+                           NULL, &invalidation_queue_fops);
 #ifdef CONFIG_IRQ_REMAP
        debugfs_create_file("ir_translation_struct", 0444, intel_iommu_debug,
                            NULL, &ir_translation_struct_fops);
index b906727f5b85f810e9500b1a3af0cb9a736d5548..648a785e078a2d1a2cee3fb7aca2f755203b3446 100644 (file)
@@ -296,31 +296,6 @@ static inline void context_clear_entry(struct context_entry *context)
 static struct dmar_domain *si_domain;
 static int hw_pass_through = 1;
 
-/* si_domain contains mulitple devices */
-#define DOMAIN_FLAG_STATIC_IDENTITY            BIT(0)
-
-/*
- * This is a DMA domain allocated through the iommu domain allocation
- * interface. But one or more devices belonging to this domain have
- * been chosen to use a private domain. We should avoid to use the
- * map/unmap/iova_to_phys APIs on it.
- */
-#define DOMAIN_FLAG_LOSE_CHILDREN              BIT(1)
-
-/*
- * When VT-d works in the scalable mode, it allows DMA translation to
- * happen through either first level or second level page table. This
- * bit marks that the DMA translation for the domain goes through the
- * first level page table, otherwise, it goes through the second level.
- */
-#define DOMAIN_FLAG_USE_FIRST_LEVEL            BIT(2)
-
-/*
- * Domain represents a virtual machine which demands iommu nested
- * translation mode support.
- */
-#define DOMAIN_FLAG_NESTING_MODE               BIT(3)
-
 #define for_each_domain_iommu(idx, domain)                     \
        for (idx = 0; idx < g_num_of_iommus; idx++)             \
                if (domain->iommu_refcnt[idx])
@@ -355,11 +330,6 @@ static void domain_exit(struct dmar_domain *domain);
 static void domain_remove_dev_info(struct dmar_domain *domain);
 static void dmar_remove_one_dev_info(struct device *dev);
 static void __dmar_remove_one_dev_info(struct device_domain_info *info);
-static void domain_context_clear(struct intel_iommu *iommu,
-                                struct device *dev);
-static int domain_detach_iommu(struct dmar_domain *domain,
-                              struct intel_iommu *iommu);
-static bool device_is_rmrr_locked(struct device *dev);
 static int intel_iommu_attach_device(struct iommu_domain *domain,
                                     struct device *dev);
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -371,11 +341,11 @@ int dmar_disabled = 0;
 int dmar_disabled = 1;
 #endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */
 
-#ifdef INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
+#ifdef CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
 int intel_iommu_sm = 1;
 #else
 int intel_iommu_sm;
-#endif /* INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON */
+#endif /* CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON */
 
 int intel_iommu_enabled = 0;
 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
@@ -395,6 +365,21 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
 
 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
 #define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
+struct device_domain_info *get_domain_info(struct device *dev)
+{
+       struct device_domain_info *info;
+
+       if (!dev)
+               return NULL;
+
+       info = dev->archdata.iommu;
+       if (unlikely(info == DUMMY_DEVICE_DOMAIN_INFO ||
+                    info == DEFER_DEVICE_DOMAIN_INFO))
+               return NULL;
+
+       return info;
+}
+
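/*
 * A minimal usage sketch of the new helper; example_dev_has_ats() is
 * hypothetical, but shows callers getting NULL back for devices marked
 * with the DUMMY/DEFER sentinels instead of open-coding the comparisons
 * (device_domain_lock is the lock declared just below):
 */
static bool example_dev_has_ats(struct device *dev)
{
        struct device_domain_info *info;
        unsigned long flags;
        bool ats = false;

        spin_lock_irqsave(&device_domain_lock, flags);
        info = get_domain_info(dev);    /* NULL for dummy/deferred devices */
        if (info)
                ats = info->ats_enabled;
        spin_unlock_irqrestore(&device_domain_lock, flags);

        return ats;
}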
 DEFINE_SPINLOCK(device_domain_lock);
 static LIST_HEAD(device_domain_list);
 
@@ -446,12 +431,6 @@ static void init_translation_status(struct intel_iommu *iommu)
                iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
 }
 
-/* Convert generic 'struct iommu_domain to private struct dmar_domain */
-static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
-{
-       return container_of(dom, struct dmar_domain, domain);
-}
-
 static int __init intel_iommu_setup(char *str)
 {
        if (!str)
@@ -480,8 +459,7 @@ static int __init intel_iommu_setup(char *str)
                        pr_info("Intel-IOMMU: scalable mode supported\n");
                        intel_iommu_sm = 1;
                } else if (!strncmp(str, "tboot_noforce", 13)) {
-                       printk(KERN_INFO
-                               "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
+                       pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
                        intel_iommu_tboot_noforce = 1;
                } else if (!strncmp(str, "nobounce", 8)) {
                        pr_info("Intel-IOMMU: No bounce buffer. This could expose security risks of DMA attacks\n");
@@ -1454,8 +1432,7 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
            !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
                info->pri_enabled = 1;
 #endif
-       if (!pdev->untrusted && info->ats_supported &&
-           pci_ats_page_aligned(pdev) &&
+       if (info->ats_supported && pci_ats_page_aligned(pdev) &&
            !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
                info->ats_enabled = 1;
                domain_update_iotlb(info->domain);
@@ -1763,6 +1740,9 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
                if (ecap_prs(iommu->ecap))
                        intel_svm_finish_prq(iommu);
        }
+       if (ecap_vcs(iommu->ecap) && vccap_pasid(iommu->vccap))
+               ioasid_unregister_allocator(&iommu->pasid_allocator);
+
 #endif
 }
 
@@ -1911,11 +1891,6 @@ static int dmar_init_reserved_ranges(void)
        return 0;
 }
 
-static void domain_reserve_special_ranges(struct dmar_domain *domain)
-{
-       copy_reserved_iova(&reserved_iova_list, &domain->iovad);
-}
-
 static inline int guestwidth_to_adjustwidth(int gaw)
 {
        int agaw;
@@ -1930,65 +1905,6 @@ static inline int guestwidth_to_adjustwidth(int gaw)
        return agaw;
 }
 
-static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
-                      int guest_width)
-{
-       int adjust_width, agaw;
-       unsigned long sagaw;
-       int ret;
-
-       init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
-
-       if (!intel_iommu_strict) {
-               ret = init_iova_flush_queue(&domain->iovad,
-                                           iommu_flush_iova, iova_entry_free);
-               if (ret)
-                       pr_info("iova flush queue initialization failed\n");
-       }
-
-       domain_reserve_special_ranges(domain);
-
-       /* calculate AGAW */
-       if (guest_width > cap_mgaw(iommu->cap))
-               guest_width = cap_mgaw(iommu->cap);
-       domain->gaw = guest_width;
-       adjust_width = guestwidth_to_adjustwidth(guest_width);
-       agaw = width_to_agaw(adjust_width);
-       sagaw = cap_sagaw(iommu->cap);
-       if (!test_bit(agaw, &sagaw)) {
-               /* hardware doesn't support it, choose a bigger one */
-               pr_debug("Hardware doesn't support agaw %d\n", agaw);
-               agaw = find_next_bit(&sagaw, 5, agaw);
-               if (agaw >= 5)
-                       return -ENODEV;
-       }
-       domain->agaw = agaw;
-
-       if (ecap_coherent(iommu->ecap))
-               domain->iommu_coherency = 1;
-       else
-               domain->iommu_coherency = 0;
-
-       if (ecap_sc_support(iommu->ecap))
-               domain->iommu_snooping = 1;
-       else
-               domain->iommu_snooping = 0;
-
-       if (intel_iommu_superpage)
-               domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
-       else
-               domain->iommu_superpage = 0;
-
-       domain->nid = iommu->node;
-
-       /* always allocate the top pgd */
-       domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
-       if (!domain->pgd)
-               return -ENOMEM;
-       __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
-       return 0;
-}
-
 static void domain_exit(struct dmar_domain *domain)
 {
 
@@ -1996,7 +1912,8 @@ static void domain_exit(struct dmar_domain *domain)
        domain_remove_dev_info(domain);
 
        /* destroy iovas */
-       put_iova_domain(&domain->iovad);
+       if (domain->domain.type == IOMMU_DOMAIN_DMA)
+               put_iova_domain(&domain->iovad);
 
        if (domain->pgd) {
                struct page *freelist;
@@ -2518,11 +2435,8 @@ struct dmar_domain *find_domain(struct device *dev)
        if (unlikely(attach_deferred(dev) || iommu_dummy(dev)))
                return NULL;
 
-       if (dev_is_pci(dev))
-               dev = &pci_real_dma_dev(to_pci_dev(dev))->dev;
-
        /* No lock here, assumes no domain exit in normal case */
-       info = dev->archdata.iommu;
+       info = get_domain_info(dev);
        if (likely(info))
                return info->domain;
 
@@ -2545,7 +2459,7 @@ dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
        struct device_domain_info *info;
 
        list_for_each_entry(info, &device_domain_list, global)
-               if (info->iommu->segment == segment && info->bus == bus &&
+               if (info->segment == segment && info->bus == bus &&
                    info->devfn == devfn)
                        return info;
 
@@ -2582,6 +2496,12 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
                                             flags);
 }
 
+static bool dev_is_real_dma_subdevice(struct device *dev)
+{
+       return dev && dev_is_pci(dev) &&
+              pci_real_dma_dev(to_pci_dev(dev)) != to_pci_dev(dev);
+}
+
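/*
 * Illustration (example_requester_id() is hypothetical): a "real DMA
 * subdevice" issues DMA using another function's requester ID, so code
 * that needs the on-bus ID asks pci_real_dma_dev() first:
 */
static u16 example_requester_id(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct pci_dev *dma_dev = pci_real_dma_dev(pdev);

        /* For a real DMA subdevice, dma_dev != pdev and its BDF is used. */
        return PCI_DEVID(dma_dev->bus->number, dma_dev->devfn);
}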
 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
                                                    int bus, int devfn,
                                                    struct device *dev,
@@ -2596,8 +2516,18 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
        if (!info)
                return NULL;
 
-       info->bus = bus;
-       info->devfn = devfn;
+       if (!dev_is_real_dma_subdevice(dev)) {
+               info->bus = bus;
+               info->devfn = devfn;
+               info->segment = iommu->segment;
+       } else {
+               struct pci_dev *pdev = to_pci_dev(dev);
+
+               info->bus = pdev->bus->number;
+               info->devfn = pdev->devfn;
+               info->segment = pci_domain_nr(pdev->bus);
+       }
+
        info->ats_supported = info->pasid_supported = info->pri_supported = 0;
        info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
        info->ats_qdep = 0;
@@ -2611,10 +2541,8 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
        if (dev && dev_is_pci(dev)) {
                struct pci_dev *pdev = to_pci_dev(info->dev);
 
-               if (!pdev->untrusted &&
-                   !pci_ats_disabled() &&
-                   ecap_dev_iotlb_support(iommu->ecap) &&
-                   pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
+               if (ecap_dev_iotlb_support(iommu->ecap) &&
+                   pci_ats_supported(pdev) &&
                    dmar_find_matched_atsr_unit(pdev))
                        info->ats_supported = 1;
 
@@ -2637,7 +2565,8 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
 
        if (!found) {
                struct device_domain_info *info2;
-               info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
+               info2 = dmar_search_domain_by_dev_info(info->segment, info->bus,
+                                                      info->devfn);
                if (info2) {
                        found      = info2->domain;
                        info2->dev = dev;
@@ -2704,108 +2633,10 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
        return domain;
 }
 
-static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
-{
-       *(u16 *)opaque = alias;
-       return 0;
-}
-
-static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
-{
-       struct device_domain_info *info;
-       struct dmar_domain *domain = NULL;
-       struct intel_iommu *iommu;
-       u16 dma_alias;
-       unsigned long flags;
-       u8 bus, devfn;
-
-       iommu = device_to_iommu(dev, &bus, &devfn);
-       if (!iommu)
-               return NULL;
-
-       if (dev_is_pci(dev)) {
-               struct pci_dev *pdev = to_pci_dev(dev);
-
-               pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
-
-               spin_lock_irqsave(&device_domain_lock, flags);
-               info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
-                                                     PCI_BUS_NUM(dma_alias),
-                                                     dma_alias & 0xff);
-               if (info) {
-                       iommu = info->iommu;
-                       domain = info->domain;
-               }
-               spin_unlock_irqrestore(&device_domain_lock, flags);
-
-               /* DMA alias already has a domain, use it */
-               if (info)
-                       goto out;
-       }
-
-       /* Allocate and initialize new domain for the device */
-       domain = alloc_domain(0);
-       if (!domain)
-               return NULL;
-       if (domain_init(domain, iommu, gaw)) {
-               domain_exit(domain);
-               return NULL;
-       }
-
-out:
-       return domain;
-}
-
-static struct dmar_domain *set_domain_for_dev(struct device *dev,
-                                             struct dmar_domain *domain)
-{
-       struct intel_iommu *iommu;
-       struct dmar_domain *tmp;
-       u16 req_id, dma_alias;
-       u8 bus, devfn;
-
-       iommu = device_to_iommu(dev, &bus, &devfn);
-       if (!iommu)
-               return NULL;
-
-       req_id = ((u16)bus << 8) | devfn;
-
-       if (dev_is_pci(dev)) {
-               struct pci_dev *pdev = to_pci_dev(dev);
-
-               pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
-
-               /* register PCI DMA alias device */
-               if (req_id != dma_alias) {
-                       tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
-                                       dma_alias & 0xff, NULL, domain);
-
-                       if (!tmp || tmp != domain)
-                               return tmp;
-               }
-       }
-
-       tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
-       if (!tmp || tmp != domain)
-               return tmp;
-
-       return domain;
-}
-
 static int iommu_domain_identity_map(struct dmar_domain *domain,
-                                    unsigned long long start,
-                                    unsigned long long end)
+                                    unsigned long first_vpfn,
+                                    unsigned long last_vpfn)
 {
-       unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
-       unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
-
-       if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
-                         dma_to_mm_pfn(last_vpfn))) {
-               pr_err("Reserving iova failed\n");
-               return -ENOMEM;
-       }
-
-       pr_debug("Mapping reserved region %llx-%llx\n", start, end);
        /*
         * RMRR range might have overlap with physical memory range,
         * clear it first
@@ -2817,45 +2648,6 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
                                DMA_PTE_READ|DMA_PTE_WRITE);
 }
 
-static int domain_prepare_identity_map(struct device *dev,
-                                      struct dmar_domain *domain,
-                                      unsigned long long start,
-                                      unsigned long long end)
-{
-       /* For _hardware_ passthrough, don't bother. But for software
-          passthrough, we do it anyway -- it may indicate a memory
-          range which is reserved in E820, so which didn't get set
-          up to start with in si_domain */
-       if (domain == si_domain && hw_pass_through) {
-               dev_warn(dev, "Ignoring identity map for HW passthrough [0x%Lx - 0x%Lx]\n",
-                        start, end);
-               return 0;
-       }
-
-       dev_info(dev, "Setting identity map [0x%Lx - 0x%Lx]\n", start, end);
-
-       if (end < start) {
-               WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
-                       "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
-                       dmi_get_system_info(DMI_BIOS_VENDOR),
-                       dmi_get_system_info(DMI_BIOS_VERSION),
-                    dmi_get_system_info(DMI_PRODUCT_VERSION));
-               return -EIO;
-       }
-
-       if (end >> agaw_to_width(domain->agaw)) {
-               WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
-                    "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
-                    agaw_to_width(domain->agaw),
-                    dmi_get_system_info(DMI_BIOS_VENDOR),
-                    dmi_get_system_info(DMI_BIOS_VERSION),
-                    dmi_get_system_info(DMI_PRODUCT_VERSION));
-               return -EIO;
-       }
-
-       return iommu_domain_identity_map(domain, start, end);
-}
-
 static int md_domain_init(struct dmar_domain *domain, int guest_width);
 
 static int __init si_domain_init(int hw)
@@ -2882,7 +2674,8 @@ static int __init si_domain_init(int hw)
 
                for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
                        ret = iommu_domain_identity_map(si_domain,
-                                       PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
+                                       mm_to_dma_pfn(start_pfn),
+                                       mm_to_dma_pfn(end_pfn));
                        if (ret)
                                return ret;
                }
@@ -2911,17 +2704,6 @@ static int __init si_domain_init(int hw)
        return 0;
 }
 
-static int identity_mapping(struct device *dev)
-{
-       struct device_domain_info *info;
-
-       info = dev->archdata.iommu;
-       if (info)
-               return (info->domain == si_domain);
-
-       return 0;
-}
-
 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
 {
        struct dmar_domain *ndomain;
@@ -3048,31 +2830,6 @@ static int device_def_domain_type(struct device *dev)
 
                if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
                        return IOMMU_DOMAIN_IDENTITY;
-
-               /*
-                * We want to start off with all devices in the 1:1 domain, and
-                * take them out later if we find they can't access all of memory.
-                *
-                * However, we can't do this for PCI devices behind bridges,
-                * because all PCI devices behind the same bridge will end up
-                * with the same source-id on their transactions.
-                *
-                * Practically speaking, we can't change things around for these
-                * devices at run-time, because we can't be sure there'll be no
-                * DMA transactions in flight for any of their siblings.
-                *
-                * So PCI devices (unless they're on the root bus) as well as
-                * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
-                * the 1:1 domain, just in _case_ one of their siblings turns out
-                * not to be able to map all of memory.
-                */
-               if (!pci_is_pcie(pdev)) {
-                       if (!pci_is_root_bus(pdev->bus))
-                               return IOMMU_DOMAIN_DMA;
-                       if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
-                               return IOMMU_DOMAIN_DMA;
-               } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
-                       return IOMMU_DOMAIN_DMA;
        }
 
        return 0;
@@ -3297,6 +3054,85 @@ out_unmap:
        return ret;
 }
 
+#ifdef CONFIG_INTEL_IOMMU_SVM
+static ioasid_t intel_vcmd_ioasid_alloc(ioasid_t min, ioasid_t max, void *data)
+{
+       struct intel_iommu *iommu = data;
+       ioasid_t ioasid;
+
+       if (!iommu)
+               return INVALID_IOASID;
+       /*
+        * The VT-d virtual command interface always uses the full 20-bit
+        * PASID range. The host can partition the guest PASID range based
+        * on policies, but this is out of the guest's control.
+        */
+       if (min < PASID_MIN || max > intel_pasid_max_id)
+               return INVALID_IOASID;
+
+       if (vcmd_alloc_pasid(iommu, &ioasid))
+               return INVALID_IOASID;
+
+       return ioasid;
+}
+
+static void intel_vcmd_ioasid_free(ioasid_t ioasid, void *data)
+{
+       struct intel_iommu *iommu = data;
+
+       if (!iommu)
+               return;
+       /*
+        * Sanity checking of the IOASID owner is done at the upper layer,
+        * e.g. VFIO. We can only free the PASID when all devices are unbound.
+        */
+       if (ioasid_find(NULL, ioasid, NULL)) {
+               pr_alert("Cannot free active IOASID %d\n", ioasid);
+               return;
+       }
+       vcmd_free_pasid(iommu, ioasid);
+}
+
+static void register_pasid_allocator(struct intel_iommu *iommu)
+{
+       /*
+        * If we are running in the host, there is no need for a custom
+        * allocator because PASIDs are allocated system-wide by the host.
+        */
+       if (!cap_caching_mode(iommu->cap))
+               return;
+
+       if (!sm_supported(iommu)) {
+               pr_warn("VT-d Scalable Mode not enabled, no PASID allocation\n");
+               return;
+       }
+
+       /*
+        * Register a custom PASID allocator if we are running in a guest;
+        * guest PASIDs must be obtained via the virtual command interface.
+        * There can be multiple vIOMMUs in each guest but only one allocator
+        * is active. All vIOMMU allocators eventually call the same host
+        * allocator.
+        */
+       if (!ecap_vcs(iommu->ecap) || !vccap_pasid(iommu->vccap))
+               return;
+
+       pr_info("Register custom PASID allocator\n");
+       iommu->pasid_allocator.alloc = intel_vcmd_ioasid_alloc;
+       iommu->pasid_allocator.free = intel_vcmd_ioasid_free;
+       iommu->pasid_allocator.pdata = (void *)iommu;
+       if (ioasid_register_allocator(&iommu->pasid_allocator)) {
+               pr_warn("Custom PASID allocator failed, scalable mode disabled\n");
+               /*
+                * Disable scalable mode on this IOMMU if there is no
+                * custom allocator. Mixing SM-capable and non-SM vIOMMUs
+                * is not supported.
+                */
+               intel_iommu_sm = 0;
+       }
+}
+#endif
+
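/*
 * Once registered, the allocator is reached through the generic ioasid
 * API rather than called directly; a minimal sketch, assuming the
 * ioasid_alloc()/ioasid_free() signatures from <linux/ioasid.h>:
 */
static void example_guest_pasid_roundtrip(void)
{
        /* Served by the vcmd-backed allocator registered above. */
        ioasid_t pasid = ioasid_alloc(NULL, PASID_MIN,
                                      intel_pasid_max_id - 1, NULL);

        if (pasid == INVALID_IOASID)
                return;

        ioasid_free(pasid);
}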
 static int __init init_dmars(void)
 {
        struct dmar_drhd_unit *drhd;
@@ -3414,6 +3250,9 @@ static int __init init_dmars(void)
         */
        for_each_active_iommu(iommu, drhd) {
                iommu_flush_write_buffer(iommu);
+#ifdef CONFIG_INTEL_IOMMU_SVM
+               register_pasid_allocator(iommu);
+#endif
                iommu_set_root_entry(iommu);
                iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
                iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
@@ -3531,100 +3370,6 @@ static unsigned long intel_alloc_iova(struct device *dev,
        return iova_pfn;
 }
 
-static struct dmar_domain *get_private_domain_for_dev(struct device *dev)
-{
-       struct dmar_domain *domain, *tmp;
-       struct dmar_rmrr_unit *rmrr;
-       struct device *i_dev;
-       int i, ret;
-
-       /* Device shouldn't be attached by any domains. */
-       domain = find_domain(dev);
-       if (domain)
-               return NULL;
-
-       domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
-       if (!domain)
-               goto out;
-
-       /* We have a new domain - setup possible RMRRs for the device */
-       rcu_read_lock();
-       for_each_rmrr_units(rmrr) {
-               for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
-                                         i, i_dev) {
-                       if (i_dev != dev)
-                               continue;
-
-                       ret = domain_prepare_identity_map(dev, domain,
-                                                         rmrr->base_address,
-                                                         rmrr->end_address);
-                       if (ret)
-                               dev_err(dev, "Mapping reserved region failed\n");
-               }
-       }
-       rcu_read_unlock();
-
-       tmp = set_domain_for_dev(dev, domain);
-       if (!tmp || domain != tmp) {
-               domain_exit(domain);
-               domain = tmp;
-       }
-
-out:
-       if (!domain)
-               dev_err(dev, "Allocating domain failed\n");
-       else
-               domain->domain.type = IOMMU_DOMAIN_DMA;
-
-       return domain;
-}
-
-/* Check if the dev needs to go through non-identity map and unmap process.*/
-static bool iommu_need_mapping(struct device *dev)
-{
-       int ret;
-
-       if (iommu_dummy(dev))
-               return false;
-
-       if (unlikely(attach_deferred(dev)))
-               do_deferred_attach(dev);
-
-       ret = identity_mapping(dev);
-       if (ret) {
-               u64 dma_mask = *dev->dma_mask;
-
-               if (dev->coherent_dma_mask && dev->coherent_dma_mask < dma_mask)
-                       dma_mask = dev->coherent_dma_mask;
-
-               if (dma_mask >= dma_direct_get_required_mask(dev))
-                       return false;
-
-               /*
-                * 32 bit DMA is removed from si_domain and fall back to
-                * non-identity mapping.
-                */
-               dmar_remove_one_dev_info(dev);
-               ret = iommu_request_dma_domain_for_dev(dev);
-               if (ret) {
-                       struct iommu_domain *domain;
-                       struct dmar_domain *dmar_domain;
-
-                       domain = iommu_get_domain_for_dev(dev);
-                       if (domain) {
-                               dmar_domain = to_dmar_domain(domain);
-                               dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
-                       }
-                       dmar_remove_one_dev_info(dev);
-                       get_private_domain_for_dev(dev);
-               }
-
-               dev_info(dev, "32bit DMA uses non-identity mapping\n");
-       }
-
-       return true;
-}
-
 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
                                     size_t size, int dir, u64 dma_mask)
 {
@@ -3638,6 +3383,9 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 
        BUG_ON(dir == DMA_NONE);
 
+       if (unlikely(attach_deferred(dev)))
+               do_deferred_attach(dev);
+
        domain = find_domain(dev);
        if (!domain)
                return DMA_MAPPING_ERROR;
@@ -3689,20 +3437,15 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
                                 enum dma_data_direction dir,
                                 unsigned long attrs)
 {
-       if (iommu_need_mapping(dev))
-               return __intel_map_single(dev, page_to_phys(page) + offset,
-                               size, dir, *dev->dma_mask);
-       return dma_direct_map_page(dev, page, offset, size, dir, attrs);
+       return __intel_map_single(dev, page_to_phys(page) + offset,
+                                 size, dir, *dev->dma_mask);
 }
 
 static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
                                     size_t size, enum dma_data_direction dir,
                                     unsigned long attrs)
 {
-       if (iommu_need_mapping(dev))
-               return __intel_map_single(dev, phys_addr, size, dir,
-                               *dev->dma_mask);
-       return dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
+       return __intel_map_single(dev, phys_addr, size, dir, *dev->dma_mask);
 }
 
 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
@@ -3753,17 +3496,13 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
                             size_t size, enum dma_data_direction dir,
                             unsigned long attrs)
 {
-       if (iommu_need_mapping(dev))
-               intel_unmap(dev, dev_addr, size);
-       else
-               dma_direct_unmap_page(dev, dev_addr, size, dir, attrs);
+       intel_unmap(dev, dev_addr, size);
 }
 
 static void intel_unmap_resource(struct device *dev, dma_addr_t dev_addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-       if (iommu_need_mapping(dev))
-               intel_unmap(dev, dev_addr, size);
+       intel_unmap(dev, dev_addr, size);
 }
 
 static void *intel_alloc_coherent(struct device *dev, size_t size,
@@ -3773,8 +3512,8 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
        struct page *page = NULL;
        int order;
 
-       if (!iommu_need_mapping(dev))
-               return dma_direct_alloc(dev, size, dma_handle, flags, attrs);
+       if (unlikely(attach_deferred(dev)))
+               do_deferred_attach(dev);
 
        size = PAGE_ALIGN(size);
        order = get_order(size);
@@ -3809,9 +3548,6 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
        int order;
        struct page *page = virt_to_page(vaddr);
 
-       if (!iommu_need_mapping(dev))
-               return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
-
        size = PAGE_ALIGN(size);
        order = get_order(size);
 
@@ -3829,9 +3565,6 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
        struct scatterlist *sg;
        int i;
 
-       if (!iommu_need_mapping(dev))
-               return dma_direct_unmap_sg(dev, sglist, nelems, dir, attrs);
-
        for_each_sg(sglist, sg, nelems, i) {
                nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
        }
@@ -3855,8 +3588,9 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
        struct intel_iommu *iommu;
 
        BUG_ON(dir == DMA_NONE);
-       if (!iommu_need_mapping(dev))
-               return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
+
+       if (unlikely(attach_deferred(dev)))
+               do_deferred_attach(dev);
 
        domain = find_domain(dev);
        if (!domain)
@@ -3903,8 +3637,6 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
 
 static u64 intel_get_required_mask(struct device *dev)
 {
-       if (!iommu_need_mapping(dev))
-               return dma_direct_get_required_mask(dev);
        return DMA_BIT_MASK(32);
 }
 
@@ -4813,58 +4545,37 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
                                       unsigned long val, void *v)
 {
        struct memory_notify *mhp = v;
-       unsigned long long start, end;
-       unsigned long start_vpfn, last_vpfn;
+       unsigned long start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
+       unsigned long last_vpfn = mm_to_dma_pfn(mhp->start_pfn +
+                       mhp->nr_pages - 1);
 
        switch (val) {
        case MEM_GOING_ONLINE:
-               start = mhp->start_pfn << PAGE_SHIFT;
-               end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
-               if (iommu_domain_identity_map(si_domain, start, end)) {
-                       pr_warn("Failed to build identity map for [%llx-%llx]\n",
-                               start, end);
+               if (iommu_domain_identity_map(si_domain,
+                                             start_vpfn, last_vpfn)) {
+                       pr_warn("Failed to build identity map for [%lx-%lx]\n",
+                               start_vpfn, last_vpfn);
                        return NOTIFY_BAD;
                }
                break;
 
        case MEM_OFFLINE:
        case MEM_CANCEL_ONLINE:
-               start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
-               last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
-               while (start_vpfn <= last_vpfn) {
-                       struct iova *iova;
+               {
                        struct dmar_drhd_unit *drhd;
                        struct intel_iommu *iommu;
                        struct page *freelist;
 
-                       iova = find_iova(&si_domain->iovad, start_vpfn);
-                       if (iova == NULL) {
-                               pr_debug("Failed get IOVA for PFN %lx\n",
-                                        start_vpfn);
-                               break;
-                       }
-
-                       iova = split_and_remove_iova(&si_domain->iovad, iova,
-                                                    start_vpfn, last_vpfn);
-                       if (iova == NULL) {
-                               pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
-                                       start_vpfn, last_vpfn);
-                               return NOTIFY_BAD;
-                       }
-
-                       freelist = domain_unmap(si_domain, iova->pfn_lo,
-                                              iova->pfn_hi);
+                       freelist = domain_unmap(si_domain,
+                                               start_vpfn, last_vpfn);
 
                        rcu_read_lock();
                        for_each_active_iommu(iommu, drhd)
                                iommu_flush_iotlb_psi(iommu, si_domain,
-                                       iova->pfn_lo, iova_size(iova),
+                                       start_vpfn, mhp->nr_pages,
                                        !freelist, 0);
                        rcu_read_unlock();
                        dma_free_pagelist(freelist);
-
-                       start_vpfn = iova->pfn_hi + 1;
-                       free_iova_mem(iova);
                }
                break;
        }
@@ -4892,8 +4603,9 @@ static void free_all_cpu_cached_iovas(unsigned int cpu)
                for (did = 0; did < cap_ndoms(iommu->cap); did++) {
                        domain = get_iommu_domain(iommu, (u16)did);
 
-                       if (!domain)
+                       if (!domain || domain->domain.type != IOMMU_DOMAIN_DMA)
                                continue;
+
                        free_cpu_cached_iovas(cpu, &domain->iovad);
                }
        }
@@ -5186,18 +4898,6 @@ int __init intel_iommu_init(void)
        }
        up_write(&dmar_global_lock);
 
-#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
-       /*
-        * If the system has no untrusted device or the user has decided
-        * to disable the bounce page mechanisms, we don't need swiotlb.
-        * Mark this and the pre-allocated bounce pages will be released
-        * later.
-        */
-       if (!has_untrusted_dev() || intel_no_bounce)
-               swiotlb = 0;
-#endif
-       dma_ops = &intel_dma_ops;
-
        init_iommu_pm_ops();
 
        down_read(&dmar_global_lock);
@@ -5283,10 +4983,11 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
        if (info->dev) {
                if (dev_is_pci(info->dev) && sm_supported(iommu))
                        intel_pasid_tear_down_entry(iommu, info->dev,
-                                       PASID_RID2PASID);
+                                       PASID_RID2PASID, false);
 
                iommu_disable_dev_iotlb(info);
-               domain_context_clear(iommu, info->dev);
+               if (!dev_is_real_dma_subdevice(info->dev))
+                       domain_context_clear(iommu, info->dev);
                intel_pasid_free_table(info->dev);
        }
 
@@ -5296,12 +4997,6 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
        domain_detach_iommu(domain, iommu);
        spin_unlock_irqrestore(&iommu->lock, flags);
 
-       /* free the private domain */
-       if (domain->flags & DOMAIN_FLAG_LOSE_CHILDREN &&
-           !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
-           list_empty(&domain->devices))
-               domain_exit(info->domain);
-
        free_devinfo_mem(info);
 }
 
@@ -5311,9 +5006,8 @@ static void dmar_remove_one_dev_info(struct device *dev)
        unsigned long flags;
 
        spin_lock_irqsave(&device_domain_lock, flags);
-       info = dev->archdata.iommu;
-       if (info && info != DEFER_DEVICE_DOMAIN_INFO
-           && info != DUMMY_DEVICE_DOMAIN_INFO)
+       info = get_domain_info(dev);
+       if (info)
                __dmar_remove_one_dev_info(info);
        spin_unlock_irqrestore(&device_domain_lock, flags);
 }
@@ -5322,9 +5016,6 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 {
        int adjust_width;
 
-       init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
-       domain_reserve_special_ranges(domain);
-
        /* calculate AGAW */
        domain->gaw = guest_width;
        adjust_width = guestwidth_to_adjustwidth(guest_width);
@@ -5343,11 +5034,21 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
        return 0;
 }
 
+static void intel_init_iova_domain(struct dmar_domain *dmar_domain)
+{
+       init_iova_domain(&dmar_domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
+       copy_reserved_iova(&reserved_iova_list, &dmar_domain->iovad);
+
+       if (!intel_iommu_strict &&
+           init_iova_flush_queue(&dmar_domain->iovad,
+                                 iommu_flush_iova, iova_entry_free))
+               pr_info("iova flush queue initialization failed\n");
+}
+
 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 {
        struct dmar_domain *dmar_domain;
        struct iommu_domain *domain;
-       int ret;
 
        switch (type) {
        case IOMMU_DOMAIN_DMA:
@@ -5364,13 +5065,8 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
                        return NULL;
                }
 
-               if (!intel_iommu_strict && type == IOMMU_DOMAIN_DMA) {
-                       ret = init_iova_flush_queue(&dmar_domain->iovad,
-                                                   iommu_flush_iova,
-                                                   iova_entry_free);
-                       if (ret)
-                               pr_info("iova flush queue initialization failed\n");
-               }
+               if (type == IOMMU_DOMAIN_DMA)
+                       intel_init_iova_domain(dmar_domain);
 
                domain_update_iommu_cap(dmar_domain);
 
@@ -5403,7 +5099,7 @@ static void intel_iommu_domain_free(struct iommu_domain *domain)
 static inline bool
 is_aux_domain(struct device *dev, struct iommu_domain *domain)
 {
-       struct device_domain_info *info = dev->archdata.iommu;
+       struct device_domain_info *info = get_domain_info(dev);
 
        return info && info->auxd_enabled &&
                        domain->type == IOMMU_DOMAIN_UNMANAGED;
@@ -5412,7 +5108,7 @@ is_aux_domain(struct device *dev, struct iommu_domain *domain)
 static void auxiliary_link_device(struct dmar_domain *domain,
                                  struct device *dev)
 {
-       struct device_domain_info *info = dev->archdata.iommu;
+       struct device_domain_info *info = get_domain_info(dev);
 
        assert_spin_locked(&device_domain_lock);
        if (WARN_ON(!info))
@@ -5425,7 +5121,7 @@ static void auxiliary_link_device(struct dmar_domain *domain,
 static void auxiliary_unlink_device(struct dmar_domain *domain,
                                    struct device *dev)
 {
-       struct device_domain_info *info = dev->archdata.iommu;
+       struct device_domain_info *info = get_domain_info(dev);
 
        assert_spin_locked(&device_domain_lock);
        if (WARN_ON(!info))
@@ -5513,13 +5209,13 @@ static void aux_domain_remove_dev(struct dmar_domain *domain,
                return;
 
        spin_lock_irqsave(&device_domain_lock, flags);
-       info = dev->archdata.iommu;
+       info = get_domain_info(dev);
        iommu = info->iommu;
 
        auxiliary_unlink_device(domain, dev);
 
        spin_lock(&iommu->lock);
-       intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid);
+       intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid, false);
        domain_detach_iommu(domain, iommu);
        spin_unlock(&iommu->lock);
 
@@ -5626,6 +5322,176 @@ static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
        aux_domain_remove_dev(to_dmar_domain(domain), dev);
 }
 
+/*
+ * 2D array for converting and sanitizing IOMMU generic TLB granularity to
+ * VT-d granularity. Invalidation is typically included in the unmap operation
+ * as a result of a DMA or VFIO unmap. However, for assigned devices the
+ * guest owns the first-level page tables; invalidations of translation
+ * caches in the guest are trapped and passed down to the host.
+ *
+ * The vIOMMU in the guest only exposes first-level page tables, so we do
+ * not support IOTLB granularity for requests without PASID (second level).
+ *
+ * For example, to find the VT-d granularity encoding for IOTLB
+ * type and page selective granularity within PASID:
+ * X: indexed by iommu cache type
+ * Y: indexed by enum iommu_inv_granularity
+ * [IOMMU_CACHE_INV_TYPE_IOTLB][IOMMU_INV_GRANU_ADDR]
+ */
+
+static const int
+inv_type_granu_table[IOMMU_CACHE_INV_TYPE_NR][IOMMU_INV_GRANU_NR] = {
+       /*
+        * PASID based IOTLB invalidation: PASID selective (per PASID),
+        * page selective (address granularity)
+        */
+       {-EINVAL, QI_GRAN_NONG_PASID, QI_GRAN_PSI_PASID},
+       /* PASID based dev TLBs */
+       {-EINVAL, -EINVAL, QI_DEV_IOTLB_GRAN_PASID_SEL},
+       /* PASID cache */
+       {-EINVAL, -EINVAL, -EINVAL}
+};
+
+static inline int to_vtd_granularity(int type, int granu)
+{
+       return inv_type_granu_table[type][granu];
+}
+
+static inline u64 to_vtd_size(u64 granu_size, u64 nr_granules)
+{
+       u64 nr_pages = (granu_size * nr_granules) >> VTD_PAGE_SHIFT;
+
+       /*
+        * VT-d size is encoded as 2^size 4K pages: 0 for 4K, 9 for 2MB, etc.
+        * The IOMMU cache invalidate API passes granu_size in bytes and the
+        * number of granules of contiguous memory.
+        */
+       return order_base_2(nr_pages);
+}
+
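/*
 * Worked example with illustrative values: granule_size = 4096 and
 * nr_granules = 512 give nr_pages = (4096 * 512) >> 12 = 512, and
 * order_base_2(512) = 9, i.e. the 2MB encoding. order_base_2() rounds
 * up, so a non-power-of-two page count widens to the next power-of-two
 * range.
 */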
+#ifdef CONFIG_INTEL_IOMMU_SVM
+static int
+intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
+                          struct iommu_cache_invalidate_info *inv_info)
+{
+       struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+       struct device_domain_info *info;
+       struct intel_iommu *iommu;
+       unsigned long flags;
+       int cache_type;
+       u8 bus, devfn;
+       u16 did, sid;
+       int ret = 0;
+       u64 size = 0;
+
+       if (!inv_info || !dmar_domain ||
+           inv_info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
+               return -EINVAL;
+
+       if (!dev || !dev_is_pci(dev))
+               return -ENODEV;
+
+       iommu = device_to_iommu(dev, &bus, &devfn);
+       if (!iommu)
+               return -ENODEV;
+
+       if (!(dmar_domain->flags & DOMAIN_FLAG_NESTING_MODE))
+               return -EINVAL;
+
+       spin_lock_irqsave(&device_domain_lock, flags);
+       spin_lock(&iommu->lock);
+       info = get_domain_info(dev);
+       if (!info) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+       did = dmar_domain->iommu_did[iommu->seq_id];
+       sid = PCI_DEVID(bus, devfn);
+
+       /* Size is only valid in address selective invalidation */
+       if (inv_info->granularity != IOMMU_INV_GRANU_PASID)
+               size = to_vtd_size(inv_info->addr_info.granule_size,
+                                  inv_info->addr_info.nb_granules);
+
+       for_each_set_bit(cache_type,
+                        (unsigned long *)&inv_info->cache,
+                        IOMMU_CACHE_INV_TYPE_NR) {
+               int granu = 0;
+               u64 pasid = 0;
+
+               granu = to_vtd_granularity(cache_type, inv_info->granularity);
+               if (granu == -EINVAL) {
+                       pr_err_ratelimited("Invalid cache type and granu combination %d/%d\n",
+                                          cache_type, inv_info->granularity);
+                       break;
+               }
+
+               /*
+                * PASID is stored in different locations based on the
+                * granularity.
+                */
+               if (inv_info->granularity == IOMMU_INV_GRANU_PASID &&
+                   (inv_info->pasid_info.flags & IOMMU_INV_PASID_FLAGS_PASID))
+                       pasid = inv_info->pasid_info.pasid;
+               else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
+                        (inv_info->addr_info.flags & IOMMU_INV_ADDR_FLAGS_PASID))
+                       pasid = inv_info->addr_info.pasid;
+
+               switch (BIT(cache_type)) {
+               case IOMMU_CACHE_INV_TYPE_IOTLB:
+                       if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
+                           size &&
+                           (inv_info->addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) {
+                               pr_err_ratelimited("Address out of range, 0x%llx, size order %llu\n",
+                                                  inv_info->addr_info.addr, size);
+                               ret = -ERANGE;
+                               goto out_unlock;
+                       }
+
+                       /*
+                        * If granu is PASID-selective, address is ignored.
+                        * We use npages = -1 to indicate that.
+                        */
+                       qi_flush_piotlb(iommu, did, pasid,
+                                       mm_to_dma_pfn(inv_info->addr_info.addr),
+                                       (granu == QI_GRAN_NONG_PASID) ? -1 : 1 << size,
+                                       inv_info->addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF);
+
+                       /*
+                        * Always flush the device IOTLB if ATS is enabled:
+                        * the vIOMMU in the guest may assume that an IOTLB
+                        * flush also covers the device IOTLB, which is more
+                        * efficient.
+                        */
+                       if (info->ats_enabled)
+                               qi_flush_dev_iotlb_pasid(iommu, sid,
+                                               info->pfsid, pasid,
+                                               info->ats_qdep,
+                                               inv_info->addr_info.addr,
+                                               size, granu);
+                       break;
+               case IOMMU_CACHE_INV_TYPE_DEV_IOTLB:
+                       if (info->ats_enabled)
+                               qi_flush_dev_iotlb_pasid(iommu, sid,
+                                               info->pfsid, pasid,
+                                               info->ats_qdep,
+                                               inv_info->addr_info.addr,
+                                               size, granu);
+                       else
+                               pr_warn_ratelimited("Passdown device IOTLB flush w/o ATS!\n");
+                       break;
+               default:
+                       dev_err_ratelimited(dev, "Unsupported IOMMU invalidation type %d\n",
+                                           cache_type);
+                       ret = -EINVAL;
+               }
+       }
+out_unlock:
+       spin_unlock(&iommu->lock);
+       spin_unlock_irqrestore(&device_domain_lock, flags);
+
+       return ret;
+}
+#endif
+
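/*
 * Caller-side sketch of the passdown path: a page-selective
 * invalidation of one 4K page under a guest PASID. The function and
 * its parameters are placeholders; the field names match the struct
 * accesses above:
 */
static int example_flush_one_page(struct iommu_domain *domain,
                                  struct device *dev, u64 iova,
                                  u32 guest_pasid)
{
        struct iommu_cache_invalidate_info inv_info = {
                .version     = IOMMU_CACHE_INVALIDATE_INFO_VERSION_1,
                .cache       = IOMMU_CACHE_INV_TYPE_IOTLB,
                .granularity = IOMMU_INV_GRANU_ADDR,
                .addr_info   = {
                        .flags        = IOMMU_INV_ADDR_FLAGS_PASID,
                        .pasid        = guest_pasid,
                        .addr         = iova,
                        .granule_size = SZ_4K,
                        .nb_granules  = 1,
                },
        };

        return intel_iommu_sva_invalidate(domain, dev, &inv_info);
}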
 static int intel_iommu_map(struct iommu_domain *domain,
                           unsigned long iova, phys_addr_t hpa,
                           size_t size, int iommu_prot, gfp_t gfp)
@@ -5793,11 +5659,6 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
        if (translation_pre_enabled(iommu))
                dev->archdata.iommu = DEFER_DEVICE_DOMAIN_INFO;
 
-       if (device_needs_bounce(dev)) {
-               dev_info(dev, "Use Intel IOMMU bounce page dma_ops\n");
-               set_dma_ops(dev, &bounce_dma_ops);
-       }
-
        return &iommu->iommu;
 }
 
@@ -5812,7 +5673,19 @@ static void intel_iommu_release_device(struct device *dev)
 
        dmar_remove_one_dev_info(dev);
 
+       set_dma_ops(dev, NULL);
+}
+
+static void intel_iommu_probe_finalize(struct device *dev)
+{
+       struct iommu_domain *domain;
+
+       domain = iommu_get_domain_for_dev(dev);
        if (device_needs_bounce(dev))
+               set_dma_ops(dev, &bounce_dma_ops);
+       else if (domain && domain->type == IOMMU_DOMAIN_DMA)
+               set_dma_ops(dev, &intel_dma_ops);
+       else
                set_dma_ops(dev, NULL);
 }
 
@@ -5890,7 +5763,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
        spin_lock(&iommu->lock);
 
        ret = -EINVAL;
-       info = dev->archdata.iommu;
+       info = get_domain_info(dev);
        if (!info || !info->pasid_supported)
                goto out;
 
@@ -5986,7 +5859,7 @@ static int intel_iommu_enable_auxd(struct device *dev)
                return -ENODEV;
 
        spin_lock_irqsave(&device_domain_lock, flags);
-       info = dev->archdata.iommu;
+       info = get_domain_info(dev);
        info->auxd_enabled = 1;
        spin_unlock_irqrestore(&device_domain_lock, flags);
 
@@ -5999,7 +5872,7 @@ static int intel_iommu_disable_auxd(struct device *dev)
        unsigned long flags;
 
        spin_lock_irqsave(&device_domain_lock, flags);
-       info = dev->archdata.iommu;
+       info = get_domain_info(dev);
        if (!WARN_ON(!info))
                info->auxd_enabled = 0;
        spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -6052,6 +5925,14 @@ intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
                return !!siov_find_pci_dvsec(to_pci_dev(dev));
        }
 
+       if (feat == IOMMU_DEV_FEAT_SVA) {
+               struct device_domain_info *info = get_domain_info(dev);
+
+               return info && (info->iommu->flags & VTD_FLAG_SVM_CAPABLE) &&
+                       info->pasid_supported && info->pri_supported &&
+                       info->ats_supported;
+       }
+
        return false;
 }
 
@@ -6061,6 +5942,16 @@ intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
        if (feat == IOMMU_DEV_FEAT_AUX)
                return intel_iommu_enable_auxd(dev);
 
+       if (feat == IOMMU_DEV_FEAT_SVA) {
+               struct device_domain_info *info = get_domain_info(dev);
+
+               if (!info)
+                       return -EINVAL;
+
+               if (info->iommu->flags & VTD_FLAG_SVM_CAPABLE)
+                       return 0;
+       }
+
        return -ENODEV;
 }
 
@@ -6076,7 +5967,7 @@ intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
 static bool
 intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat)
 {
-       struct device_domain_info *info = dev->archdata.iommu;
+       struct device_domain_info *info = get_domain_info(dev);
 
        if (feat == IOMMU_DEV_FEAT_AUX)
                return scalable_mode_support() && info && info->auxd_enabled;
@@ -6144,6 +6035,7 @@ const struct iommu_ops intel_iommu_ops = {
        .unmap                  = intel_iommu_unmap,
        .iova_to_phys           = intel_iommu_iova_to_phys,
        .probe_device           = intel_iommu_probe_device,
+       .probe_finalize         = intel_iommu_probe_finalize,
        .release_device         = intel_iommu_release_device,
        .get_resv_regions       = intel_iommu_get_resv_regions,
        .put_resv_regions       = generic_iommu_put_resv_regions,
@@ -6156,6 +6048,14 @@ const struct iommu_ops intel_iommu_ops = {
        .is_attach_deferred     = intel_iommu_is_attach_deferred,
        .def_domain_type        = device_def_domain_type,
        .pgsize_bitmap          = INTEL_IOMMU_PGSIZES,
+#ifdef CONFIG_INTEL_IOMMU_SVM
+       .cache_invalidate       = intel_iommu_sva_invalidate,
+       .sva_bind_gpasid        = intel_svm_bind_gpasid,
+       .sva_unbind_gpasid      = intel_svm_unbind_gpasid,
+       .sva_bind               = intel_svm_bind,
+       .sva_unbind             = intel_svm_unbind,
+       .sva_get_pasid          = intel_svm_get_pasid,
+#endif
 };
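/*
 * With the SVA ops wired up, device drivers reach them through the
 * generic IOMMU layer; a minimal sketch, assuming the iommu_sva_*
 * wrappers (error paths abbreviated):
 */
static int example_bind_current_mm(struct device *dev)
{
        struct iommu_sva *handle;
        int pasid;

        handle = iommu_sva_bind_device(dev, current->mm, NULL);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        pasid = iommu_sva_get_pasid(handle);
        /* ... program 'pasid' into the device, run PASID-tagged DMA ... */
        iommu_sva_unbind_device(handle);

        return 0;
}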
 
 static void quirk_iommu_igfx(struct pci_dev *dev)
index 22b30f10b3964e9d59a9a10d9abc8f35b75e59e8..c81f0f17c6baec5295e08ad1c52fc8623b0e45b7 100644 (file)
 static DEFINE_SPINLOCK(pasid_lock);
 u32 intel_pasid_max_id = PASID_MAX;
 
+int vcmd_alloc_pasid(struct intel_iommu *iommu, unsigned int *pasid)
+{
+       unsigned long flags;
+       u8 status_code;
+       int ret = 0;
+       u64 res;
+
+       raw_spin_lock_irqsave(&iommu->register_lock, flags);
+       dmar_writeq(iommu->reg + DMAR_VCMD_REG, VCMD_CMD_ALLOC);
+       IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
+                     !(res & VCMD_VRSP_IP), res);
+       raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
+
+       status_code = VCMD_VRSP_SC(res);
+       switch (status_code) {
+       case VCMD_VRSP_SC_SUCCESS:
+               *pasid = VCMD_VRSP_RESULT_PASID(res);
+               break;
+       case VCMD_VRSP_SC_NO_PASID_AVAIL:
+               pr_info("IOMMU: %s: No PASID available\n", iommu->name);
+               ret = -ENOSPC;
+               break;
+       default:
+               ret = -ENODEV;
+               pr_warn("IOMMU: %s: Unexpected error code %d\n",
+                       iommu->name, status_code);
+       }
+
+       return ret;
+}
+
+void vcmd_free_pasid(struct intel_iommu *iommu, unsigned int pasid)
+{
+       unsigned long flags;
+       u8 status_code;
+       u64 res;
+
+       raw_spin_lock_irqsave(&iommu->register_lock, flags);
+       dmar_writeq(iommu->reg + DMAR_VCMD_REG,
+                   VCMD_CMD_OPERAND(pasid) | VCMD_CMD_FREE);
+       IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
+                     !(res & VCMD_VRSP_IP), res);
+       raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
+
+       status_code = VCMD_VRSP_SC(res);
+       switch (status_code) {
+       case VCMD_VRSP_SC_SUCCESS:
+               break;
+       case VCMD_VRSP_SC_INVALID_PASID:
+               pr_info("IOMMU: %s: Invalid PASID\n", iommu->name);
+               break;
+       default:
+               pr_warn("IOMMU: %s: Unexpected error code %d\n",
+                       iommu->name, status_code);
+       }
+}
+
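/*
 * A round-trip sketch through the two helpers above; only meaningful on
 * an IOMMU that exposes the virtual command capability:
 */
static void example_vcmd_roundtrip(struct intel_iommu *iommu)
{
        unsigned int pasid;

        if (vcmd_alloc_pasid(iommu, &pasid))
                return;

        /* ... set up and use the guest PASID ... */
        vcmd_free_pasid(iommu, pasid);
}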
 /*
  * Per device pasid table management:
  */
@@ -94,7 +151,7 @@ int intel_pasid_alloc_table(struct device *dev)
        int size;
 
        might_sleep();
-       info = dev->archdata.iommu;
+       info = get_domain_info(dev);
        if (WARN_ON(!info || !dev_is_pci(dev) || info->pasid_table))
                return -EINVAL;
 
@@ -141,7 +198,7 @@ void intel_pasid_free_table(struct device *dev)
        struct pasid_entry *table;
        int i, max_pde;
 
-       info = dev->archdata.iommu;
+       info = get_domain_info(dev);
        if (!info || !dev_is_pci(dev) || !info->pasid_table)
                return;
 
@@ -167,7 +224,7 @@ struct pasid_table *intel_pasid_get_table(struct device *dev)
 {
        struct device_domain_info *info;
 
-       info = dev->archdata.iommu;
+       info = get_domain_info(dev);
        if (!info)
                return NULL;
 
@@ -178,7 +235,7 @@ int intel_pasid_get_dev_max_id(struct device *dev)
 {
        struct device_domain_info *info;
 
-       info = dev->archdata.iommu;
+       info = get_domain_info(dev);
        if (!info || !info->pasid_table)
                return 0;
 
@@ -199,7 +256,7 @@ struct pasid_entry *intel_pasid_get_entry(struct device *dev, int pasid)
                return NULL;
 
        dir = pasid_table->table;
-       info = dev->archdata.iommu;
+       info = get_domain_info(dev);
        dir_index = pasid >> PASID_PDE_SHIFT;
        index = pasid & PASID_PTE_MASK;
 
@@ -235,7 +292,20 @@ static inline void pasid_clear_entry(struct pasid_entry *pe)
        WRITE_ONCE(pe->val[7], 0);
 }
 
-static void intel_pasid_clear_entry(struct device *dev, int pasid)
+static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
+{
+       WRITE_ONCE(pe->val[0], PASID_PTE_FPD);
+       WRITE_ONCE(pe->val[1], 0);
+       WRITE_ONCE(pe->val[2], 0);
+       WRITE_ONCE(pe->val[3], 0);
+       WRITE_ONCE(pe->val[4], 0);
+       WRITE_ONCE(pe->val[5], 0);
+       WRITE_ONCE(pe->val[6], 0);
+       WRITE_ONCE(pe->val[7], 0);
+}
+
+static void
+intel_pasid_clear_entry(struct device *dev, int pasid, bool fault_ignore)
 {
        struct pasid_entry *pe;
 
@@ -243,7 +313,10 @@ static void intel_pasid_clear_entry(struct device *dev, int pasid)
        if (WARN_ON(!pe))
                return;
 
-       pasid_clear_entry(pe);
+       if (fault_ignore && pasid_pte_is_present(pe))
+               pasid_clear_entry_with_fpd(pe);
+       else
+               pasid_clear_entry(pe);
 }
 
 static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits)
@@ -359,18 +432,29 @@ pasid_set_flpm(struct pasid_entry *pe, u64 value)
        pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
 }
 
+/*
+ * Set up the Extended Access Flag Enable (EAFE) field (bit 135)
+ * of a scalable mode PASID entry.
+ */
+static inline void
+pasid_set_eafe(struct pasid_entry *pe)
+{
+       pasid_set_bits(&pe->val[2], 1 << 7, 1 << 7);
+}
+
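/*
 * Worked check of the mask above: bit 135 of the 512-bit PASID entry
 * lives in the third 64-bit word, since 135 / 64 = 2 (pe->val[2]) and
 * 135 % 64 = 7, i.e. the 1 << 7 mask passed to pasid_set_bits().
 */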
 static void
 pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
                                    u16 did, int pasid)
 {
        struct qi_desc desc;
 
-       desc.qw0 = QI_PC_DID(did) | QI_PC_PASID_SEL | QI_PC_PASID(pasid);
+       desc.qw0 = QI_PC_DID(did) | QI_PC_GRAN(QI_PC_PASID_SEL) |
+               QI_PC_PASID(pasid) | QI_PC_TYPE;
        desc.qw1 = 0;
        desc.qw2 = 0;
        desc.qw3 = 0;
 
-       qi_submit_sync(&desc, iommu);
+       qi_submit_sync(iommu, &desc, 1, 0);
 }
 
 static void
@@ -384,7 +468,7 @@ iotlb_invalidation_with_pasid(struct intel_iommu *iommu, u16 did, u32 pasid)
        desc.qw2 = 0;
        desc.qw3 = 0;
 
-       qi_submit_sync(&desc, iommu);
+       qi_submit_sync(iommu, &desc, 1, 0);
 }
 
 static void
@@ -394,7 +478,7 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
        struct device_domain_info *info;
        u16 sid, qdep, pfsid;
 
-       info = dev->archdata.iommu;
+       info = get_domain_info(dev);
        if (!info || !info->ats_enabled)
                return;
 
@@ -405,8 +489,8 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
        qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
 }
 
-void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
-                                struct device *dev, int pasid)
+void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
+                                int pasid, bool fault_ignore)
 {
        struct pasid_entry *pte;
        u16 did;
@@ -416,7 +500,7 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
                return;
 
        did = pasid_get_domain_id(pte);
-       intel_pasid_clear_entry(dev, pasid);
+       intel_pasid_clear_entry(dev, pasid, fault_ignore);
 
        if (!ecap_coherent(iommu->ecap))
                clflush_cache_range(pte, sizeof(*pte));
@@ -492,13 +576,32 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
        pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
 
        /* Setup Present and PASID Granular Transfer Type: */
-       pasid_set_translation_type(pte, 1);
+       pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
        pasid_set_present(pte);
        pasid_flush_caches(iommu, pte, pasid, did);
 
        return 0;
 }
 
+/*
+ * Skip the top levels of the page table for an IOMMU whose AGAW is
+ * smaller than the domain's default. Unnecessary for PT mode.
+ */
+static inline int iommu_skip_agaw(struct dmar_domain *domain,
+                                 struct intel_iommu *iommu,
+                                 struct dma_pte **pgd)
+{
+       int agaw;
+
+       for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
+               *pgd = phys_to_virt(dma_pte_addr(*pgd));
+               if (!dma_pte_present(*pgd))
+                       return -EINVAL;
+       }
+
+       return agaw;
+}
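For illustration, a hedged sketch of the intended calling pattern (it mirrors the second-level setup rewritten below; the wrapper name is hypothetical):

static int example_effective_agaw(struct dmar_domain *domain,
				  struct intel_iommu *iommu)
{
	struct dma_pte *pgd = domain->pgd;
	int agaw;

	/* Strip one top level for each AGAW step the IOMMU cannot walk. */
	agaw = iommu_skip_agaw(domain, iommu, &pgd);
	if (agaw < 0)
		return -EINVAL;	/* a stripped level was not present */

	/* pgd and agaw now describe a table the hardware can walk. */
	return agaw;
}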
+
 /*
  * Set up the scalable mode pasid entry for second only translation type.
  */
@@ -522,17 +625,11 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
                return -EINVAL;
        }
 
-       /*
-        * Skip top levels of page tables for iommu which has less agaw
-        * than default. Unnecessary for PT mode.
-        */
        pgd = domain->pgd;
-       for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
-               pgd = phys_to_virt(dma_pte_addr(pgd));
-               if (!dma_pte_present(pgd)) {
-                       dev_err(dev, "Invalid domain page table\n");
-                       return -EINVAL;
-               }
+       agaw = iommu_skip_agaw(domain, iommu, &pgd);
+       if (agaw < 0) {
+               dev_err(dev, "Invalid domain page table\n");
+               return -EINVAL;
        }
 
        pgd_val = virt_to_phys(pgd);
@@ -548,7 +645,7 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
        pasid_set_domain_id(pte, did);
        pasid_set_slptr(pte, pgd_val);
        pasid_set_address_width(pte, agaw);
-       pasid_set_translation_type(pte, 2);
+       pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY);
        pasid_set_fault_enable(pte);
        pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
 
@@ -582,7 +679,7 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
        pasid_clear_entry(pte);
        pasid_set_domain_id(pte, did);
        pasid_set_address_width(pte, iommu->agaw);
-       pasid_set_translation_type(pte, 4);
+       pasid_set_translation_type(pte, PASID_ENTRY_PGTT_PT);
        pasid_set_fault_enable(pte);
        pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
 
@@ -596,3 +693,161 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
 
        return 0;
 }
+
+static int
+intel_pasid_setup_bind_data(struct intel_iommu *iommu, struct pasid_entry *pte,
+                           struct iommu_gpasid_bind_data_vtd *pasid_data)
+{
+       /*
+        * Not all guest PASID table entry fields are passed down during bind;
+        * here we only set up the ones that depend on guest settings.
+        * Execution-related bits such as NXE and SMEP are not supported.
+        * Other fields, such as the snoop-related ones, are set based on host
+        * needs regardless of guest settings.
+        */
+       if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_SRE) {
+               if (!ecap_srs(iommu->ecap)) {
+                       pr_err_ratelimited("No supervisor request support on %s\n",
+                                          iommu->name);
+                       return -EINVAL;
+               }
+               pasid_set_sre(pte);
+       }
+
+       if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_EAFE) {
+               if (!ecap_eafs(iommu->ecap)) {
+                       pr_err_ratelimited("No extended access flag support on %s\n",
+                                          iommu->name);
+                       return -EINVAL;
+               }
+               pasid_set_eafe(pte);
+       }
+
+       /*
+        * Memory type is only applicable to devices inside the processor
+        * coherent domain. MTS support will be added once coherent devices
+        * are available.
+        */
+       if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_MTS_MASK) {
+               pr_warn_ratelimited("No memory type support on %s\n",
+                                   iommu->name);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/**
+ * intel_pasid_setup_nested() - Set up PASID entry for nested translation.
+ * This could be used for guest shared virtual addresses. In this case, the
+ * first level page tables are used for GVA-GPA translation in the guest,
+ * and the second level page tables are used for GPA-HPA translation.
+ *
+ * @iommu:      IOMMU which the device belongs to
+ * @dev:        Device to be set up for translation
+ * @gpgd:       FLPTPTR: First Level Page translation pointer in GPA
+ * @pasid:      PASID to be programmed in the device PASID table
+ * @pasid_data: Additional PASID info from the guest bind request
+ * @domain:     Domain info for setting up second level page tables
+ * @addr_width: Address width of the first level (guest)
+ */
+int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
+                            pgd_t *gpgd, int pasid,
+                            struct iommu_gpasid_bind_data_vtd *pasid_data,
+                            struct dmar_domain *domain, int addr_width)
+{
+       struct pasid_entry *pte;
+       struct dma_pte *pgd;
+       int ret = 0;
+       u64 pgd_val;
+       int agaw;
+       u16 did;
+
+       if (!ecap_nest(iommu->ecap)) {
+               pr_err_ratelimited("IOMMU: %s: No nested translation support\n",
+                                  iommu->name);
+               return -EINVAL;
+       }
+
+       if (!(domain->flags & DOMAIN_FLAG_NESTING_MODE)) {
+               pr_err_ratelimited("Domain is not in nesting mode, %x\n",
+                                  domain->flags);
+               return -EINVAL;
+       }
+
+       pte = intel_pasid_get_entry(dev, pasid);
+       if (WARN_ON(!pte))
+               return -EINVAL;
+
+       /*
+        * The caller must ensure the PASID entry is not in use, i.e. it
+        * must not bind the same PASID to the same device twice.
+        */
+       if (pasid_pte_is_present(pte))
+               return -EBUSY;
+
+       pasid_clear_entry(pte);
+
+       /*
+        * Sanity checking performed by the caller to make sure the address
+        * widths match in two dimensions:
+        * 1. CPU vs. IOMMU
+        * 2. Guest vs. Host.
+        */
+       switch (addr_width) {
+#ifdef CONFIG_X86
+       case ADDR_WIDTH_5LEVEL:
+               if (!cpu_feature_enabled(X86_FEATURE_LA57) ||
+                   !cap_5lp_support(iommu->cap)) {
+                       dev_err_ratelimited(dev,
+                                           "5-level paging not supported\n");
+                       return -EINVAL;
+               }
+
+               pasid_set_flpm(pte, 1);
+               break;
+#endif
+       case ADDR_WIDTH_4LEVEL:
+               pasid_set_flpm(pte, 0);
+               break;
+       default:
+               dev_err_ratelimited(dev, "Invalid guest address width %d\n",
+                                   addr_width);
+               return -EINVAL;
+       }
+
+       /* First-level PGD is a GPA; it must be covered by the second level */
+       if ((uintptr_t)gpgd > domain->max_addr) {
+               dev_err_ratelimited(dev,
+                                   "Guest PGD %lx not supported, max %llx\n",
+                                   (uintptr_t)gpgd, domain->max_addr);
+               return -EINVAL;
+       }
+       pasid_set_flptr(pte, (uintptr_t)gpgd);
+
+       ret = intel_pasid_setup_bind_data(iommu, pte, pasid_data);
+       if (ret)
+               return ret;
+
+       /* Setup the second level based on the given domain */
+       pgd = domain->pgd;
+
+       agaw = iommu_skip_agaw(domain, iommu, &pgd);
+       if (agaw < 0) {
+               dev_err_ratelimited(dev, "Invalid domain page table\n");
+               return -EINVAL;
+       }
+       pgd_val = virt_to_phys(pgd);
+       pasid_set_slptr(pte, pgd_val);
+       pasid_set_fault_enable(pte);
+
+       did = domain->iommu_did[iommu->seq_id];
+       pasid_set_domain_id(pte, did);
+
+       pasid_set_address_width(pte, agaw);
+       pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
+
+       pasid_set_translation_type(pte, PASID_ENTRY_PGTT_NESTED);
+       pasid_set_present(pte);
+       pasid_flush_caches(iommu, pte, pasid, did);
+
+       return ret;
+}
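For context, intel_svm_bind_gpasid() later in this series serializes this setup under iommu->lock; a condensed sketch of that call site (condensed from the hunk below, not a new API):

	spin_lock(&iommu->lock);
	ret = intel_pasid_setup_nested(iommu, dev,
				       (pgd_t *)(uintptr_t)data->gpgd,
				       data->hpasid, &data->vtd,
				       dmar_domain, data->addr_width);
	spin_unlock(&iommu->lock);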
index 92de6df24ccb10d136d003a88c79eb001fab2892..c5318d40e0faed60f33df4ad68eabc1822c2f9dc 100644 (file)
@@ -15,6 +15,7 @@
 #define PASID_MAX                      0x100000
 #define PASID_PTE_MASK                 0x3F
 #define PASID_PTE_PRESENT              1
+#define PASID_PTE_FPD                  2
 #define PDE_PFN_MASK                   PAGE_MASK
 #define PASID_PDE_SHIFT                        6
 #define MAX_NR_PASID_BITS              20
 #define is_pasid_enabled(entry)                (((entry)->lo >> 3) & 0x1)
 #define get_pasid_dir_size(entry)      (1 << ((((entry)->lo >> 9) & 0x7) + 7))
 
+/* Virtual command interface for enlightened pasid management. */
+#define VCMD_CMD_ALLOC                 0x1
+#define VCMD_CMD_FREE                  0x2
+#define VCMD_VRSP_IP                   0x1
+#define VCMD_VRSP_SC(e)                        (((e) >> 1) & 0x3)
+#define VCMD_VRSP_SC_SUCCESS           0
+#define VCMD_VRSP_SC_NO_PASID_AVAIL    1
+#define VCMD_VRSP_SC_INVALID_PASID     1
+#define VCMD_VRSP_RESULT_PASID(e)      (((e) >> 8) & 0xfffff)
+#define VCMD_CMD_OPERAND(e)            ((e) << 8)
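A hedged sketch of how these macros compose on the allocation path; vcmd_submit() is a hypothetical stand-in for the write to the virtual command register and the poll of the response register, which this header does not define:

static int example_vcmd_alloc(struct intel_iommu *iommu, unsigned int *pasid)
{
	u64 res;

	res = vcmd_submit(iommu, VCMD_CMD_ALLOC);	/* hypothetical helper */
	if (VCMD_VRSP_SC(res) != VCMD_VRSP_SC_SUCCESS)
		return -ENOSPC;

	*pasid = VCMD_VRSP_RESULT_PASID(res);
	return 0;
}

Freeing would encode the PASID as the command operand, e.g. vcmd_submit(iommu, VCMD_CMD_OPERAND(pasid) | VCMD_CMD_FREE).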
 /*
  * Domain ID reserved for pasid entries programmed for first-level
  * only and pass-through transfer modes.
@@ -36,6 +47,7 @@
  * to vmalloc or even module mappings.
  */
 #define PASID_FLAG_SUPERVISOR_MODE     BIT(0)
+#define PASID_FLAG_NESTED              BIT(1)
 
 /*
  * The PASID_FLAG_FL5LP flag Indicates using 5-level paging for first-
@@ -51,6 +63,11 @@ struct pasid_entry {
        u64 val[8];
 };
 
+#define PASID_ENTRY_PGTT_FL_ONLY       (1)
+#define PASID_ENTRY_PGTT_SL_ONLY       (2)
+#define PASID_ENTRY_PGTT_NESTED                (3)
+#define PASID_ENTRY_PGTT_PT            (4)
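For reference, the setup paths earlier in this diff select these PGTT values as follows (a summary, not new code):

	PASID_ENTRY_PGTT_FL_ONLY -> intel_pasid_setup_first_level()
	PASID_ENTRY_PGTT_SL_ONLY -> intel_pasid_setup_second_level()
	PASID_ENTRY_PGTT_NESTED  -> intel_pasid_setup_nested()
	PASID_ENTRY_PGTT_PT      -> intel_pasid_setup_pass_through()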
+
 /* The representative of a PASID table */
 struct pasid_table {
        void                    *table;         /* pasid table pointer */
@@ -99,7 +116,13 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
 int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
                                   struct dmar_domain *domain,
                                   struct device *dev, int pasid);
+int intel_pasid_setup_nested(struct intel_iommu *iommu,
+                            struct device *dev, pgd_t *pgd, int pasid,
+                            struct iommu_gpasid_bind_data_vtd *pasid_data,
+                            struct dmar_domain *domain, int addr_width);
 void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
-                                struct device *dev, int pasid);
-
+                                struct device *dev, int pasid,
+                                bool fault_ignore);
+int vcmd_alloc_pasid(struct intel_iommu *iommu, unsigned int *pasid);
+void vcmd_free_pasid(struct intel_iommu *iommu, unsigned int pasid);
 #endif /* __INTEL_PASID_H */
index 2998418f0a383c17944dea2603a5f3d523eb8432..a035ef911fba789dd206cbadd1cbdac96cf3d217 100644 (file)
@@ -23,6 +23,7 @@
 #include "intel-pasid.h"
 
 static irqreturn_t prq_event_thread(int irq, void *d);
+static void intel_svm_drain_prq(struct device *dev, int pasid);
 
 #define PRQ_ORDER 0
 
@@ -66,6 +67,8 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
        dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER);
 
+       init_completion(&iommu->prq_complete);
+
        return 0;
 }
 
@@ -138,7 +141,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
        }
        desc.qw2 = 0;
        desc.qw3 = 0;
-       qi_submit_sync(&desc, svm->iommu);
+       qi_submit_sync(svm->iommu, &desc, 1, 0);
 
        if (sdev->dev_iotlb) {
                desc.qw0 = QI_DEV_EIOTLB_PASID(svm->pasid) |
@@ -162,7 +165,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
                }
                desc.qw2 = 0;
                desc.qw3 = 0;
-               qi_submit_sync(&desc, svm->iommu);
+               qi_submit_sync(svm->iommu, &desc, 1, 0);
        }
 }
 
@@ -206,10 +209,9 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
         * *has* to handle gracefully without affecting other processes.
         */
        rcu_read_lock();
-       list_for_each_entry_rcu(sdev, &svm->devs, list) {
-               intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid);
-               intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
-       }
+       list_for_each_entry_rcu(sdev, &svm->devs, list)
+               intel_pasid_tear_down_entry(svm->iommu, sdev->dev,
+                                           svm->pasid, true);
        rcu_read_unlock();
 
 }
@@ -226,13 +228,212 @@ static LIST_HEAD(global_svm_list);
        list_for_each_entry((sdev), &(svm)->devs, list) \
                if ((d) != (sdev)->dev) {} else
 
-int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
+int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
+                         struct iommu_gpasid_bind_data *data)
+{
+       struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
+       struct dmar_domain *dmar_domain;
+       struct intel_svm_dev *sdev;
+       struct intel_svm *svm;
+       int ret = 0;
+
+       if (WARN_ON(!iommu) || !data)
+               return -EINVAL;
+
+       if (data->version != IOMMU_GPASID_BIND_VERSION_1 ||
+           data->format != IOMMU_PASID_FORMAT_INTEL_VTD)
+               return -EINVAL;
+
+       if (!dev_is_pci(dev))
+               return -ENOTSUPP;
+
+       /* VT-d supports devices with full 20-bit PASIDs only */
+       if (pci_max_pasids(to_pci_dev(dev)) != PASID_MAX)
+               return -EINVAL;
+
+       /*
+        * We only check the host PASID range; we have no way to check
+        * the guest PASID range.
+        */
+       if (data->hpasid <= 0 || data->hpasid >= PASID_MAX)
+               return -EINVAL;
+
+       dmar_domain = to_dmar_domain(domain);
+
+       mutex_lock(&pasid_mutex);
+       svm = ioasid_find(NULL, data->hpasid, NULL);
+       if (IS_ERR(svm)) {
+               ret = PTR_ERR(svm);
+               goto out;
+       }
+
+       if (svm) {
+               /*
+                * If we found an svm for the PASID, there must be at
+                * least one device bound to it; otherwise the svm should
+                * have been freed.
+                */
+               if (WARN_ON(list_empty(&svm->devs))) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               for_each_svm_dev(sdev, svm, dev) {
+                       /*
+                        * For devices with aux domains, we should allow
+                        * multiple bind calls with the same PASID and pdev.
+                        */
+                       if (iommu_dev_feature_enabled(dev,
+                                                     IOMMU_DEV_FEAT_AUX)) {
+                               sdev->users++;
+                       } else {
+                               dev_warn_ratelimited(dev,
+                                                    "Already bound with PASID %u\n",
+                                                    svm->pasid);
+                               ret = -EBUSY;
+                       }
+                       goto out;
+               }
+       } else {
+               /* We get here when the PASID has never been bound to a device. */
+               svm = kzalloc(sizeof(*svm), GFP_KERNEL);
+               if (!svm) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               /*
+                * REVISIT: the upper layer/VFIO can track the host process
+                * that binds the PASID. ioasid_set = mm might be sufficient
+                * for vfio to check PASID VMM ownership. We can drop the
+                * following line once the VFIO and IOASID set check is in
+                * place.
+                */
+               svm->mm = get_task_mm(current);
+               svm->pasid = data->hpasid;
+               if (data->flags & IOMMU_SVA_GPASID_VAL) {
+                       svm->gpasid = data->gpasid;
+                       svm->flags |= SVM_FLAG_GUEST_PASID;
+               }
+               ioasid_set_data(data->hpasid, svm);
+               INIT_LIST_HEAD_RCU(&svm->devs);
+               mmput(svm->mm);
+       }
+       sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
+       if (!sdev) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       sdev->dev = dev;
+
+       /* Only count users if device has aux domains */
+       if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
+               sdev->users = 1;
+
+       /* Set up device context entry for PASID if not enabled already */
+       ret = intel_iommu_enable_pasid(iommu, sdev->dev);
+       if (ret) {
+               dev_err_ratelimited(dev, "Failed to enable PASID capability\n");
+               kfree(sdev);
+               goto out;
+       }
+
+       /*
+        * The PASID table is per-device for better security. Therefore, for
+        * each bind of a new device, even with an existing PASID, we need to
+        * call the nested-mode setup function here.
+        */
+       spin_lock(&iommu->lock);
+       ret = intel_pasid_setup_nested(iommu, dev,
+                                      (pgd_t *)(uintptr_t)data->gpgd,
+                                      data->hpasid, &data->vtd, dmar_domain,
+                                      data->addr_width);
+       spin_unlock(&iommu->lock);
+       if (ret) {
+               dev_err_ratelimited(dev, "Failed to set up PASID %llu in nested mode, Err %d\n",
+                                   data->hpasid, ret);
+               /*
+                * The PASID entry should be in a cleared state if nested-mode
+                * setup failed, so we only need to clear the IOASID tracking
+                * data so that the free call will succeed.
+                */
+               kfree(sdev);
+               goto out;
+       }
+
+       svm->flags |= SVM_FLAG_GUEST_MODE;
+
+       init_rcu_head(&sdev->rcu);
+       list_add_rcu(&sdev->list, &svm->devs);
+ out:
+       if (!IS_ERR_OR_NULL(svm) && list_empty(&svm->devs)) {
+               ioasid_set_data(data->hpasid, NULL);
+               kfree(svm);
+       }
+
+       mutex_unlock(&pasid_mutex);
+       return ret;
+}
+
+int intel_svm_unbind_gpasid(struct device *dev, int pasid)
+{
+       struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
+       struct intel_svm_dev *sdev;
+       struct intel_svm *svm;
+       int ret = -EINVAL;
+
+       if (WARN_ON(!iommu))
+               return -EINVAL;
+
+       mutex_lock(&pasid_mutex);
+       svm = ioasid_find(NULL, pasid, NULL);
+       if (!svm) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (IS_ERR(svm)) {
+               ret = PTR_ERR(svm);
+               goto out;
+       }
+
+       for_each_svm_dev(sdev, svm, dev) {
+               ret = 0;
+               if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
+                       sdev->users--;
+               if (!sdev->users) {
+                       list_del_rcu(&sdev->list);
+                       intel_pasid_tear_down_entry(iommu, dev,
+                                                   svm->pasid, false);
+                       intel_svm_drain_prq(dev, svm->pasid);
+                       kfree_rcu(sdev, rcu);
+
+                       if (list_empty(&svm->devs)) {
+                               /*
+                                * We do not free the IOASID here because the
+                                * IOMMU driver did not allocate it. Unlike
+                                * native SVM, an IOASID for guest use is
+                                * allocated prior to the bind call. In any
+                                * case, if the free call comes before the
+                                * unbind, the IOMMU driver will be notified
+                                * and will perform the cleanup.
+                                */
+                               ioasid_set_data(pasid, NULL);
+                               kfree(svm);
+                       }
+               }
+               break;
+       }
+out:
+       mutex_unlock(&pasid_mutex);
+       return ret;
+}
+
+/* Caller must hold pasid_mutex, mm reference */
+static int
+intel_svm_bind_mm(struct device *dev, int flags, struct svm_dev_ops *ops,
+                 struct mm_struct *mm, struct intel_svm_dev **sd)
 {
        struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
        struct device_domain_info *info;
        struct intel_svm_dev *sdev;
        struct intel_svm *svm = NULL;
-       struct mm_struct *mm = NULL;
        int pasid_max;
        int ret;
 
@@ -249,16 +450,15 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
        } else
                pasid_max = 1 << 20;
 
+       /* Binding a supervisor PASID should have mm == NULL */
        if (flags & SVM_FLAG_SUPERVISOR_MODE) {
-               if (!ecap_srs(iommu->ecap))
+               if (!ecap_srs(iommu->ecap) || mm) {
+                       pr_err("Supervisor PASID with user-provided mm.\n");
                        return -EINVAL;
-       } else if (pasid) {
-               mm = get_task_mm(current);
-               BUG_ON(!mm);
+               }
        }
 
-       mutex_lock(&pasid_mutex);
-       if (pasid && !(flags & SVM_FLAG_PRIVATE_PASID)) {
+       if (!(flags & SVM_FLAG_PRIVATE_PASID)) {
                struct intel_svm *t;
 
                list_for_each_entry(t, &global_svm_list, list) {
@@ -296,19 +496,12 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
        sdev->dev = dev;
 
        ret = intel_iommu_enable_pasid(iommu, dev);
-       if (ret || !pasid) {
-               /* If they don't actually want to assign a PASID, this is
-                * just an enabling check/preparation. */
-               kfree(sdev);
-               goto out;
-       }
-
-       info = dev->archdata.iommu;
-       if (!info || !info->pasid_supported) {
+       if (ret) {
                kfree(sdev);
                goto out;
        }
 
+       info = get_domain_info(dev);
        sdev->did = FLPT_DEFAULT_DID;
        sdev->sid = PCI_DEVID(info->bus, info->devfn);
        if (info->ats_enabled) {
@@ -397,26 +590,24 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
                }
        }
        list_add_rcu(&sdev->list, &svm->devs);
-
- success:
-       *pasid = svm->pasid;
+success:
+       sdev->pasid = svm->pasid;
+       sdev->sva.dev = dev;
+       if (sd)
+               *sd = sdev;
        ret = 0;
  out:
-       mutex_unlock(&pasid_mutex);
-       if (mm)
-               mmput(mm);
        return ret;
 }
-EXPORT_SYMBOL_GPL(intel_svm_bind_mm);
 
-int intel_svm_unbind_mm(struct device *dev, int pasid)
+/* Caller must hold pasid_mutex */
+static int intel_svm_unbind_mm(struct device *dev, int pasid)
 {
        struct intel_svm_dev *sdev;
        struct intel_iommu *iommu;
        struct intel_svm *svm;
        int ret = -EINVAL;
 
-       mutex_lock(&pasid_mutex);
        iommu = intel_svm_device_to_iommu(dev);
        if (!iommu)
                goto out;
@@ -442,8 +633,9 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
                         * to use. We have a *shared* PASID table, because it's
                         * large and has to be physically contiguous. So it's
                         * hard to be as defensive as we might like. */
-                       intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
-                       intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
+                       intel_pasid_tear_down_entry(iommu, dev,
+                                                   svm->pasid, false);
+                       intel_svm_drain_prq(dev, svm->pasid);
                        kfree_rcu(sdev, rcu);
 
                        if (list_empty(&svm->devs)) {
@@ -462,45 +654,9 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
                break;
        }
  out:
-       mutex_unlock(&pasid_mutex);
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(intel_svm_unbind_mm);
-
-int intel_svm_is_pasid_valid(struct device *dev, int pasid)
-{
-       struct intel_iommu *iommu;
-       struct intel_svm *svm;
-       int ret = -EINVAL;
-
-       mutex_lock(&pasid_mutex);
-       iommu = intel_svm_device_to_iommu(dev);
-       if (!iommu)
-               goto out;
-
-       svm = ioasid_find(NULL, pasid, NULL);
-       if (!svm)
-               goto out;
-
-       if (IS_ERR(svm)) {
-               ret = PTR_ERR(svm);
-               goto out;
-       }
-       /* init_mm is used in this case */
-       if (!svm->mm)
-               ret = 1;
-       else if (atomic_read(&svm->mm->mm_users) > 0)
-               ret = 1;
-       else
-               ret = 0;
-
- out:
-       mutex_unlock(&pasid_mutex);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(intel_svm_is_pasid_valid);
 
 /* Page request queue descriptor */
 struct page_req_dsc {
@@ -557,6 +713,93 @@ static bool is_canonical_address(u64 addr)
        return (((saddr << shift) >> shift) == saddr);
 }
 
+/**
+ * intel_svm_drain_prq - Drain page requests and responses for a pasid
+ * @dev: target device
+ * @pasid: pasid for draining
+ *
+ * Drain all pending page requests and responses related to @pasid in both
+ * software and hardware. This is supposed to be called after the device
+ * driver has stopped DMA, the pasid entry has been cleared, and both IOTLB
+ * and DevTLB have been invalidated.
+ *
+ * It waits until all pending page requests for @pasid in the page fault
+ * queue are completed by the prq handling thread. Then it follows the
+ * steps described in VT-d spec CH7.10 to drain all page requests and
+ * page responses pending in the hardware.
+ */
+static void intel_svm_drain_prq(struct device *dev, int pasid)
+{
+       struct device_domain_info *info;
+       struct dmar_domain *domain;
+       struct intel_iommu *iommu;
+       struct qi_desc desc[3];
+       struct pci_dev *pdev;
+       int head, tail;
+       u16 sid, did;
+       int qdep;
+
+       info = get_domain_info(dev);
+       if (WARN_ON(!info || !dev_is_pci(dev)))
+               return;
+
+       if (!info->pri_enabled)
+               return;
+
+       iommu = info->iommu;
+       domain = info->domain;
+       pdev = to_pci_dev(dev);
+       sid = PCI_DEVID(info->bus, info->devfn);
+       did = domain->iommu_did[iommu->seq_id];
+       qdep = pci_ats_queue_depth(pdev);
+
+       /*
+        * Check and wait until all pending page requests in the queue are
+        * handled by the prq handling thread.
+        */
+prq_retry:
+       reinit_completion(&iommu->prq_complete);
+       tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
+       head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
+       while (head != tail) {
+               struct page_req_dsc *req;
+
+               req = &iommu->prq[head / sizeof(*req)];
+               if (!req->pasid_present || req->pasid != pasid) {
+                       head = (head + sizeof(*req)) & PRQ_RING_MASK;
+                       continue;
+               }
+
+               wait_for_completion(&iommu->prq_complete);
+               goto prq_retry;
+       }
+
+       /*
+        * Perform steps described in VT-d spec CH7.10 to drain page
+        * requests and responses in hardware.
+        */
+       memset(desc, 0, sizeof(desc));
+       desc[0].qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
+                       QI_IWD_FENCE |
+                       QI_IWD_TYPE;
+       desc[1].qw0 = QI_EIOTLB_PASID(pasid) |
+                       QI_EIOTLB_DID(did) |
+                       QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
+                       QI_EIOTLB_TYPE;
+       desc[2].qw0 = QI_DEV_EIOTLB_PASID(pasid) |
+                       QI_DEV_EIOTLB_SID(sid) |
+                       QI_DEV_EIOTLB_QDEP(qdep) |
+                       QI_DEIOTLB_TYPE |
+                       QI_DEV_IOTLB_PFSID(info->pfsid);
+qi_retry:
+       reinit_completion(&iommu->prq_complete);
+       qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
+       if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
+               wait_for_completion(&iommu->prq_complete);
+               goto qi_retry;
+       }
+}
+
 static irqreturn_t prq_event_thread(int irq, void *d)
 {
        struct intel_iommu *iommu = d;
@@ -685,12 +928,75 @@ static irqreturn_t prq_event_thread(int irq, void *d)
                                       sizeof(req->priv_data));
                        resp.qw2 = 0;
                        resp.qw3 = 0;
-                       qi_submit_sync(&resp, iommu);
+                       qi_submit_sync(iommu, &resp, 1, 0);
                }
                head = (head + sizeof(*req)) & PRQ_RING_MASK;
        }
 
        dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);
 
+       /*
+        * Clear the page request overflow bit and wake up all threads that
+        * are waiting for the completion of this handling.
+        */
+       if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO)
+               writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
+
+       if (!completion_done(&iommu->prq_complete))
+               complete(&iommu->prq_complete);
+
        return IRQ_RETVAL(handled);
 }
+
+#define to_intel_svm_dev(handle) container_of(handle, struct intel_svm_dev, sva)
+struct iommu_sva *
+intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata)
+{
+       struct iommu_sva *sva = ERR_PTR(-EINVAL);
+       struct intel_svm_dev *sdev = NULL;
+       int flags = 0;
+       int ret;
+
+       /*
+        * TODO: Consolidate with generic iommu-sva bind after it is merged.
+        * It will require shared SVM data structures, i.e. combine io_mm
+        * and intel_svm etc.
+        */
+       if (drvdata)
+               flags = *(int *)drvdata;
+       mutex_lock(&pasid_mutex);
+       ret = intel_svm_bind_mm(dev, flags, NULL, mm, &sdev);
+       if (ret)
+               sva = ERR_PTR(ret);
+       else if (sdev)
+               sva = &sdev->sva;
+       else
+               WARN(!sdev, "SVM bind succeeded with no sdev!\n");
+
+       mutex_unlock(&pasid_mutex);
+
+       return sva;
+}
+
+void intel_svm_unbind(struct iommu_sva *sva)
+{
+       struct intel_svm_dev *sdev;
+
+       mutex_lock(&pasid_mutex);
+       sdev = to_intel_svm_dev(sva);
+       intel_svm_unbind_mm(sdev->dev, sdev->pasid);
+       mutex_unlock(&pasid_mutex);
+}
+
+int intel_svm_get_pasid(struct iommu_sva *sva)
+{
+       struct intel_svm_dev *sdev;
+       int pasid;
+
+       mutex_lock(&pasid_mutex);
+       sdev = to_intel_svm_dev(sva);
+       pasid = sdev->pasid;
+       mutex_unlock(&pasid_mutex);
+
+       return pasid;
+}
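These three entry points back the generic SVA ops; a hedged sketch of how a device driver would reach them through the iommu core (error handling trimmed):

static int example_use_sva(struct device *dev, struct mm_struct *mm)
{
	struct iommu_sva *handle;
	int pasid;

	handle = iommu_sva_bind_device(dev, mm, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	/* ... program pasid into the device and issue DMA ... */

	iommu_sva_unbind_device(handle);
	return 0;
}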
index 81e43c1df7ecb5fdd769e2dc1bc4c265f37fec1f..a042f123b091c695475c3fa555a67449c16ae6fe 100644 (file)
@@ -151,7 +151,7 @@ static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
        desc.qw2 = 0;
        desc.qw3 = 0;
 
-       return qi_submit_sync(&desc, iommu);
+       return qi_submit_sync(iommu, &desc, 1, 0);
 }
 
 static int modify_irte(struct irq_2_iommu *irq_iommu,
index a9e5618cde8027e342bbae88e24b766c78b4d377..b5ea203f6c683651ec42778e77e9f0fc2fbca14e 100644 (file)
@@ -80,7 +80,8 @@ static bool iommu_cmd_line_dma_api(void)
        return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API);
 }
 
-static int iommu_alloc_default_domain(struct device *dev);
+static int iommu_alloc_default_domain(struct iommu_group *group,
+                                     struct device *dev);
 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
                                                 unsigned type);
 static int __iommu_attach_device(struct iommu_domain *domain,
@@ -184,6 +185,7 @@ static struct dev_iommu *dev_iommu_get(struct device *dev)
 
 static void dev_iommu_free(struct device *dev)
 {
+       iommu_fwspec_free(dev);
        kfree(dev->iommu);
        dev->iommu = NULL;
 }
@@ -195,6 +197,9 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
        struct iommu_group *group;
        int ret;
 
+       if (!ops)
+               return -ENODEV;
+
        if (!dev_iommu_get(dev))
                return -ENOMEM;
 
@@ -247,17 +252,17 @@ int iommu_probe_device(struct device *dev)
        if (ret)
                goto err_out;
 
+       group = iommu_group_get(dev);
+       if (!group)
+               goto err_release;
+
        /*
         * Try to allocate a default domain - needs support from the
         * IOMMU driver. There are still some drivers which don't
         * support default domains, so the return value is not yet
         * checked.
         */
-       iommu_alloc_default_domain(dev);
-
-       group = iommu_group_get(dev);
-       if (!group)
-               goto err_release;
+       iommu_alloc_default_domain(group, dev);
 
        if (group->default_domain)
                ret = __iommu_attach_device(group->default_domain, dev);
@@ -582,7 +587,7 @@ struct iommu_group *iommu_group_alloc(void)
                                   NULL, "%d", group->id);
        if (ret) {
                ida_simple_remove(&iommu_group_ida, group->id);
-               kfree(group);
+               kobject_put(&group->kobj);
                return ERR_PTR(ret);
        }
 
@@ -765,6 +770,15 @@ out:
        return ret;
 }
 
+static bool iommu_is_attach_deferred(struct iommu_domain *domain,
+                                    struct device *dev)
+{
+       if (domain->ops->is_attach_deferred)
+               return domain->ops->is_attach_deferred(domain, dev);
+
+       return false;
+}
+
 /**
  * iommu_group_add_device - add a device to an iommu group
  * @group: the group into which to add the device (reference should be held)
@@ -817,7 +831,7 @@ rename:
 
        mutex_lock(&group->mutex);
        list_add_tail(&device->list, &group->devices);
-       if (group->domain)
+       if (group->domain  && !iommu_is_attach_deferred(group->domain, dev))
                ret = __iommu_attach_device(group->domain, dev);
        mutex_unlock(&group->mutex);
        if (ret)
@@ -1474,15 +1488,11 @@ static int iommu_group_alloc_default_domain(struct bus_type *bus,
        return 0;
 }
 
-static int iommu_alloc_default_domain(struct device *dev)
+static int iommu_alloc_default_domain(struct iommu_group *group,
+                                     struct device *dev)
 {
-       struct iommu_group *group;
        unsigned int type;
 
-       group = iommu_group_get(dev);
-       if (!group)
-               return -ENODEV;
-
        if (group->default_domain)
                return 0;
 
@@ -1670,17 +1680,8 @@ static void probe_alloc_default_domain(struct bus_type *bus,
 static int iommu_group_do_dma_attach(struct device *dev, void *data)
 {
        struct iommu_domain *domain = data;
-       const struct iommu_ops *ops;
-       int ret;
-
-       ret = __iommu_attach_device(domain, dev);
-
-       ops = domain->ops;
 
-       if (ret == 0 && ops->probe_finalize)
-               ops->probe_finalize(dev);
-
-       return ret;
+       return __iommu_attach_device(domain, dev);
 }
 
 static int __iommu_group_dma_attach(struct iommu_group *group)
@@ -1689,6 +1690,22 @@ static int __iommu_group_dma_attach(struct iommu_group *group)
                                          iommu_group_do_dma_attach);
 }
 
+static int iommu_group_do_probe_finalize(struct device *dev, void *data)
+{
+       struct iommu_domain *domain = data;
+
+       if (domain->ops->probe_finalize)
+               domain->ops->probe_finalize(dev);
+
+       return 0;
+}
+
+static void __iommu_group_dma_finalize(struct iommu_group *group)
+{
+       __iommu_group_for_each_dev(group, group->default_domain,
+                                  iommu_group_do_probe_finalize);
+}
+
 static int iommu_do_create_direct_mappings(struct device *dev, void *data)
 {
        struct iommu_group *group = data;
@@ -1741,6 +1758,8 @@ int bus_iommu_probe(struct bus_type *bus)
 
                if (ret)
                        break;
+
+               __iommu_group_dma_finalize(group);
        }
 
        return ret;
@@ -1889,9 +1908,6 @@ static int __iommu_attach_device(struct iommu_domain *domain,
                                 struct device *dev)
 {
        int ret;
-       if ((domain->ops->is_attach_deferred != NULL) &&
-           domain->ops->is_attach_deferred(domain, dev))
-               return 0;
 
        if (unlikely(domain->ops->attach_dev == NULL))
                return -ENODEV;
@@ -1963,8 +1979,7 @@ EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);
 static void __iommu_detach_device(struct iommu_domain *domain,
                                  struct device *dev)
 {
-       if ((domain->ops->is_attach_deferred != NULL) &&
-           domain->ops->is_attach_deferred(domain, dev))
+       if (iommu_is_attach_deferred(domain, dev))
                return;
 
        if (unlikely(domain->ops->detach_dev == NULL))
@@ -2532,71 +2547,6 @@ struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
 }
 EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
 
-static int
-request_default_domain_for_dev(struct device *dev, unsigned long type)
-{
-       struct iommu_domain *domain;
-       struct iommu_group *group;
-       int ret;
-
-       /* Device must already be in a group before calling this function */
-       group = iommu_group_get(dev);
-       if (!group)
-               return -EINVAL;
-
-       mutex_lock(&group->mutex);
-
-       ret = 0;
-       if (group->default_domain && group->default_domain->type == type)
-               goto out;
-
-       /* Don't change mappings of existing devices */
-       ret = -EBUSY;
-       if (iommu_group_device_count(group) != 1)
-               goto out;
-
-       ret = -ENOMEM;
-       domain = __iommu_domain_alloc(dev->bus, type);
-       if (!domain)
-               goto out;
-
-       /* Attach the device to the domain */
-       ret = __iommu_attach_group(domain, group);
-       if (ret) {
-               iommu_domain_free(domain);
-               goto out;
-       }
-
-       /* Make the domain the default for this group */
-       if (group->default_domain)
-               iommu_domain_free(group->default_domain);
-       group->default_domain = domain;
-
-       iommu_create_device_direct_mappings(group, dev);
-
-       dev_info(dev, "Using iommu %s mapping\n",
-                type == IOMMU_DOMAIN_DMA ? "dma" : "direct");
-
-       ret = 0;
-out:
-       mutex_unlock(&group->mutex);
-       iommu_group_put(group);
-
-       return ret;
-}
-
-/* Request that a device is direct mapped by the IOMMU */
-int iommu_request_dm_for_dev(struct device *dev)
-{
-       return request_default_domain_for_dev(dev, IOMMU_DOMAIN_IDENTITY);
-}
-
-/* Request that a device can't be direct mapped by the IOMMU */
-int iommu_request_dma_domain_for_dev(struct device *dev)
-{
-       return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA);
-}
-
 void iommu_set_default_passthrough(bool cmd_line)
 {
        if (cmd_line)
@@ -2874,17 +2824,6 @@ void iommu_sva_unbind_device(struct iommu_sva *handle)
 }
 EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
 
-int iommu_sva_set_ops(struct iommu_sva *handle,
-                     const struct iommu_sva_ops *sva_ops)
-{
-       if (handle->ops && handle->ops != sva_ops)
-               return -EEXIST;
-
-       handle->ops = sva_ops;
-       return 0;
-}
-EXPORT_SYMBOL_GPL(iommu_sva_set_ops);
-
 int iommu_sva_get_pasid(struct iommu_sva *handle)
 {
        const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
index 0e6a9536eca624ca0b0a77a6c796efb196821771..49fc01f2a28d4ea7c5c3fdec5fd891269e00f07e 100644 (file)
@@ -253,7 +253,7 @@ int iova_cache_get(void)
                        SLAB_HWCACHE_ALIGN, NULL);
                if (!iova_cache) {
                        mutex_unlock(&iova_cache_mutex);
-                       printk(KERN_ERR "Couldn't create iova cache\n");
+                       pr_err("Couldn't create iova cache\n");
                        return -ENOMEM;
                }
        }
@@ -718,8 +718,8 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
 
                new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
                if (!new_iova)
-                       printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
-                               iova->pfn_lo, iova->pfn_lo);
+                       pr_err("Reserve iova range %lx@%lx failed\n",
+                              iova->pfn_lo, iova->pfn_lo);
        }
        spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
 }
index fb7e702dee2375c09605d48337c3f1170c158c72..4c2972f3153b52bfb48e7d76b64274f394aaccec 100644 (file)
@@ -903,11 +903,8 @@ static const struct iommu_ops ipmmu_ops = {
        .probe_device = ipmmu_probe_device,
        .release_device = ipmmu_release_device,
        .probe_finalize = ipmmu_probe_finalize,
-#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
-       .device_group = generic_device_group,
-#else
-       .device_group = ipmmu_find_group,
-#endif
+       .device_group = IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)
+                       ? generic_device_group : ipmmu_find_group,
        .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
        .of_xlate = ipmmu_of_xlate,
 };
index 10cd4db0710a8a45ddd52cb2bb96bc99261bb87e..3d8a63555c2505e4dcd83500cfedcb3a93fd2437 100644 (file)
@@ -34,7 +34,7 @@ __asm__ __volatile__ (                                                        \
 /* bitmap of the page sizes currently supported */
 #define MSM_IOMMU_PGSIZES      (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
 
-DEFINE_SPINLOCK(msm_iommu_lock);
+static DEFINE_SPINLOCK(msm_iommu_lock);
 static LIST_HEAD(qcom_iommu_devices);
 static struct iommu_ops msm_iommu_ops;
 
index 7bdd74c7cb9fde7ae31596a7f6c67262723c2e63..c9d79cff4d178ee0062bb4a6c1fa9515dbe71c0a 100644 (file)
@@ -265,10 +265,13 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
 {
        struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+       struct dma_iommu_mapping *mtk_mapping;
        int ret;
 
-       if (!data)
-               return -ENODEV;
+       /* Only allow the domain created internally. */
+       mtk_mapping = data->dev->archdata.iommu;
+       if (mtk_mapping->domain != domain)
+               return 0;
 
        if (!data->m4u_dom) {
                data->m4u_dom = dom;
@@ -288,9 +291,6 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain,
 {
        struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
 
-       if (!data)
-               return;
-
        mtk_iommu_config(data, dev, false);
 }
 
@@ -416,6 +416,11 @@ static int mtk_iommu_create_mapping(struct device *dev,
        return 0;
 }
 
+static int mtk_iommu_def_domain_type(struct device *dev)
+{
+       return IOMMU_DOMAIN_UNMANAGED;
+}
+
 static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
 {
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
@@ -464,12 +469,10 @@ static void mtk_iommu_probe_finalize(struct device *dev)
 static void mtk_iommu_release_device(struct device *dev)
 {
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-       struct mtk_iommu_data *data;
 
        if (!fwspec || fwspec->ops != &mtk_iommu_ops)
                return;
 
-       data = dev_iommu_priv_get(dev);
        iommu_fwspec_free(dev);
 }
 
@@ -525,6 +528,7 @@ static const struct iommu_ops mtk_iommu_ops = {
        .probe_device   = mtk_iommu_probe_device,
        .probe_finalize = mtk_iommu_probe_finalize,
        .release_device = mtk_iommu_release_device,
+       .def_domain_type = mtk_iommu_def_domain_type,
        .device_group   = generic_device_group,
        .pgsize_bitmap  = ~0UL << MT2701_IOMMU_PAGE_SHIFT,
 };
index 6699fe6d9e06efca0a8291af5a1db7ed3c21a325..c8282cc212cbd69c81f3b7d26effa569700087c1 100644 (file)
@@ -1236,6 +1236,7 @@ static int omap_iommu_probe(struct platform_device *pdev)
                        goto out_group;
 
                iommu_device_set_ops(&obj->iommu, &omap_iommu_ops);
+               iommu_device_set_fwnode(&obj->iommu, &of->fwnode);
 
                err = iommu_device_register(&obj->iommu);
                if (err)
@@ -1726,6 +1727,9 @@ static struct iommu_group *omap_iommu_device_group(struct device *dev)
        struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
        struct iommu_group *group = ERR_PTR(-EINVAL);
 
+       if (!arch_data)
+               return ERR_PTR(-ENODEV);
+
        if (arch_data->iommu_dev)
                group = iommu_group_ref_get(arch_data->iommu_dev->group);
 
index 054e476ebd49c77cc797269b34cb0ed8031e4298..c3e1fbd1988cf2bd51d0947313335cba44e3047b 100644 (file)
@@ -814,8 +814,11 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
        qcom_iommu->dev = dev;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (res)
+       if (res) {
                qcom_iommu->local_base = devm_ioremap_resource(dev, res);
+               if (IS_ERR(qcom_iommu->local_base))
+                       return PTR_ERR(qcom_iommu->local_base);
+       }
 
        qcom_iommu->iface_clk = devm_clk_get(dev, "iface");
        if (IS_ERR(qcom_iommu->iface_clk)) {
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
new file mode 100644 (file)
index 0000000..fce605e
--- /dev/null
@@ -0,0 +1,1023 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (C) 2016-2018, Allwinner Technology CO., LTD.
+// Copyright (C) 2019-2020, Cerno
+
+#include <linux/bitfield.h>
+#include <linux/bug.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-iommu.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#define IOMMU_RESET_REG                        0x010
+#define IOMMU_ENABLE_REG               0x020
+#define IOMMU_ENABLE_ENABLE                    BIT(0)
+
+#define IOMMU_BYPASS_REG               0x030
+#define IOMMU_AUTO_GATING_REG          0x040
+#define IOMMU_AUTO_GATING_ENABLE               BIT(0)
+
+#define IOMMU_WBUF_CTRL_REG            0x044
+#define IOMMU_OOO_CTRL_REG             0x048
+#define IOMMU_4KB_BDY_PRT_CTRL_REG     0x04c
+#define IOMMU_TTB_REG                  0x050
+#define IOMMU_TLB_ENABLE_REG           0x060
+#define IOMMU_TLB_PREFETCH_REG         0x070
+#define IOMMU_TLB_PREFETCH_MASTER_ENABLE(m)    BIT(m)
+
+#define IOMMU_TLB_FLUSH_REG            0x080
+#define IOMMU_TLB_FLUSH_PTW_CACHE              BIT(17)
+#define IOMMU_TLB_FLUSH_MACRO_TLB              BIT(16)
+#define IOMMU_TLB_FLUSH_MICRO_TLB(i)           (BIT(i) & GENMASK(5, 0))
+
+#define IOMMU_TLB_IVLD_ADDR_REG                0x090
+#define IOMMU_TLB_IVLD_ADDR_MASK_REG   0x094
+#define IOMMU_TLB_IVLD_ENABLE_REG      0x098
+#define IOMMU_TLB_IVLD_ENABLE_ENABLE           BIT(0)
+
+#define IOMMU_PC_IVLD_ADDR_REG         0x0a0
+#define IOMMU_PC_IVLD_ENABLE_REG       0x0a8
+#define IOMMU_PC_IVLD_ENABLE_ENABLE            BIT(0)
+
+#define IOMMU_DM_AUT_CTRL_REG(d)       (0x0b0 + ((d) / 2) * 4)
+#define IOMMU_DM_AUT_CTRL_RD_UNAVAIL(d, m)     (1 << ((((d) & 1) * 16) + ((m) * 2)))
+#define IOMMU_DM_AUT_CTRL_WR_UNAVAIL(d, m)     (1 << ((((d) & 1) * 16) + ((m) * 2) + 1))
+
+#define IOMMU_DM_AUT_OVWT_REG          0x0d0
+#define IOMMU_INT_ENABLE_REG           0x100
+#define IOMMU_INT_CLR_REG              0x104
+#define IOMMU_INT_STA_REG              0x108
+#define IOMMU_INT_ERR_ADDR_REG(i)      (0x110 + (i) * 4)
+#define IOMMU_INT_ERR_ADDR_L1_REG      0x130
+#define IOMMU_INT_ERR_ADDR_L2_REG      0x134
+#define IOMMU_INT_ERR_DATA_REG(i)      (0x150 + (i) * 4)
+#define IOMMU_L1PG_INT_REG             0x0180
+#define IOMMU_L2PG_INT_REG             0x0184
+
+#define IOMMU_INT_INVALID_L2PG                 BIT(17)
+#define IOMMU_INT_INVALID_L1PG                 BIT(16)
+#define IOMMU_INT_MASTER_PERMISSION(m)         BIT(m)
+#define IOMMU_INT_MASTER_MASK                  (IOMMU_INT_MASTER_PERMISSION(0) | \
+                                                IOMMU_INT_MASTER_PERMISSION(1) | \
+                                                IOMMU_INT_MASTER_PERMISSION(2) | \
+                                                IOMMU_INT_MASTER_PERMISSION(3) | \
+                                                IOMMU_INT_MASTER_PERMISSION(4) | \
+                                                IOMMU_INT_MASTER_PERMISSION(5))
+#define IOMMU_INT_MASK                         (IOMMU_INT_INVALID_L1PG | \
+                                                IOMMU_INT_INVALID_L2PG | \
+                                                IOMMU_INT_MASTER_MASK)
+
+#define PT_ENTRY_SIZE                  sizeof(u32)
+
+#define NUM_DT_ENTRIES                 4096
+#define DT_SIZE                                (NUM_DT_ENTRIES * PT_ENTRY_SIZE)
+
+#define NUM_PT_ENTRIES                 256
+#define PT_SIZE                                (NUM_PT_ENTRIES * PT_ENTRY_SIZE)
+
+struct sun50i_iommu {
+       struct iommu_device iommu;
+
+       /* Lock to modify the IOMMU registers */
+       spinlock_t iommu_lock;
+
+       struct device *dev;
+       void __iomem *base;
+       struct reset_control *reset;
+       struct clk *clk;
+
+       struct iommu_domain *domain;
+       struct iommu_group *group;
+       struct kmem_cache *pt_pool;
+};
+
+struct sun50i_iommu_domain {
+       struct iommu_domain domain;
+
+       /* Number of devices attached to the domain */
+       refcount_t refcnt;
+
+       /* L1 Page Table */
+       u32 *dt;
+       dma_addr_t dt_dma;
+
+       struct sun50i_iommu *iommu;
+};
+
+static struct sun50i_iommu_domain *to_sun50i_domain(struct iommu_domain *domain)
+{
+       return container_of(domain, struct sun50i_iommu_domain, domain);
+}
+
+static struct sun50i_iommu *sun50i_iommu_from_dev(struct device *dev)
+{
+       return dev_iommu_priv_get(dev);
+}
+
+static u32 iommu_read(struct sun50i_iommu *iommu, u32 offset)
+{
+       return readl(iommu->base + offset);
+}
+
+static void iommu_write(struct sun50i_iommu *iommu, u32 offset, u32 value)
+{
+       writel(value, iommu->base + offset);
+}
+
+/*
+ * The Allwinner H6 IOMMU uses a 2-level page table.
+ *
+ * The first level is the usual Directory Table (DT), which consists of
+ * 4096 4-byte Directory Table Entries (DTE), each pointing to a Page
+ * Table (PT).
+ *
+ * Each PT consists of 256 4-byte Page Table Entries (PTE), each
+ * pointing to a 4kB page of physical memory.
+ *
+ * The IOMMU supports a single DT, pointed to by the IOMMU_TTB_REG
+ * register, which contains its physical address.
+ */
+
+#define SUN50I_IOVA_DTE_MASK   GENMASK(31, 20)
+#define SUN50I_IOVA_PTE_MASK   GENMASK(19, 12)
+#define SUN50I_IOVA_PAGE_MASK  GENMASK(11, 0)
+
+static u32 sun50i_iova_get_dte_index(dma_addr_t iova)
+{
+       return FIELD_GET(SUN50I_IOVA_DTE_MASK, iova);
+}
+
+static u32 sun50i_iova_get_pte_index(dma_addr_t iova)
+{
+       return FIELD_GET(SUN50I_IOVA_PTE_MASK, iova);
+}
+
+static u32 sun50i_iova_get_page_offset(dma_addr_t iova)
+{
+       return FIELD_GET(SUN50I_IOVA_PAGE_MASK, iova);
+}
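As a worked example of the split above (illustrative iova):

	/*
	 * For iova 0x12345678:
	 *   sun50i_iova_get_dte_index()   -> 0x123 (bits 31:20)
	 *   sun50i_iova_get_pte_index()   -> 0x045 (bits 19:12)
	 *   sun50i_iova_get_page_offset() -> 0x678 (bits 11:0)
	 */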
+
+/*
+ * Each Directory Table Entry has a Page Table address and a valid
+ * bit:
+ *
+ * +---------------------+-----------+-+
+ * | PT address          | Reserved  |V|
+ * +---------------------+-----------+-+
+ *  31:10 - Page Table address
+ *   9:2  - Reserved
+ *   1:0  - 1 if the entry is valid
+ */
+
+#define SUN50I_DTE_PT_ADDRESS_MASK     GENMASK(31, 10)
+#define SUN50I_DTE_PT_ATTRS            GENMASK(1, 0)
+#define SUN50I_DTE_PT_VALID            1
+
+static phys_addr_t sun50i_dte_get_pt_address(u32 dte)
+{
+       return (phys_addr_t)dte & SUN50I_DTE_PT_ADDRESS_MASK;
+}
+
+static bool sun50i_dte_is_pt_valid(u32 dte)
+{
+       return (dte & SUN50I_DTE_PT_ATTRS) == SUN50I_DTE_PT_VALID;
+}
+
+static u32 sun50i_mk_dte(dma_addr_t pt_dma)
+{
+       return (pt_dma & SUN50I_DTE_PT_ADDRESS_MASK) | SUN50I_DTE_PT_VALID;
+}
+
+/*
+ * Each PTE has a Page address, an authority index and a valid bit:
+ *
+ * +----------------+-----+-----+-----+---+-----+
+ * | Page address   | Rsv | ACI | Rsv | V | Rsv |
+ * +----------------+-----+-----+-----+---+-----+
+ *  31:12 - Page address
+ *  11:8  - Reserved
+ *   7:4  - Authority Control Index
+ *   3:2  - Reserved
+ *     1  - 1 if the entry is valid
+ *     0  - Reserved
+ *
+ * The way permissions work is that the IOMMU has 16 "domains" that
+ * can be configured to give each master either read or write
+ * permissions through the IOMMU_DM_AUT_CTRL_REG registers. Domain 0
+ * seems to be the default domain, and its permissions in
+ * IOMMU_DM_AUT_CTRL_REG are read-only, so it's not really useful for
+ * enforcing any particular permission.
+ *
+ * Each page entry will then have a reference to the domain it is
+ * assigned to, so that we can actually enforce permissions on a
+ * per-page basis.
+ *
+ * In order to make this work with the IOMMU framework, we use 4
+ * different domains, starting at 1: RD_WR, RD, WR and NONE, depending
+ * on the permission we want to enforce. Each domain has each master
+ * set up in the same way, since the IOMMU framework doesn't seem to
+ * restrict page access on a per-device basis. We then use the relevant
+ * domain index when generating a page table entry, depending on the
+ * permissions we want enforced.
+ */
+
+enum sun50i_iommu_aci {
+       SUN50I_IOMMU_ACI_DO_NOT_USE = 0,
+       SUN50I_IOMMU_ACI_NONE,
+       SUN50I_IOMMU_ACI_RD,
+       SUN50I_IOMMU_ACI_WR,
+       SUN50I_IOMMU_ACI_RD_WR,
+};
+
+#define SUN50I_PTE_PAGE_ADDRESS_MASK   GENMASK(31, 12)
+#define SUN50I_PTE_ACI_MASK            GENMASK(7, 4)
+#define SUN50I_PTE_PAGE_VALID          BIT(1)
+
+static phys_addr_t sun50i_pte_get_page_address(u32 pte)
+{
+       return (phys_addr_t)pte & SUN50I_PTE_PAGE_ADDRESS_MASK;
+}
+
+static enum sun50i_iommu_aci sun50i_get_pte_aci(u32 pte)
+{
+       return FIELD_GET(SUN50I_PTE_ACI_MASK, pte);
+}
+
+static bool sun50i_pte_is_page_valid(u32 pte)
+{
+       return pte & SUN50I_PTE_PAGE_VALID;
+}
+
+static u32 sun50i_mk_pte(phys_addr_t page, int prot)
+{
+       enum sun50i_iommu_aci aci;
+       u32 flags = 0;
+
+       if (prot & (IOMMU_READ | IOMMU_WRITE))
+               aci = SUN50I_IOMMU_ACI_RD_WR;
+       else if (prot & IOMMU_READ)
+               aci = SUN50I_IOMMU_ACI_RD;
+       else if (prot & IOMMU_WRITE)
+               aci = SUN50I_IOMMU_ACI_WR;
+       else
+               aci = SUN50I_IOMMU_ACI_NONE;
+
+       flags |= FIELD_PREP(SUN50I_PTE_ACI_MASK, aci);
+       page &= SUN50I_PTE_PAGE_ADDRESS_MASK;
+       return page | flags | SUN50I_PTE_PAGE_VALID;
+}
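A worked example of the encoding (illustrative page address); note that the first branch treats any readable or writable mapping as RD_WR, so the RD-only and WR-only indices are never reached from here:

	/*
	 * sun50i_mk_pte(0x40001000, IOMMU_READ | IOMMU_WRITE)
	 *   aci   = SUN50I_IOMMU_ACI_RD_WR (4)
	 *   flags = FIELD_PREP(SUN50I_PTE_ACI_MASK, 4) = 0x40
	 *   pte   = 0x40001000 | 0x40 | SUN50I_PTE_PAGE_VALID = 0x40001042
	 */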
+
+static void sun50i_table_flush(struct sun50i_iommu_domain *sun50i_domain,
+                              void *vaddr, unsigned int count)
+{
+       struct sun50i_iommu *iommu = sun50i_domain->iommu;
+       dma_addr_t dma = virt_to_phys(vaddr);
+       size_t size = count * PT_ENTRY_SIZE;
+
+       dma_sync_single_for_device(iommu->dev, dma, size, DMA_TO_DEVICE);
+}
+
+static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu)
+{
+       u32 reg;
+       int ret;
+
+       assert_spin_locked(&iommu->iommu_lock);
+
+       iommu_write(iommu,
+                   IOMMU_TLB_FLUSH_REG,
+                   IOMMU_TLB_FLUSH_PTW_CACHE |
+                   IOMMU_TLB_FLUSH_MACRO_TLB |
+                   IOMMU_TLB_FLUSH_MICRO_TLB(5) |
+                   IOMMU_TLB_FLUSH_MICRO_TLB(4) |
+                   IOMMU_TLB_FLUSH_MICRO_TLB(3) |
+                   IOMMU_TLB_FLUSH_MICRO_TLB(2) |
+                   IOMMU_TLB_FLUSH_MICRO_TLB(1) |
+                   IOMMU_TLB_FLUSH_MICRO_TLB(0));
+
+       ret = readl_poll_timeout(iommu->base + IOMMU_TLB_FLUSH_REG,
+                                reg, !reg,
+                                1, 2000);
+       if (ret)
+               dev_warn(iommu->dev, "TLB Flush timed out!\n");
+
+       return ret;
+}
+
+static void sun50i_iommu_flush_iotlb_all(struct iommu_domain *domain)
+{
+       struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
+       struct sun50i_iommu *iommu = sun50i_domain->iommu;
+       unsigned long flags;
+
+       /*
+        * At boot, we'll have a first call into .flush_iotlb_all right after
+        * .probe_device, and since we link our (single) domain to our iommu in
+        * the .attach_device callback, we don't have that pointer set.
+        *
+        * Ignoring it is harmless, though, since we flush all caches as
+        * part of the device power-up.
+        */
+       if (!iommu)
+               return;
+
+       spin_lock_irqsave(&iommu->iommu_lock, flags);
+       sun50i_iommu_flush_all_tlb(iommu);
+       spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+}
+
+static void sun50i_iommu_iotlb_sync(struct iommu_domain *domain,
+                                   struct iommu_iotlb_gather *gather)
+{
+       sun50i_iommu_flush_iotlb_all(domain);
+}
+
+static int sun50i_iommu_enable(struct sun50i_iommu *iommu)
+{
+       struct sun50i_iommu_domain *sun50i_domain;
+       unsigned long flags;
+       int ret;
+
+       if (!iommu->domain)
+               return 0;
+
+       sun50i_domain = to_sun50i_domain(iommu->domain);
+
+       ret = reset_control_deassert(iommu->reset);
+       if (ret)
+               return ret;
+
+       ret = clk_prepare_enable(iommu->clk);
+       if (ret)
+               goto err_reset_assert;
+
+       spin_lock_irqsave(&iommu->iommu_lock, flags);
+
+       iommu_write(iommu, IOMMU_TTB_REG, sun50i_domain->dt_dma);
+       iommu_write(iommu, IOMMU_TLB_PREFETCH_REG,
+                   IOMMU_TLB_PREFETCH_MASTER_ENABLE(0) |
+                   IOMMU_TLB_PREFETCH_MASTER_ENABLE(1) |
+                   IOMMU_TLB_PREFETCH_MASTER_ENABLE(2) |
+                   IOMMU_TLB_PREFETCH_MASTER_ENABLE(3) |
+                   IOMMU_TLB_PREFETCH_MASTER_ENABLE(4) |
+                   IOMMU_TLB_PREFETCH_MASTER_ENABLE(5));
+       iommu_write(iommu, IOMMU_INT_ENABLE_REG, IOMMU_INT_MASK);
+       iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_NONE),
+                   IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
+                   IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
+                   IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) |
+                   IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) |
+                   IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) |
+                   IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) |
+                   IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) |
+                   IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) |
+                   IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) |
+                   IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) |
+                   IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5) |
+                   IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5));
+
+       iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_RD),
+                   IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 0) |
+                   IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 1) |
+                   IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 2) |
+                   IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 3) |
+                   IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 4) |
+                   IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 5));
+
+       iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_WR),
+                   IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 0) |
+                   IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 1) |
+                   IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 2) |
+                   IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 3) |
+                   IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 4) |
+                   IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 5));
+
+       ret = sun50i_iommu_flush_all_tlb(iommu);
+       if (ret) {
+               spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+               goto err_clk_disable;
+       }
+
+       iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
+       iommu_write(iommu, IOMMU_ENABLE_REG, IOMMU_ENABLE_ENABLE);
+
+       spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+
+       return 0;
+
+err_clk_disable:
+       clk_disable_unprepare(iommu->clk);
+
+err_reset_assert:
+       reset_control_assert(iommu->reset);
+
+       return ret;
+}
+
+static void sun50i_iommu_disable(struct sun50i_iommu *iommu)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&iommu->iommu_lock, flags);
+
+       iommu_write(iommu, IOMMU_ENABLE_REG, 0);
+       iommu_write(iommu, IOMMU_TTB_REG, 0);
+
+       spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+
+       clk_disable_unprepare(iommu->clk);
+       reset_control_assert(iommu->reset);
+}
+
+static void *sun50i_iommu_alloc_page_table(struct sun50i_iommu *iommu,
+                                          gfp_t gfp)
+{
+       dma_addr_t pt_dma;
+       u32 *page_table;
+
+       page_table = kmem_cache_zalloc(iommu->pt_pool, gfp);
+       if (!page_table)
+               return ERR_PTR(-ENOMEM);
+
+       pt_dma = dma_map_single(iommu->dev, page_table, PT_SIZE, DMA_TO_DEVICE);
+       if (dma_mapping_error(iommu->dev, pt_dma)) {
+               dev_err(iommu->dev, "Couldn't map L2 Page Table\n");
+               kmem_cache_free(iommu->pt_pool, page_table);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       /* We rely on the physical address and DMA address being the same */
+       WARN_ON(pt_dma != virt_to_phys(page_table));
+
+       return page_table;
+}
+
+static void sun50i_iommu_free_page_table(struct sun50i_iommu *iommu,
+                                        u32 *page_table)
+{
+       phys_addr_t pt_phys = virt_to_phys(page_table);
+
+       dma_unmap_single(iommu->dev, pt_phys, PT_SIZE, DMA_TO_DEVICE);
+       kmem_cache_free(iommu->pt_pool, page_table);
+}
+
+static u32 *sun50i_dte_get_page_table(struct sun50i_iommu_domain *sun50i_domain,
+                                     dma_addr_t iova, gfp_t gfp)
+{
+       struct sun50i_iommu *iommu = sun50i_domain->iommu;
+       u32 *page_table;
+       u32 *dte_addr;
+       u32 old_dte;
+       u32 dte;
+
+       dte_addr = &sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
+       dte = *dte_addr;
+       if (sun50i_dte_is_pt_valid(dte)) {
+               phys_addr_t pt_phys = sun50i_dte_get_pt_address(dte);
+               return (u32 *)phys_to_virt(pt_phys);
+       }
+
+       page_table = sun50i_iommu_alloc_page_table(iommu, gfp);
+       if (IS_ERR(page_table))
+               return page_table;
+
+       dte = sun50i_mk_dte(virt_to_phys(page_table));
+       old_dte = cmpxchg(dte_addr, 0, dte);
+       if (old_dte) {
+               phys_addr_t installed_pt_phys =
+                       sun50i_dte_get_pt_address(old_dte);
+               u32 *installed_pt = phys_to_virt(installed_pt_phys);
+               u32 *drop_pt = page_table;
+
+               page_table = installed_pt;
+               dte = old_dte;
+               sun50i_iommu_free_page_table(iommu, drop_pt);
+       }
+
+       sun50i_table_flush(sun50i_domain, page_table, PT_SIZE);
+       sun50i_table_flush(sun50i_domain, dte_addr, 1);
+
+       return page_table;
+}
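
sun50i_dte_get_page_table() above uses a lockless install idiom:
allocate a candidate L2 table, publish it into the DTE slot with
cmpxchg(), and if another CPU won the race, free the candidate and
reuse the winner's table. A minimal user-space sketch of the same
idiom using C11 atomics instead of the kernel's cmpxchg() (alloc_pt()
and free_pt() are hypothetical stand-ins for the driver's helpers):

#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

static uint32_t *alloc_pt(void) { return calloc(1024, sizeof(uint32_t)); }
static void free_pt(uint32_t *pt) { free(pt); }

static uint32_t *get_or_install_pt(_Atomic uintptr_t *slot)
{
        uint32_t *pt = alloc_pt();
        uintptr_t expected = 0;

        if (!pt)
                return NULL;

        /* Publish our table only if the slot is still empty. */
        if (!atomic_compare_exchange_strong(slot, &expected, (uintptr_t)pt)) {
                free_pt(pt);                    /* lost the race */
                pt = (uint32_t *)expected;      /* reuse the winner's */
        }
        return pt;
}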
+
+static int sun50i_iommu_map(struct iommu_domain *domain, unsigned long iova,
+                           phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+{
+       struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
+       struct sun50i_iommu *iommu = sun50i_domain->iommu;
+       u32 pte_index;
+       u32 *page_table, *pte_addr;
+       int ret = 0;
+
+       page_table = sun50i_dte_get_page_table(sun50i_domain, iova, gfp);
+       if (IS_ERR(page_table)) {
+               ret = PTR_ERR(page_table);
+               goto out;
+       }
+
+       pte_index = sun50i_iova_get_pte_index(iova);
+       pte_addr = &page_table[pte_index];
+       if (unlikely(sun50i_pte_is_page_valid(*pte_addr))) {
+               phys_addr_t page_phys = sun50i_pte_get_page_address(*pte_addr);
+               dev_err(iommu->dev,
+                       "iova %pad already mapped to %pa cannot remap to %pa prot: %#x\n",
+                       &iova, &page_phys, &paddr, prot);
+               ret = -EBUSY;
+               goto out;
+       }
+
+       *pte_addr = sun50i_mk_pte(paddr, prot);
+       sun50i_table_flush(sun50i_domain, pte_addr, 1);
+
+out:
+       return ret;
+}
+
+static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+                                size_t size, struct iommu_iotlb_gather *gather)
+{
+       struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
+       phys_addr_t pt_phys;
+       dma_addr_t pte_dma;
+       u32 *pte_addr;
+       u32 dte;
+
+       dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
+       if (!sun50i_dte_is_pt_valid(dte))
+               return 0;
+
+       pt_phys = sun50i_dte_get_pt_address(dte);
+       pte_addr = (u32 *)phys_to_virt(pt_phys) + sun50i_iova_get_pte_index(iova);
+       pte_dma = pt_phys + sun50i_iova_get_pte_index(iova) * PT_ENTRY_SIZE;
+
+       if (!sun50i_pte_is_page_valid(*pte_addr))
+               return 0;
+
+       memset(pte_addr, 0, sizeof(*pte_addr));
+       sun50i_table_flush(sun50i_domain, pte_addr, 1);
+
+       return SZ_4K;
+}
+
+static phys_addr_t sun50i_iommu_iova_to_phys(struct iommu_domain *domain,
+                                            dma_addr_t iova)
+{
+       struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
+       phys_addr_t pt_phys;
+       u32 *page_table;
+       u32 dte, pte;
+
+       dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
+       if (!sun50i_dte_is_pt_valid(dte))
+               return 0;
+
+       pt_phys = sun50i_dte_get_pt_address(dte);
+       page_table = (u32 *)phys_to_virt(pt_phys);
+       pte = page_table[sun50i_iova_get_pte_index(iova)];
+       if (!sun50i_pte_is_page_valid(pte))
+               return 0;
+
+       return sun50i_pte_get_page_address(pte) +
+               sun50i_iova_get_page_offset(iova);
+}
+
+static struct iommu_domain *sun50i_iommu_domain_alloc(unsigned type)
+{
+       struct sun50i_iommu_domain *sun50i_domain;
+
+       if (type != IOMMU_DOMAIN_DMA &&
+           type != IOMMU_DOMAIN_IDENTITY &&
+           type != IOMMU_DOMAIN_UNMANAGED)
+               return NULL;
+
+       sun50i_domain = kzalloc(sizeof(*sun50i_domain), GFP_KERNEL);
+       if (!sun50i_domain)
+               return NULL;
+
+       if (type == IOMMU_DOMAIN_DMA &&
+           iommu_get_dma_cookie(&sun50i_domain->domain))
+               goto err_free_domain;
+
+       sun50i_domain->dt = (u32 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+                                                   get_order(DT_SIZE));
+       if (!sun50i_domain->dt)
+               goto err_put_cookie;
+
+       refcount_set(&sun50i_domain->refcnt, 1);
+
+       sun50i_domain->domain.geometry.aperture_start = 0;
+       sun50i_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
+       sun50i_domain->domain.geometry.force_aperture = true;
+
+       return &sun50i_domain->domain;
+
+err_put_cookie:
+       if (type == IOMMU_DOMAIN_DMA)
+               iommu_put_dma_cookie(&sun50i_domain->domain);
+
+err_free_domain:
+       kfree(sun50i_domain);
+
+       return NULL;
+}
+
+static void sun50i_iommu_domain_free(struct iommu_domain *domain)
+{
+       struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
+
+       free_pages((unsigned long)sun50i_domain->dt, get_order(DT_SIZE));
+       sun50i_domain->dt = NULL;
+
+       iommu_put_dma_cookie(domain);
+
+       kfree(sun50i_domain);
+}
+
+static int sun50i_iommu_attach_domain(struct sun50i_iommu *iommu,
+                                     struct sun50i_iommu_domain *sun50i_domain)
+{
+       iommu->domain = &sun50i_domain->domain;
+       sun50i_domain->iommu = iommu;
+
+       sun50i_domain->dt_dma = dma_map_single(iommu->dev, sun50i_domain->dt,
+                                              DT_SIZE, DMA_TO_DEVICE);
+       if (dma_mapping_error(iommu->dev, sun50i_domain->dt_dma)) {
+               dev_err(iommu->dev, "Couldn't map L1 Page Table\n");
+               return -ENOMEM;
+       }
+
+       return sun50i_iommu_enable(iommu);
+}
+
+static void sun50i_iommu_detach_domain(struct sun50i_iommu *iommu,
+                                      struct sun50i_iommu_domain *sun50i_domain)
+{
+       unsigned int i;
+
+       for (i = 0; i < NUM_DT_ENTRIES; i++) {
+               phys_addr_t pt_phys;
+               u32 *page_table;
+               u32 *dte_addr;
+               u32 dte;
+
+               dte_addr = &sun50i_domain->dt[i];
+               dte = *dte_addr;
+               if (!sun50i_dte_is_pt_valid(dte))
+                       continue;
+
+               memset(dte_addr, 0, sizeof(*dte_addr));
+               sun50i_table_flush(sun50i_domain, dte_addr, 1);
+
+               pt_phys = sun50i_dte_get_pt_address(dte);
+               page_table = phys_to_virt(pt_phys);
+               sun50i_iommu_free_page_table(iommu, page_table);
+       }
+
+       sun50i_iommu_disable(iommu);
+
+       dma_unmap_single(iommu->dev, virt_to_phys(sun50i_domain->dt),
+                        DT_SIZE, DMA_TO_DEVICE);
+
+       iommu->domain = NULL;
+}
+
+static void sun50i_iommu_detach_device(struct iommu_domain *domain,
+                                      struct device *dev)
+{
+       struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
+       struct sun50i_iommu *iommu = dev_iommu_priv_get(dev);
+
+       dev_dbg(dev, "Detaching from IOMMU domain\n");
+
+       if (iommu->domain != domain)
+               return;
+
+       if (refcount_dec_and_test(&sun50i_domain->refcnt))
+               sun50i_iommu_detach_domain(iommu, sun50i_domain);
+}
+
+static int sun50i_iommu_attach_device(struct iommu_domain *domain,
+                                     struct device *dev)
+{
+       struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
+       struct sun50i_iommu *iommu;
+
+       iommu = sun50i_iommu_from_dev(dev);
+       if (!iommu)
+               return -ENODEV;
+
+       dev_dbg(dev, "Attaching to IOMMU domain\n");
+
+       refcount_inc(&sun50i_domain->refcnt);
+
+       if (iommu->domain == domain)
+               return 0;
+
+       if (iommu->domain)
+               sun50i_iommu_detach_device(iommu->domain, dev);
+
+       sun50i_iommu_attach_domain(iommu, sun50i_domain);
+
+       return 0;
+}
+
+static struct iommu_device *sun50i_iommu_probe_device(struct device *dev)
+{
+       struct sun50i_iommu *iommu;
+
+       iommu = sun50i_iommu_from_dev(dev);
+       if (!iommu)
+               return ERR_PTR(-ENODEV);
+
+       return &iommu->iommu;
+}
+
+static void sun50i_iommu_release_device(struct device *dev) {}
+
+static struct iommu_group *sun50i_iommu_device_group(struct device *dev)
+{
+       struct sun50i_iommu *iommu = sun50i_iommu_from_dev(dev);
+
+       return iommu_group_ref_get(iommu->group);
+}
+
+static int sun50i_iommu_of_xlate(struct device *dev,
+                                struct of_phandle_args *args)
+{
+       struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
+       unsigned id = args->args[0];
+
+       dev_iommu_priv_set(dev, platform_get_drvdata(iommu_pdev));
+
+       return iommu_fwspec_add_ids(dev, &id, 1);
+}
+
+static const struct iommu_ops sun50i_iommu_ops = {
+       .pgsize_bitmap  = SZ_4K,
+       .attach_dev     = sun50i_iommu_attach_device,
+       .detach_dev     = sun50i_iommu_detach_device,
+       .device_group   = sun50i_iommu_device_group,
+       .domain_alloc   = sun50i_iommu_domain_alloc,
+       .domain_free    = sun50i_iommu_domain_free,
+       .flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
+       .iotlb_sync     = sun50i_iommu_iotlb_sync,
+       .iova_to_phys   = sun50i_iommu_iova_to_phys,
+       .map            = sun50i_iommu_map,
+       .of_xlate       = sun50i_iommu_of_xlate,
+       .probe_device   = sun50i_iommu_probe_device,
+       .release_device = sun50i_iommu_release_device,
+       .unmap          = sun50i_iommu_unmap,
+};
+
+static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu,
+                                     unsigned master, phys_addr_t iova,
+                                     unsigned prot)
+{
+       dev_err(iommu->dev, "Page fault for %pad (master %d, dir %s)\n",
+               &iova, master, (prot == IOMMU_FAULT_WRITE) ? "wr" : "rd");
+
+       if (iommu->domain)
+               report_iommu_fault(iommu->domain, iommu->dev, iova, prot);
+       else
+               dev_err(iommu->dev, "Page fault while iommu not attached to any domain?\n");
+}
+
+static phys_addr_t sun50i_iommu_handle_pt_irq(struct sun50i_iommu *iommu,
+                                             unsigned addr_reg,
+                                             unsigned blame_reg)
+{
+       phys_addr_t iova;
+       unsigned master;
+       u32 blame;
+
+       assert_spin_locked(&iommu->iommu_lock);
+
+       iova = iommu_read(iommu, addr_reg);
+       blame = iommu_read(iommu, blame_reg);
+       master = ilog2(blame & IOMMU_INT_MASTER_MASK);
+
+       /*
+        * If the address is not in the page table, we can't get what
+        * operation triggered the fault. Assume it's a read
+        * operation.
+        */
+       sun50i_iommu_report_fault(iommu, master, iova, IOMMU_FAULT_READ);
+
+       return iova;
+}
+
+static phys_addr_t sun50i_iommu_handle_perm_irq(struct sun50i_iommu *iommu)
+{
+       enum sun50i_iommu_aci aci;
+       phys_addr_t iova;
+       unsigned master;
+       unsigned dir;
+       u32 blame;
+
+       assert_spin_locked(&iommu->iommu_lock);
+
+       blame = iommu_read(iommu, IOMMU_INT_STA_REG);
+       master = ilog2(blame & IOMMU_INT_MASTER_MASK);
+       iova = iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG(master));
+       aci = sun50i_get_pte_aci(iommu_read(iommu,
+                                           IOMMU_INT_ERR_DATA_REG(master)));
+
+       switch (aci) {
+               /*
+                * If we are in the read-only domain, then it means we
+                * tried to write.
+                */
+       case SUN50I_IOMMU_ACI_RD:
+               dir = IOMMU_FAULT_WRITE;
+               break;
+
+               /*
+                * If we are in the write-only domain, then it means
+                * we tried to read.
+                */
+       case SUN50I_IOMMU_ACI_WR:
+
+               /*
+                * If we are in the domain without any permission, we
+                * can't really tell. Let's default to a read
+                * operation.
+                */
+       case SUN50I_IOMMU_ACI_NONE:
+
+               /* A fault from the read/write domain makes no sense. */
+       case SUN50I_IOMMU_ACI_RD_WR:
+       default:
+               dir = IOMMU_FAULT_READ;
+               break;
+       }
+
+       /* Report the fault with the direction inferred from the ACI above. */
+       sun50i_iommu_report_fault(iommu, master, iova, dir);
+
+       return iova;
+}
+
+static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id)
+{
+       struct sun50i_iommu *iommu = dev_id;
+       phys_addr_t iova;
+       u32 status;
+
+       spin_lock(&iommu->iommu_lock);
+
+       status = iommu_read(iommu, IOMMU_INT_STA_REG);
+       if (!(status & IOMMU_INT_MASK)) {
+               spin_unlock(&iommu->iommu_lock);
+               return IRQ_NONE;
+       }
+
+       if (status & IOMMU_INT_INVALID_L2PG)
+               iova = sun50i_iommu_handle_pt_irq(iommu,
+                                                 IOMMU_INT_ERR_ADDR_L2_REG,
+                                                 IOMMU_L2PG_INT_REG);
+       else if (status & IOMMU_INT_INVALID_L1PG)
+               iova = sun50i_iommu_handle_pt_irq(iommu,
+                                                 IOMMU_INT_ERR_ADDR_L1_REG,
+                                                 IOMMU_L1PG_INT_REG);
+       else
+               iova = sun50i_iommu_handle_perm_irq(iommu);
+
+       iommu_write(iommu, IOMMU_INT_CLR_REG, status);
+
+       iommu_write(iommu, IOMMU_RESET_REG, ~status);
+       iommu_write(iommu, IOMMU_RESET_REG, status);
+
+       spin_unlock(&iommu->iommu_lock);
+
+       return IRQ_HANDLED;
+}
+
+static int sun50i_iommu_probe(struct platform_device *pdev)
+{
+       struct sun50i_iommu *iommu;
+       int ret, irq;
+
+       iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
+       if (!iommu)
+               return -ENOMEM;
+       spin_lock_init(&iommu->iommu_lock);
+       platform_set_drvdata(pdev, iommu);
+       iommu->dev = &pdev->dev;
+
+       iommu->pt_pool = kmem_cache_create(dev_name(&pdev->dev),
+                                          PT_SIZE, PT_SIZE,
+                                          SLAB_HWCACHE_ALIGN,
+                                          NULL);
+       if (!iommu->pt_pool)
+               return -ENOMEM;
+
+       iommu->group = iommu_group_alloc();
+       if (IS_ERR(iommu->group)) {
+               ret = PTR_ERR(iommu->group);
+               goto err_free_cache;
+       }
+
+       iommu->base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(iommu->base)) {
+               ret = PTR_ERR(iommu->base);
+               goto err_free_group;
+       }
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               ret = irq;
+               goto err_free_group;
+       }
+
+       iommu->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(iommu->clk)) {
+               dev_err(&pdev->dev, "Couldn't get our clock.\n");
+               ret = PTR_ERR(iommu->clk);
+               goto err_free_group;
+       }
+
+       iommu->reset = devm_reset_control_get(&pdev->dev, NULL);
+       if (IS_ERR(iommu->reset)) {
+               dev_err(&pdev->dev, "Couldn't get our reset line.\n");
+               ret = PTR_ERR(iommu->reset);
+               goto err_free_group;
+       }
+
+       ret = iommu_device_sysfs_add(&iommu->iommu, &pdev->dev,
+                                    NULL, dev_name(&pdev->dev));
+       if (ret)
+               goto err_free_group;
+
+       iommu_device_set_ops(&iommu->iommu, &sun50i_iommu_ops);
+       iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);
+
+       ret = iommu_device_register(&iommu->iommu);
+       if (ret)
+               goto err_remove_sysfs;
+
+       ret = devm_request_irq(&pdev->dev, irq, sun50i_iommu_irq, 0,
+                              dev_name(&pdev->dev), iommu);
+       if (ret < 0)
+               goto err_unregister;
+
+       bus_set_iommu(&platform_bus_type, &sun50i_iommu_ops);
+
+       return 0;
+
+err_unregister:
+       iommu_device_unregister(&iommu->iommu);
+
+err_remove_sysfs:
+       iommu_device_sysfs_remove(&iommu->iommu);
+
+err_free_group:
+       iommu_group_put(iommu->group);
+
+err_free_cache:
+       kmem_cache_destroy(iommu->pt_pool);
+
+       return ret;
+}
+
+static const struct of_device_id sun50i_iommu_dt[] = {
+       { .compatible = "allwinner,sun50i-h6-iommu", },
+       { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, sun50i_iommu_dt);
+
+static struct platform_driver sun50i_iommu_driver = {
+       .driver         = {
+               .name                   = "sun50i-iommu",
+               .of_match_table         = sun50i_iommu_dt,
+               .suppress_bind_attrs    = true,
+       }
+};
+builtin_platform_driver_probe(sun50i_iommu_driver, sun50i_iommu_probe);
+
+MODULE_DESCRIPTION("Allwinner H6 IOMMU driver");
+MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
+MODULE_AUTHOR("zhuxianbin <zhuxianbin@allwinnertech.com>");
+MODULE_LICENSE("Dual BSD/GPL");
index bda300c2a438a1928d3622fc423a6ccbe33f2d68..f6f07489a9aa57947e2c993f0854e5de91ee50ef 100644 (file)
@@ -453,7 +453,7 @@ static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
        if (!region)
                return -ENOMEM;
 
-       list_add(&vdev->resv_regions, &region->list);
+       list_add(&region->list, &vdev->resv_regions);
        return 0;
 }
 
index 23445ebfda5c1fea492de4ece16e7fd5bf32f78c..ec71063fff76a37f9913b19b0eda756e1b25518c 100644 (file)
@@ -306,6 +306,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
                        "(bn 0x%X, sn 0x%X) failed to map driver user space!",
                        tpci200->info->pdev->bus->number,
                        tpci200->info->pdev->devfn);
+               res = -ENOMEM;
                goto out_release_mem8_space;
        }
 
index 58fd137b6ae1a041c8503a62bef175a27a70ff16..3e500098132f1307ae82259a2aba1ec324496bf7 100644 (file)
@@ -585,10 +585,12 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
 
        /* Do we need to select a new pgpath? */
        pgpath = READ_ONCE(m->current_pgpath);
-       queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
-       if (!pgpath || !queue_io)
+       if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
                pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
 
+       /* MPATHF_QUEUE_IO might have been cleared by choose_pgpath. */
+       queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
+
        if ((pgpath && queue_io) ||
            (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
                /* Queue for the daemon to resubmit */
index 49147e634046da9caa4309e2d7ce712b545e131f..fb41b4f23c4891bd816624f18f8d67b51fbd3e6c 100644 (file)
@@ -435,7 +435,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
        fio->level++;
 
        if (type == DM_VERITY_BLOCK_TYPE_METADATA)
-               block += v->data_blocks;
+               block = block - v->hash_start + v->data_blocks;
 
        /*
         * For RS(M, N), the continuous FEC data is divided into blocks of N
index 114927da9cc9a70689e1badf04ebad06dd9a52c8..613c171b1b6d2f37d49b4bfbdc9b55e7bedd0527 100644 (file)
@@ -931,6 +931,24 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
        return 0;
 }
 
+static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors)
+{
+       struct dm_io_region region;
+       struct dm_io_request req;
+
+       region.bdev = wc->ssd_dev->bdev;
+       region.sector = wc->start_sector;
+       region.count = n_sectors;
+       req.bi_op = REQ_OP_READ;
+       req.bi_op_flags = REQ_SYNC;
+       req.mem.type = DM_IO_VMA;
+       req.mem.ptr.vma = (char *)wc->memory_map;
+       req.client = wc->dm_io;
+       req.notify.fn = NULL;
+
+       return dm_io(&req, 1, &region, NULL);
+}
+
 static void writecache_resume(struct dm_target *ti)
 {
        struct dm_writecache *wc = ti->private;
@@ -941,8 +959,18 @@ static void writecache_resume(struct dm_target *ti)
 
        wc_lock(wc);
 
-       if (WC_MODE_PMEM(wc))
+       if (WC_MODE_PMEM(wc)) {
                persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
+       } else {
+               r = writecache_read_metadata(wc, wc->metadata_sectors);
+               if (r) {
+                       size_t sb_entries_offset;
+                       writecache_error(wc, r, "unable to read metadata: %d", r);
+                       sb_entries_offset = offsetof(struct wc_memory_superblock, entries);
+                       memset((char *)wc->memory_map + sb_entries_offset, -1,
+                              (wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset);
+               }
+       }
 
        wc->tree = RB_ROOT;
        INIT_LIST_HEAD(&wc->lru);
@@ -2102,6 +2130,12 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
                ti->error = "Invalid block size";
                goto bad;
        }
+       if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) ||
+           wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) {
+               r = -EINVAL;
+               ti->error = "Block size is smaller than device logical block size";
+               goto bad;
+       }
        wc->block_size_bits = __ffs(wc->block_size);
 
        wc->max_writeback_jobs = MAX_WRITEBACK_JOBS;
@@ -2200,8 +2234,6 @@ invalid_optional:
                        goto bad;
                }
        } else {
-               struct dm_io_region region;
-               struct dm_io_request req;
                size_t n_blocks, n_metadata_blocks;
                uint64_t n_bitmap_bits;
 
@@ -2258,19 +2290,9 @@ invalid_optional:
                        goto bad;
                }
 
-               region.bdev = wc->ssd_dev->bdev;
-               region.sector = wc->start_sector;
-               region.count = wc->metadata_sectors;
-               req.bi_op = REQ_OP_READ;
-               req.bi_op_flags = REQ_SYNC;
-               req.mem.type = DM_IO_VMA;
-               req.mem.ptr.vma = (char *)wc->memory_map;
-               req.client = wc->dm_io;
-               req.notify.fn = NULL;
-
-               r = dm_io(&req, 1, &region, NULL);
+               r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT);
                if (r) {
-                       ti->error = "Unable to read metadata";
+                       ti->error = "Unable to read first block of metadata";
                        goto bad;
                }
        }
index 06038b325b023a815c277de89854536e495e3be6..55da6428ceb01909b5fd8ff80c59b89420655121 100644 (file)
@@ -142,6 +142,9 @@ static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
 
        rtsx_disable_aspm(pcr);
 
+       /* Fixes DMA transfer timeout issue after disabling ASPM on RTS5260 */
+       msleep(1);
+
        if (option->ltr_enabled)
                rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
 
index 204d807e755b4d639bd9ea80279077104d336b7e..b32c825a09459e56e64a5e6d07f0454cff91d5b4 100644 (file)
@@ -266,6 +266,7 @@ void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
        down_write(&dev->me_clients_rwsem);
        me_cl = __mei_me_cl_by_uuid(dev, uuid);
        __mei_me_cl_del(dev, me_cl);
+       mei_me_cl_put(me_cl);
        up_write(&dev->me_clients_rwsem);
 }
 
@@ -287,6 +288,7 @@ void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
        down_write(&dev->me_clients_rwsem);
        me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
        __mei_me_cl_del(dev, me_cl);
+       mei_me_cl_put(me_cl);
        up_write(&dev->me_clients_rwsem);
 }
 
index 668418d7ea770924b778b070d051e1e63a83d213..f620442addf529ea5d7a663f0e57a61b03a3c966 100644 (file)
@@ -1465,6 +1465,13 @@ static const struct mei_cfg mei_me_pch12_cfg = {
        MEI_CFG_DMA_128,
 };
 
+/* LBG with quirk for SPS Firmware exclusion */
+static const struct mei_cfg mei_me_pch12_sps_cfg = {
+       MEI_CFG_PCH8_HFS,
+       MEI_CFG_FW_VER_SUPP,
+       MEI_CFG_FW_SPS,
+};
+
 /* Tiger Lake and newer devices */
 static const struct mei_cfg mei_me_pch15_cfg = {
        MEI_CFG_PCH8_HFS,
@@ -1487,6 +1494,7 @@ static const struct mei_cfg *const mei_cfg_list[] = {
        [MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
        [MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg,
        [MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
+       [MEI_ME_PCH12_SPS_CFG] = &mei_me_pch12_sps_cfg,
        [MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg,
 };
 
index 4a8d4dcd5a91b449f8a3a7c1cd8111f186acc5d7..b6b94e2114645f8a18fb503a5d91908e625857de 100644 (file)
@@ -80,6 +80,9 @@ struct mei_me_hw {
  *                         servers platforms with quirk for
  *                         SPS firmware exclusion.
  * @MEI_ME_PCH12_CFG:      Platform Controller Hub Gen12 and newer
+ * @MEI_ME_PCH12_SPS_CFG:  Platform Controller Hub Gen12 and newer
+ *                         servers platforms with quirk for
+ *                         SPS firmware exclusion.
  * @MEI_ME_PCH15_CFG:      Platform Controller Hub Gen15 and newer
  * @MEI_ME_NUM_CFG:        Upper Sentinel.
  */
@@ -93,6 +96,7 @@ enum mei_cfg_idx {
        MEI_ME_PCH8_CFG,
        MEI_ME_PCH8_SPS_CFG,
        MEI_ME_PCH12_CFG,
+       MEI_ME_PCH12_SPS_CFG,
        MEI_ME_PCH15_CFG,
        MEI_ME_NUM_CFG,
 };
index 0c390fe421ad3642d70ca6bd288dda78203fa543..a1ed375fed37443084d5eabbcd74faef157ec52d 100644 (file)
@@ -70,7 +70,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_CFG)},
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
index d39307f060bd6eb0aec1c645bee88470e131eb3c..107028e77ca3753bb88cfade001609f42b95b345 100644 (file)
@@ -90,109 +90,39 @@ static long uacce_fops_compat_ioctl(struct file *filep,
 }
 #endif
 
-static int uacce_sva_exit(struct device *dev, struct iommu_sva *handle,
-                         void *data)
+static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q)
 {
-       struct uacce_mm *uacce_mm = data;
-       struct uacce_queue *q;
-
-       /*
-        * No new queue can be added concurrently because no caller can have a
-        * reference to this mm. But there may be concurrent calls to
-        * uacce_mm_put(), so we need the lock.
-        */
-       mutex_lock(&uacce_mm->lock);
-       list_for_each_entry(q, &uacce_mm->queues, list)
-               uacce_put_queue(q);
-       uacce_mm->mm = NULL;
-       mutex_unlock(&uacce_mm->lock);
+       int pasid;
+       struct iommu_sva *handle;
 
-       return 0;
-}
-
-static struct iommu_sva_ops uacce_sva_ops = {
-       .mm_exit = uacce_sva_exit,
-};
-
-static struct uacce_mm *uacce_mm_get(struct uacce_device *uacce,
-                                    struct uacce_queue *q,
-                                    struct mm_struct *mm)
-{
-       struct uacce_mm *uacce_mm = NULL;
-       struct iommu_sva *handle = NULL;
-       int ret;
-
-       lockdep_assert_held(&uacce->mm_lock);
-
-       list_for_each_entry(uacce_mm, &uacce->mm_list, list) {
-               if (uacce_mm->mm == mm) {
-                       mutex_lock(&uacce_mm->lock);
-                       list_add(&q->list, &uacce_mm->queues);
-                       mutex_unlock(&uacce_mm->lock);
-                       return uacce_mm;
-               }
-       }
-
-       uacce_mm = kzalloc(sizeof(*uacce_mm), GFP_KERNEL);
-       if (!uacce_mm)
-               return NULL;
+       if (!(uacce->flags & UACCE_DEV_SVA))
+               return 0;
 
-       if (uacce->flags & UACCE_DEV_SVA) {
-               /*
-                * Safe to pass an incomplete uacce_mm, since mm_exit cannot
-                * fire while we hold a reference to the mm.
-                */
-               handle = iommu_sva_bind_device(uacce->parent, mm, uacce_mm);
-               if (IS_ERR(handle))
-                       goto err_free;
+       handle = iommu_sva_bind_device(uacce->parent, current->mm, NULL);
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
 
-               ret = iommu_sva_set_ops(handle, &uacce_sva_ops);
-               if (ret)
-                       goto err_unbind;
-
-               uacce_mm->pasid = iommu_sva_get_pasid(handle);
-               if (uacce_mm->pasid == IOMMU_PASID_INVALID)
-                       goto err_unbind;
+       pasid = iommu_sva_get_pasid(handle);
+       if (pasid == IOMMU_PASID_INVALID) {
+               iommu_sva_unbind_device(handle);
+               return -ENODEV;
        }
 
-       uacce_mm->mm = mm;
-       uacce_mm->handle = handle;
-       INIT_LIST_HEAD(&uacce_mm->queues);
-       mutex_init(&uacce_mm->lock);
-       list_add(&q->list, &uacce_mm->queues);
-       list_add(&uacce_mm->list, &uacce->mm_list);
-
-       return uacce_mm;
-
-err_unbind:
-       if (handle)
-               iommu_sva_unbind_device(handle);
-err_free:
-       kfree(uacce_mm);
-       return NULL;
+       q->handle = handle;
+       q->pasid = pasid;
+       return 0;
 }
 
-static void uacce_mm_put(struct uacce_queue *q)
+static void uacce_unbind_queue(struct uacce_queue *q)
 {
-       struct uacce_mm *uacce_mm = q->uacce_mm;
-
-       lockdep_assert_held(&q->uacce->mm_lock);
-
-       mutex_lock(&uacce_mm->lock);
-       list_del(&q->list);
-       mutex_unlock(&uacce_mm->lock);
-
-       if (list_empty(&uacce_mm->queues)) {
-               if (uacce_mm->handle)
-                       iommu_sva_unbind_device(uacce_mm->handle);
-               list_del(&uacce_mm->list);
-               kfree(uacce_mm);
-       }
+       if (!q->handle)
+               return;
+       iommu_sva_unbind_device(q->handle);
+       q->handle = NULL;
 }
 
 static int uacce_fops_open(struct inode *inode, struct file *filep)
 {
-       struct uacce_mm *uacce_mm = NULL;
        struct uacce_device *uacce;
        struct uacce_queue *q;
        int ret = 0;
@@ -205,21 +135,16 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
        if (!q)
                return -ENOMEM;
 
-       mutex_lock(&uacce->mm_lock);
-       uacce_mm = uacce_mm_get(uacce, q, current->mm);
-       mutex_unlock(&uacce->mm_lock);
-       if (!uacce_mm) {
-               ret = -ENOMEM;
+       ret = uacce_bind_queue(uacce, q);
+       if (ret)
                goto out_with_mem;
-       }
 
        q->uacce = uacce;
-       q->uacce_mm = uacce_mm;
 
        if (uacce->ops->get_queue) {
-               ret = uacce->ops->get_queue(uacce, uacce_mm->pasid, q);
+               ret = uacce->ops->get_queue(uacce, q->pasid, q);
                if (ret < 0)
-                       goto out_with_mm;
+                       goto out_with_bond;
        }
 
        init_waitqueue_head(&q->wait);
@@ -227,12 +152,14 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
        uacce->inode = inode;
        q->state = UACCE_Q_INIT;
 
+       mutex_lock(&uacce->queues_lock);
+       list_add(&q->list, &uacce->queues);
+       mutex_unlock(&uacce->queues_lock);
+
        return 0;
 
-out_with_mm:
-       mutex_lock(&uacce->mm_lock);
-       uacce_mm_put(q);
-       mutex_unlock(&uacce->mm_lock);
+out_with_bond:
+       uacce_unbind_queue(q);
 out_with_mem:
        kfree(q);
        return ret;
@@ -241,14 +168,12 @@ out_with_mem:
 static int uacce_fops_release(struct inode *inode, struct file *filep)
 {
        struct uacce_queue *q = filep->private_data;
-       struct uacce_device *uacce = q->uacce;
 
+       mutex_lock(&q->uacce->queues_lock);
+       list_del(&q->list);
+       mutex_unlock(&q->uacce->queues_lock);
        uacce_put_queue(q);
-
-       mutex_lock(&uacce->mm_lock);
-       uacce_mm_put(q);
-       mutex_unlock(&uacce->mm_lock);
-
+       uacce_unbind_queue(q);
        kfree(q);
 
        return 0;
@@ -513,8 +438,8 @@ struct uacce_device *uacce_alloc(struct device *parent,
        if (ret < 0)
                goto err_with_uacce;
 
-       INIT_LIST_HEAD(&uacce->mm_list);
-       mutex_init(&uacce->mm_lock);
+       INIT_LIST_HEAD(&uacce->queues);
+       mutex_init(&uacce->queues_lock);
        device_initialize(&uacce->dev);
        uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
        uacce->dev.class = uacce_class;
@@ -561,8 +486,7 @@ EXPORT_SYMBOL_GPL(uacce_register);
  */
 void uacce_remove(struct uacce_device *uacce)
 {
-       struct uacce_mm *uacce_mm;
-       struct uacce_queue *q;
+       struct uacce_queue *q, *next_q;
 
        if (!uacce)
                return;
@@ -574,24 +498,12 @@ void uacce_remove(struct uacce_device *uacce)
                unmap_mapping_range(uacce->inode->i_mapping, 0, 0, 1);
 
        /* ensure no open queue remains */
-       mutex_lock(&uacce->mm_lock);
-       list_for_each_entry(uacce_mm, &uacce->mm_list, list) {
-               /*
-                * We don't take the uacce_mm->lock here. Since we hold the
-                * device's mm_lock, no queue can be added to or removed from
-                * this uacce_mm. We may run concurrently with mm_exit, but
-                * uacce_put_queue() is serialized and iommu_sva_unbind_device()
-                * waits for the lock that mm_exit is holding.
-                */
-               list_for_each_entry(q, &uacce_mm->queues, list)
-                       uacce_put_queue(q);
-
-               if (uacce->flags & UACCE_DEV_SVA) {
-                       iommu_sva_unbind_device(uacce_mm->handle);
-                       uacce_mm->handle = NULL;
-               }
+       mutex_lock(&uacce->queues_lock);
+       list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
+               uacce_put_queue(q);
+               uacce_unbind_queue(q);
        }
-       mutex_unlock(&uacce->mm_lock);
+       mutex_unlock(&uacce->queues_lock);
 
        /* disable sva now since no opened queues */
        if (uacce->flags & UACCE_DEV_SVA)
index 8499b56a15a8139211402d5b35048a411a3a391d..7896952de1ac75a5bc384eb065868732b64c02e3 100644 (file)
@@ -1370,6 +1370,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
        struct mmc_request *mrq = &mqrq->brq.mrq;
        struct request_queue *q = req->q;
        struct mmc_host *host = mq->card->host;
+       enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
        unsigned long flags;
        bool put_card;
        int err;
@@ -1399,7 +1400,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
 
        spin_lock_irqsave(&mq->lock, flags);
 
-       mq->in_flight[mmc_issue_type(mq, req)] -= 1;
+       mq->in_flight[issue_type] -= 1;
 
        put_card = (mmc_tot_in_flight(mq) == 0);
 
@@ -2483,8 +2484,8 @@ static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp)
        struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
                                                  struct mmc_rpmb_data, chrdev);
 
-       put_device(&rpmb->dev);
        mmc_blk_put(rpmb->md);
+       put_device(&rpmb->dev);
 
        return 0;
 }
index 5bd0ab8b236a5707040b5c13bb5e204ff3a5938e..baa6314f69b411d80eb97d4d82480203b5338892 100644 (file)
@@ -878,7 +878,7 @@ static int mmc_send_hpi_cmd(struct mmc_card *card)
  *     Issued High Priority Interrupt, and check for card status
  *     until out-of prg-state.
  */
-int mmc_interrupt_hpi(struct mmc_card *card)
+static int mmc_interrupt_hpi(struct mmc_card *card)
 {
        int err;
        u32 status;
index 25bee3daf9e2edc9e31003c288b7209a1baf5e27..4b1eb89b401d98a4c223744c320bedf82eaee2b9 100644 (file)
@@ -107,11 +107,10 @@ static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
        case MMC_ISSUE_DCMD:
                if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
                        if (recovery_needed)
-                               __mmc_cqe_recovery_notifier(mq);
+                               mmc_cqe_recovery_notifier(mrq);
                        return BLK_EH_RESET_TIMER;
                }
-               /* No timeout (XXX: huh? comment doesn't make much sense) */
-               blk_mq_complete_request(req);
+               /* The request has gone already */
                return BLK_EH_DONE;
        default:
                /* Timeout is handled by mmc core */
@@ -127,18 +126,13 @@ static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
        struct mmc_card *card = mq->card;
        struct mmc_host *host = card->host;
        unsigned long flags;
-       int ret;
+       bool ignore_tout;
 
        spin_lock_irqsave(&mq->lock, flags);
-
-       if (mq->recovery_needed || !mq->use_cqe || host->hsq_enabled)
-               ret = BLK_EH_RESET_TIMER;
-       else
-               ret = mmc_cqe_timed_out(req);
-
+       ignore_tout = mq->recovery_needed || !mq->use_cqe || host->hsq_enabled;
        spin_unlock_irqrestore(&mq->lock, flags);
 
-       return ret;
+       return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
 }
 
 static void mmc_mq_recovery_handler(struct work_struct *work)
index 1aee485d56d4c4a13b9b899f1e1bba88613ac1cd..026ca9194ce5b91dbe9bf58cb573bf4282092c8e 100644 (file)
@@ -1104,7 +1104,7 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
 
        if (ret) {
                dev_err(&pdev->dev, "Failed to get irq for data line\n");
-               return ret;
+               goto free_host;
        }
 
        mutex_init(&host->cmd_mutex);
@@ -1116,6 +1116,10 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
        dev_set_drvdata(&pdev->dev, host);
        mmc_add_host(mmc);
        return 0;
+
+free_host:
+       mmc_free_host(mmc);
+       return ret;
 }
 
 static int alcor_pci_sdmmc_drv_remove(struct platform_device *pdev)
index c2239ee2c0ef74ac1c1e6a102388d0d3e6ddf6ab..75934f3c117eba9576ceb7566f21033876eb463b 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/delay.h>
 #include <linux/highmem.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
@@ -349,12 +350,16 @@ static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
 /* CQHCI is idle and should halt immediately, so set a small timeout */
 #define CQHCI_OFF_TIMEOUT 100
 
+static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
+{
+       return cqhci_readl(cq_host, CQHCI_CTL);
+}
+
 static void cqhci_off(struct mmc_host *mmc)
 {
        struct cqhci_host *cq_host = mmc->cqe_private;
-       ktime_t timeout;
-       bool timed_out;
        u32 reg;
+       int err;
 
        if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
                return;
@@ -364,15 +369,9 @@ static void cqhci_off(struct mmc_host *mmc)
 
        cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);
 
-       timeout = ktime_add_us(ktime_get(), CQHCI_OFF_TIMEOUT);
-       while (1) {
-               timed_out = ktime_compare(ktime_get(), timeout) > 0;
-               reg = cqhci_readl(cq_host, CQHCI_CTL);
-               if ((reg & CQHCI_HALT) || timed_out)
-                       break;
-       }
-
-       if (timed_out)
+       err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
+                                reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
+       if (err < 0)
                pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
        else
                pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));
index 8b038e7b2cd312e2c5e3d123b8a148595aadbe1d..2e58743d83bb5feb4fbe28c531d27605904c476c 100644 (file)
@@ -357,14 +357,6 @@ static void meson_mx_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
                meson_mx_mmc_start_cmd(mmc, mrq->cmd);
 }
 
-static int meson_mx_mmc_card_busy(struct mmc_host *mmc)
-{
-       struct meson_mx_mmc_host *host = mmc_priv(mmc);
-       u32 irqc = readl(host->base + MESON_MX_SDIO_IRQC);
-
-       return !!(irqc & MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK);
-}
-
 static void meson_mx_mmc_read_response(struct mmc_host *mmc,
                                       struct mmc_command *cmd)
 {
@@ -506,7 +498,6 @@ static void meson_mx_mmc_timeout(struct timer_list *t)
 static struct mmc_host_ops meson_mx_mmc_ops = {
        .request                = meson_mx_mmc_request,
        .set_ios                = meson_mx_mmc_set_ios,
-       .card_busy              = meson_mx_mmc_card_busy,
        .get_cd                 = mmc_gpio_get_cd,
        .get_ro                 = mmc_gpio_get_ro,
 };
@@ -570,7 +561,7 @@ static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)
        mmc->f_max = clk_round_rate(host->cfg_div_clk,
                                    clk_get_rate(host->parent_clk));
 
-       mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
+       mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY;
        mmc->ops = &meson_mx_mmc_ops;
 
        ret = mmc_of_parse(mmc);
index faba53cf139b5d2dbd18048cc5a956cfed86fd3f..d8b76cb8698aa1953d19c253806a7ea9ef025267 100644 (file)
@@ -605,10 +605,12 @@ static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev,
 }
 
 static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = {
-       .chip   = &sdhci_acpi_chip_amd,
-       .caps   = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
-       .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE |
-                       SDHCI_QUIRK_32BIT_ADMA_SIZE,
+       .chip           = &sdhci_acpi_chip_amd,
+       .caps           = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
+       .quirks         = SDHCI_QUIRK_32BIT_DMA_ADDR |
+                         SDHCI_QUIRK_32BIT_DMA_SIZE |
+                         SDHCI_QUIRK_32BIT_ADMA_SIZE,
+       .quirks2        = SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
        .probe_slot     = sdhci_acpi_emmc_amd_probe_slot,
 };
 
index 09ff7315eb5ee8dfeb82f97bad4f0ef0300a993b..a8bcb3f16aa403a316ad705a94ad410ed8b6a682 100644 (file)
@@ -2087,6 +2087,8 @@ static int sdhci_msm_probe(struct platform_device *pdev)
                goto clk_disable;
        }
 
+       msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
+
        pm_runtime_get_noresume(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);
index 525de2454a4de1d9d17b628d6d27a2898eaea36c..2527244c2ae16f1e7c2c11491c4c3be3a49fc9f9 100644 (file)
@@ -601,6 +601,9 @@ static int intel_select_drive_strength(struct mmc_card *card,
        struct sdhci_pci_slot *slot = sdhci_priv(host);
        struct intel_host *intel_host = sdhci_pci_priv(slot);
 
+       if (!(mmc_driver_type_mask(intel_host->drv_strength) & card_drv))
+               return 0;
+
        return intel_host->drv_strength;
 }
 
index ce15a05f23d41988dcbb36f2d08dc8cd6733cc4f..fd76aa672e020445a2ee3777ab0cf623bb8b436c 100644 (file)
@@ -26,6 +26,9 @@
 #define   SDHCI_GLI_9750_DRIVING_2    GENMASK(27, 26)
 #define   GLI_9750_DRIVING_1_VALUE    0xFFF
 #define   GLI_9750_DRIVING_2_VALUE    0x3
+#define   SDHCI_GLI_9750_SEL_1        BIT(29)
+#define   SDHCI_GLI_9750_SEL_2        BIT(31)
+#define   SDHCI_GLI_9750_ALL_RST      (BIT(24)|BIT(25)|BIT(28)|BIT(30))
 
 #define SDHCI_GLI_9750_PLL           0x864
 #define   SDHCI_GLI_9750_PLL_TX2_INV    BIT(23)
@@ -122,6 +125,8 @@ static void gli_set_9750(struct sdhci_host *host)
                                    GLI_9750_DRIVING_1_VALUE);
        driving_value |= FIELD_PREP(SDHCI_GLI_9750_DRIVING_2,
                                    GLI_9750_DRIVING_2_VALUE);
+       driving_value &= ~(SDHCI_GLI_9750_SEL_1|SDHCI_GLI_9750_SEL_2|SDHCI_GLI_9750_ALL_RST);
+       driving_value |= SDHCI_GLI_9750_SEL_2;
        sdhci_writel(host, driving_value, SDHCI_GLI_9750_DRIVING);
 
        sw_ctrl_value &= ~SDHCI_GLI_9750_SW_CTRL_4;
@@ -334,6 +339,18 @@ static u32 sdhci_gl9750_readl(struct sdhci_host *host, int reg)
        return value;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip)
+{
+       struct sdhci_pci_slot *slot = chip->slots[0];
+
+       pci_free_irq_vectors(slot->chip->pdev);
+       gli_pcie_enable_msi(slot);
+
+       return sdhci_pci_resume_host(chip);
+}
+#endif
+
 static const struct sdhci_ops sdhci_gl9755_ops = {
        .set_clock              = sdhci_set_clock,
        .enable_dma             = sdhci_pci_enable_dma,
@@ -348,6 +365,9 @@ const struct sdhci_pci_fixes sdhci_gl9755 = {
        .quirks2        = SDHCI_QUIRK2_BROKEN_DDR50,
        .probe_slot     = gli_probe_slot_gl9755,
        .ops            = &sdhci_gl9755_ops,
+#ifdef CONFIG_PM_SLEEP
+       .resume         = sdhci_pci_gli_resume,
+#endif
 };
 
 static const struct sdhci_ops sdhci_gl9750_ops = {
@@ -366,4 +386,7 @@ const struct sdhci_pci_fixes sdhci_gl9750 = {
        .quirks2        = SDHCI_QUIRK2_BROKEN_DDR50,
        .probe_slot     = gli_probe_slot_gl9750,
        .ops            = &sdhci_gl9750_ops,
+#ifdef CONFIG_PM_SLEEP
+       .resume         = sdhci_pci_gli_resume,
+#endif
 };
index 1dea1ba66f7b4e8a05e48a8a0f4fbac8875c2a14..4703cd540c7fd89dc67b6399c37fce24f88fed5a 100644 (file)
@@ -235,6 +235,16 @@ static void xenon_voltage_switch(struct sdhci_host *host)
 {
        /* Wait for 5ms after set 1.8V signal enable bit */
        usleep_range(5000, 5500);
+
+       /*
+        * For some reason the controller's Host Control2 register reports
+        * the bit representing 1.8V signaling as 0 when read after it was
+        * written as 1. Subsequent read reports 1.
+        *
+        * Since this may cause some issues, do an empty read of the Host
+        * Control2 register here to circumvent this.
+        */
+       sdhci_readw(host, SDHCI_HOST_CONTROL2);
 }
 
 static const struct sdhci_ops sdhci_xenon_ops = {
index 3f716466fcfd04a1ed7b0e992ee9a072a9126104..e368f2dabf209235697a29fb6ddcf99c35494b45 100644 (file)
@@ -4000,9 +4000,6 @@ int sdhci_setup_host(struct sdhci_host *host)
                       mmc_hostname(mmc), host->version);
        }
 
-       if (host->quirks & SDHCI_QUIRK_BROKEN_CQE)
-               mmc->caps2 &= ~MMC_CAP2_CQE;
-
        if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
                host->flags |= SDHCI_USE_SDMA;
        else if (!(host->caps & SDHCI_CAN_DO_SDMA))
@@ -4539,6 +4536,12 @@ int __sdhci_add_host(struct sdhci_host *host)
        struct mmc_host *mmc = host->mmc;
        int ret;
 
+       if ((mmc->caps2 & MMC_CAP2_CQE) &&
+           (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) {
+               mmc->caps2 &= ~MMC_CAP2_CQE;
+               mmc->cqe_ops = NULL;
+       }
+
        host->complete_wq = alloc_workqueue("sdhci", flags, 0);
        if (!host->complete_wq)
                return -ENOMEM;
index 06426fc5c990ba1bbdd56564f67b59ce3068e2ce..f781c46cd4af9afc2458a1fe0d53c95bbbb8abfb 100644 (file)
@@ -1483,7 +1483,7 @@ static void __exit most_exit(void)
        ida_destroy(&mdev_id);
 }
 
-module_init(most_init);
+subsys_initcall(most_init);
 module_exit(most_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
index 2916674208b32643950bae373e7380be4d66ab7a..29d41003d6e0d2f57e625db58df115a5d18de487 100644 (file)
@@ -555,7 +555,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
 
        config.id = -1;
        config.dev = &mtd->dev;
-       config.name = mtd->name;
+       config.name = dev_name(&mtd->dev);
        config.owner = THIS_MODULE;
        config.reg_read = mtd_nvmem_reg_read;
        config.size = mtd->size;
index e4e3ceeac38f84b02e88fb9f9686a3a4654eb9c2..8f9ffb46a09f3ede73c6ada9df22673bcde2b264 100644 (file)
@@ -2728,9 +2728,8 @@ static int brcmnand_resume(struct device *dev)
                flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
        }
 
-       if (has_edu(ctrl))
+       if (has_edu(ctrl)) {
                ctrl->edu_config = edu_readl(ctrl, EDU_CONFIG);
-       else {
                edu_writel(ctrl, EDU_CONFIG, ctrl->edu_config);
                edu_readl(ctrl, EDU_CONFIG);
                brcmnand_edu_init(ctrl);
index b6bb358b96ceaf224362a0c923655e2d3722bdf7..e2c382ffc5b6990ffbb81c75f6b523870a7639c3 100644 (file)
@@ -1089,6 +1089,10 @@ static int spinand_init(struct spinand_device *spinand)
 
        mtd->oobavail = ret;
 
+       /* Propagate ECC information to mtd_info */
+       mtd->ecc_strength = nand->eccreq.strength;
+       mtd->ecc_step_size = nand->eccreq.step_size;
+
        return 0;
 
 err_cleanup_nanddev:
index 54646c2c2744a479f8cffa3d179e19cde8890faa..ac2bdba8bb1a3b200338e73903e16987dcfbb81e 100644 (file)
@@ -393,9 +393,6 @@ static void *eraseblk_count_seq_start(struct seq_file *s, loff_t *pos)
 {
        struct ubi_device *ubi = s->private;
 
-       if (*pos == 0)
-               return SEQ_START_TOKEN;
-
        if (*pos < ubi->peb_count)
                return pos;
 
@@ -409,8 +406,6 @@ static void *eraseblk_count_seq_next(struct seq_file *s, void *v, loff_t *pos)
 {
        struct ubi_device *ubi = s->private;
 
-       if (v == SEQ_START_TOKEN)
-               return pos;
        (*pos)++;
 
        if (*pos < ubi->peb_count)
@@ -432,11 +427,8 @@ static int eraseblk_count_seq_show(struct seq_file *s, void *iter)
        int err;
 
        /* If this is the start, print a header */
-       if (iter == SEQ_START_TOKEN) {
-               seq_puts(s,
-                        "physical_block_number\terase_count\tblock_status\tread_status\n");
-               return 0;
-       }
+       if (*block_number == 0)
+               seq_puts(s, "physical_block_number\terase_count\n");
 
        err = ubi_io_is_bad(ubi, *block_number);
        if (err)
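
Note: the three hunks above drop SEQ_START_TOKEN from the eraseblock debugfs iterator: show() now prints the header when the position is 0, so start/next never hand out a token that later code must special-case. A self-contained model of the corrected start/next/show contract:

    #include <stdio.h>

    #define PEB_COUNT 3

    static long *ebc_start(long *pos) { return *pos < PEB_COUNT ? pos : NULL; }
    static long *ebc_next(long *pos)  { (*pos)++; return *pos < PEB_COUNT ? pos : NULL; }

    static void ebc_show(const long *block_number)
    {
        /* If this is the start, print a header; no SEQ_START_TOKEN. */
        if (*block_number == 0)
            printf("physical_block_number\terase_count\n");
        printf("%ld\t\t\t%ld\n", *block_number, 100 + *block_number);
    }

    int main(void)
    {
        long pos = 0;

        for (long *v = ebc_start(&pos); v; v = ebc_next(&pos))
            ebc_show(v);
        return 0;
    }
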
index cc0703c3d57f24f75d372e704ba6f224505a9663..efd1a1d1f35e06f31606bff97fc70f04f2d07562 100644 (file)
@@ -136,25 +136,21 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
        oiph = skb_network_header(skb);
        skb_reset_network_header(skb);
 
-       if (family == AF_INET)
+       if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
                err = IP_ECN_decapsulate(oiph, skb);
-#if IS_ENABLED(CONFIG_IPV6)
        else
                err = IP6_ECN_decapsulate(oiph, skb);
-#endif
 
        if (unlikely(err)) {
                if (log_ecn_error) {
-                       if  (family == AF_INET)
+                       if  (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
                                net_info_ratelimited("non-ECT from %pI4 "
                                                     "with TOS=%#x\n",
                                                     &((struct iphdr *)oiph)->saddr,
                                                     ((struct iphdr *)oiph)->tos);
-#if IS_ENABLED(CONFIG_IPV6)
                        else
                                net_info_ratelimited("non-ECT from %pI6\n",
                                                     &((struct ipv6hdr *)oiph)->saddr);
-#endif
                }
                if (err > 1) {
                        ++bareudp->dev->stats.rx_frame_errors;
@@ -350,7 +346,6 @@ free_dst:
        return err;
 }
 
-#if IS_ENABLED(CONFIG_IPV6)
 static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                             struct bareudp_dev *bareudp,
                             const struct ip_tunnel_info *info)
@@ -411,7 +406,6 @@ free_dst:
        dst_release(dst);
        return err;
 }
-#endif
 
 static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
 {
@@ -435,11 +429,9 @@ static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        rcu_read_lock();
-#if IS_ENABLED(CONFIG_IPV6)
-       if (info->mode & IP_TUNNEL_INFO_IPV6)
+       if (IS_ENABLED(CONFIG_IPV6) && info->mode & IP_TUNNEL_INFO_IPV6)
                err = bareudp6_xmit_skb(skb, dev, bareudp, info);
        else
-#endif
                err = bareudp_xmit_skb(skb, dev, bareudp, info);
 
        rcu_read_unlock();
@@ -467,7 +459,7 @@ static int bareudp_fill_metadata_dst(struct net_device *dev,
 
        use_cache = ip_tunnel_dst_cache_usable(skb, info);
 
-       if (ip_tunnel_info_af(info) == AF_INET) {
+       if (!IS_ENABLED(CONFIG_IPV6) || ip_tunnel_info_af(info) == AF_INET) {
                struct rtable *rt;
                __be32 saddr;
 
@@ -478,7 +470,6 @@ static int bareudp_fill_metadata_dst(struct net_device *dev,
 
                ip_rt_put(rt);
                info->key.u.ipv4.src = saddr;
-#if IS_ENABLED(CONFIG_IPV6)
        } else if (ip_tunnel_info_af(info) == AF_INET6) {
                struct dst_entry *dst;
                struct in6_addr saddr;
@@ -492,7 +483,6 @@ static int bareudp_fill_metadata_dst(struct net_device *dev,
 
                dst_release(dst);
                info->key.u.ipv6.src = saddr;
-#endif
        } else {
                return -EINVAL;
        }
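
Note: the bareudp hunks trade #ifdef blocks for IS_ENABLED(CONFIG_IPV6) tests: both branches stay visible to the compiler (so they are always type-checked), and the disabled branch is discarded as dead code. A compilable stand-in; the real IS_ENABLED() from linux/kconfig.h also handles =m and undefined symbols, which this one-liner does not:

    #include <stdio.h>

    #define CONFIG_IPV6 0          /* flip to 1 to take the v6 branch */
    #define IS_ENABLED(option) (option)

    static int decap_v4(void) { return 4; }
    static int decap_v6(void) { return 6; }

    int main(void)
    {
        int family_is_v4 = 0;
        /* With CONFIG_IPV6=0 the condition is constant-true and the
         * decap_v6() call is eliminated, but it still had to parse
         * and type-check, unlike code hidden behind #ifdef. */
        int err = (!IS_ENABLED(CONFIG_IPV6) || family_is_v4)
                      ? decap_v4() : decap_v6();

        printf("took v%d path\n", err);
        return 0;
    }
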
index 007481557191fee55f1157b801bcba6b3a56d69d..9b8346638f697199a5bf953475d660bcf07e1fc8 100644 (file)
@@ -149,8 +149,10 @@ int bond_sysfs_slave_add(struct slave *slave)
 
        err = kobject_init_and_add(&slave->kobj, &slave_ktype,
                                   &(slave->dev->dev.kobj), "bonding_slave");
-       if (err)
+       if (err) {
+               kobject_put(&slave->kobj);
                return err;
+       }
 
        for (a = slave_attrs; *a; ++a) {
                err = sysfs_create_file(&slave->kobj, &((*a)->attr));
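
Note: the bonding fix reflects the kobject contract: kobject_init_and_add() takes a reference during init even when the add step fails, so the error path must call kobject_put() to run the ktype's release() and free the memory. A userspace model of that refcount rule:

    #include <stdio.h>
    #include <stdlib.h>

    struct kobj {
        int refcount;
        void (*release)(struct kobj *);
    };

    static void kobj_init(struct kobj *k, void (*rel)(struct kobj *))
    {
        k->refcount = 1;      /* init hands you a reference... */
        k->release = rel;
    }

    static int kobj_add(struct kobj *k, int fail)
    {
        (void)k;
        return fail ? -1 : 0; /* ...even when the add step fails */
    }

    static void kobj_put(struct kobj *k)
    {
        if (--k->refcount == 0)
            k->release(k);
    }

    static void demo_release(struct kobj *k)
    {
        printf("released\n");
        free(k);
    }

    int main(void)
    {
        struct kobj *k = malloc(sizeof(*k));

        kobj_init(k, demo_release);
        if (kobj_add(k, 1) < 0)
            kobj_put(k);      /* without this, the object leaks */
        return 0;
    }
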
index 04d59bede5ea2ce72390d994edf87472db5e4e4f..74503cacf59418797a2cdb3f642ded4b7e373bad 100644 (file)
@@ -947,8 +947,11 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
        u32 id, rev;
 
        addr = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(addr))
+               return PTR_ERR(addr);
+
        irq = platform_get_irq(pdev, 0);
-       if (IS_ERR(addr) || irq < 0)
+       if (irq < 0)
                return -EINVAL;
 
        id = readl(addr + IFI_CANFD_IP_ID);
index e3ba8ab0cbf44c977ec096deba9dcb9af18648fe..e2c6cf4b2228f0eeb0752a35d54275a7f2acefed 100644 (file)
@@ -792,7 +792,7 @@ static int sun4ican_probe(struct platform_device *pdev)
 
        addr = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(addr)) {
-               err = -EBUSY;
+               err = PTR_ERR(addr);
                goto exit;
        }
 
index 0a1be5259be0c911d6e9fc5f18c3447c2e58bc46..38cd8285ac6793c9bc27013e495941096e36ef21 100644 (file)
@@ -609,7 +609,7 @@ static int b53_srab_probe(struct platform_device *pdev)
 
        priv->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(priv->regs))
-               return -ENOMEM;
+               return PTR_ERR(priv->regs);
 
        dev = b53_switch_alloc(&pdev->dev, &b53_srab_ops, priv);
        if (!dev)
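
Note: the ifi_canfd, sun4i_can and b53_srab hunks (and the pxa168/macb ones further down) all converge on the same idiom: devm_platform_ioremap_resource() returns an errno encoded in the pointer, and the caller should propagate PTR_ERR() instead of substituting -EINVAL/-ENOMEM/-EBUSY. A userspace model of the encoding:

    #include <stdio.h>
    #include <errno.h>
    #include <stdint.h>

    /* The kernel reserves the top 4095 pointer values for -errno. */
    #define MAX_ERRNO 4095

    static void *ERR_PTR(long error) { return (void *)error; }
    static long PTR_ERR(const void *ptr) { return (long)ptr; }
    static int IS_ERR(const void *ptr)
    {
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
    }

    /* Stub standing in for devm_platform_ioremap_resource(). */
    static void *ioremap_stub(int fail)
    {
        return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000;
    }

    int main(void)
    {
        void *addr = ioremap_stub(1);

        if (IS_ERR(addr))   /* propagate the real cause, don't invent one */
            printf("err = %ld\n", PTR_ERR(addr));
        return 0;
    }
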
index fdcb70b9f0e4d908af502a14924ace21c26279b8..400207c5c7dedb5c9ea1da84e5657253aac4fc96 100644 (file)
@@ -360,6 +360,7 @@ static void __exit dsa_loop_exit(void)
 }
 module_exit(dsa_loop_exit);
 
+MODULE_SOFTDEP("pre: dsa_loop_bdinfo");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Florian Fainelli");
 MODULE_DESCRIPTION("DSA loopback driver");
index 5c444cd722bdcde79e2037ddea815f9ad2780e8f..34e4aadfa705bdaef1747f95061caf54adfa4b80 100644 (file)
@@ -628,11 +628,8 @@ mt7530_cpu_port_enable(struct mt7530_priv *priv,
        mt7530_write(priv, MT7530_PVC_P(port),
                     PORT_SPEC_TAG);
 
-       /* Disable auto learning on the cpu port */
-       mt7530_set(priv, MT7530_PSC_P(port), SA_DIS);
-
-       /* Unknown unicast frame fordwarding to the cpu port */
-       mt7530_set(priv, MT7530_MFC, UNU_FFP(BIT(port)));
+       /* Unknown multicast frame forwarding to the cpu port */
+       mt7530_rmw(priv, MT7530_MFC, UNM_FFP_MASK, UNM_FFP(BIT(port)));
 
        /* Set CPU port number */
        if (priv->id == ID_MT7621)
@@ -1294,8 +1291,6 @@ mt7530_setup(struct dsa_switch *ds)
        /* Enable and reset MIB counters */
        mt7530_mib_reset(ds);
 
-       mt7530_clear(priv, MT7530_MFC, UNU_FFP_MASK);
-
        for (i = 0; i < MT7530_NUM_PORTS; i++) {
                /* Disable forwarding by default on all ports */
                mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
index 979bb6374678fda639e2da2b732d707145e6ee6b..82af4d2d406e3fa1f9d65d5d58f5a2b8401efc68 100644 (file)
@@ -31,6 +31,7 @@ enum {
 #define MT7530_MFC                     0x10
 #define  BC_FFP(x)                     (((x) & 0xff) << 24)
 #define  UNM_FFP(x)                    (((x) & 0xff) << 16)
+#define  UNM_FFP_MASK                  UNM_FFP(~0)
 #define  UNU_FFP(x)                    (((x) & 0xff) << 8)
 #define  UNU_FFP_MASK                  UNU_FFP(~0)
 #define  CPU_EN                                BIT(7)
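
Note: UNM_FFP_MASK is defined as UNM_FFP(~0), the standard trick for deriving a field's full mask from its shift macro; mt7530_rmw() then clears the whole field before setting the new port bits, instead of OR-ing on top of stale ones. A compilable sketch with register access reduced to plain integers:

    #include <stdio.h>
    #include <stdint.h>

    #define UNM_FFP(x)   (((uint32_t)(x) & 0xff) << 16)
    #define UNM_FFP_MASK UNM_FFP(~0)     /* covers all 8 field bits */

    /* Read-modify-write on a value standing in for the MFC register. */
    static uint32_t rmw(uint32_t reg, uint32_t mask, uint32_t set)
    {
        return (reg & ~mask) | set;
    }

    int main(void)
    {
        uint32_t mfc = UNM_FFP(0xaa);                   /* stale port bits */

        mfc = rmw(mfc, UNM_FFP_MASK, UNM_FFP(1u << 6)); /* CPU port 6 */
        printf("MFC = %#010x\n", (unsigned)mfc);        /* 0x00400000 */
        return 0;
    }
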
index 6435020d690dd5ae35c7c0fe0b8d0a9fc6c12d16..51185e4d7d15ed548d82ef870cb0f392b6e66d10 100644 (file)
@@ -24,8 +24,8 @@ config NET_DSA_MV88E6XXX_PTP
        bool "PTP support for Marvell 88E6xxx"
        default n
        depends on NET_DSA_MV88E6XXX_GLOBAL2
+       depends on PTP_1588_CLOCK
        imply NETWORK_PHY_TIMESTAMPING
-       imply PTP_1588_CLOCK
        help
          Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch
          chips that support it.
index dd8a5666a584f54141e4cd745c421139521d76bd..2b4a723c830656f38e5e02b8491b9aae3a8ec91b 100644 (file)
@@ -3962,7 +3962,6 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
        .serdes_get_stats = mv88e6390_serdes_get_stats,
        .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
        .serdes_get_regs = mv88e6390_serdes_get_regs,
-       .phylink_validate = mv88e6390_phylink_validate,
        .gpio_ops = &mv88e6352_gpio_ops,
        .phylink_validate = mv88e6390_phylink_validate,
 };
@@ -4021,7 +4020,6 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
        .serdes_get_stats = mv88e6390_serdes_get_stats,
        .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
        .serdes_get_regs = mv88e6390_serdes_get_regs,
-       .phylink_validate = mv88e6390_phylink_validate,
        .gpio_ops = &mv88e6352_gpio_ops,
        .phylink_validate = mv88e6390x_phylink_validate,
 };
@@ -4079,7 +4077,6 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
        .serdes_get_stats = mv88e6390_serdes_get_stats,
        .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
        .serdes_get_regs = mv88e6390_serdes_get_regs,
-       .phylink_validate = mv88e6390_phylink_validate,
        .avb_ops = &mv88e6390_avb_ops,
        .ptp_ops = &mv88e6352_ptp_ops,
        .phylink_validate = mv88e6390_phylink_validate,
@@ -4235,7 +4232,6 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
        .serdes_get_stats = mv88e6390_serdes_get_stats,
        .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
        .serdes_get_regs = mv88e6390_serdes_get_regs,
-       .phylink_validate = mv88e6390_phylink_validate,
        .gpio_ops = &mv88e6352_gpio_ops,
        .avb_ops = &mv88e6390_avb_ops,
        .ptp_ops = &mv88e6352_ptp_ops,
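
Note: these four hunks delete duplicated .phylink_validate entries. In C, when a designated initializer appears twice the last one silently wins (GCC diagnoses this only with -Woverride-init), which is why the mv88e6190x table already used the right callback and why the duplicates are worth removing anyway. A minimal demonstration:

    #include <stdio.h>

    struct ops { int (*validate)(void); };

    static int validate_6390(void)  { return 6390; }
    static int validate_6390x(void) { return 63900; }

    static const struct ops demo_ops = {
        .validate = validate_6390,
        .validate = validate_6390x,  /* last one wins, silently */
    };

    int main(void)
    {
        printf("%d\n", demo_ops.validate());  /* prints 63900 */
        return 0;
    }
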
index d0a3764ff0cf83249f60c0b68dc6cde48f17295d..e113269c220a070fd38f8dd75f9101f8f51bd00e 100644 (file)
@@ -102,13 +102,17 @@ static void felix_vlan_add(struct dsa_switch *ds, int port,
                           const struct switchdev_obj_port_vlan *vlan)
 {
        struct ocelot *ocelot = ds->priv;
+       u16 flags = vlan->flags;
        u16 vid;
        int err;
 
+       if (dsa_is_cpu_port(ds, port))
+               flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
+
        for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
                err = ocelot_vlan_add(ocelot, port, vid,
-                                     vlan->flags & BRIDGE_VLAN_INFO_PVID,
-                                     vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+                                     flags & BRIDGE_VLAN_INFO_PVID,
+                                     flags & BRIDGE_VLAN_INFO_UNTAGGED);
                if (err) {
                        dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n",
                                vid, port, err);
@@ -388,6 +392,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
        struct ocelot *ocelot = &felix->ocelot;
        phy_interface_t *port_phy_modes;
        resource_size_t switch_base;
+       struct resource res;
        int port, i, err;
 
        ocelot->num_phys_ports = num_phys_ports;
@@ -400,6 +405,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
        ocelot->stats_layout    = felix->info->stats_layout;
        ocelot->num_stats       = felix->info->num_stats;
        ocelot->shared_queue_sz = felix->info->shared_queue_sz;
+       ocelot->num_mact_rows   = felix->info->num_mact_rows;
        ocelot->vcap_is2_keys   = felix->info->vcap_is2_keys;
        ocelot->vcap_is2_actions= felix->info->vcap_is2_actions;
        ocelot->vcap            = felix->info->vcap;
@@ -421,17 +427,16 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
 
        for (i = 0; i < TARGET_MAX; i++) {
                struct regmap *target;
-               struct resource *res;
 
                if (!felix->info->target_io_res[i].name)
                        continue;
 
-               res = &felix->info->target_io_res[i];
-               res->flags = IORESOURCE_MEM;
-               res->start += switch_base;
-               res->end += switch_base;
+               memcpy(&res, &felix->info->target_io_res[i], sizeof(res));
+               res.flags = IORESOURCE_MEM;
+               res.start += switch_base;
+               res.end += switch_base;
 
-               target = ocelot_regmap_init(ocelot, res);
+               target = ocelot_regmap_init(ocelot, &res);
                if (IS_ERR(target)) {
                        dev_err(ocelot->dev,
                                "Failed to map device memory space\n");
@@ -452,7 +457,6 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
        for (port = 0; port < num_phys_ports; port++) {
                struct ocelot_port *ocelot_port;
                void __iomem *port_regs;
-               struct resource *res;
 
                ocelot_port = devm_kzalloc(ocelot->dev,
                                           sizeof(struct ocelot_port),
@@ -464,12 +468,12 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
                        return -ENOMEM;
                }
 
-               res = &felix->info->port_io_res[port];
-               res->flags = IORESOURCE_MEM;
-               res->start += switch_base;
-               res->end += switch_base;
+               memcpy(&res, &felix->info->port_io_res[port], sizeof(res));
+               res.flags = IORESOURCE_MEM;
+               res.start += switch_base;
+               res.end += switch_base;
 
-               port_regs = devm_ioremap_resource(ocelot->dev, res);
+               port_regs = devm_ioremap_resource(ocelot->dev, &res);
                if (IS_ERR(port_regs)) {
                        dev_err(ocelot->dev,
                                "failed to map registers for port %d\n", port);
index 82d46f2600418882df9140fad975a66b54dabff0..730a8a90e1f79a20ce4d97fc71b5776c6135d577 100644 (file)
@@ -8,13 +8,14 @@
 
 /* Platform-specific information */
 struct felix_info {
-       struct resource                 *target_io_res;
-       struct resource                 *port_io_res;
-       struct resource                 *imdio_res;
+       const struct resource           *target_io_res;
+       const struct resource           *port_io_res;
+       const struct resource           *imdio_res;
        const struct reg_field          *regfields;
        const u32 *const                *map;
        const struct ocelot_ops         *ops;
        int                             shared_queue_sz;
+       int                             num_mact_rows;
        const struct ocelot_stat_layout *stats_layout;
        unsigned int                    num_stats;
        int                             num_ports;
index b4078f3c5c383d1b3f9a5bbb31d6992ff1657a08..5211f05ef2fbf48668f7931d5dfa6ad62700f73e 100644 (file)
@@ -333,10 +333,8 @@ static const u32 *vsc9959_regmap[] = {
        [GCB]   = vsc9959_gcb_regmap,
 };
 
-/* Addresses are relative to the PCI device's base address and
- * will be fixed up at ioremap time.
- */
-static struct resource vsc9959_target_io_res[] = {
+/* Addresses are relative to the PCI device's base address */
+static const struct resource vsc9959_target_io_res[] = {
        [ANA] = {
                .start  = 0x0280000,
                .end    = 0x028ffff,
@@ -379,7 +377,7 @@ static struct resource vsc9959_target_io_res[] = {
        },
 };
 
-static struct resource vsc9959_port_io_res[] = {
+static const struct resource vsc9959_port_io_res[] = {
        {
                .start  = 0x0100000,
                .end    = 0x010ffff,
@@ -415,7 +413,7 @@ static struct resource vsc9959_port_io_res[] = {
 /* Port MAC 0 Internal MDIO bus through which the SerDes acting as an
  * SGMII/QSGMII MAC PCS can be found.
  */
-static struct resource vsc9959_imdio_res = {
+static const struct resource vsc9959_imdio_res = {
        .start          = 0x8030,
        .end            = 0x8040,
        .name           = "imdio",
@@ -1111,7 +1109,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
        struct device *dev = ocelot->dev;
        resource_size_t imdio_base;
        void __iomem *imdio_regs;
-       struct resource *res;
+       struct resource res;
        struct enetc_hw *hw;
        struct mii_bus *bus;
        int port;
@@ -1128,12 +1126,12 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
        imdio_base = pci_resource_start(felix->pdev,
                                        felix->info->imdio_pci_bar);
 
-       res = felix->info->imdio_res;
-       res->flags = IORESOURCE_MEM;
-       res->start += imdio_base;
-       res->end += imdio_base;
+       memcpy(&res, felix->info->imdio_res, sizeof(res));
+       res.flags = IORESOURCE_MEM;
+       res.start += imdio_base;
+       res.end += imdio_base;
 
-       imdio_regs = devm_ioremap_resource(dev, res);
+       imdio_regs = devm_ioremap_resource(dev, &res);
        if (IS_ERR(imdio_regs)) {
                dev_err(dev, "failed to map internal MDIO registers\n");
                return PTR_ERR(imdio_regs);
@@ -1220,6 +1218,7 @@ struct felix_info felix_info_vsc9959 = {
        .vcap_is2_actions       = vsc9959_vcap_is2_actions,
        .vcap                   = vsc9959_vcap_props,
        .shared_queue_sz        = 128 * 1024,
+       .num_mact_rows          = 2048,
        .num_ports              = 6,
        .switch_pci_bar         = 4,
        .imdio_pci_bar          = 0,
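
Note: making the resource templates const forces the fix-up of PCI-relative addresses onto a stack copy; previously each probe mutated the shared static tables, so a reprobe would offset start/end a second time. A userspace sketch of the copy-then-offset pattern:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    struct resource { uint64_t start, end; const char *name; };

    /* Shared, immutable template with BAR-relative addresses. */
    static const struct resource imdio_res = { 0x8030, 0x8040, "imdio" };

    int main(void)
    {
        uint64_t bar_base = 0xe8000000;         /* hypothetical BAR start */
        struct resource res;

        memcpy(&res, &imdio_res, sizeof(res));  /* per-probe copy */
        res.start += bar_base;
        res.end   += bar_base;

        printf("%s: %#llx-%#llx\n", res.name,
               (unsigned long long)res.start,
               (unsigned long long)res.end);
        return 0;
    }
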
index 0fe1ae173aa1a4b7c583fc087749e2daa3491d78..68c3086af9af810bf4c268c078482c5b20b74a2b 100644 (file)
@@ -20,6 +20,7 @@ tristate "NXP SJA1105 Ethernet switch family support"
 config NET_DSA_SJA1105_PTP
        bool "Support for the PTP clock on the NXP SJA1105 Ethernet switch"
        depends on NET_DSA_SJA1105
+       depends on PTP_1588_CLOCK
        help
          This enables support for timestamping and PTP clock manipulations in
          the SJA1105 DSA driver.
index a22f8e3fc06baefd77202ed15a47f0c072ed3561..bc0e47c1dbb9b6c6460b54fc2fcca924afc6f88f 100644 (file)
 
 /* PTPSYNCTS has no interrupt or update mechanism, because the intended
  * hardware use case is for the timestamp to be collected synchronously,
- * immediately after the CAS_MASTER SJA1105 switch has triggered a CASSYNC
- * pulse on the PTP_CLK pin. When used as a generic extts source, it needs
- * polling and a comparison with the old value. The polling interval is just
- * the Nyquist rate of a canonical PPS input (e.g. from a GPS module).
- * Anything of higher frequency than 1 Hz will be lost, since there is no
- * timestamp FIFO.
+ * immediately after the CAS_MASTER SJA1105 switch has performed a CASSYNC
+ * one-shot toggle (no return to level) on the PTP_CLK pin. When used as a
+ * generic extts source, the PTPSYNCTS register needs polling and a comparison
+ * with the old value. The polling interval is configured as the Nyquist rate
+ * of a signal with 50% duty cycle and 1Hz frequency, which is sadly all that
+ * this hardware can do (but may be enough for some setups). Anything of higher
+ * frequency than 1 Hz will be lost, since there is no timestamp FIFO.
  */
-#define SJA1105_EXTTS_INTERVAL         (HZ / 2)
+#define SJA1105_EXTTS_INTERVAL         (HZ / 4)
 
 /*            This range is actually +/- SJA1105_MAX_ADJ_PPB
  *            divided by 1000 (ppb -> ppm) and with a 16-bit
@@ -754,7 +755,16 @@ static int sja1105_extts_enable(struct sja1105_private *priv,
                return -EOPNOTSUPP;
 
        /* Reject requests with unsupported flags */
-       if (extts->flags)
+       if (extts->flags & ~(PTP_ENABLE_FEATURE |
+                            PTP_RISING_EDGE |
+                            PTP_FALLING_EDGE |
+                            PTP_STRICT_FLAGS))
+               return -EOPNOTSUPP;
+
+       /* We can only enable time stamping on both edges, sadly. */
+       if ((extts->flags & PTP_STRICT_FLAGS) &&
+           (extts->flags & PTP_ENABLE_FEATURE) &&
+           (extts->flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
                return -EOPNOTSUPP;
 
        rc = sja1105_change_ptp_clk_pin_func(priv, PTP_PF_EXTTS);
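
Note: the flag handling above follows the PTP core's convention: reject any bit outside the supported set, then honor PTP_STRICT_FLAGS by refusing requests the hardware cannot satisfy exactly (here, single-edge timestamping). A compilable sketch using the PTP UAPI bit positions:

    #include <stdio.h>

    #define PTP_ENABLE_FEATURE (1u << 0)
    #define PTP_RISING_EDGE    (1u << 1)
    #define PTP_FALLING_EDGE   (1u << 2)
    #define PTP_STRICT_FLAGS   (1u << 3)
    #define PTP_EXTTS_EDGES    (PTP_RISING_EDGE | PTP_FALLING_EDGE)

    static int extts_enable(unsigned int flags)
    {
        if (flags & ~(PTP_ENABLE_FEATURE | PTP_RISING_EDGE |
                      PTP_FALLING_EDGE | PTP_STRICT_FLAGS))
            return -95; /* -EOPNOTSUPP: unknown flag */

        /* Timestamping happens on both edges or not at all. */
        if ((flags & PTP_STRICT_FLAGS) &&
            (flags & PTP_ENABLE_FEATURE) &&
            (flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
            return -95;

        return 0;
    }

    int main(void)
    {
        printf("%d\n", extts_enable(PTP_ENABLE_FEATURE | PTP_EXTTS_EDGES)); /* 0 */
        printf("%d\n", extts_enable(PTP_ENABLE_FEATURE | PTP_RISING_EDGE |
                                    PTP_STRICT_FLAGS));                     /* -95 */
        return 0;
    }
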
index 97dfd0c67e8475d0f9ff5cfa937ddd6043b9b10d..9e1860d81908db258d76f3db9f0c60d7e6404307 100644 (file)
@@ -69,7 +69,7 @@
  * 16kB.
  */
 #if PAGE_SIZE > SZ_16K
-#define ENA_PAGE_SIZE SZ_16K
+#define ENA_PAGE_SIZE (_AC(SZ_16K, UL))
 #else
 #define ENA_PAGE_SIZE PAGE_SIZE
 #endif
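
Note: the ENA change wraps SZ_16K in _AC(..., UL) so the constant is an unsigned long rather than a plain int, which matters once it participates in 64-bit size arithmetic. For reference, this is how linux/const.h builds the macro: the suffix is pasted in C and dropped for the assembler:

    #include <stdio.h>

    #ifdef __ASSEMBLY__
    #define _AC(X, Y)  X
    #else
    #define __AC(X, Y) (X##Y)
    #define _AC(X, Y)  __AC(X, Y)
    #endif

    #define SZ_16K        0x4000
    #define ENA_PAGE_SIZE (_AC(SZ_16K, UL))   /* expands to (0x4000UL) */

    int main(void)
    {
        /* sizeof(unsigned long): 8 on LP64, vs 4 for a bare 0x4000 */
        printf("%zu vs %zu\n", sizeof(ENA_PAGE_SIZE), sizeof(SZ_16K));
        return 0;
    }
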
index a58185b1d8bf762cc37261f777d1f968044c62ac..3e3711b60d01bcae24bfec9e9f0f45d0b1d2cfdb 100644 (file)
@@ -1182,7 +1182,7 @@ bmac_get_station_address(struct net_device *dev, unsigned char *ea)
        int i;
        unsigned short data;
 
-       for (i = 0; i < 6; i++)
+       for (i = 0; i < 3; i++)
                {
                        reset_and_select_srom(dev);
                        data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
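
Note: the loop bound fix reflects the SROM geometry: read_srom() returns 16-bit words, so a 6-byte station address takes 3 reads; the old bound of 6 read six words and overran the 6-byte buffer. A sketch of the word-to-byte unpacking (the real driver also bit-reverses each byte, which is glossed over here):

    #include <stdio.h>
    #include <stdint.h>

    /* Stub returning three hypothetical 16-bit SROM words. */
    static uint16_t read_srom16(int idx)
    {
        static const uint16_t words[3] = { 0x0040, 0x9642, 0xaabb };
        return words[idx];
    }

    int main(void)
    {
        uint8_t ea[6];

        for (int i = 0; i < 3; i++) {        /* 3 words == 6 bytes */
            uint16_t data = read_srom16(i);
            ea[2 * i]     = data & 0xff;
            ea[2 * i + 1] = data >> 8;
        }
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               ea[0], ea[1], ea[2], ea[3], ea[4], ea[5]);
        return 0;
    }
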
index 2edf137a70304e6fa8eb9235d1d8a4d435de6e13..8a70ffe1d32668df7d1935c921fc2461e829d506 100644 (file)
@@ -57,7 +57,7 @@ static const struct aq_board_revision_s hw_atl_boards[] = {
        { AQ_DEVICE_ID_D108,    AQ_HWREV_2,     &hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, },
        { AQ_DEVICE_ID_D109,    AQ_HWREV_2,     &hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, },
 
-       { AQ_DEVICE_ID_AQC100,  AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
+       { AQ_DEVICE_ID_AQC100,  AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc100, },
        { AQ_DEVICE_ID_AQC107,  AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
        { AQ_DEVICE_ID_AQC108,  AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, },
        { AQ_DEVICE_ID_AQC109,  AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, },
index 53055ce5dfd61853a2be432a150f9dfd9b3af84a..2a69c0d06f3c083cf921cf19509c5a0a5bbed7b0 100644 (file)
@@ -69,6 +69,7 @@ config BCMGENET
        select BCM7XXX_PHY
        select MDIO_BCM_UNIMAC
        select DIMLIB
+       select BROADCOM_PHY if ARCH_BCM2835
        help
          This driver supports the built-in Ethernet MACs found in the
          Broadcom BCM7xxx Set Top Box family chipset.
index a5d1a6cb9ce3800f3c526d63f628e39625afd18e..6795b6d95f54d2c1854a396a38376bfbe10a3c22 100644 (file)
@@ -172,6 +172,7 @@ static int bgmac_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
        struct bgmac *bgmac;
+       struct resource *regs;
        const u8 *mac_addr;
 
        bgmac = bgmac_alloc(&pdev->dev);
@@ -206,16 +207,21 @@ static int bgmac_probe(struct platform_device *pdev)
        if (IS_ERR(bgmac->plat.base))
                return PTR_ERR(bgmac->plat.base);
 
-       bgmac->plat.idm_base =
-               devm_platform_ioremap_resource_byname(pdev, "idm_base");
-       if (IS_ERR(bgmac->plat.idm_base))
-               return PTR_ERR(bgmac->plat.idm_base);
-       bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
+       regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base");
+       if (regs) {
+               bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
+               if (IS_ERR(bgmac->plat.idm_base))
+                       return PTR_ERR(bgmac->plat.idm_base);
+               bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
+       }
 
-       bgmac->plat.nicpm_base =
-               devm_platform_ioremap_resource_byname(pdev, "nicpm_base");
-       if (IS_ERR(bgmac->plat.nicpm_base))
-               return PTR_ERR(bgmac->plat.nicpm_base);
+       regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base");
+       if (regs) {
+               bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev,
+                                                              regs);
+               if (IS_ERR(bgmac->plat.nicpm_base))
+                       return PTR_ERR(bgmac->plat.nicpm_base);
+       }
 
        bgmac->read = platform_bgmac_read;
        bgmac->write = platform_bgmac_write;
index fead64f1ad90e3d2688775e86d90a51805ffa254..58e0d9a781e9a70240603e4d57af63bf928d34c9 100644 (file)
@@ -4176,14 +4176,12 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
        int i, intr_process, rc, tmo_count;
        struct input *req = msg;
        u32 *data = msg;
-       __le32 *resp_len;
        u8 *valid;
        u16 cp_ring_id, len = 0;
        struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
        u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
        struct hwrm_short_input short_input = {0};
        u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
-       u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr;
        u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
        u16 dst = BNXT_HWRM_CHNL_CHIMP;
 
@@ -4201,7 +4199,6 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
                bar_offset = BNXT_GRCPF_REG_KONG_COMM;
                doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
                resp = bp->hwrm_cmd_kong_resp_addr;
-               resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr;
        }
 
        memset(resp, 0, PAGE_SIZE);
@@ -4270,7 +4267,6 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
        tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
        timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
        tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
-       resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET);
 
        if (intr_process) {
                u16 seq_id = bp->hwrm_intr_seq_id;
@@ -4298,9 +4294,8 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
                                           le16_to_cpu(req->req_type));
                        return -EBUSY;
                }
-               len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
-                     HWRM_RESP_LEN_SFT;
-               valid = resp_addr + len - 1;
+               len = le16_to_cpu(resp->resp_len);
+               valid = ((u8 *)resp) + len - 1;
        } else {
                int j;
 
@@ -4311,8 +4306,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
                         */
                        if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
                                return -EBUSY;
-                       len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
-                             HWRM_RESP_LEN_SFT;
+                       len = le16_to_cpu(resp->resp_len);
                        if (len)
                                break;
                        /* on first few passes, just barely sleep */
@@ -4334,7 +4328,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
                }
 
                /* Last byte of resp contains valid bit */
-               valid = resp_addr + len - 1;
+               valid = ((u8 *)resp) + len - 1;
                for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
                        /* make sure we read from updated DMA memory */
                        dma_rmb();
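
Note: the hunks above simplify response parsing: firmware replies carry resp_len in a 16-bit little-endian header field, so reading it via le16_to_cpu(resp->resp_len) replaces the old mask-and-shift of a separately computed __le32 pointer, and the valid byte still sits at the end of the reply. A byte-level sketch (offsets and layout illustrative, not the real hwrm_err_output):

    #include <stdio.h>
    #include <stdint.h>

    static uint16_t get_le16(const uint8_t *p)
    {
        return (uint16_t)(p[0] | (p[1] << 8));
    }

    int main(void)
    {
        uint8_t resp[16] = { 0 };

        resp[4]  = 0x10;           /* resp_len = 16, little-endian */
        resp[15] = 0x80;           /* last byte carries the valid bit */

        uint16_t len = get_le16(&resp[4]);
        const uint8_t *valid = resp + len - 1;

        printf("len=%u valid=%#x\n", (unsigned)len, *valid);
        return 0;
    }
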
@@ -6642,7 +6636,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
        int rc;
 
        if (!mem_size)
-               return 0;
+               return -EINVAL;
 
        ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
        if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
@@ -9310,7 +9304,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
        bnxt_free_skbs(bp);
 
        /* Save ring stats before shutdown */
-       if (bp->bnapi)
+       if (bp->bnapi && irq_re_init)
                bnxt_get_ring_stats(bp, &bp->net_stats_prev);
        if (irq_re_init) {
                bnxt_free_irq(bp);
@@ -9780,6 +9774,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
                                           netdev_features_t features)
 {
        struct bnxt *bp = netdev_priv(dev);
+       netdev_features_t vlan_features;
 
        if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
                features &= ~NETIF_F_NTUPLE;
@@ -9796,12 +9791,14 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
        /* Both CTAG and STAG VLAN accelaration on the RX side have to be
         * turned on or off together.
         */
-       if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
-           (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
+       vlan_features = features & (NETIF_F_HW_VLAN_CTAG_RX |
+                                   NETIF_F_HW_VLAN_STAG_RX);
+       if (vlan_features != (NETIF_F_HW_VLAN_CTAG_RX |
+                             NETIF_F_HW_VLAN_STAG_RX)) {
                if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
                        features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
                                      NETIF_F_HW_VLAN_STAG_RX);
-               else
+               else if (vlan_features)
                        features |= NETIF_F_HW_VLAN_CTAG_RX |
                                    NETIF_F_HW_VLAN_STAG_RX;
        }
@@ -12212,12 +12209,15 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
                bnxt_ulp_start(bp, err);
        }
 
-       if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
-               dev_close(netdev);
+       if (result != PCI_ERS_RESULT_RECOVERED) {
+               if (netif_running(netdev))
+                       dev_close(netdev);
+               pci_disable_device(pdev);
+       }
 
        rtnl_unlock();
 
-       return PCI_ERS_RESULT_RECOVERED;
+       return result;
 }
 
 /**
index f2caa2756f5b26014e5501eebbad387ff2612ed4..3d39638521d6c47137927f0d8a1b619cb5e1a28a 100644 (file)
@@ -656,11 +656,6 @@ struct nqe_cn {
 #define HWRM_CMD_TIMEOUT               (bp->hwrm_cmd_timeout)
 #define HWRM_RESET_TIMEOUT             ((HWRM_CMD_TIMEOUT) * 4)
 #define HWRM_COREDUMP_TIMEOUT          ((HWRM_CMD_TIMEOUT) * 12)
-#define HWRM_RESP_ERR_CODE_MASK                0xffff
-#define HWRM_RESP_LEN_OFFSET           4
-#define HWRM_RESP_LEN_MASK             0xffff0000
-#define HWRM_RESP_LEN_SFT              16
-#define HWRM_RESP_VALID_MASK           0xff000000
 #define BNXT_HWRM_REQ_MAX_SIZE         128
 #define BNXT_HWRM_REQS_PER_PAGE                (BNXT_PAGE_SIZE /       \
                                         BNXT_HWRM_REQ_MAX_SIZE)
@@ -1066,7 +1061,6 @@ struct bnxt_vf_info {
 #define BNXT_VF_LINK_FORCED    0x4
 #define BNXT_VF_LINK_UP                0x8
 #define BNXT_VF_TRUST          0x10
-       u32     func_flags; /* func cfg flags */
        u32     min_tx_rate;
        u32     max_tx_rate;
        void    *hwrm_cmd_req_addr;
index 95f893f2a74dc8c2fe2e7c8178f1bb55cb1e2b28..d5c8bd49383acc376cd2cc7d58472cad3c026739 100644 (file)
@@ -43,7 +43,7 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
 #define BNXT_NVM_CFG_VER_BITS          24
 #define BNXT_NVM_CFG_VER_BYTES         4
 
-#define BNXT_MSIX_VEC_MAX      1280
+#define BNXT_MSIX_VEC_MAX      512
 #define BNXT_MSIX_VEC_MIN_MAX  128
 
 enum bnxt_nvm_dir_type {
index 34046a6286e8d0985bd0e195cd1bcd33c9c42dc8..360f9a95c1d50a44bad7862de594dec108af9443 100644 (file)
@@ -2012,11 +2012,12 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
 
        bnxt_hwrm_fw_set_time(bp);
 
-       if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
-                                BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
-                                &index, &item_len, NULL) != 0) {
+       rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
+                                 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
+                                 &index, &item_len, NULL);
+       if (rc) {
                netdev_err(dev, "PKG update area not created in nvram\n");
-               return -ENOBUFS;
+               return rc;
        }
 
        rc = request_firmware(&fw, filename, &dev->dev);
index 6ea3df6da18cd7ca006b79b9ac95c50f99a6b7bf..cea2f9958a1df50766582ffbc4c876a9edd0c87b 100644 (file)
@@ -85,11 +85,10 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
        if (old_setting == setting)
                return 0;
 
-       func_flags = vf->func_flags;
        if (setting)
-               func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
+               func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
        else
-               func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
+               func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
        /*TODO: if the driver supports VLAN filter on guest VLAN,
         * the spoof check should also include vlan anti-spoofing
         */
@@ -98,7 +97,6 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
        req.flags = cpu_to_le32(func_flags);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
-               vf->func_flags = func_flags;
                if (setting)
                        vf->flags |= BNXT_VF_SPOOFCHK;
                else
@@ -228,7 +226,6 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
        memcpy(vf->mac_addr, mac, ETH_ALEN);
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
-       req.flags = cpu_to_le32(vf->func_flags);
        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
        memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -266,7 +263,6 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
 
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
-       req.flags = cpu_to_le32(vf->func_flags);
        req.dflt_vlan = cpu_to_le16(vlan_tag);
        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -305,7 +301,6 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
                return 0;
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
-       req.flags = cpu_to_le32(vf->func_flags);
        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
        req.max_bw = cpu_to_le32(max_tx_rate);
        req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
@@ -477,7 +472,6 @@ static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
        vf = &bp->pf.vf[vf_id];
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
-       req.flags = cpu_to_le32(vf->func_flags);
 
        if (is_valid_ether_addr(vf->mac_addr)) {
                req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
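
Note: the common theme of the bnxt_sriov hunks is dropping the cached vf->func_flags. The spoofchk hunk shows why: OR-ing the new request into stale flags could leave the firmware seeing both the ENABLE and DISABLE variants at once, whereas plain assignment expresses exactly one intent. Reduced to its essence (bit values invented):

    #include <stdio.h>

    #define SRC_MAC_CHECK_ENABLE  (1u << 0)   /* invented values */
    #define SRC_MAC_CHECK_DISABLE (1u << 1)

    int main(void)
    {
        unsigned int cached = SRC_MAC_CHECK_ENABLE;  /* stale state */

        unsigned int buggy = cached;
        buggy |= SRC_MAC_CHECK_DISABLE;   /* both bits set: ambiguous */

        unsigned int fixed = SRC_MAC_CHECK_DISABLE;  /* one clear intent */

        printf("buggy=%#x fixed=%#x\n", buggy, fixed);
        return 0;
    }
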
index 53b50c24d9c9535dad9a5c5b9e5b983822183fe3..2c4c12b03502dc7cf80dbedbda7b8e592b52197e 100644 (file)
@@ -35,8 +35,8 @@ config MACB
 config MACB_USE_HWSTAMP
        bool "Use IEEE 1588 hwstamp"
        depends on MACB
+       depends on PTP_1588_CLOCK
        default y
-       imply PTP_1588_CLOCK
        ---help---
          Enable IEEE 1588 Precision Time Protocol (PTP) support for MACB.
 
index a0e8c5bbabc018cf0243f88e888a03a75da133a1..36290a8e2a84684886ef719aa2c4a558c1821f01 100644 (file)
@@ -334,8 +334,10 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
        int status;
 
        status = pm_runtime_get_sync(&bp->pdev->dev);
-       if (status < 0)
+       if (status < 0) {
+               pm_runtime_put_noidle(&bp->pdev->dev);
                goto mdio_pm_exit;
+       }
 
        status = macb_mdio_wait_for_idle(bp);
        if (status < 0)
@@ -386,8 +388,10 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
        int status;
 
        status = pm_runtime_get_sync(&bp->pdev->dev);
-       if (status < 0)
+       if (status < 0) {
+               pm_runtime_put_noidle(&bp->pdev->dev);
                goto mdio_pm_exit;
+       }
 
        status = macb_mdio_wait_for_idle(bp);
        if (status < 0)
@@ -3816,8 +3820,10 @@ static int at91ether_open(struct net_device *dev)
        int ret;
 
        ret = pm_runtime_get_sync(&lp->pdev->dev);
-       if (ret < 0)
+       if (ret < 0) {
+               pm_runtime_put_noidle(&lp->pdev->dev);
                return ret;
+       }
 
        /* Clear internal statistics */
        ctl = macb_readl(lp, NCR);
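
Note: the three pm_runtime hunks above fix the same leak: pm_runtime_get_sync() raises the device's usage count even when the resume step fails, so the failure path must drop it with pm_runtime_put_noidle(). A userspace model of that asymmetric contract:

    #include <stdio.h>

    /* Usage count bumped even on failed acquire, as in pm_runtime. */
    static int usage_count;

    static int get_sync(int resume_ok)
    {
        usage_count++;
        return resume_ok ? 0 : -5;  /* -EIO */
    }

    static void put_noidle(void)
    {
        usage_count--;
    }

    int main(void)
    {
        if (get_sync(0) < 0)
            put_noidle();           /* without this, the count leaks */
        printf("usage_count=%d\n", usage_count);
        return 0;
    }
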
@@ -4172,15 +4178,9 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
 
 static int fu540_c000_init(struct platform_device *pdev)
 {
-       struct resource *res;
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       if (!res)
-               return -ENODEV;
-
-       mgmt->reg = ioremap(res->start, resource_size(res));
-       if (!mgmt->reg)
-               return -ENOMEM;
+       mgmt->reg = devm_platform_ioremap_resource(pdev, 1);
+       if (IS_ERR(mgmt->reg))
+               return PTR_ERR(mgmt->reg);
 
        return macb_init(pdev);
 }
index 6a700d34019e303e9d8a301a9c6876e320d99578..4520e7ee00fe1a24aff0e7674e0536879b2f1236 100644 (file)
@@ -54,7 +54,7 @@ config        THUNDER_NIC_RGX
 config CAVIUM_PTP
        tristate "Cavium PTP coprocessor as PTP clock"
        depends on 64BIT && PCI
-       imply PTP_1588_CLOCK
+       depends on PTP_1588_CLOCK
        ---help---
          This driver adds support for the Precision Time Protocol Clocks and
          Timestamping coprocessor (PTP) found on Cavium processors.
index f5dd34db4b54527cd7c1415c0ec89529683a925a..6516c45864b359b53e37a8a44e475b6b9b1f7c17 100644 (file)
@@ -2207,6 +2207,9 @@ static void ethofld_hard_xmit(struct net_device *dev,
        if (unlikely(skip_eotx_wr)) {
                start = (u64 *)wr;
                eosw_txq->state = next_state;
+               eosw_txq->cred -= wrlen16;
+               eosw_txq->ncompl++;
+               eosw_txq->last_compl = 0;
                goto write_wr_headers;
        }
 
@@ -2365,6 +2368,34 @@ netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
        return cxgb4_eth_xmit(skb, dev);
 }
 
+static void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq)
+{
+       int pktcount = eosw_txq->pidx - eosw_txq->last_pidx;
+       int pidx = eosw_txq->pidx;
+       struct sk_buff *skb;
+
+       if (!pktcount)
+               return;
+
+       if (pktcount < 0)
+               pktcount += eosw_txq->ndesc;
+
+       while (pktcount--) {
+               pidx--;
+               if (pidx < 0)
+                       pidx += eosw_txq->ndesc;
+
+               skb = eosw_txq->desc[pidx].skb;
+               if (skb) {
+                       dev_consume_skb_any(skb);
+                       eosw_txq->desc[pidx].skb = NULL;
+                       eosw_txq->inuse--;
+               }
+       }
+
+       eosw_txq->pidx = eosw_txq->last_pidx + 1;
+}
+
 /**
  * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc.
  * @dev - netdevice
@@ -2440,9 +2471,11 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
                                            FW_FLOWC_MNEM_EOSTATE_CLOSING :
                                            FW_FLOWC_MNEM_EOSTATE_ESTABLISHED);
 
-       eosw_txq->cred -= len16;
-       eosw_txq->ncompl++;
-       eosw_txq->last_compl = 0;
+       /* Free up any pending skbs to ensure there's room for
+        * termination FLOWC.
+        */
+       if (tc == FW_SCHED_CLS_NONE)
+               eosw_txq_flush_pending_skbs(eosw_txq);
 
        ret = eosw_txq_enqueue(eosw_txq, skb);
        if (ret) {
@@ -2695,6 +2728,7 @@ static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr)
  *     is ever running at a time ...
  */
 static void service_ofldq(struct sge_uld_txq *q)
+       __must_hold(&q->sendq.lock)
 {
        u64 *pos, *before, *end;
        int credits;
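
Note: eosw_txq_flush_pending_skbs() above walks the software ring backwards from pidx to last_pidx; since both indices live modulo ndesc, a negative difference is normalized by adding ndesc, and the per-step decrement wraps the same way. The index arithmetic in isolation:

    #include <stdio.h>

    #define NDESC 8

    int main(void)
    {
        int last_pidx = 6, pidx = 2;        /* producer wrapped past end */
        int pktcount = pidx - last_pidx;    /* -4 */

        if (pktcount < 0)
            pktcount += NDESC;              /* 4 pending entries */

        for (int i = 0, p = pidx; i < pktcount; i++) {
            p--;
            if (p < 0)
                p += NDESC;
            printf("free slot %d\n", p);    /* 1, 0, 7, 6 */
        }
        return 0;
    }
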
index 2bd7ace0a95393612cf5d3c5d9177eb2336abbd9..bfc6bfe94d0af45474d3556150ca6eecc7c760e9 100644 (file)
@@ -77,6 +77,7 @@ config UCC_GETH
        depends on QUICC_ENGINE && PPC32
        select FSL_PQ_MDIO
        select PHYLIB
+       select FIXED_PHY
        ---help---
          This driver supports the Gigabit Ethernet mode of the QUICC Engine,
          which is available on some Freescale SOCs.
@@ -90,6 +91,7 @@ config GIANFAR
        depends on HAS_DMA
        select FSL_PQ_MDIO
        select PHYLIB
+       select FIXED_PHY
        select CRC32
        ---help---
          This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
index 3b325733a4f8cabfde12defd7e2ae844f1e41112..0a54c7e0e4aeab6d7d4dcc26ae736c379a496e4a 100644 (file)
@@ -3,6 +3,7 @@ menuconfig FSL_DPAA_ETH
        tristate "DPAA Ethernet"
        depends on FSL_DPAA && FSL_FMAN
        select PHYLIB
+       select FIXED_PHY
        select FSL_FMAN_MAC
        ---help---
          Data Path Acceleration Architecture Ethernet driver,
index 2cd1f8efdfa3a9e8aaefcee91b14eb526debac37..6bfa7575af942dca852ea31b1bf0aa766186caad 100644 (file)
@@ -2914,7 +2914,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
        }
 
        /* Do this here, so we can be verbose early */
-       SET_NETDEV_DEV(net_dev, dev);
+       SET_NETDEV_DEV(net_dev, dev->parent);
        dev_set_drvdata(dev, net_dev);
 
        priv = netdev_priv(net_dev);
index b6c46639aa4c45326ec9d3a92e4fb59cefebe4df..d97c320a2dc0393b3a6b84bf43ae0d9bbf865db7 100644 (file)
@@ -86,7 +86,7 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv,
        for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
                addr = dpaa2_sg_get_addr(&sgt[i]);
                sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
-               dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+               dma_unmap_page(dev, addr, priv->rx_buf_size,
                               DMA_BIDIRECTIONAL);
 
                free_pages((unsigned long)sg_vaddr, 0);
@@ -144,7 +144,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
                /* Get the address and length from the S/G entry */
                sg_addr = dpaa2_sg_get_addr(sge);
                sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
-               dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
+               dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
                               DMA_BIDIRECTIONAL);
 
                sg_length = dpaa2_sg_get_len(sge);
@@ -185,7 +185,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
                                (page_address(page) - page_address(head_page));
 
                        skb_add_rx_frag(skb, i - 1, head_page, page_offset,
-                                       sg_length, DPAA2_ETH_RX_BUF_SIZE);
+                                       sg_length, priv->rx_buf_size);
                }
 
                if (dpaa2_sg_is_final(sge))
@@ -211,7 +211,7 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
 
        for (i = 0; i < count; i++) {
                vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
-               dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
+               dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
                               DMA_BIDIRECTIONAL);
                free_pages((unsigned long)vaddr, 0);
        }
@@ -335,7 +335,7 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
                break;
        case XDP_REDIRECT:
                dma_unmap_page(priv->net_dev->dev.parent, addr,
-                              DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
+                              priv->rx_buf_size, DMA_BIDIRECTIONAL);
                ch->buf_count--;
                xdp.data_hard_start = vaddr;
                err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
@@ -374,7 +374,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
        trace_dpaa2_rx_fd(priv->net_dev, fd);
 
        vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
-       dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+       dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
                                DMA_BIDIRECTIONAL);
 
        fas = dpaa2_get_fas(vaddr, false);
@@ -393,13 +393,13 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
                        return;
                }
 
-               dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+               dma_unmap_page(dev, addr, priv->rx_buf_size,
                               DMA_BIDIRECTIONAL);
                skb = build_linear_skb(ch, fd, vaddr);
        } else if (fd_format == dpaa2_fd_sg) {
                WARN_ON(priv->xdp_prog);
 
-               dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+               dma_unmap_page(dev, addr, priv->rx_buf_size,
                               DMA_BIDIRECTIONAL);
                skb = build_frag_skb(priv, ch, buf_data);
                free_pages((unsigned long)vaddr, 0);
@@ -974,7 +974,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
                if (!page)
                        goto err_alloc;
 
-               addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
+               addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
                                    DMA_BIDIRECTIONAL);
                if (unlikely(dma_mapping_error(dev, addr)))
                        goto err_map;
@@ -984,7 +984,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
                /* tracing point */
                trace_dpaa2_eth_buf_seed(priv->net_dev,
                                         page, DPAA2_ETH_RX_BUF_RAW_SIZE,
-                                        addr, DPAA2_ETH_RX_BUF_SIZE,
+                                        addr, priv->rx_buf_size,
                                         bpid);
        }
 
@@ -1720,7 +1720,7 @@ static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
        int mfl, linear_mfl;
 
        mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
-       linear_mfl = DPAA2_ETH_RX_BUF_SIZE - DPAA2_ETH_RX_HWA_SIZE -
+       linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE -
                     dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
 
        if (mfl > linear_mfl) {
@@ -2462,6 +2462,11 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
        else
                rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
 
+       /* We need to ensure that the buffer size seen by WRIOP is a multiple
+        * of 64 or 256 bytes depending on the WRIOP version.
+        */
+       priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align);
+
        /* tx buffer */
        buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
        buf_layout.pass_timestamp = true;
@@ -3126,7 +3131,7 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
        pools_params.num_dpbp = 1;
        pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
        pools_params.pools[0].backup_pool = 0;
-       pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
+       pools_params.pools[0].buffer_size = priv->rx_buf_size;
        err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
        if (err) {
                dev_err(dev, "dpni_set_pools() failed\n");
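
Note: the rx_buf_size change rounds the nominal buffer size down to the hardware's alignment so WRIOP always sees a legal multiple of 64 or 256 bytes. For a power-of-two alignment, ALIGN_DOWN is a single mask (a simplified form of the kernel macro):

    #include <stdio.h>

    /* Power-of-two-only sketch of ALIGN_DOWN(). */
    #define ALIGN_DOWN(x, a) ((x) & ~((a) - 1u))

    int main(void)
    {
        unsigned int raw = 2000;   /* hypothetical raw buffer size */

        printf("%u -> %u (align 64)\n",  raw, ALIGN_DOWN(raw, 64u));
        printf("%u -> %u (align 256)\n", raw, ALIGN_DOWN(raw, 256u));
        return 0;
    }
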
index 7635db3ef903898ddb280d49fe9b1f06ff7024cf..13242bf5b427c497d89b9b8cab0fbcf10734d795 100644 (file)
@@ -382,6 +382,7 @@ struct dpaa2_eth_priv {
        u16 tx_data_offset;
 
        struct fsl_mc_device *dpbp_dev;
+       u16 rx_buf_size;
        u16 bpid;
        struct iommu_domain *iommu_domain;
 
index 94347c6952333e0b0bd7a69396a0901861565e3f..b7141fdc279e9fe0570cb6d79c3cd3a48927a9da 100644 (file)
@@ -635,7 +635,7 @@ static int num_rules(struct dpaa2_eth_priv *priv)
 
 static int update_cls_rule(struct net_device *net_dev,
                           struct ethtool_rx_flow_spec *new_fs,
-                          int location)
+                          unsigned int location)
 {
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
        struct dpaa2_eth_cls_rule *rule;
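
Note: taking location as unsigned int pairs with a single upper-bound test: a negative value from userspace wraps to a huge unsigned number and is caught by the same >= check, with no separate < 0 branch. The mvpp2 hunks further down add the equivalent fs.location and rss_context bound checks. In miniature:

    #include <stdio.h>

    #define N_RULES 8

    static int lookup(unsigned int location)
    {
        static const char *rules[N_RULES];

        if (location >= N_RULES)    /* also rejects wrapped negatives */
            return -22;             /* -EINVAL */
        (void)rules[location];
        return 0;
    }

    int main(void)
    {
        printf("%d\n", lookup(3));              /* 0 */
        printf("%d\n", lookup((unsigned)-1));   /* -22 */
        return 0;
    }
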
index ebc635f8a4cc498fa4ba147905c68b5a3450941b..15f37c5b8dc14360810c1e6e3da3a8c17d9ecc02 100644 (file)
@@ -74,8 +74,8 @@ err_pci_mem_reg:
        pci_disable_device(pdev);
 err_pci_enable:
 err_mdiobus_alloc:
-       iounmap(port_regs);
 err_hw_alloc:
+       iounmap(port_regs);
 err_ioremap:
        return err;
 }
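
Note: the enetc fix reorders two labels so the ladder unwinds in reverse acquisition order: iounmap() must sit under the label reached by failures that happen after the mapping succeeded. The canonical shape, with malloc/free standing in for ioremap/iounmap:

    #include <stdio.h>
    #include <stdlib.h>

    static int probe_demo(int fail_hw)
    {
        void *regs = NULL, *hw = NULL;

        regs = malloc(16);          /* stands in for ioremap() */
        if (!regs)
            goto err_ioremap;

        hw = fail_hw ? NULL : malloc(32);
        if (!hw)
            goto err_hw_alloc;

        free(hw);
        free(regs);
        return 0;

    err_hw_alloc:
        free(regs);                 /* regs was live when hw failed */
    err_ioremap:
        return -1;
    }

    int main(void)
    {
        printf("%d\n", probe_demo(1));  /* -1, with regs freed */
        return 0;
    }
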
index 6e5f6dd169b5c580ca48fbb128f0505499c61073..552e7554a9f8ee771d39725cbfd60be53c38c8d6 100644 (file)
@@ -42,6 +42,7 @@
 #include <soc/fsl/qe/ucc.h>
 #include <soc/fsl/qe/ucc_fast.h>
 #include <asm/machdep.h>
+#include <net/sch_generic.h>
 
 #include "ucc_geth.h"
 
@@ -1548,11 +1549,8 @@ static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode)
 
 static void ugeth_quiesce(struct ucc_geth_private *ugeth)
 {
-       /* Prevent any further xmits, plus detach the device. */
-       netif_device_detach(ugeth->ndev);
-
-       /* Wait for any current xmits to finish. */
-       netif_tx_disable(ugeth->ndev);
+       /* Prevent any further xmits */
+       netif_tx_stop_all_queues(ugeth->ndev);
 
        /* Disable the interrupt to avoid NAPI rescheduling. */
        disable_irq(ugeth->ug_info->uf_info.irq);
@@ -1565,7 +1563,10 @@ static void ugeth_activate(struct ucc_geth_private *ugeth)
 {
        napi_enable(&ugeth->napi);
        enable_irq(ugeth->ug_info->uf_info.irq);
-       netif_device_attach(ugeth->ndev);
+
+       /* allow to xmit again  */
+       netif_tx_wake_all_queues(ugeth->ndev);
+       __netdev_watchdog_up(ugeth->ndev);
 }
 
 /* Called every time the controller might need to be made
index 3892a2062404eee66dc73fdf0175cb11db006690..2fff4350909833950d2ac122a66e174fef9b8e78 100644 (file)
@@ -64,7 +64,7 @@ config HNS_MDIO
          the PHY
 
 config HNS
-       tristate "Hisilicon Network Subsystem Support (Framework)"
+       tristate
        ---help---
          This selects the framework support for Hisilicon Network Subsystem. It
          is needed by any driver which provides HNS acceleration engine or make
index 8995e32dd1c001450988bcb7ca349b103ef0f39f..992908e6eebf61923e6fb2fbff2f695b7bd643fd 100644 (file)
@@ -45,6 +45,8 @@
 
 #define MGMT_MSG_TIMEOUT                5000
 
+#define SET_FUNC_PORT_MGMT_TIMEOUT     25000
+
 #define mgmt_to_pfhwdev(pf_mgmt)        \
                container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt)
 
@@ -238,12 +240,13 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
                            u8 *buf_in, u16 in_size,
                            u8 *buf_out, u16 *out_size,
                            enum mgmt_direction_type direction,
-                           u16 resp_msg_id)
+                           u16 resp_msg_id, u32 timeout)
 {
        struct hinic_hwif *hwif = pf_to_mgmt->hwif;
        struct pci_dev *pdev = hwif->pdev;
        struct hinic_recv_msg *recv_msg;
        struct completion *recv_done;
+       unsigned long timeo;
        u16 msg_id;
        int err;
 
@@ -267,8 +270,9 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
                goto unlock_sync_msg;
        }
 
-       if (!wait_for_completion_timeout(recv_done,
-                                        msecs_to_jiffies(MGMT_MSG_TIMEOUT))) {
+       timeo = msecs_to_jiffies(timeout ? timeout : MGMT_MSG_TIMEOUT);
+
+       if (!wait_for_completion_timeout(recv_done, timeo)) {
                dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id);
                err = -ETIMEDOUT;
                goto unlock_sync_msg;
@@ -342,6 +346,7 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
 {
        struct hinic_hwif *hwif = pf_to_mgmt->hwif;
        struct pci_dev *pdev = hwif->pdev;
+       u32 timeout = 0;
 
        if (sync != HINIC_MGMT_MSG_SYNC) {
                dev_err(&pdev->dev, "Invalid MGMT msg type\n");
@@ -353,9 +358,12 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
                return -EINVAL;
        }
 
+       if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
+               timeout = SET_FUNC_PORT_MGMT_TIMEOUT;
+
        return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
                                buf_out, out_size, MGMT_DIRECT_SEND,
-                               MSG_NOT_RESP);
+                               MSG_NOT_RESP, timeout);
 }
 
 /**
index 13560975c103a29438b4e00542561a4b796abb1f..63b92f6cc856b7786a8ca3cd1809f07c429ced20 100644 (file)
@@ -483,7 +483,6 @@ static int hinic_close(struct net_device *netdev)
 {
        struct hinic_dev *nic_dev = netdev_priv(netdev);
        unsigned int flags;
-       int err;
 
        down(&nic_dev->mgmt_lock);
 
@@ -497,20 +496,9 @@ static int hinic_close(struct net_device *netdev)
 
        up(&nic_dev->mgmt_lock);
 
-       err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
-       if (err) {
-               netif_err(nic_dev, drv, netdev,
-                         "Failed to set func port state\n");
-               nic_dev->flags |= (flags & HINIC_INTF_UP);
-               return err;
-       }
+       hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
 
-       err = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE);
-       if (err) {
-               netif_err(nic_dev, drv, netdev, "Failed to set port state\n");
-               nic_dev->flags |= (flags & HINIC_INTF_UP);
-               return err;
-       }
+       hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE);
 
        if (nic_dev->flags & HINIC_RSS_ENABLE) {
                hinic_rss_deinit(nic_dev);
index 4bd33245bad625d7bffb7a7002dc776effd3e956..197dc5b2c0905d26110f747efa0b3071d6457e18 100644 (file)
@@ -2189,7 +2189,8 @@ static void __ibmvnic_reset(struct work_struct *work)
                                rc = do_hard_reset(adapter, rwi, reset_state);
                                rtnl_unlock();
                        }
-               } else {
+               } else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
+                               adapter->from_passive_init)) {
                        rc = do_reset(adapter, rwi, reset_state);
                }
                kfree(rwi);
@@ -4677,12 +4678,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
                        dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
                        break;
                }
-               dev_info(dev, "Partner protocol version is %d\n",
-                        crq->version_exchange_rsp.version);
-               if (be16_to_cpu(crq->version_exchange_rsp.version) <
-                   ibmvnic_version)
-                       ibmvnic_version =
+               ibmvnic_version =
                            be16_to_cpu(crq->version_exchange_rsp.version);
+               dev_info(dev, "Partner protocol version is %d\n",
+                        ibmvnic_version);
                send_cap_queries(adapter);
                break;
        case QUERY_CAPABILITY_RSP:
index 8972cdd559e85c1989389ad7c008f17c43a829fb..d4a4e241333d8a120b4f14d339f2ae3dba22c8d4 100644 (file)
@@ -1070,7 +1070,7 @@ void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
                    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
 
        val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
-       val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
+       val &= ~MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
        mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
 }
 
@@ -1428,6 +1428,9 @@ int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port,
        struct mvpp2_ethtool_fs *efs;
        int ret;
 
+       if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
+               return -EINVAL;
+
        efs = port->rfs_rules[info->fs.location];
        if (!efs)
                return -EINVAL;
index 1fa60e985b43aeca4c2d0d41d79d3751b3b5b407..2b5dad2ec650c0874ada85d02c1abfce20cae784 100644 (file)
@@ -4329,6 +4329,8 @@ static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
 
        if (!mvpp22_rss_is_supported())
                return -EOPNOTSUPP;
+       if (rss_context >= MVPP22_N_RSS_TABLES)
+               return -EINVAL;
 
        if (hfunc)
                *hfunc = ETH_RSS_HASH_CRC32;
index 187c633a7af559241a388e79857c3f21b9a2f533..f4227517dc8e03b28753a50c5fdb19961b8624ba 100644 (file)
@@ -497,13 +497,17 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
                                          GFP_KERNEL);
-       if (!hw->irq_name)
+       if (!hw->irq_name) {
+               err = -ENOMEM;
                goto err_free_netdev;
+       }
 
        hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
                                         sizeof(cpumask_var_t), GFP_KERNEL);
-       if (!hw->affinity_mask)
+       if (!hw->affinity_mask) {
+               err = -ENOMEM;
                goto err_free_netdev;
+       }
 
        err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
        if (err < 0) {
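
Note: both otx2 hunks plug the classic missing-error-code bug: jumping to the cleanup label without assigning err returns whatever the variable last held, often 0, reporting success for a failed probe. The shape of the bug and the fix:

    #include <stdio.h>

    static int probe_demo(void)
    {
        int err = 0;
        void *irq_name = NULL;  /* force the failure path for the demo */

        if (!irq_name) {
            err = -12;          /* -ENOMEM: set the code, then unwind */
            goto err_free;
        }
        return 0;

    err_free:
        return err;             /* without the assignment: returns 0 */
    }

    int main(void)
    {
        printf("%d\n", probe_demo());
        return 0;
    }
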
index 7a0d785b826c6f909ce79788b2d4a887ae0a379d..17243bb5ba9102e26de9f2f3c0f57f445e040888 100644 (file)
@@ -1418,7 +1418,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
 
        pep->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(pep->base)) {
-               err = -ENOMEM;
+               err = PTR_ERR(pep->base);
                goto err_netdev;
        }
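
The pxa168 fix above propagates the precise error encoded in the pointer instead of overwriting it with -ENOMEM. A userspace sketch of the ERR_PTR/PTR_ERR convention it relies on (a simplified re-creation for illustration, not the kernel headers):

    #include <stdio.h>
    #include <errno.h>
    #include <stdint.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long err)      { return (void *)err; }
    static long  PTR_ERR(const void *p) { return (long)p; }
    static int   IS_ERR(const void *p)
    {
            return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    /* Hypothetical callee that can fail for several distinct reasons. */
    static void *map_resource(int fail_with)
    {
            return fail_with ? ERR_PTR(-(long)fail_with) : (void *)0x1000;
    }

    int main(void)
    {
            void *base = map_resource(EBUSY);

            if (IS_ERR(base)) {
                    long err = PTR_ERR(base);   /* -EBUSY, not -ENOMEM */
                    printf("probe failed: %ld\n", err);
                    return 1;
            }
            return 0;
    }
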
 
index 6e501af0e5322d648adf3938f9ac0e4d2a2d6360..f6ff9620a13772c66aaf205a8b1411c81275cbd9 100644 (file)
@@ -2734,7 +2734,7 @@ void mlx4_opreq_action(struct work_struct *work)
                if (err) {
                        mlx4_err(dev, "Failed to retrieve required operation: %d\n",
                                 err);
-                       return;
+                       goto out;
                }
                MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
                MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
index 5716c3d2bb86aac7317e06ee0b89184e14238881..c72c4e1ea383b81a0b3e5f4eb791e87e715e9f6f 100644 (file)
@@ -2550,6 +2550,7 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
 
                if (!err || err == -ENOSPC) {
                        priv->def_counter[port] = idx;
+                       err = 0;
                } else if (err == -ENOENT) {
                        err = 0;
                        continue;
@@ -2600,7 +2601,8 @@ int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage)
                                   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
                if (!err)
                        *idx = get_param_l(&out_param);
-
+               if (WARN_ON(err == -ENOSPC))
+                       err = -EINVAL;
                return err;
        }
        return __mlx4_counter_alloc(dev, idx);
index 7d69a3061f1789d0804cff1f0c7f612ff02ed465..fd375cbe586e154907debeb402449c4f90c4d2ee 100644 (file)
@@ -80,7 +80,7 @@ config MLX5_ESWITCH
 
 config MLX5_TC_CT
        bool "MLX5 TC connection tracking offload support"
-       depends on MLX5_CORE_EN && NET_SWITCHDEV && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT
+       depends on MLX5_ESWITCH && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT
        default y
        help
          Say Y here if you want to support offloading connection tracking rules
index 34cba97f7bf4891a2bb26e10cf3c31d30cd40550..7a77fe40af3aaac849ba1ee70dbfb0f7da4512ed 100644 (file)
@@ -848,6 +848,14 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
 static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
                              struct mlx5_cmd_msg *msg);
 
+static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode)
+{
+       if (cmd->allowed_opcode == CMD_ALLOWED_OPCODE_ALL)
+               return true;
+
+       return cmd->allowed_opcode == opcode;
+}
+
 static void cmd_work_handler(struct work_struct *work)
 {
        struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
@@ -861,6 +869,7 @@ static void cmd_work_handler(struct work_struct *work)
        int alloc_ret;
        int cmd_mode;
 
+       complete(&ent->handling);
        sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
        down(sem);
        if (!ent->page_queue) {
@@ -888,7 +897,6 @@ static void cmd_work_handler(struct work_struct *work)
        }
 
        cmd->ent_arr[ent->idx] = ent;
-       set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
        lay = get_inst(cmd, ent->idx);
        ent->lay = lay;
        memset(lay, 0, sizeof(*lay));
@@ -910,10 +918,13 @@ static void cmd_work_handler(struct work_struct *work)
 
        if (ent->callback)
                schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
+       set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
 
        /* Skip sending command to fw if internal error */
        if (pci_channel_offline(dev->pdev) ||
-           dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+           dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
+           cmd->state != MLX5_CMDIF_STATE_UP ||
+           !opcode_allowed(&dev->cmd, ent->op)) {
                u8 status = 0;
                u32 drv_synd;
 
@@ -922,6 +933,10 @@ static void cmd_work_handler(struct work_struct *work)
                MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
 
                mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
+               /* no doorbell, no need to keep the entry */
+               free_ent(cmd, ent->idx);
+               if (ent->callback)
+                       free_cmd(ent);
                return;
        }
 
@@ -974,6 +989,11 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
        struct mlx5_cmd *cmd = &dev->cmd;
        int err;
 
+       if (!wait_for_completion_timeout(&ent->handling, timeout) &&
+           cancel_work_sync(&ent->work)) {
+               ent->ret = -ECANCELED;
+               goto out_err;
+       }
        if (cmd->mode == CMD_MODE_POLLING || ent->polling) {
                wait_for_completion(&ent->done);
        } else if (!wait_for_completion_timeout(&ent->done, timeout)) {
@@ -981,12 +1001,17 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
                mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
        }
 
+out_err:
        err = ent->ret;
 
        if (err == -ETIMEDOUT) {
                mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
                               mlx5_command_str(msg_to_opcode(ent->in)),
                               msg_to_opcode(ent->in));
+       } else if (err == -ECANCELED) {
+               mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
+                              mlx5_command_str(msg_to_opcode(ent->in)),
+                              msg_to_opcode(ent->in));
        }
        mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
                      err, deliv_status_to_str(ent->status), ent->status);
@@ -1022,6 +1047,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
        ent->token = token;
        ent->polling = force_polling;
 
+       init_completion(&ent->handling);
        if (!callback)
                init_completion(&ent->done);
 
@@ -1041,6 +1067,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
        err = wait_func(dev, ent);
        if (err == -ETIMEDOUT)
                goto out;
+       if (err == -ECANCELED)
+               goto out_free;
 
        ds = ent->ts2 - ent->ts1;
        op = MLX5_GET(mbox_in, in->first.data, opcode);
@@ -1387,6 +1415,22 @@ static void create_debugfs_files(struct mlx5_core_dev *dev)
        mlx5_cmdif_debugfs_init(dev);
 }
 
+void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode)
+{
+       struct mlx5_cmd *cmd = &dev->cmd;
+       int i;
+
+       for (i = 0; i < cmd->max_reg_cmds; i++)
+               down(&cmd->sem);
+       down(&cmd->pages_sem);
+
+       cmd->allowed_opcode = opcode;
+
+       up(&cmd->pages_sem);
+       for (i = 0; i < cmd->max_reg_cmds; i++)
+               up(&cmd->sem);
+}
+
 static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
 {
        struct mlx5_cmd *cmd = &dev->cmd;
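
mlx5_cmd_allowed_opcode() above uses a drain idiom: the command interface is backed by a counting semaphore with one token per command slot, so taking every token guarantees nothing is in flight while the allowed_opcode policy changes. A minimal POSIX sketch of the idiom (build with -pthread; MAX_CMDS and the field name are stand-ins):

    #include <semaphore.h>
    #include <stdio.h>

    #define MAX_CMDS 4

    static sem_t cmd_slots;         /* one token per in-flight command   */
    static int allowed_opcode;      /* written only with all tokens held */

    static void set_allowed_opcode(int opcode)
    {
            int i;

            /* Drain: once all tokens are held, no command is mid-flight. */
            for (i = 0; i < MAX_CMDS; i++)
                    sem_wait(&cmd_slots);

            allowed_opcode = opcode;

            /* Reopen the command queue. */
            for (i = 0; i < MAX_CMDS; i++)
                    sem_post(&cmd_slots);
    }

    int main(void)
    {
            sem_init(&cmd_slots, 0, MAX_CMDS);
            set_allowed_opcode(42);
            printf("allowed opcode: %d\n", allowed_opcode);
            sem_destroy(&cmd_slots);
            return 0;
    }
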
@@ -1663,12 +1707,14 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
        int err;
        u8 status = 0;
        u32 drv_synd;
+       u16 opcode;
        u8 token;
 
+       opcode = MLX5_GET(mbox_in, in, opcode);
        if (pci_channel_offline(dev->pdev) ||
-           dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
-               u16 opcode = MLX5_GET(mbox_in, in, opcode);
-
+           dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
+           dev->cmd.state != MLX5_CMDIF_STATE_UP ||
+           !opcode_allowed(&dev->cmd, opcode)) {
                err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
                MLX5_SET(mbox_out, out, status, status);
                MLX5_SET(mbox_out, out, syndrome, drv_synd);
@@ -1933,6 +1979,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
                goto err_free_page;
        }
 
+       cmd->state = MLX5_CMDIF_STATE_DOWN;
        cmd->checksum_disabled = 1;
        cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
        cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;
@@ -1970,6 +2017,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
        mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));
 
        cmd->mode = CMD_MODE_POLLING;
+       cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL;
 
        create_msg_cache(dev);
 
@@ -2009,3 +2057,10 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
        dma_pool_destroy(cmd->pool);
 }
 EXPORT_SYMBOL(mlx5_cmd_cleanup);
+
+void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
+                       enum mlx5_cmdif_state cmdif_state)
+{
+       dev->cmd.state = cmdif_state;
+}
+EXPORT_SYMBOL(mlx5_cmd_set_state);
index 23701c0e36ec38c3533deb67c64e62da9b50bbf3..0a5aada0f50f97882e0108beb56f3e0033ea727e 100644 (file)
@@ -1068,10 +1068,12 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
 
 void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
                                   int num_channels);
-void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
-                                u8 cq_period_mode);
-void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
-                                u8 cq_period_mode);
+
+void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
+void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
+void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
+void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
+
 void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
 void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
                               struct mlx5e_params *params);
@@ -1121,7 +1123,7 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
 int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);
 
 int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
-void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
+void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);
 
 int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
 void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
index 2c4a670c8ffd4b076460b5b7d6e8b70c7499e5f3..2a8950b3056f95445bc335835c0d694fa7d01011 100644 (file)
@@ -369,17 +369,19 @@ enum mlx5e_fec_supported_link_mode {
                        *_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link); \
        } while (0)
 
-#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link)          \
-       do {                                                                    \
-               u16 *__policy = &(policy);                                      \
-               bool _write = (write);                                          \
-                                                                               \
-               if (_write && *__policy)                                        \
-                       *__policy = find_first_bit((u_long *)__policy,          \
-                                                  sizeof(u16) * BITS_PER_BYTE);\
-               MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link);  \
-               if (!_write && *__policy)                                       \
-                       *__policy = 1 << *__policy;                             \
+#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link)                  \
+       do {                                                                            \
+               unsigned long policy_long;                                              \
+               u16 *__policy = &(policy);                                              \
+               bool _write = (write);                                                  \
+                                                                                       \
+               policy_long = *__policy;                                                \
+               if (_write && *__policy)                                                \
+                       *__policy = find_first_bit(&policy_long,                        \
+                                                  sizeof(policy_long) * BITS_PER_BYTE);\
+               MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link);          \
+               if (!_write && *__policy)                                               \
+                       *__policy = 1 << *__policy;                                     \
        } while (0)
 
 /* get/set FEC admin field for a given speed */
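
The macro rewrite above fixes a buffer overread: find_first_bit() takes an unsigned long *, so casting a u16's address to that type makes the scan read sizeof(long) bytes, six of them past the variable on 64-bit. The fix widens into a real unsigned long first. A userspace sketch of the safe shape, using a compiler builtin in place of find_first_bit():

    #include <stdio.h>

    int main(void)
    {
            unsigned short policy = 0x0008;     /* bit 3 set         */
            unsigned long policy_long = policy; /* widen before scan */
            unsigned int bit;

            /* __builtin_ctzl is undefined for 0, so mirror the kernel
             * convention of returning the bit width when nothing is set.
             */
            bit = policy_long ? (unsigned int)__builtin_ctzl(policy_long)
                              : sizeof(policy_long) * 8;
            printf("first set bit: %u\n", bit); /* prints 3 */
            return 0;
    }
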
index a172c5e39710b78362fadfd1db499da4786e4638..4eb305af010698ea489310a466c4efc451419fc1 100644 (file)
@@ -699,6 +699,7 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
                       struct netlink_ext_ack *extack)
 {
        struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
+       struct flow_rule *rule = flow_cls_offload_flow_rule(f);
        struct flow_dissector_key_ct *mask, *key;
        bool trk, est, untrk, unest, new;
        u32 ctstate = 0, ctstate_mask = 0;
@@ -706,7 +707,7 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
        u16 ct_state, ct_state_mask;
        struct flow_match_ct match;
 
-       if (!flow_rule_match_key(f->rule, FLOW_DISSECTOR_KEY_CT))
+       if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT))
                return 0;
 
        if (!ct_priv) {
@@ -715,7 +716,7 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
                return -EOPNOTSUPP;
        }
 
-       flow_rule_match_ct(f->rule, &match);
+       flow_rule_match_ct(rule, &match);
 
        key = match.key;
        mask = match.mask;
index 091d305b633e6adc342e007e4bf7ff5ed8818f7b..626f6c04882eeab1e0334b13e1ec2d6925330213 100644 (file)
@@ -130,7 +130,9 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
                       struct flow_cls_offload *f,
                       struct netlink_ext_ack *extack)
 {
-       if (!flow_rule_match_key(f->rule, FLOW_DISSECTOR_KEY_CT))
+       struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+
+       if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT))
                return 0;
 
        NL_SET_ERR_MSG_MOD(extack, "mlx5 tc ct offload isn't enabled.");
index 46725cd743a369b6531b2dc3fd8051e2129f266e..7d1985fa0d4f7603ce0f3a1a9ee7d02028601d40 100644 (file)
@@ -69,8 +69,8 @@ static void mlx5e_ktls_del(struct net_device *netdev,
        struct mlx5e_ktls_offload_context_tx *tx_priv =
                mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
 
-       mlx5_ktls_destroy_key(priv->mdev, tx_priv->key_id);
        mlx5e_destroy_tis(priv->mdev, tx_priv->tisn);
+       mlx5_ktls_destroy_key(priv->mdev, tx_priv->key_id);
        kvfree(tx_priv);
 }
 
index 6d703ddee4e27fef265f97aefff8dac422dfc8ed..bc290ae80a5311ed845b59f86c9b9b625b66a12b 100644 (file)
@@ -527,8 +527,8 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
        struct dim_cq_moder *rx_moder, *tx_moder;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_channels new_channels = {};
+       bool reset_rx, reset_tx;
        int err = 0;
-       bool reset;
 
        if (!MLX5_CAP_GEN(mdev, cq_moderation))
                return -EOPNOTSUPP;
@@ -566,15 +566,28 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
        }
        /* we are opened */
 
-       reset = (!!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled) ||
-               (!!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled);
+       reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
+       reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled;
 
-       if (!reset) {
+       if (!reset_rx && !reset_tx) {
                mlx5e_set_priv_channels_coalesce(priv, coal);
                priv->channels.params = new_channels.params;
                goto out;
        }
 
+       if (reset_rx) {
+               u8 mode = MLX5E_GET_PFLAG(&new_channels.params,
+                                         MLX5E_PFLAG_RX_CQE_BASED_MODER);
+
+               mlx5e_reset_rx_moderation(&new_channels.params, mode);
+       }
+       if (reset_tx) {
+               u8 mode = MLX5E_GET_PFLAG(&new_channels.params,
+                                         MLX5E_PFLAG_TX_CQE_BASED_MODER);
+
+               mlx5e_reset_tx_moderation(&new_channels.params, mode);
+       }
+
        err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
 
 out:
@@ -665,11 +678,12 @@ static const u32 pplm_fec_2_ethtool_linkmodes[] = {
 static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
                                        struct ethtool_link_ksettings *link_ksettings)
 {
-       u_long active_fec = 0;
+       unsigned long active_fec_long;
+       u32 active_fec;
        u32 bitn;
        int err;
 
-       err = mlx5e_get_fec_mode(dev, (u32 *)&active_fec, NULL);
+       err = mlx5e_get_fec_mode(dev, &active_fec, NULL);
        if (err)
                return (err == -EOPNOTSUPP) ? 0 : err;
 
@@ -682,10 +696,11 @@ static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
        MLX5E_ADVERTISE_SUPPORTED_FEC(MLX5E_FEC_LLRS_272_257_1,
                                      ETHTOOL_LINK_MODE_FEC_LLRS_BIT);
 
+       active_fec_long = active_fec;
        /* active fec is a bit set, find out which bit is set and
         * advertise the corresponding ethtool bit
         */
-       bitn = find_first_bit(&active_fec, sizeof(u32) * BITS_PER_BYTE);
+       bitn = find_first_bit(&active_fec_long, sizeof(active_fec_long) * BITS_PER_BYTE);
        if (bitn < ARRAY_SIZE(pplm_fec_2_ethtool_linkmodes))
                __set_bit(pplm_fec_2_ethtool_linkmodes[bitn],
                          link_ksettings->link_modes.advertising);
@@ -1517,8 +1532,8 @@ static int mlx5e_get_fecparam(struct net_device *netdev,
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
-       u16 fec_configured = 0;
-       u32 fec_active = 0;
+       u16 fec_configured;
+       u32 fec_active;
        int err;
 
        err = mlx5e_get_fec_mode(mdev, &fec_active, &fec_configured);
@@ -1526,14 +1541,14 @@ static int mlx5e_get_fecparam(struct net_device *netdev,
        if (err)
                return err;
 
-       fecparam->active_fec = pplm2ethtool_fec((u_long)fec_active,
-                                               sizeof(u32) * BITS_PER_BYTE);
+       fecparam->active_fec = pplm2ethtool_fec((unsigned long)fec_active,
+                                               sizeof(unsigned long) * BITS_PER_BYTE);
 
        if (!fecparam->active_fec)
                return -EOPNOTSUPP;
 
-       fecparam->fec = pplm2ethtool_fec((u_long)fec_configured,
-                                        sizeof(u16) * BITS_PER_BYTE);
+       fecparam->fec = pplm2ethtool_fec((unsigned long)fec_configured,
+                                        sizeof(unsigned long) * BITS_PER_BYTE);
 
        return 0;
 }
index b314adf438da2d55c412e847a7ad149d415bcc65..bd8d0e096085716735b528c410a55ace9db77c5d 100644 (file)
@@ -2717,7 +2717,8 @@ void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
                mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
        }
 
-       if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
+       /* Verify inner tirs resources allocated */
+       if (!priv->inner_indir_tir[0].tirn)
                return;
 
        for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
@@ -3408,14 +3409,15 @@ out:
        return err;
 }
 
-void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
+void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
 {
        int i;
 
        for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
                mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
 
-       if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
+       /* Verify inner tirs resources allocated */
+       if (!priv->inner_indir_tir[0].tirn)
                return;
 
        for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
@@ -4714,7 +4716,7 @@ static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
                DIM_CQ_PERIOD_MODE_START_FROM_EQE;
 }
 
-void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
 {
        if (params->tx_dim_enabled) {
                u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
@@ -4723,13 +4725,9 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
        } else {
                params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
        }
-
-       MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
-                       params->tx_cq_moderation.cq_period_mode ==
-                               MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
 }
 
-void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
 {
        if (params->rx_dim_enabled) {
                u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
@@ -4738,7 +4736,19 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
        } else {
                params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
        }
+}
 
+void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+{
+       mlx5e_reset_tx_moderation(params, cq_period_mode);
+       MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
+                       params->tx_cq_moderation.cq_period_mode ==
+                               MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+}
+
+void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+{
+       mlx5e_reset_rx_moderation(params, cq_period_mode);
        MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
                        params->rx_cq_moderation.cq_period_mode ==
                                MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
@@ -5123,7 +5133,7 @@ err_destroy_xsk_rqts:
 err_destroy_direct_tirs:
        mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
 err_destroy_indirect_tirs:
-       mlx5e_destroy_indirect_tirs(priv, true);
+       mlx5e_destroy_indirect_tirs(priv);
 err_destroy_direct_rqts:
        mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
 err_destroy_indirect_rqts:
@@ -5142,7 +5152,7 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
        mlx5e_destroy_direct_tirs(priv, priv->xsk_tir);
        mlx5e_destroy_direct_rqts(priv, priv->xsk_tir);
        mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
-       mlx5e_destroy_indirect_tirs(priv, true);
+       mlx5e_destroy_indirect_tirs(priv);
        mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
        mlx5e_destroy_rqt(priv, &priv->indir_rqt);
        mlx5e_close_drop_rq(&priv->drop_rq);
index 55457f268495e68205fee6bdf9ae166b130c67ea..4a8e0dfdc5f2c62331a0de68f1001abd5f6f17a4 100644 (file)
@@ -1484,13 +1484,9 @@ bool mlx5e_eswitch_uplink_rep(struct net_device *netdev)
        return netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep;
 }
 
-bool mlx5e_eswitch_rep(struct net_device *netdev)
+bool mlx5e_eswitch_vf_rep(struct net_device *netdev)
 {
-       if (netdev->netdev_ops == &mlx5e_netdev_ops_rep ||
-           netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep)
-               return true;
-
-       return false;
+       return netdev->netdev_ops == &mlx5e_netdev_ops_rep;
 }
 
 static void mlx5e_build_rep_params(struct net_device *netdev)
@@ -1747,7 +1743,7 @@ err_destroy_ttc_table:
 err_destroy_direct_tirs:
        mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
 err_destroy_indirect_tirs:
-       mlx5e_destroy_indirect_tirs(priv, false);
+       mlx5e_destroy_indirect_tirs(priv);
 err_destroy_direct_rqts:
        mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
 err_destroy_indirect_rqts:
@@ -1765,7 +1761,7 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
        mlx5e_destroy_rep_root_ft(priv);
        mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
        mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
-       mlx5e_destroy_indirect_tirs(priv, false);
+       mlx5e_destroy_indirect_tirs(priv);
        mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
        mlx5e_destroy_rqt(priv, &priv->indir_rqt);
        mlx5e_close_drop_rq(&priv->drop_rq);
@@ -1773,19 +1769,14 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
 
 static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
 {
-       int err = mlx5e_init_rep_rx(priv);
-
-       if (err)
-               return err;
-
        mlx5e_create_q_counters(priv);
-       return 0;
+       return mlx5e_init_rep_rx(priv);
 }
 
 static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
 {
-       mlx5e_destroy_q_counters(priv);
        mlx5e_cleanup_rep_rx(priv);
+       mlx5e_destroy_q_counters(priv);
 }
 
 static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
index 6a233790042002166088aaf73accf459ee5adaae..612b5cf0673d1bb7939998e2e61b6694482faca6 100644 (file)
@@ -210,8 +210,13 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
 
 void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);
 
-bool mlx5e_eswitch_rep(struct net_device *netdev);
+bool mlx5e_eswitch_vf_rep(struct net_device *netdev);
 bool mlx5e_eswitch_uplink_rep(struct net_device *netdev);
+static inline bool mlx5e_eswitch_rep(struct net_device *netdev)
+{
+       return mlx5e_eswitch_vf_rep(netdev) ||
+              mlx5e_eswitch_uplink_rep(netdev);
+}
 
 #else /* CONFIG_MLX5_ESWITCH */
 static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; }
index a574c588269a0f0110eb05c745351c9a8c0c761e..10f705761666b43f5e7941f12ce57449dd66e52f 100644 (file)
@@ -2068,7 +2068,7 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
        flow_rule_match_meta(rule, &match);
        if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
                NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
-               return -EINVAL;
+               return -EOPNOTSUPP;
        }
 
        ingress_dev = __dev_get_by_index(dev_net(filter_dev),
@@ -2076,13 +2076,13 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
        if (!ingress_dev) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Can't find the ingress port to match on");
-               return -EINVAL;
+               return -ENOENT;
        }
 
        if (ingress_dev != filter_dev) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Can't match on the ingress filter port");
-               return -EINVAL;
+               return -EOPNOTSUPP;
        }
 
        return 0;
@@ -3073,6 +3073,11 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
        return true;
 }
 
+static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
+{
+       return priv->mdev == peer_priv->mdev;
+}
+
 static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
 {
        struct mlx5_core_dev *fmdev, *pmdev;
@@ -3291,7 +3296,7 @@ static inline int hash_encap_info(struct encap_key *key)
 }
 
 
-static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
+static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
                                  struct net_device *peer_netdev)
 {
        struct mlx5e_priv *peer_priv;
@@ -3299,13 +3304,11 @@ static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
        peer_priv = netdev_priv(peer_netdev);
 
        return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
-               mlx5e_eswitch_rep(priv->netdev) &&
-               mlx5e_eswitch_rep(peer_netdev) &&
+               mlx5e_eswitch_vf_rep(priv->netdev) &&
+               mlx5e_eswitch_vf_rep(peer_netdev) &&
                same_hw_devs(priv, peer_priv));
 }
 
-
-
 bool mlx5e_encap_take(struct mlx5e_encap_entry *e)
 {
        return refcount_inc_not_zero(&e->refcnt);
@@ -3575,14 +3578,37 @@ static int add_vlan_pop_action(struct mlx5e_priv *priv,
        return err;
 }
 
+static bool same_hw_reps(struct mlx5e_priv *priv,
+                        struct net_device *peer_netdev)
+{
+       struct mlx5e_priv *peer_priv;
+
+       peer_priv = netdev_priv(peer_netdev);
+
+       return mlx5e_eswitch_rep(priv->netdev) &&
+              mlx5e_eswitch_rep(peer_netdev) &&
+              same_hw_devs(priv, peer_priv);
+}
+
+static bool is_lag_dev(struct mlx5e_priv *priv,
+                      struct net_device *peer_netdev)
+{
+       return ((mlx5_lag_is_sriov(priv->mdev) ||
+                mlx5_lag_is_multipath(priv->mdev)) &&
+                same_hw_reps(priv, peer_netdev));
+}
+
 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
                                    struct net_device *out_dev)
 {
-       if (is_merged_eswitch_dev(priv, out_dev))
+       if (is_merged_eswitch_vfs(priv, out_dev))
+               return true;
+
+       if (is_lag_dev(priv, out_dev))
                return true;
 
        return mlx5e_eswitch_rep(out_dev) &&
-              same_hw_devs(priv, netdev_priv(out_dev));
+              same_port_devs(priv, netdev_priv(out_dev));
 }
 
 static bool is_duplicated_output_device(struct net_device *dev,
@@ -3823,10 +3849,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
                                if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
                                        NL_SET_ERR_MSG_MOD(extack,
                                                           "devices are not on same switch HW, can't offload forwarding");
-                                       netdev_warn(priv->netdev,
-                                                   "devices %s %s not on same switch HW, can't offload forwarding\n",
-                                                   priv->netdev->name,
-                                                   out_dev->name);
                                        return -EOPNOTSUPP;
                                }
 
@@ -4588,7 +4610,7 @@ void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
        dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
        dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
        rpriv->prev_vf_vport_stats = cur_stats;
-       flow_stats_update(&ma->stats, dpkts, dbytes, jiffies,
+       flow_stats_update(&ma->stats, dbytes, dpkts, jiffies,
                          FLOW_ACTION_HW_STATS_DELAYED);
 }
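
The one-line fix above swaps two arguments of the same integer type, a transposition the compiler cannot catch. One defensive shape, sketched below, makes each counter its own single-member struct so a swap fails to compile; this is purely illustrative, not the flow_offload API:

    #include <stdio.h>

    struct bytes { unsigned long long v; };
    struct pkts  { unsigned long long v; };

    /* Distinct wrapper types make argument order part of the type
     * system: stats_update(p, b) would be a compile error.
     */
    static void stats_update(struct bytes b, struct pkts p)
    {
            printf("bytes=%llu pkts=%llu\n", b.v, p.v);
    }

    int main(void)
    {
            struct bytes b = { 9000 };
            struct pkts  p = { 6 };

            stats_update(b, p);
            return 0;
    }
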
 
index fd6b2a1898c54a2895ebee9743087f48c34be377..119a5c6cc167d6d75f1a8a62edf20c48a4996226 100644 (file)
@@ -537,10 +537,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
 {
        struct mlx5e_tx_wqe_info *wi;
+       u32 dma_fifo_cc, nbytes = 0;
+       u16 ci, sqcc, npkts = 0;
        struct sk_buff *skb;
-       u32 dma_fifo_cc;
-       u16 sqcc;
-       u16 ci;
        int i;
 
        sqcc = sq->cc;
@@ -565,11 +564,15 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
                }
 
                dev_kfree_skb_any(skb);
+               npkts++;
+               nbytes += wi->num_bytes;
                sqcc += wi->num_wqebbs;
        }
 
        sq->dma_fifo_cc = dma_fifo_cc;
        sq->cc = sqcc;
+
+       netdev_tx_completed_queue(sq->txq, npkts, nbytes);
 }
 
 #ifdef CONFIG_MLX5_CORE_IPOIB
index cccea3a8eddd00a1e282696b797f6f2f16d1973b..ce6c621af0430a9b97eb7b79e066cb7c4eb9a077 100644 (file)
@@ -611,11 +611,13 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
                .nent = MLX5_NUM_CMD_EQE,
                .mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
        };
+       mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_CREATE_EQ);
        err = setup_async_eq(dev, &table->cmd_eq, &param, "cmd");
        if (err)
                goto err1;
 
        mlx5_cmd_use_events(dev);
+       mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
 
        param = (struct mlx5_eq_param) {
                .irq_index = 0,
@@ -645,6 +647,7 @@ err2:
        mlx5_cmd_use_polling(dev);
        cleanup_async_eq(dev, &table->cmd_eq, "cmd");
 err1:
+       mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
        mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
        return err;
 }
index b2e38e0cde976dd71534a0a70acc8397e7c188ea..5d9def18ae3a7b7496d8f4cc7c77f610c23b296f 100644 (file)
@@ -1550,9 +1550,9 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw)
                                           MLX5_FLOW_NAMESPACE_KERNEL, 1,
                                           modact);
        if (IS_ERR(mod_hdr)) {
+               err = PTR_ERR(mod_hdr);
                esw_warn(dev, "Failed to create restore mod header, err: %d\n",
                         err);
-               err = PTR_ERR(mod_hdr);
                goto err_mod_hdr;
        }
 
@@ -2219,10 +2219,12 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
                total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);
 
        memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
+       mutex_init(&esw->fdb_table.offloads.vports.lock);
+       hash_init(esw->fdb_table.offloads.vports.table);
 
        err = esw_create_uplink_offloads_acl_tables(esw);
        if (err)
-               return err;
+               goto create_acl_err;
 
        err = esw_create_offloads_table(esw, total_vports);
        if (err)
@@ -2240,9 +2242,6 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
        if (err)
                goto create_fg_err;
 
-       mutex_init(&esw->fdb_table.offloads.vports.lock);
-       hash_init(esw->fdb_table.offloads.vports.table);
-
        return 0;
 
 create_fg_err:
@@ -2253,18 +2252,19 @@ create_restore_err:
        esw_destroy_offloads_table(esw);
 create_offloads_err:
        esw_destroy_uplink_offloads_acl_tables(esw);
-
+create_acl_err:
+       mutex_destroy(&esw->fdb_table.offloads.vports.lock);
        return err;
 }
 
 static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
 {
-       mutex_destroy(&esw->fdb_table.offloads.vports.lock);
        esw_destroy_vport_rx_group(esw);
        esw_destroy_offloads_fdb_tables(esw);
        esw_destroy_restore_table(esw);
        esw_destroy_offloads_table(esw);
        esw_destroy_uplink_offloads_acl_tables(esw);
+       mutex_destroy(&esw->fdb_table.offloads.vports.lock);
 }
 
 static void
@@ -2377,9 +2377,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
 err_vports:
        esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
 err_uplink:
-       esw_set_passing_vport_metadata(esw, false);
-err_steering_init:
        esw_offloads_steering_cleanup(esw);
+err_steering_init:
+       esw_set_passing_vport_metadata(esw, false);
 err_vport_metadata:
        mlx5_rdma_disable_roce(esw->dev);
        mutex_destroy(&esw->offloads.termtbl_mutex);
index 8bcf3426b9c6ad454792a75d748d87e02e0ec698..3ce17c3d7a0014082b74a5ac00428935a76f48b9 100644 (file)
@@ -346,8 +346,10 @@ int mlx5_events_init(struct mlx5_core_dev *dev)
        events->dev = dev;
        dev->priv.events = events;
        events->wq = create_singlethread_workqueue("mlx5_events");
-       if (!events->wq)
+       if (!events->wq) {
+               kfree(events);
                return -ENOMEM;
+       }
        INIT_WORK(&events->pcie_core_work, mlx5_pcie_event);
 
        return 0;
index d5defe09339a8a4e406027062b0e64f855f4acd2..9620c8650e13dda1f8b02fd0e9208b6c02ea4733 100644 (file)
@@ -344,17 +344,12 @@ static void tree_put_node(struct fs_node *node, bool locked)
                if (node->del_hw_func)
                        node->del_hw_func(node);
                if (parent_node) {
-                       /* Only root namespace doesn't have parent and we just
-                        * need to free its node.
-                        */
                        down_write_ref_node(parent_node, locked);
                        list_del_init(&node->list);
-                       if (node->del_sw_func)
-                               node->del_sw_func(node);
-                       up_write_ref_node(parent_node, locked);
-               } else {
-                       kfree(node);
                }
+               node->del_sw_func(node);
+               if (parent_node)
+                       up_write_ref_node(parent_node, locked);
                node = NULL;
        }
        if (!node && parent_node)
@@ -468,8 +463,10 @@ static void del_sw_flow_table(struct fs_node *node)
        fs_get_obj(ft, node);
 
        rhltable_destroy(&ft->fgs_hash);
-       fs_get_obj(prio, ft->node.parent);
-       prio->num_ft--;
+       if (ft->node.parent) {
+               fs_get_obj(prio, ft->node.parent);
+               prio->num_ft--;
+       }
        kfree(ft);
 }
 
@@ -2351,6 +2348,17 @@ static int init_root_tree(struct mlx5_flow_steering *steering,
        return 0;
 }
 
+static void del_sw_root_ns(struct fs_node *node)
+{
+       struct mlx5_flow_root_namespace *root_ns;
+       struct mlx5_flow_namespace *ns;
+
+       fs_get_obj(ns, node);
+       root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns);
+       mutex_destroy(&root_ns->chain_lock);
+       kfree(node);
+}
+
 static struct mlx5_flow_root_namespace
 *create_root_ns(struct mlx5_flow_steering *steering,
                enum fs_flow_table_type table_type)
@@ -2377,7 +2385,7 @@ static struct mlx5_flow_root_namespace
        ns = &root_ns->ns;
        fs_init_namespace(ns);
        mutex_init(&root_ns->chain_lock);
-       tree_init_node(&ns->node, NULL, NULL);
+       tree_init_node(&ns->node, NULL, del_sw_root_ns);
        tree_add_node(&ns->node, NULL);
 
        return root_ns;
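
create_root_ns() above previously passed NULL as the node's software destructor, so teardown fell back to a bare kfree() and never released the root-only state behind chain_lock. Registering del_sw_root_ns() gives the root the same destructor-callback treatment as every other node. A stripped-down sketch of the callback pattern (names are stand-ins):

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
            void (*del_sw_func)(struct node *);
    };

    /* Root-specific destructor: release root-only resources here
     * (the driver's mutex), then free the node itself.
     */
    static void del_sw_root(struct node *n)
    {
            free(n);
    }

    static void tree_put(struct node *n)
    {
            if (n->del_sw_func)
                    n->del_sw_func(n);
    }

    int main(void)
    {
            struct node *root = malloc(sizeof(*root));

            if (!root)
                    return 1;
            root->del_sw_func = del_sw_root;
            tree_put(root);
            printf("root released via its destructor\n");
            return 0;
    }
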
index 673aaa815f571046fa177d54c8ab4c40a01872c8..505cf6eeae25cecc38f8e13e03fd046928b87b55 100644 (file)
@@ -396,7 +396,7 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
 err_destroy_direct_tirs:
        mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
 err_destroy_indirect_tirs:
-       mlx5e_destroy_indirect_tirs(priv, true);
+       mlx5e_destroy_indirect_tirs(priv);
 err_destroy_direct_rqts:
        mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
 err_destroy_indirect_rqts:
@@ -412,7 +412,7 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
 {
        mlx5i_destroy_flow_steering(priv);
        mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
-       mlx5e_destroy_indirect_tirs(priv, true);
+       mlx5e_destroy_indirect_tirs(priv);
        mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
        mlx5e_destroy_rqt(priv, &priv->indir_rqt);
        mlx5e_close_drop_rq(&priv->drop_rq);
index 7af4210c1b967aa5375b3d423872d9dafdd6c82e..17f818a5409038881e9344913d77be52727756ca 100644 (file)
@@ -965,6 +965,8 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
                goto err_cmd_cleanup;
        }
 
+       mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);
+
        err = mlx5_core_enable_hca(dev, 0);
        if (err) {
                mlx5_core_err(dev, "enable hca failed\n");
@@ -1026,6 +1028,7 @@ reclaim_boot_pages:
 err_disable_hca:
        mlx5_core_disable_hca(dev, 0);
 err_cmd_cleanup:
+       mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
        mlx5_cmd_cleanup(dev);
 
        return err;
@@ -1043,6 +1046,7 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
        }
        mlx5_reclaim_startup_pages(dev);
        mlx5_core_disable_hca(dev, 0);
+       mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
        mlx5_cmd_cleanup(dev);
 
        return 0;
@@ -1191,7 +1195,7 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
 
        err = mlx5_function_setup(dev, boot);
        if (err)
-               goto out;
+               goto err_function;
 
        if (boot) {
                err = mlx5_init_once(dev);
@@ -1229,6 +1233,7 @@ err_load:
                mlx5_cleanup_once(dev);
 function_teardown:
        mlx5_function_teardown(dev, boot);
+err_function:
        dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
        mutex_unlock(&dev->intf_state_mutex);
 
@@ -1544,6 +1549,22 @@ static void shutdown(struct pci_dev *pdev)
        mlx5_pci_disable_device(dev);
 }
 
+static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+
+       mlx5_unload_one(dev, false);
+
+       return 0;
+}
+
+static int mlx5_resume(struct pci_dev *pdev)
+{
+       struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+
+       return mlx5_load_one(dev, false);
+}
+
 static const struct pci_device_id mlx5_core_pci_table[] = {
        { PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTIB) },
        { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF},   /* Connect-IB VF */
@@ -1587,6 +1608,8 @@ static struct pci_driver mlx5_core_driver = {
        .id_table       = mlx5_core_pci_table,
        .probe          = init_one,
        .remove         = remove_one,
+       .suspend        = mlx5_suspend,
+       .resume         = mlx5_resume,
        .shutdown       = shutdown,
        .err_handler    = &mlx5_err_handler,
        .sriov_configure   = mlx5_core_sriov_configure,
index c0ab9cf74929ca4346ff03490090c293baa5fb6b..18719acb7e547422a0c65faaad85eb2b07923fa8 100644 (file)
@@ -695,6 +695,12 @@ static void dr_cq_event(struct mlx5_core_cq *mcq,
        pr_info("CQ event %u on CQ #%u\n", event, mcq->cqn);
 }
 
+static void dr_cq_complete(struct mlx5_core_cq *mcq,
+                          struct mlx5_eqe *eqe)
+{
+       pr_err("CQ completion CQ: #%u\n", mcq->cqn);
+}
+
 static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
                                      struct mlx5_uars_page *uar,
                                      size_t ncqe)
@@ -756,6 +762,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
        mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas);
 
        cq->mcq.event = dr_cq_event;
+       cq->mcq.comp  = dr_cq_complete;
 
        err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
        kvfree(in);
@@ -767,7 +774,12 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
        cq->mcq.set_ci_db = cq->wq_ctrl.db.db;
        cq->mcq.arm_db = cq->wq_ctrl.db.db + 1;
        *cq->mcq.set_ci_db = 0;
-       *cq->mcq.arm_db = 0;
+
+       /* Set a non-zero value, in order to prevent the HW from running
+        * db-recovery on a CQ that is used in polling mode.
Note: this is a duplicate-safe no-op placeholder; see replace below.
+        */
+       *cq->mcq.arm_db = cpu_to_be32(2 << 28);
+
        cq->mcq.vector = 0;
        cq->mcq.irqn = irqn;
        cq->mcq.uar = uar;
index 24ca8d5bc56410dfa63833ceefb00ffd4d8a4926..6b39978acd0781dde5c37ca110b3b0c595eed49a 100644 (file)
@@ -3986,6 +3986,7 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
                        mlxsw_sp_port_remove(mlxsw_sp, i);
        mlxsw_sp_cpu_port_remove(mlxsw_sp);
        kfree(mlxsw_sp->ports);
+       mlxsw_sp->ports = NULL;
 }
 
 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
@@ -4022,6 +4023,7 @@ err_port_create:
        mlxsw_sp_cpu_port_remove(mlxsw_sp);
 err_cpu_port_create:
        kfree(mlxsw_sp->ports);
+       mlxsw_sp->ports = NULL;
        return err;
 }
 
@@ -4143,6 +4145,14 @@ static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
        return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
 }
 
+static struct mlxsw_sp_port *
+mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+{
+       if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
+               return mlxsw_sp->ports[local_port];
+       return NULL;
+}
+
 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
                               unsigned int count,
                               struct netlink_ext_ack *extack)
@@ -4156,7 +4166,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
        int i;
        int err;
 
-       mlxsw_sp_port = mlxsw_sp->ports[local_port];
+       mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
        if (!mlxsw_sp_port) {
                dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
                        local_port);
@@ -4251,7 +4261,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
        int offset;
        int i;
 
-       mlxsw_sp_port = mlxsw_sp->ports[local_port];
+       mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
        if (!mlxsw_sp_port) {
                dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
                        local_port);
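
Both mlxsw hunks above pair two defenses against a stale ports array: the pointer is set to NULL immediately after kfree(), and lookups go through a helper that tolerates a missing array or entry, so late split/unsplit requests fail cleanly instead of dereferencing freed memory. A small sketch of the pair (types and sizes are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct port { int id; };

    struct switch_dev {
            struct port **ports;
            int max_ports;
    };

    static struct port *port_get(struct switch_dev *sw, int local_port)
    {
            /* Tolerate both a torn-down array and an empty slot. */
            if (sw->ports && local_port < sw->max_ports && sw->ports[local_port])
                    return sw->ports[local_port];
            return NULL;
    }

    static void ports_remove(struct switch_dev *sw)
    {
            free(sw->ports);
            sw->ports = NULL;       /* the crucial line */
    }

    int main(void)
    {
            struct switch_dev sw = { calloc(4, sizeof(struct port *)), 4 };

            ports_remove(&sw);
            printf("port 2: %p\n", (void *)port_get(&sw, 2));  /* (nil) */
            return 0;
    }
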
index 430da69003d838f7978384c71d3205d40c4276ee..a6e30e020b5cf3de0b0915821130e50dbb11e0e5 100644 (file)
@@ -986,8 +986,9 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
                                unsigned int priority,
                                struct mlxsw_afk_element_usage *elusage)
 {
+       struct mlxsw_sp_acl_tcam_vchunk *vchunk, *vchunk2;
        struct mlxsw_sp_acl_tcam_vregion *vregion;
-       struct mlxsw_sp_acl_tcam_vchunk *vchunk;
+       struct list_head *pos;
        int err;
 
        if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
@@ -1025,7 +1026,14 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
        }
 
        mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
-       list_add_tail(&vchunk->list, &vregion->vchunk_list);
+
+       /* Position the vchunk inside the list according to priority */
+       list_for_each(pos, &vregion->vchunk_list) {
+               vchunk2 = list_entry(pos, typeof(*vchunk2), list);
+               if (vchunk2->priority > priority)
+                       break;
+       }
+       list_add_tail(&vchunk->list, pos);
        mutex_unlock(&vregion->lock);
 
        return vchunk;
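
The vchunk fix above replaces an unconditional append with a walk to the first entry of higher priority, keeping the list sorted on insert. The same idiom on a plain singly linked list:

    #include <stdio.h>
    #include <stdlib.h>

    struct chunk {
            unsigned int priority;
            struct chunk *next;
    };

    /* Walk to the first node with a higher priority and splice the
     * new node in front of it; the empty list and the append-at-tail
     * cases fall out of the same loop.
     */
    static void insert_sorted(struct chunk **head, struct chunk *c)
    {
            struct chunk **pos = head;

            while (*pos && (*pos)->priority <= c->priority)
                    pos = &(*pos)->next;
            c->next = *pos;
            *pos = c;
    }

    int main(void)
    {
            struct chunk a = { 10, NULL }, b = { 30, NULL }, c = { 20, NULL };
            struct chunk *head = NULL, *it;

            insert_sorted(&head, &a);
            insert_sorted(&head, &b);
            insert_sorted(&head, &c);
            for (it = head; it; it = it->next)
                    printf("%u ", it->priority);        /* 10 20 30 */
            printf("\n");
            return 0;
    }
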
index 51117a5a6bbf2764221c8707d854322e1327748a..890b078851c9f054b8e5ba11fbb18f0e92e920bb 100644 (file)
@@ -36,7 +36,8 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
                err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
                if (err)
                        return err;
-       } else if (act->hw_stats != FLOW_ACTION_HW_STATS_DISABLED) {
+       } else if (act->hw_stats != FLOW_ACTION_HW_STATS_DISABLED &&
+                  act->hw_stats != FLOW_ACTION_HW_STATS_DONT_CARE) {
                NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
                return -EOPNOTSUPP;
        }
index 90535820b559db36040c7758657ad8d3758a7267..2503f61db5fbe284a4b89c4aa2bf6e1849727e70 100644 (file)
@@ -1259,6 +1259,7 @@ static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
                if (mlxsw_sx_port_created(mlxsw_sx, i))
                        mlxsw_sx_port_remove(mlxsw_sx, i);
        kfree(mlxsw_sx->ports);
+       mlxsw_sx->ports = NULL;
 }
 
 static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
@@ -1293,6 +1294,7 @@ err_port_module_info_get:
                if (mlxsw_sx_port_created(mlxsw_sx, i))
                        mlxsw_sx_port_remove(mlxsw_sx, i);
        kfree(mlxsw_sx->ports);
+       mlxsw_sx->ports = NULL;
        return err;
 }
 
@@ -1376,6 +1378,12 @@ static int mlxsw_sx_port_type_set(struct mlxsw_core *mlxsw_core, u8 local_port,
        u8 module, width;
        int err;
 
+       if (!mlxsw_sx->ports || !mlxsw_sx->ports[local_port]) {
+               dev_err(mlxsw_sx->bus_info->dev, "Port number \"%d\" does not exist\n",
+                       local_port);
+               return -EINVAL;
+       }
+
        if (new_type == DEVLINK_PORT_TYPE_AUTO)
                return -EOPNOTSUPP;
 
index 39925e4bf2ecb4a8d8304c2b36b90a3fd80a2b60..b25a13da900a07f3f251257f3b137a1ba54d9fbb 100644 (file)
@@ -1070,7 +1070,7 @@ static int encx24j600_spi_probe(struct spi_device *spi)
        if (unlikely(ret)) {
                netif_err(priv, probe, ndev, "Error %d initializing card encx24j600 card\n",
                          ret);
-               goto out_free;
+               goto out_stop;
        }
 
        eidled = encx24j600_read_reg(priv, EIDLED);
@@ -1088,6 +1088,8 @@ static int encx24j600_spi_probe(struct spi_device *spi)
 
 out_unregister:
        unregister_netdev(priv->ndev);
+out_stop:
+       kthread_stop(priv->kworker_task);
 out_free:
        free_netdev(ndev);
 
@@ -1100,6 +1102,7 @@ static int encx24j600_spi_remove(struct spi_device *spi)
        struct encx24j600_priv *priv = dev_get_drvdata(&spi->dev);
 
        unregister_netdev(priv->ndev);
+       kthread_stop(priv->kworker_task);
 
        free_netdev(priv->ndev);
 
index e1651756bf9da181a089caccc7011c33fd9f4267..f70bb81e1ed652c44242ecd39e7bcf700dcfdb30 100644 (file)
@@ -564,7 +564,7 @@ static int moxart_remove(struct platform_device *pdev)
        struct net_device *ndev = platform_get_drvdata(pdev);
 
        unregister_netdev(ndev);
-       free_irq(ndev->irq, ndev);
+       devm_free_irq(&pdev->dev, ndev->irq, ndev);
        moxart_mac_free_memory(ndev);
        free_netdev(ndev);
 
index a8c48a4a708f27f954c3adc0309205f545653b2b..efb3965a3e42b6b3ce67350a59dc93c06042ef43 100644 (file)
@@ -1031,10 +1031,8 @@ int ocelot_fdb_dump(struct ocelot *ocelot, int port,
 {
        int i, j;
 
-       /* Loop through all the mac tables entries. There are 1024 rows of 4
-        * entries.
-        */
-       for (i = 0; i < 1024; i++) {
+       /* Loop through all the mac tables entries. */
+       for (i = 0; i < ocelot->num_mact_rows; i++) {
                for (j = 0; j < 4; j++) {
                        struct ocelot_mact_entry entry;
                        bool is_static;
@@ -1453,8 +1451,15 @@ static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port,
 
 void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs)
 {
-       ocelot_write(ocelot, ANA_AUTOAGE_AGE_PERIOD(msecs / 2),
-                    ANA_AUTOAGE);
+       unsigned int age_period = ANA_AUTOAGE_AGE_PERIOD(msecs / 2000);
+
+       /* Setting AGE_PERIOD to zero effectively disables automatic aging,
+        * which is clearly not our intention. So avoid that.
+        */
+       if (!age_period)
+               age_period = 1;
+
+       ocelot_rmw(ocelot, age_period, ANA_AUTOAGE_AGE_PERIOD_M, ANA_AUTOAGE);
 }
 EXPORT_SYMBOL(ocelot_set_ageing_time);
 
@@ -1462,7 +1467,7 @@ static void ocelot_port_attr_ageing_set(struct ocelot *ocelot, int port,
                                        unsigned long ageing_clock_t)
 {
        unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
-       u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
+       u32 ageing_time = jiffies_to_msecs(ageing_jiffies);
 
        ocelot_set_ageing_time(ocelot, ageing_time);
 }
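
Reading the two ocelot hunks together: the caller stops pre-dividing milliseconds by 1000, the setter divides by 2000 because AGE_PERIOD counts in 2-second units (as implied by the hunk), and a computed period of zero, which would disable aging outright, is clamped to 1. A sketch of the conversion:

    #include <stdio.h>

    /* Convert milliseconds to a hardware period counted in 2 s units,
     * clamping to the minimum so aging is never silently disabled.
     */
    static unsigned int msecs_to_age_period(unsigned int msecs)
    {
            unsigned int age_period = msecs / 2000;

            return age_period ? age_period : 1;
    }

    int main(void)
    {
            printf("%u\n", msecs_to_age_period(300000)); /* 150          */
            printf("%u\n", msecs_to_age_period(1500));   /* clamped to 1 */
            return 0;
    }
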
index b88b5899b22736fdc44f4dc7da04453282012d95..7d4fd1b6addaf2aba2e662614d275a1e38580c9f 100644 (file)
@@ -431,6 +431,7 @@ int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops)
        ocelot->stats_layout = ocelot_stats_layout;
        ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout);
        ocelot->shared_queue_sz = 224 * 1024;
+       ocelot->num_mact_rows = 1024;
        ocelot->ops = ops;
 
        ret = ocelot_regfields_init(ocelot, ocelot_regfields);
index bfa0c0d39600750b19e72c1915680737e38d744a..8b018ed37b1ba4c7a412431ce55fe0a0cf865e45 100644 (file)
@@ -208,11 +208,13 @@ static int jazz_sonic_probe(struct platform_device *pdev)
 
        err = register_netdev(dev);
        if (err)
-               goto out1;
+               goto undo_probe1;
 
        return 0;
 
-out1:
+undo_probe1:
+       dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+                         lp->descriptors, lp->descriptors_laddr);
        release_mem_region(dev->base_addr, SONIC_MEM_SIZE);
 out:
        free_netdev(dev);
index 9183b3e85d217dec1328065f4594ce5e5895491d..bdbf0726145e54cad25750abe956149ea2173ff7 100644 (file)
@@ -283,6 +283,7 @@ nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn,
        if (!nfp_nsp_has_hwinfo_lookup(nsp)) {
                nfp_warn(pf->cpp, "NSP doesn't support PF MAC generation\n");
                eth_hw_addr_random(nn->dp.netdev);
+               nfp_nsp_close(nsp);
                return;
        }
 
@@ -332,8 +333,10 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
                goto err_free_alink;
 
        alink->prio_map = kzalloc(abm->prio_map_len, GFP_KERNEL);
-       if (!alink->prio_map)
+       if (!alink->prio_map) {
+               err = -ENOMEM;
                goto err_free_alink;
+       }
 
        /* This is a multi-host app, make sure MAC/PHY is up, but don't
         * make the MAC/PHY state follow the state of any of the ports.
index c694dbc239d0129bb22c0787c9c23fca5d501f0a..6b60771ccb195e407c90ea5f056f56f8dd03fece 100644 (file)
@@ -1440,7 +1440,8 @@ __nfp_flower_update_merge_stats(struct nfp_app *app,
                ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
                priv->stats[ctx_id].pkts += pkts;
                priv->stats[ctx_id].bytes += bytes;
-               max_t(u64, priv->stats[ctx_id].used, used);
+               priv->stats[ctx_id].used = max_t(u64, used,
+                                                priv->stats[ctx_id].used);
        }
 }
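
The nfp fix above is a discarded-result bug: max_t() is a pure expression, so evaluating it without assigning the result is a no-op, and the "used" timestamp never advanced. Sketched in two lines:

    #include <stdio.h>

    #define max_u64(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
            unsigned long long used = 100, sample = 250;

            max_u64(used, sample);          /* result discarded: used stays 100 */
            used = max_u64(used, sample);   /* the fix: store it, used is 250   */
            printf("used = %llu\n", used);
            return 0;
    }
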
 
index 5f8fc58d42b3d31cd0c4e5318e8012e0e343f959..11621ccc1faf0837fdd9945e970309ce10f6d72b 100644 (file)
@@ -170,8 +170,7 @@ void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
        debugfs_create_x64("base_pa", 0400, cq_dentry, &cq->base_pa);
        debugfs_create_u32("num_descs", 0400, cq_dentry, &cq->num_descs);
        debugfs_create_u32("desc_size", 0400, cq_dentry, &cq->desc_size);
-       debugfs_create_u8("done_color", 0400, cq_dentry,
-                         (u8 *)&cq->done_color);
+       debugfs_create_bool("done_color", 0400, cq_dentry, &cq->done_color);
 
        debugfs_create_file("tail", 0400, cq_dentry, cq, &cq_tail_fops);
 
index 5acf4f46c268b96f973ece41a36e6e185a10c704..f8a9c1bcffc9b84174384c8cd693570ecc24bc8c 100644 (file)
@@ -2101,6 +2101,7 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
                ionic_txrx_free(lif);
        }
        ionic_lifs_deinit(ionic);
+       ionic_reset(ionic);
        ionic_qcqs_free(lif);
 
        dev_info(ionic->dev, "FW Down: LIFs stopped\n");
@@ -2116,6 +2117,8 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
 
        dev_info(ionic->dev, "FW Up: restarting LIFs\n");
 
+       ionic_init_devinfo(ionic);
+       ionic_port_init(ionic);
        err = ionic_qcqs_alloc(lif);
        if (err)
                goto err_out;
@@ -2346,7 +2349,17 @@ static int ionic_station_set(struct ionic_lif *lif)
        if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
                return 0;
 
-       if (!ether_addr_equal(ctx.comp.lif_getattr.mac, netdev->dev_addr)) {
+       if (!is_zero_ether_addr(netdev->dev_addr)) {
+               /* If the netdev mac is non-zero and doesn't match the default
+                * device address, it was set by something earlier and we're
+                * likely here again after a fw-upgrade reset.  We need to be
+                * sure the netdev mac is in our filter list.
+                */
+               if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
+                                     netdev->dev_addr))
+                       ionic_lif_addr(lif, netdev->dev_addr, true);
+       } else {
+               /* Update the netdev mac with the device's mac */
                memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
                addr.sa_family = AF_INET;
                err = eth_prepare_mac_addr_change(netdev, &addr);
@@ -2356,12 +2369,6 @@ static int ionic_station_set(struct ionic_lif *lif)
                        return 0;
                }
 
-               if (!is_zero_ether_addr(netdev->dev_addr)) {
-                       netdev_dbg(lif->netdev, "deleting station MAC addr %pM\n",
-                                  netdev->dev_addr);
-                       ionic_lif_addr(lif, netdev->dev_addr, false);
-               }
-
                eth_commit_mac_addr_change(netdev, &addr);
        }
 
@@ -2549,8 +2556,6 @@ int ionic_lifs_register(struct ionic *ionic)
                dev_err(ionic->dev, "Cannot register net device, aborting\n");
                return err;
        }
-
-       ionic_link_status_check_request(ionic->master_lif);
        ionic->master_lif->registered = true;
 
        return 0;
index 588c62e9add7136e480770d36bf9b2b02e42f6a9..3344bc1f7671b8d42b820aa71b3a29092fda1307 100644 (file)
@@ -509,16 +509,16 @@ int ionic_port_init(struct ionic *ionic)
        size_t sz;
        int err;
 
-       if (idev->port_info)
-               return 0;
-
-       idev->port_info_sz = ALIGN(sizeof(*idev->port_info), PAGE_SIZE);
-       idev->port_info = dma_alloc_coherent(ionic->dev, idev->port_info_sz,
-                                            &idev->port_info_pa,
-                                            GFP_KERNEL);
        if (!idev->port_info) {
-               dev_err(ionic->dev, "Failed to allocate port info, aborting\n");
-               return -ENOMEM;
+               idev->port_info_sz = ALIGN(sizeof(*idev->port_info), PAGE_SIZE);
+               idev->port_info = dma_alloc_coherent(ionic->dev,
+                                                    idev->port_info_sz,
+                                                    &idev->port_info_pa,
+                                                    GFP_KERNEL);
+               if (!idev->port_info) {
+                       dev_err(ionic->dev, "Failed to allocate port info\n");
+                       return -ENOMEM;
+               }
        }
 
        sz = min(sizeof(ident->port.config), sizeof(idev->dev_cmd_regs->data));
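
Making ionic_port_init() re-runnable matters for the fw-up path added above, which now calls it on every recovery: the port-info DMA buffer must be allocated once and kept, while the port configuration commands are reissued each time. A userspace sketch of the allocate-once, configure-always shape (names are illustrative, not the driver's):

    #include <stdio.h>
    #include <stdlib.h>

    struct demo_dev {
            void  *port_info;
            size_t port_info_sz;
    };

    /* Allocate only on first entry; redo the configuration every time,
     * so the routine is safe to call again after a firmware reset.
     */
    static int demo_port_init(struct demo_dev *d)
    {
            if (!d->port_info) {
                    d->port_info_sz = 4096;
                    d->port_info = calloc(1, d->port_info_sz);
                    if (!d->port_info)
                            return -1;      /* -ENOMEM in kernel terms */
            }

            /* ... reissue port config commands here ... */
            return 0;
    }

    int main(void)
    {
            struct demo_dev d = { 0 };

            demo_port_init(&d);     /* first probe: allocates */
            demo_port_init(&d);     /* after reset: reuses the buffer */
            printf("buffer %p reused\n", d.port_info);
            free(d.port_info);
            return 0;
    }
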
index 2a533280b1241c829a3140557520e48e744b40a5..29b9c728a65e2282c2c22a441bebce2277f7faf8 100644 (file)
@@ -3651,7 +3651,7 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
        ahw->diag_cnt = 0;
        ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
        if (ret)
-               goto fail_diag_irq;
+               goto fail_mbx_args;
 
        if (adapter->flags & QLCNIC_MSIX_ENABLED)
                intrpt_id = ahw->intr_tbl[0].id;
@@ -3681,6 +3681,8 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
 
 done:
        qlcnic_free_mbx_args(&cmd);
+
+fail_mbx_args:
        qlcnic_83xx_diag_free_res(netdev, drv_sds_rings);
 
 fail_diag_irq:
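
The qlcnic change adds a label so that a failed mailbox-args allocation skips the interrupt-test teardown it never performed; in the kernel's goto-unwind idiom the labels must mirror the setup steps in reverse order, each jump releasing only what was already acquired. A userspace sketch:

    #include <stdio.h>

    static int step_a(void) { return 0; }   /* 0 = success */
    static int step_b(void) { return -1; }  /* force a failure */

    /* Unwind labels run in reverse order of setup; a step that never
     * completed is skipped by jumping past its teardown.
     */
    static int demo_init(void)
    {
            if (step_a())
                    goto fail_a;
            if (step_b())
                    goto fail_b;
            return 0;

    fail_b:
            puts("undo step_a");    /* step_b left nothing to undo */
    fail_a:
            return -1;
    }

    int main(void)
    {
            return demo_init() ? 1 : 0;
    }
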
index bf5bf05970a241ea3e9520903d4cf34ca1a42b94..c51b48dc36397a1f295df57395df9a69b983a924 100644 (file)
@@ -1050,6 +1050,13 @@ static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
                RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0;
 }
 
+static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type)
+{
+       /* based on RTL8168FP_OOBMAC_BASE in vendor driver */
+       if (tp->mac_version == RTL_GIGA_MAC_VER_52 && type == ERIAR_OOB)
+               *cmd |= 0x7f0 << 18;
+}
+
 DECLARE_RTL_COND(rtl_eriar_cond)
 {
        return RTL_R32(tp, ERIAR) & ERIAR_FLAG;
@@ -1058,9 +1065,12 @@ DECLARE_RTL_COND(rtl_eriar_cond)
 static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
                           u32 val, int type)
 {
+       u32 cmd = ERIAR_WRITE_CMD | type | mask | addr;
+
        BUG_ON((addr & 3) || (mask == 0));
        RTL_W32(tp, ERIDR, val);
-       RTL_W32(tp, ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
+       r8168fp_adjust_ocp_cmd(tp, &cmd, type);
+       RTL_W32(tp, ERIAR, cmd);
 
        rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
 }
@@ -1073,7 +1083,10 @@ static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
 
 static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
 {
-       RTL_W32(tp, ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
+       u32 cmd = ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr;
+
+       r8168fp_adjust_ocp_cmd(tp, &cmd, type);
+       RTL_W32(tp, ERIAR, cmd);
 
        return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
                RTL_R32(tp, ERIDR) : ~0;
@@ -2127,6 +2140,8 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
                { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 },
                { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 },
                { 0x7cf, 0x340, RTL_GIGA_MAC_VER_13 },
+               /* RTL8401, reportedly works if treated as RTL8101e */
+               { 0x7cf, 0x240, RTL_GIGA_MAC_VER_13 },
                { 0x7cf, 0x343, RTL_GIGA_MAC_VER_10 },
                { 0x7cf, 0x342, RTL_GIGA_MAC_VER_16 },
                { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 },
index 7305e8e86c51fea36504f6440edb73a5bb67bf77..6646eba9f57fe6014bd4a9c7d94f00a68626ddc7 100644 (file)
@@ -848,14 +848,14 @@ static int ioc3eth_probe(struct platform_device *pdev)
        ip = netdev_priv(dev);
        ip->dma_dev = pdev->dev.parent;
        ip->regs = devm_platform_ioremap_resource(pdev, 0);
-       if (!ip->regs) {
-               err = -ENOMEM;
+       if (IS_ERR(ip->regs)) {
+               err = PTR_ERR(ip->regs);
                goto out_free;
        }
 
        ip->ssram = devm_platform_ioremap_resource(pdev, 1);
-       if (!ip->ssram) {
-               err = -ENOMEM;
+       if (IS_ERR(ip->ssram)) {
+               err = PTR_ERR(ip->ssram);
                goto out_free;
        }
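
The ioc3 hunk corrects a common mistake: devm_platform_ioremap_resource() reports failure with an ERR_PTR()-encoded pointer, never NULL, so the old !ptr test could not fire and the real error code was replaced with -ENOMEM. A kernel-context sketch of the intended pattern (the probe body is illustrative):

    static int demo_probe(struct platform_device *pdev)
    {
            void __iomem *regs;

            regs = devm_platform_ioremap_resource(pdev, 0);
            if (IS_ERR(regs))               /* failure is ERR_PTR, not NULL */
                    return PTR_ERR(regs);   /* propagate the real errno */

            /* ... mapping is valid here; devm unmaps it on detach ... */
            return 0;
    }
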
 
index 49a6a9167af43a95203e40bf6b11af4cd130101d..fc168f85e7af78d82ad622afe18c83a3a38f21eb 100644 (file)
@@ -2493,20 +2493,20 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
 
        retval = smsc911x_init(dev);
        if (retval < 0)
-               goto out_disable_resources;
+               goto out_init_fail;
 
        netif_carrier_off(dev);
 
        retval = smsc911x_mii_init(pdev, dev);
        if (retval) {
                SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
-               goto out_disable_resources;
+               goto out_init_fail;
        }
 
        retval = register_netdev(dev);
        if (retval) {
                SMSC_WARN(pdata, probe, "Error %i registering device", retval);
-               goto out_disable_resources;
+               goto out_init_fail;
        } else {
                SMSC_TRACE(pdata, probe,
                           "Network interface: \"%s\"", dev->name);
@@ -2547,9 +2547,10 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
 
        return 0;
 
-out_disable_resources:
+out_init_fail:
        pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
+out_disable_resources:
        (void)smsc911x_disable_resources(pdev);
 out_enable_resources_fail:
        smsc911x_free_resources(pdev);
index 6ae13dc195105571e9ac572fc268d103ef9b8f09..02102c781a8cf4d786cf6b3f12981c63ddcc1f93 100644 (file)
@@ -319,6 +319,19 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
        /* Enable PTP clock */
        regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
        val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id);
+       switch (gmac->phy_mode) {
+       case PHY_INTERFACE_MODE_RGMII:
+               val |= NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) |
+                       NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id);
+               break;
+       case PHY_INTERFACE_MODE_SGMII:
+               val |= NSS_COMMON_CLK_GATE_GMII_RX_EN(gmac->id) |
+                               NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id);
+               break;
+       default:
+               /* We don't get here; the switch above will have errored out */
+               unreachable();
+       }
        regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
 
        if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) {
index e0a5fe83d8e0e8a35210a05293f9f922201c3d3b..bfc4a92f1d92b8c9cf6fa94b27d9ab8a7f724b37 100644 (file)
@@ -75,6 +75,11 @@ struct ethqos_emac_por {
        unsigned int value;
 };
 
+struct ethqos_emac_driver_data {
+       const struct ethqos_emac_por *por;
+       unsigned int num_por;
+};
+
 struct qcom_ethqos {
        struct platform_device *pdev;
        void __iomem *rgmii_base;
@@ -171,6 +176,11 @@ static const struct ethqos_emac_por emac_v2_3_0_por[] = {
        { .offset = RGMII_IO_MACRO_CONFIG2,     .value = 0x00002060 },
 };
 
+static const struct ethqos_emac_driver_data emac_v2_3_0_data = {
+       .por = emac_v2_3_0_por,
+       .num_por = ARRAY_SIZE(emac_v2_3_0_por),
+};
+
 static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
 {
        unsigned int val;
@@ -442,6 +452,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
        struct device_node *np = pdev->dev.of_node;
        struct plat_stmmacenet_data *plat_dat;
        struct stmmac_resources stmmac_res;
+       const struct ethqos_emac_driver_data *data;
        struct qcom_ethqos *ethqos;
        struct resource *res;
        int ret;
@@ -471,7 +482,9 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
                goto err_mem;
        }
 
-       ethqos->por = of_device_get_match_data(&pdev->dev);
+       data = of_device_get_match_data(&pdev->dev);
+       ethqos->por = data->por;
+       ethqos->num_por = data->num_por;
 
        ethqos->rgmii_clk = devm_clk_get(&pdev->dev, "rgmii");
        if (IS_ERR(ethqos->rgmii_clk)) {
@@ -526,7 +539,7 @@ static int qcom_ethqos_remove(struct platform_device *pdev)
 }
 
 static const struct of_device_id qcom_ethqos_match[] = {
-       { .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_por},
+       { .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_data},
        { }
 };
 MODULE_DEVICE_TABLE(of, qcom_ethqos_match);
index 494c859b4ade8ab39a2939a3b21dd210e633dcfe..67ba67ed0cb99f9a1df76529a47fba1dee73f386 100644 (file)
@@ -624,7 +624,7 @@ int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
                total_offset += offset;
        }
 
-       total_ctr = cfg->ctr[0] + cfg->ctr[1] * 1000000000;
+       total_ctr = cfg->ctr[0] + cfg->ctr[1] * 1000000000ULL;
        total_ctr += total_offset;
 
        ctr_low = do_div(total_ctr, 1000000000);
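
The ULL suffix in the dwmac5 fix is load-bearing: cfg->ctr[1] and the bare constant are both 32-bit, so the product wrapped at 2^32 before being widened for the 64-bit sum. A standalone demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t ctr1 = 10;     /* e.g. a 10-second cycle time */

            /* 32-bit multiply wraps before the assignment widens it:
             * 10 * 1e9 mod 2^32 = 1410065408
             */
            uint64_t wrong = ctr1 * 1000000000;
            /* the ULL constant forces the arithmetic itself into 64 bits */
            uint64_t right = ctr1 * 1000000000ULL;

            printf("wrong: %llu\nright: %llu\n",
                   (unsigned long long)wrong, (unsigned long long)right);
            return 0;
    }

The do_div(total_ctr, 1000000000) that follows then splits the corrected 64-bit value into whole seconds (left in total_ctr) and a nanosecond remainder (returned into ctr_low).
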
index 565da6498c846eac02648dd0c9da946431f58b5c..7e9cbfd235308b24bb323e87a6b5948307b8f94e 100644 (file)
@@ -630,7 +630,8 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
-                       ts_event_en = PTP_TCR_TSEVNTENA;
+                       if (priv->synopsys_id != DWMAC_CORE_5_10)
+                               ts_event_en = PTP_TCR_TSEVNTENA;
                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
@@ -4060,7 +4061,7 @@ static int stmmac_set_features(struct net_device *netdev,
 /**
  *  stmmac_interrupt - main ISR
  *  @irq: interrupt number.
- *  @dev_id: to pass the net device pointer.
+ *  @dev_id: to pass the net device pointer (must be valid).
  *  Description: this is the main driver interrupt service routine.
  *  It can call:
  *  o DMA service routine (to manage incoming frame reception and transmission
@@ -4084,11 +4085,6 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
        if (priv->irq_wake)
                pm_wakeup_event(priv->device, 0);
 
-       if (unlikely(!dev)) {
-               netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
-               return IRQ_NONE;
-       }
-
        /* Check if adapter is up */
        if (test_bit(STMMAC_DOWN, &priv->state))
                return IRQ_HANDLED;
@@ -4991,7 +4987,7 @@ int stmmac_dvr_probe(struct device *device,
                                                 priv->plat->bsp_priv);
 
                if (ret < 0)
-                       return ret;
+                       goto error_serdes_powerup;
        }
 
 #ifdef CONFIG_DEBUG_FS
@@ -5000,6 +4996,8 @@ int stmmac_dvr_probe(struct device *device,
 
        return ret;
 
+error_serdes_powerup:
+       unregister_netdev(ndev);
 error_netdev_register:
        phylink_destroy(priv->phylink);
 error_phy_setup:
@@ -5193,8 +5191,6 @@ int stmmac_resume(struct device *dev)
                        return ret;
        }
 
-       netif_device_attach(ndev);
-
        mutex_lock(&priv->lock);
 
        stmmac_reset_queues_param(priv);
@@ -5221,6 +5217,8 @@ int stmmac_resume(struct device *dev)
 
        phylink_mac_change(priv->phylink, true);
 
+       netif_device_attach(ndev);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(stmmac_resume);
index e6d1aa882fa5b116b224ce9c801664954d075f36..f1c8615ab6f04e4518a4beb58fcd68bf7ebafe14 100644 (file)
@@ -4963,7 +4963,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                                          cas_cacheline_size)) {
                        dev_err(&pdev->dev, "Could not set PCI cache "
                               "line size\n");
-                       goto err_write_cacheline;
+                       goto err_out_free_res;
                }
        }
 #endif
@@ -5136,7 +5136,6 @@ err_out_iounmap:
 err_out_free_res:
        pci_release_regions(pdev);
 
-err_write_cacheline:
        /* Try to restore it in case the error occurred after we
         * set it.
         */
index 89cec778cf2d52176c63d9d474760422e1db180a..62f809b67469b948c2e22fd310e20929d61a56d9 100644 (file)
@@ -49,6 +49,7 @@ config TI_CPSW_PHY_SEL
 config TI_CPSW
        tristate "TI CPSW Switch Support"
        depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
+       depends on TI_CPTS || !TI_CPTS
        select TI_DAVINCI_MDIO
        select MFD_SYSCON
        select PAGE_POOL
@@ -64,6 +65,7 @@ config TI_CPSW_SWITCHDEV
        tristate "TI CPSW Switch Support with switchdev"
        depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
        depends on NET_SWITCHDEV
+       depends on TI_CPTS || !TI_CPTS
        select PAGE_POOL
        select TI_DAVINCI_MDIO
        select MFD_SYSCON
@@ -77,24 +79,16 @@ config TI_CPSW_SWITCHDEV
          will be called cpsw_new.
 
 config TI_CPTS
-       bool "TI Common Platform Time Sync (CPTS) Support"
-       depends on TI_CPSW || TI_KEYSTONE_NETCP || TI_CPSW_SWITCHDEV || COMPILE_TEST
+       tristate "TI Common Platform Time Sync (CPTS) Support"
+       depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
        depends on COMMON_CLK
-       depends on POSIX_TIMERS
+       depends on PTP_1588_CLOCK
        ---help---
          This driver supports the Common Platform Time Sync unit of
          the CPSW Ethernet Switch and Keystone 2 1g/10g Switch Subsystem.
          The unit can time stamp PTP UDP/IPv4 and Layer 2 packets, and the
          driver offers a PTP Hardware Clock.
 
-config TI_CPTS_MOD
-       tristate
-       depends on TI_CPTS
-       default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y || TI_CPSW_SWITCHDEV=y
-       select NET_PTP_CLASSIFY
-       imply PTP_1588_CLOCK
-       default m
-
 config TI_K3_AM65_CPSW_NUSS
        tristate "TI K3 AM654x/J721E CPSW Ethernet driver"
        depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
@@ -115,6 +109,7 @@ config TI_KEYSTONE_NETCP
        select TI_DAVINCI_MDIO
        depends on OF
        depends on KEYSTONE_NAVIGATOR_DMA && KEYSTONE_NAVIGATOR_QMSS
+       depends on TI_CPTS || !TI_CPTS
        ---help---
          This driver supports TI's Keystone NETCP Core.
 
index 53792190e9c2e6c22b72cc588ee2ef5f7630d44e..cb26a9d21869e6f3647173d91c174be8f5ddd1c5 100644 (file)
@@ -13,7 +13,7 @@ obj-$(CONFIG_TI_DAVINCI_EMAC) += ti_davinci_emac.o
 ti_davinci_emac-y := davinci_emac.o davinci_cpdma.o
 obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
 obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
-obj-$(CONFIG_TI_CPTS_MOD) += cpts.o
+obj-$(CONFIG_TI_CPTS) += cpts.o
 obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
 ti_cpsw-y := cpsw.o davinci_cpdma.o cpsw_ale.o cpsw_priv.o cpsw_sl.o cpsw_ethtool.o
 obj-$(CONFIG_TI_CPSW_SWITCHDEV) += ti_cpsw_new.o
index 2bf56733ba94cf447592080e67ae3b99939a90ee..88f52a2f85b37dbce467a5c7dae940d68de493bf 100644 (file)
@@ -1719,7 +1719,8 @@ static int am65_cpsw_nuss_ndev_add_napi_2g(struct am65_cpsw_common *common)
 
                ret = devm_request_irq(dev, tx_chn->irq,
                                       am65_cpsw_nuss_tx_irq,
-                                      0, tx_chn->tx_chn_name, tx_chn);
+                                      IRQF_TRIGGER_HIGH,
+                                      tx_chn->tx_chn_name, tx_chn);
                if (ret) {
                        dev_err(dev, "failure requesting tx%u irq %u, %d\n",
                                tx_chn->id, tx_chn->irq, ret);
@@ -1744,7 +1745,7 @@ static int am65_cpsw_nuss_ndev_reg_2g(struct am65_cpsw_common *common)
 
        ret = devm_request_irq(dev, common->rx_chns.irq,
                               am65_cpsw_nuss_rx_irq,
-                              0, dev_name(dev), common);
+                              IRQF_TRIGGER_HIGH, dev_name(dev), common);
        if (ret) {
                dev_err(dev, "failure requesting rx irq %u, %d\n",
                        common->rx_chns.irq, ret);
@@ -1894,8 +1895,9 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
        ale_params.nu_switch_ale = true;
 
        common->ale = cpsw_ale_create(&ale_params);
-       if (!common->ale) {
+       if (IS_ERR(common->ale)) {
                dev_err(dev, "error initializing ale engine\n");
+               ret = PTR_ERR(common->ale);
                goto err_of_clear;
        }
 
index c2c5bf87da0141cf32f2eea976936cf922774edf..ffeb8633e5305a40a0698f19c816059a3a92413d 100644 (file)
@@ -1753,11 +1753,15 @@ static int cpsw_suspend(struct device *dev)
        struct cpsw_common *cpsw = dev_get_drvdata(dev);
        int i;
 
+       rtnl_lock();
+
        for (i = 0; i < cpsw->data.slaves; i++)
                if (cpsw->slaves[i].ndev)
                        if (netif_running(cpsw->slaves[i].ndev))
                                cpsw_ndo_stop(cpsw->slaves[i].ndev);
 
+       rtnl_unlock();
+
        /* Select sleep pin state */
        pinctrl_pm_select_sleep_state(dev);
 
index 0374e6936091ea004e55598f88a4e8487905aa45..8dc6be11b2ff45bb73b4c9de647a4ab89a6313d8 100644 (file)
@@ -955,7 +955,7 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
 
        ale = devm_kzalloc(params->dev, sizeof(*ale), GFP_KERNEL);
        if (!ale)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        ale->p0_untag_vid_mask =
                devm_kmalloc_array(params->dev, BITS_TO_LONGS(VLAN_N_VID),
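
Returning ERR_PTR(-ENOMEM) from cpsw_ale_create() lets each caller recover the precise errno with PTR_ERR(), as the follow-on cpsw/netcp hunks below do. The convention works because the top 4095 values of the address space are never valid kernel pointers, so they can carry a negative errno; a minimal userspace re-creation of the mechanics:

    #include <stdio.h>

    /* Userspace re-creation of the kernel's ERR_PTR convention: the
     * last 4095 pointer values encode a negative errno.
     */
    #define MAX_ERRNO 4095
    #define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr) { return IS_ERR_VALUE((unsigned long)ptr); }

    static void *create_object(int fail)
    {
            static int object;

            if (fail)
                    return ERR_PTR(-12);    /* -ENOMEM */
            return &object;
    }

    int main(void)
    {
            void *obj = create_object(1);

            if (IS_ERR(obj))
                    printf("create failed: errno %ld\n", -PTR_ERR(obj));
            return 0;
    }
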
index 97a058ca60ac9b177f7945e3b9be9ccfe9eb070e..d0b6c418a8704198c389623601bf8238600ca065 100644 (file)
@@ -490,9 +490,9 @@ int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
        ale_params.ale_ports            = CPSW_ALE_PORTS_NUM;
 
        cpsw->ale = cpsw_ale_create(&ale_params);
-       if (!cpsw->ale) {
+       if (IS_ERR(cpsw->ale)) {
                dev_err(dev, "error initializing ale engine\n");
-               return -ENODEV;
+               return PTR_ERR(cpsw->ale);
        }
 
        dma_params.dev          = dev;
index fb36115e9c510c7315eb13e0992575a59d091dc4..fdbae734acce78059856421959a2dba925b718ca 100644 (file)
@@ -3704,9 +3704,9 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
                ale_params.nu_switch_ale = true;
        }
        gbe_dev->ale = cpsw_ale_create(&ale_params);
-       if (!gbe_dev->ale) {
+       if (IS_ERR(gbe_dev->ale)) {
                dev_err(gbe_dev->dev, "error initializing ale engine\n");
-               ret = -ENODEV;
+               ret = PTR_ERR(gbe_dev->ale);
                goto free_sec_ports;
        } else {
                dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
index b50c3ec3495bb152a73a2c8ab889b3305339b0f7..6bcda20ed7e72b5946fb7bc3a97eb1259e36a0cb 100644 (file)
@@ -643,7 +643,7 @@ static int tc_mii_probe(struct net_device *dev)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask);
                linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
        }
-       linkmode_and(phydev->supported, phydev->supported, mask);
+       linkmode_andnot(phydev->supported, phydev->supported, mask);
        linkmode_copy(phydev->advertising, phydev->supported);
 
        lp->link = 0;
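
The tc35815 fix hinges on the direction of the mask: 'mask' collects the half-duplex modes that must be removed, so linkmode_and() kept only the unwanted bits and wiped every other supported mode, while linkmode_andnot() clears exactly those bits. The same logic on plain bitmasks:

    #include <stdio.h>

    int main(void)
    {
            unsigned int supported = 0x0f;  /* four link modes offered     */
            unsigned int mask      = 0x05;  /* modes that must be removed  */

            unsigned int wrong = supported & mask;    /* keeps ONLY the bad modes */
            unsigned int right = supported & ~mask;   /* clears the bad modes     */

            printf("and: 0x%x, andnot: 0x%x\n", wrong, right);
            return 0;
    }
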
index 672cd2caf2fbec6f1020f4dc30adfb8d96a8f917..21640a035d7df30de40da0b14a7e6abd309550ef 100644 (file)
@@ -1169,11 +1169,11 @@ out_unlock:
 static struct genl_family gtp_genl_family;
 
 static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
-                             u32 type, struct pdp_ctx *pctx)
+                             int flags, u32 type, struct pdp_ctx *pctx)
 {
        void *genlh;
 
-       genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, 0,
+       genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
                            type);
        if (genlh == NULL)
                goto nlmsg_failure;
@@ -1227,8 +1227,8 @@ static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
                goto err_unlock;
        }
 
-       err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid,
-                                info->snd_seq, info->nlhdr->nlmsg_type, pctx);
+       err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, info->snd_seq,
+                                0, info->nlhdr->nlmsg_type, pctx);
        if (err < 0)
                goto err_unlock_free;
 
@@ -1271,6 +1271,7 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb,
                                    gtp_genl_fill_info(skb,
                                            NETLINK_CB(cb->skb).portid,
                                            cb->nlh->nlmsg_seq,
+                                           NLM_F_MULTI,
                                            cb->nlh->nlmsg_type, pctx)) {
                                        cb->args[0] = i;
                                        cb->args[1] = j;
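
Threading a flags argument through gtp_genl_fill_info() lets the dump path stamp each record of a multi-part reply with NLM_F_MULTI, which netlink consumers rely on to keep reading until NLMSG_DONE, while the single-record get path passes 0. A kernel-context sketch of the same shape (demo_genl_family and the command value are hypothetical):

    static int demo_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
                              int flags, u8 cmd)
    {
            void *hdr;

            /* dump callers pass NLM_F_MULTI in flags, do-callers pass 0 */
            hdr = genlmsg_put(skb, portid, seq, &demo_genl_family, flags, cmd);
            if (!hdr)
                    return -EMSGSIZE;

            /* ... nla_put() the attributes ... */

            genlmsg_end(skb, hdr);
            return 0;
    }
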
index fbea6f232819e905f0fedd832127f4f9ab7a4aba..e2ad3c2e8df5e673e685b6396884eacdd4ea0e7c 100644 (file)
@@ -127,7 +127,8 @@ static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev)
 {
        struct bpqdev *bpq;
 
-       list_for_each_entry_rcu(bpq, &bpq_devices, bpq_list) {
+       list_for_each_entry_rcu(bpq, &bpq_devices, bpq_list,
+                               lockdep_rtnl_is_held()) {
                if (bpq->ethdev == dev)
                        return bpq->axdev;
        }
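
The fourth argument to list_for_each_entry_rcu() is a lockdep condition: the bpq list is only modified under RTNL, so traversal is equally safe for a caller holding RTNL instead of rcu_read_lock(), and lockdep_rtnl_is_held() teaches the RCU checker that. A kernel-context sketch with a hypothetical list whose writers hold a mutex:

    struct demo_node {
            int key;
            struct list_head list;
    };

    static LIST_HEAD(demo_list);
    static DEFINE_MUTEX(demo_mutex);        /* writer-side lock */

    /* Legal from rcu_read_lock() sections and from writers holding
     * demo_mutex; the condition suppresses the false lockdep splat.
     */
    static struct demo_node *demo_lookup(int key)
    {
            struct demo_node *n;

            list_for_each_entry_rcu(n, &demo_list, list,
                                    lockdep_is_held(&demo_mutex)) {
                    if (n->key == key)
                            return n;
            }
            return NULL;
    }
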
index d8e86bdbfba1e68d861d7c5bf9b91dd947cc0a65..ebcfbae056900be8087fcbe03913d1a1e0dbb717 100644 (file)
@@ -707,7 +707,8 @@ no_memory:
        goto drop;
 }
 
-static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t netvsc_start_xmit(struct sk_buff *skb,
+                                    struct net_device *ndev)
 {
        return netvsc_xmit(skb, ndev, false);
 }
index 845478a19a4f46b30fd448f8b852a5ae5bb2e8c2..8d9ca1c335e852d3b643e93931aede9c3f4dacc3 100644 (file)
@@ -1041,6 +1041,7 @@ static void gsi_isr_gp_int1(struct gsi *gsi)
 
        complete(&gsi->completion);
 }
+
 /* Inter-EE interrupt handler */
 static void gsi_isr_glob_ee(struct gsi *gsi)
 {
@@ -1391,6 +1392,7 @@ static int gsi_channel_poll(struct napi_struct *napi, int budget)
        while (count < budget) {
                struct gsi_trans *trans;
 
+               count++;
                trans = gsi_channel_poll_one(channel);
                if (!trans)
                        break;
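
The added count++ is what makes the budget check meaningful: without it the loop never consumed any budget and could keep polling a busy channel well past what NAPI granted it. A kernel-context sketch of the corrected shape (demo_poll_one() stands in for the per-transaction completion work):

    static int demo_napi_poll(struct napi_struct *napi, int budget)
    {
            int count = 0;

            while (count < budget) {
                    count++;                /* charge the attempt up front */
                    if (!demo_poll_one(napi))
                            break;          /* ring drained */
            }

            if (count < budget)
                    napi_complete_done(napi, count);
            return count;
    }
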
@@ -1493,6 +1495,12 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
        struct completion *completion = &gsi->completion;
        u32 val;
 
+       /* First zero the result code field */
+       val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
+       val &= ~GENERIC_EE_RESULT_FMASK;
+       iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
+
+       /* Now issue the command */
        val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
        val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
        val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
@@ -1798,9 +1806,9 @@ static int gsi_channel_init_one(struct gsi *gsi,
 
        /* Worst case we need an event for every outstanding TRE */
        if (data->channel.tre_count > data->channel.event_count) {
-               dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
-                       data->channel_id, data->channel.tre_count);
                tre_count = data->channel.event_count;
+               dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
+                        data->channel_id, tre_count);
        } else {
                tre_count = data->channel.tre_count;
        }
index 7613b9cc7cf69f65acdcc04ee80e52a0d410f127..acc9e744c67d16ca0813bf435857f9b747eb7325 100644 (file)
 #define INTER_EE_RESULT_FMASK          GENMASK(2, 0)
 #define GENERIC_EE_RESULT_FMASK                GENMASK(7, 5)
 #define GENERIC_EE_SUCCESS_FVAL                        1
+#define GENERIC_EE_INCORRECT_DIRECTION_FVAL    3
+#define GENERIC_EE_INCORRECT_CHANNEL_FVAL      5
 #define GENERIC_EE_NO_RESOURCES_FVAL           7
 #define USB_MAX_PACKET_FMASK           GENMASK(15, 15) /* 0: HS; 1: SS */
 #define MHI_BASE_CHANNEL_FMASK         GENMASK(31, 24)
index 2fd21d75367d21ceb720fe9fdf1c36227c3723df..bdbfeed359db35d5b09b05eea5b7c3379171c1ea 100644 (file)
@@ -399,13 +399,14 @@ void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
        /* assert(which < trans->tre_count); */
 
        /* Set the page information for the buffer.  We also need to fill in
-        * the DMA address for the buffer (something dma_map_sg() normally
-        * does).
+        * the DMA address and length for the buffer (something dma_map_sg()
+        * normally does).
         */
        sg = &trans->sgl[which];
 
        sg_set_buf(sg, buf, size);
        sg_dma_address(sg) = addr;
+       sg_dma_len(sg) = sg->length;
 
        info = &trans->info[which];
        info->opcode = opcode;
index d226b858742d007b2ff069d96b00be42e152eb4b..cee417181f981d5ab46ed2f9f86d3719b7620d86 100644 (file)
@@ -628,23 +628,15 @@ static void ipa_cmd_transfer_add(struct gsi_trans *trans, u16 size)
 
 void ipa_cmd_tag_process_add(struct gsi_trans *trans)
 {
-       ipa_cmd_register_write_add(trans, 0, 0, 0, true);
-#if 1
-       /* Reference these functions to avoid a compile error */
-       (void)ipa_cmd_ip_packet_init_add;
-       (void)ipa_cmd_ip_tag_status_add;
-       (void) ipa_cmd_transfer_add;
-#else
        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
-       struct gsi_endpoint *endpoint;
+       struct ipa_endpoint *endpoint;
 
        endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
-       ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id);
 
+       ipa_cmd_register_write_add(trans, 0, 0, 0, true);
+       ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id);
        ipa_cmd_ip_tag_status_add(trans, 0xcba987654321);
-
        ipa_cmd_transfer_add(trans, 4);
-#endif
 }
 
 /* Returns the number of commands required for the tag process */
index 6de03be287848c0a2d7e4223bb76c4bd019a5dc6..a21534f1462fa5d81fb2f30ed969327d5881776e 100644 (file)
@@ -1283,7 +1283,7 @@ static int ipa_endpoint_stop_rx_dma(struct ipa *ipa)
  */
 int ipa_endpoint_stop(struct ipa_endpoint *endpoint)
 {
-       u32 retries = endpoint->toward_ipa ? 0 : IPA_ENDPOINT_STOP_RX_RETRIES;
+       u32 retries = IPA_ENDPOINT_STOP_RX_RETRIES;
        int ret;
 
        do {
@@ -1291,12 +1291,9 @@ int ipa_endpoint_stop(struct ipa_endpoint *endpoint)
                struct gsi *gsi = &ipa->gsi;
 
                ret = gsi_channel_stop(gsi, endpoint->channel_id);
-               if (ret != -EAGAIN)
+               if (ret != -EAGAIN || endpoint->toward_ipa)
                        break;
 
-               if (endpoint->toward_ipa)
-                       continue;
-
                /* For IPA v3.5.1, send a DMA read task and check again */
                if (ipa->version == IPA_VERSION_3_5_1) {
                        ret = ipa_endpoint_stop_rx_dma(ipa);
index 4d33aa7ebfbb0be09e7a2ba19f9384a8f985edc9..a5f7a79a19238c28579dda5ee0f633100429f5ee 100644 (file)
@@ -53,7 +53,7 @@
  * @clock_on:          Whether IPA clock is on
  * @notified:          Whether modem has been notified of clock state
  * @disabled:          Whether setup ready interrupt handling is disabled
- * @mutex mutex:       Motex protecting ready interrupt/shutdown interlock
+ * @mutex:             Mutex protecting ready-interrupt/shutdown interlock
  * @panic_notifier:    Panic notifier structure
 */
 struct ipa_smp2p {
index 758baf7cb8a16cbf2ac6393a74c99e08a1279bfa..d0d31cb991803daa2a18099605d35f1fbb406eb2 100644 (file)
@@ -1305,7 +1305,8 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
        struct crypto_aead *tfm;
        int ret;
 
-       tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
+       /* Pick a sync gcm(aes) cipher to ensure order is preserved. */
+       tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
 
        if (IS_ERR(tfm))
                return tfm;
@@ -2640,11 +2641,12 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
        if (ret)
                goto rollback;
 
-       rtnl_unlock();
        /* Force features update, since they are different for SW MACSec and
         * HW offloading cases.
         */
        netdev_update_features(dev);
+
+       rtnl_unlock();
        return 0;
 
 rollback:
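
In crypto_alloc_aead(name, type, mask), an implementation is eligible only if its flags match 'type' in every bit of 'mask'; passing CRYPTO_ALG_ASYNC as the mask with type 0 therefore excludes every asynchronous gcm(aes) provider, so encryption completes inline and MACsec's ordering assumption holds. A condensed kernel-context sketch:

    static struct crypto_aead *demo_alloc_sync_gcm(void)
    {
            /* type = 0, mask = CRYPTO_ALG_ASYNC: only algorithms with
             * (cra_flags & CRYPTO_ALG_ASYNC) == 0 can be selected.
             */
            return crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
    }
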
index 68668a22b9dda01be2d1de92b72a48e7279b1c97..dc3ff0e20944db01a246d78ee15bb5b7717437c5 100644 (file)
@@ -858,8 +858,7 @@ nsim_dev_devlink_trap_policer_counter_get(struct devlink *devlink,
                return -EINVAL;
 
        cnt = &nsim_dev->trap_data->trap_policers_cnt_arr[policer->id - 1];
-       *p_drops = *cnt;
-       *cnt += jiffies % 64;
+       *p_drops = (*cnt)++;
 
        return 0;
 }
index ae4873f2f86efb0abcc117f0aac11694b6def9a6..d14d91b759b75e2348db7042d7291432ece11ff9 100644 (file)
@@ -225,8 +225,12 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
        else
                val |= BCM54XX_SHD_SCR3_DLLAPD_DIS;
 
-       if (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY)
-               val |= BCM54XX_SHD_SCR3_TRDDAPD;
+       if (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY) {
+               if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810)
+                       val |= BCM54810_SHD_SCR3_TRDDAPD;
+               else
+                       val |= BCM54XX_SHD_SCR3_TRDDAPD;
+       }
 
        if (orig != val)
                bcm_phy_write_shadow(phydev, BCM54XX_SHD_SCR3, val);
index 415c273109829959c28316a62b271ac3bfde83a3..ecbd5e0d685cf3cc3b3d46fbb2f1456c44ed0472 100644 (file)
@@ -1120,7 +1120,7 @@ static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus)
                goto out;
        }
        dp83640_clock_init(clock, bus);
-       list_add_tail(&phyter_clocks, &clock->list);
+       list_add_tail(&clock->list, &phyter_clocks);
 out:
        mutex_unlock(&phyter_clocks_lock);
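
The dp83640 one-liner is a swapped-argument bug: list_add_tail(new, head) takes the element first and the list head second, so the reversed call spliced the global phyter_clocks head into the clock's own node and the clock never landed on the list. A kernel-context sketch of the correct call (types are illustrative):

    struct demo_clock {
            struct list_head list;
    };

    static LIST_HEAD(demo_clocks);

    static void demo_clock_register(struct demo_clock *clock)
    {
            /* element first, list head second */
            list_add_tail(&clock->list, &demo_clocks);
    }
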
 
index fe9aa3ad52a71f8bbea3c26f1b08a1bb4a1e33c4..1dd19d0cb269f5a945b37e09cec3824f2f9e1eb1 100644 (file)
@@ -137,19 +137,18 @@ static int dp83822_set_wol(struct phy_device *phydev,
                        value &= ~DP83822_WOL_SECURE_ON;
                }
 
-               value |= (DP83822_WOL_EN | DP83822_WOL_INDICATION_SEL |
-                         DP83822_WOL_CLR_INDICATION);
-               phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG,
-                             value);
+               /* Clear any pending WoL interrupt */
+               phy_read(phydev, MII_DP83822_MISR2);
+
+               value |= DP83822_WOL_EN | DP83822_WOL_INDICATION_SEL |
+                        DP83822_WOL_CLR_INDICATION;
+
+               return phy_write_mmd(phydev, DP83822_DEVADDR,
+                                    MII_DP83822_WOL_CFG, value);
        } else {
-               value = phy_read_mmd(phydev, DP83822_DEVADDR,
-                                    MII_DP83822_WOL_CFG);
-               value &= ~DP83822_WOL_EN;
-               phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG,
-                             value);
+               return phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
+                                         MII_DP83822_WOL_CFG, DP83822_WOL_EN);
        }
-
-       return 0;
 }
 
 static void dp83822_get_wol(struct phy_device *phydev,
@@ -258,12 +257,11 @@ static int dp83822_config_intr(struct phy_device *phydev)
 
 static int dp83822_config_init(struct phy_device *phydev)
 {
-       int value;
-
-       value = DP83822_WOL_MAGIC_EN | DP83822_WOL_SECURE_ON | DP83822_WOL_EN;
+       int value = DP83822_WOL_EN | DP83822_WOL_MAGIC_EN |
+                   DP83822_WOL_SECURE_ON;
 
-       return phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG,
-             value);
+       return phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
+                                 MII_DP83822_WOL_CFG, value);
 }
 
 static int dp83822_phy_reset(struct phy_device *phydev)
index 06f08832ebcdea64413eb0fc49653d5ba82ab75c..d73725312c7c353e382417b341f5f241dbaf2dbf 100644 (file)
@@ -139,16 +139,19 @@ static int dp83811_set_wol(struct phy_device *phydev,
                        value &= ~DP83811_WOL_SECURE_ON;
                }
 
-               value |= (DP83811_WOL_EN | DP83811_WOL_INDICATION_SEL |
-                         DP83811_WOL_CLR_INDICATION);
-               phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
-                             value);
+               /* Clear any pending WoL interrupt */
+               phy_read(phydev, MII_DP83811_INT_STAT1);
+
+               value |= DP83811_WOL_EN | DP83811_WOL_INDICATION_SEL |
+                        DP83811_WOL_CLR_INDICATION;
+
+               return phy_write_mmd(phydev, DP83811_DEVADDR,
+                                    MII_DP83811_WOL_CFG, value);
        } else {
-               phy_clear_bits_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
-                                  DP83811_WOL_EN);
+               return phy_clear_bits_mmd(phydev, DP83811_DEVADDR,
+                                         MII_DP83811_WOL_CFG, DP83811_WOL_EN);
        }
 
-       return 0;
 }
 
 static void dp83811_get_wol(struct phy_device *phydev,
@@ -292,8 +295,8 @@ static int dp83811_config_init(struct phy_device *phydev)
 
        value = DP83811_WOL_MAGIC_EN | DP83811_WOL_SECURE_ON | DP83811_WOL_EN;
 
-       return phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
-             value);
+       return phy_clear_bits_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
+                                 value);
 }
 
 static int dp83811_phy_reset(struct phy_device *phydev)
index ff12492771ab7860f53aca370ae7fc04230921bb..1f1a01c98e4406b6b959bd9fa6d4307342304b87 100644 (file)
@@ -66,6 +66,9 @@ enum {
        MV_PCS_CSSR1_SPD2_2500  = 0x0004,
        MV_PCS_CSSR1_SPD2_10000 = 0x0000,
 
+       /* Temperature read register (88E2110 only) */
+       MV_PCS_TEMP             = 0x8042,
+
        /* These registers appear at 0x800X and 0xa00X - the 0xa00X control
         * registers appear to set themselves to the 0x800X when AN is
         * restarted, but status registers appear readable from either.
@@ -77,6 +80,7 @@ enum {
        MV_V2_PORT_CTRL         = 0xf001,
        MV_V2_PORT_CTRL_SWRST   = BIT(15),
        MV_V2_PORT_CTRL_PWRDOWN = BIT(11),
+       /* Temperature control/read registers (88X3310 only) */
        MV_V2_TEMP_CTRL         = 0xf08a,
        MV_V2_TEMP_CTRL_MASK    = 0xc000,
        MV_V2_TEMP_CTRL_SAMPLE  = 0x0000,
@@ -104,6 +108,24 @@ static umode_t mv3310_hwmon_is_visible(const void *data,
        return 0;
 }
 
+static int mv3310_hwmon_read_temp_reg(struct phy_device *phydev)
+{
+       return phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP);
+}
+
+static int mv2110_hwmon_read_temp_reg(struct phy_device *phydev)
+{
+       return phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_TEMP);
+}
+
+static int mv10g_hwmon_read_temp_reg(struct phy_device *phydev)
+{
+       if (phydev->drv->phy_id == MARVELL_PHY_ID_88X3310)
+               return mv3310_hwmon_read_temp_reg(phydev);
+       else /* MARVELL_PHY_ID_88E2110 */
+               return mv2110_hwmon_read_temp_reg(phydev);
+}
+
 static int mv3310_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
                             u32 attr, int channel, long *value)
 {
@@ -116,7 +138,7 @@ static int mv3310_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
        }
 
        if (type == hwmon_temp && attr == hwmon_temp_input) {
-               temp = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP);
+               temp = mv10g_hwmon_read_temp_reg(phydev);
                if (temp < 0)
                        return temp;
 
@@ -169,6 +191,9 @@ static int mv3310_hwmon_config(struct phy_device *phydev, bool enable)
        u16 val;
        int ret;
 
+       if (phydev->drv->phy_id != MARVELL_PHY_ID_88X3310)
+               return 0;
+
        ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP,
                            MV_V2_TEMP_UNKNOWN);
        if (ret < 0)
index 030bf8b600dfeada44de2ed22649e9bf4b07be3c..414e3b31bb1fa77da4680a9ca8829f9bc664adae 100644 (file)
@@ -354,6 +354,8 @@ struct vsc8531_private {
        u64 *stats;
        int nstats;
        bool pkg_init;
+       /* PHY address within the package. */
+       u8 addr;
        /* For multiple port PHYs; the MDIO address of the base PHY in the
         * package.
         */
index fcb5ba5e5d033c8a3ae66673337fa9f16d6c0b15..59b6837c60b3123c255794d226ef71f51322a0c8 100644 (file)
 #define MSCC_MAC_PAUSE_CFG_STATE_PAUSE_STATE                   BIT(0)
 #define MSCC_MAC_PAUSE_CFG_STATE_MAC_TX_PAUSE_GEN              BIT(4)
 
-#define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL                      0x2
-#define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(x)     (x)
-#define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M      GENMASK(2, 0)
+#define MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL                        0x2
+#define MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(x)       (x)
+#define MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M        GENMASK(2, 0)
 
 #endif /* _MSCC_PHY_LINE_MAC_H_ */
index e99e2cd72a0c413f1109bd819cc1d60e4468a792..b4d3dc4068e279c45e6499b4ce4b0de406c92646 100644 (file)
@@ -316,6 +316,8 @@ static void vsc8584_macsec_mac_init(struct phy_device *phydev,
 /* Must be called with mdio_lock taken */
 static int __vsc8584_macsec_init(struct phy_device *phydev)
 {
+       struct vsc8531_private *priv = phydev->priv;
+       enum macsec_bank proc_bank;
        u32 val;
 
        vsc8584_macsec_block_init(phydev, MACSEC_INGR);
@@ -351,12 +353,14 @@ static int __vsc8584_macsec_init(struct phy_device *phydev)
        val |= MSCC_FCBUF_ENA_CFG_TX_ENA | MSCC_FCBUF_ENA_CFG_RX_ENA;
        vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_ENA_CFG, val);
 
-       val = vsc8584_macsec_phy_read(phydev, IP_1588,
-                                     MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL);
-       val &= ~MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M;
-       val |= MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(4);
-       vsc8584_macsec_phy_write(phydev, IP_1588,
-                                MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL, val);
+       proc_bank = (priv->addr < 2) ? PROC_0 : PROC_2;
+
+       val = vsc8584_macsec_phy_read(phydev, proc_bank,
+                                     MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL);
+       val &= ~MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M;
+       val |= MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(4);
+       vsc8584_macsec_phy_write(phydev, proc_bank,
+                                MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL, val);
 
        return 0;
 }
index d0783944d106873caf2fef5faf974c900b04d22b..d751f2946b7926bc310fa7819eae61fe5465d286 100644 (file)
@@ -64,7 +64,8 @@ enum macsec_bank {
        FC_BUFFER   = 0x04,
        HOST_MAC    = 0x05,
        LINE_MAC    = 0x06,
-       IP_1588     = 0x0e,
+       PROC_0      = 0x0e,
+       PROC_2      = 0x0f,
        MACSEC_INGR = 0x38,
        MACSEC_EGR  = 0x3c,
 };
index acddef79f4e8d5df07962215843326a9b438085c..c8aa6d905d8e63759782153cd58d26e3bf40157f 100644 (file)
@@ -1347,6 +1347,8 @@ static int vsc8584_config_init(struct phy_device *phydev)
        else
                vsc8531->base_addr = phydev->mdio.addr - addr;
 
+       vsc8531->addr = addr;
+
        /* Some parts of the init sequence are identical for every PHY in the
         * package. Some parts are modifying the GPIO register bank which is a
         * set of registers that are affecting all PHYs, a few resetting the
@@ -1771,6 +1773,8 @@ static int vsc8514_config_init(struct phy_device *phydev)
        else
                vsc8531->base_addr = phydev->mdio.addr - addr;
 
+       vsc8531->addr = addr;
+
        /* Some parts of the init sequence are identical for every PHY in the
         * package. Some parts are modifying the GPIO register bank which is a
         * set of registers that are affecting all PHYs, a few resetting the
index 72c69a9c8a98aef62cdef34a186fa3b7b1657a44..20ca6418f7bc7ff34347310078884c2547a35fcd 100644 (file)
@@ -1132,9 +1132,11 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
                /* Restart autonegotiation so the new modes get sent to the
                 * link partner.
                 */
-               ret = phy_restart_aneg(phydev);
-               if (ret < 0)
-                       return ret;
+               if (phydev->autoneg == AUTONEG_ENABLE) {
+                       ret = phy_restart_aneg(phydev);
+                       if (ret < 0)
+                               return ret;
+               }
        }
 
        return 0;
index ac2784192472fcdeeb5df2c11ba475d2da98b0d8..697c74deb222b4444b510e3db77d7d1f7ee2e33b 100644 (file)
@@ -1233,7 +1233,7 @@ int phy_sfp_probe(struct phy_device *phydev,
                  const struct sfp_upstream_ops *ops)
 {
        struct sfp_bus *bus;
-       int ret;
+       int ret = 0;
 
        if (phydev->mdio.dev.fwnode) {
                bus = sfp_bus_find_fwnode(phydev->mdio.dev.fwnode);
@@ -1245,7 +1245,7 @@ int phy_sfp_probe(struct phy_device *phydev,
                ret = sfp_bus_add_upstream(bus, phydev, ops);
                sfp_bus_put(bus);
        }
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL(phy_sfp_probe);
 
index d760a36db28cb43237bbad4309eaf509a5bf681d..beedaad082551b89a90f8c41ca78fb8a312a5099 100644 (file)
@@ -490,6 +490,9 @@ static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev,
        if (!skb)
                goto out;
 
+       if (skb->pkt_type != PACKET_HOST)
+               goto abort;
+
        if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
                goto abort;
 
index 0cdb2ce47645c5874218a0ec8b77d299840706aa..a657943c9f01b9da893d758b0bdb3d4944fdd3b4 100644 (file)
@@ -815,14 +815,21 @@ static const struct usb_device_id products[] = {
        .driver_info = 0,
 },
 
-/* Microsoft Surface 3 dock (based on Realtek RTL8153) */
+/* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153) */
 {
        USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07c6, USB_CLASS_COMM,
                        USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
        .driver_info = 0,
 },
 
-       /* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
+/* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153B) */
+{
+       USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x0927, USB_CLASS_COMM,
+                       USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+       .driver_info = 0,
+},
+
+/* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
 {
        USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, 0x0601, USB_CLASS_COMM,
                        USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
index 417e42c9fd03895ba979519dc5cfe7af15526cef..bb8c34d746ab33a8cd187baea128d9525e9a3c9f 100644 (file)
@@ -2659,7 +2659,7 @@ static struct hso_device *hso_create_bulk_serial_device(
        if (!
            (serial->out_endp =
             hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, USB_DIR_OUT))) {
-               dev_err(&interface->dev, "Failed to find BULK IN ep\n");
+               dev_err(&interface->dev, "Failed to find BULK OUT ep\n");
                goto exit2;
        }
 
index 6c738a271257d4f180774e91b10494b0d96fa5c0..4a2c7355be63d7336b5f47d1e4c7487349136fbb 100644 (file)
@@ -1324,6 +1324,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},    /* Alcatel L800MA */
        {QMI_FIXED_INTF(0x2357, 0x0201, 4)},    /* TP-LINK HSUPA Modem MA180 */
        {QMI_FIXED_INTF(0x2357, 0x9000, 4)},    /* TP-LINK MA260 */
+       {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
        {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)},    /* Telit ME910 */
@@ -1359,6 +1360,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x413c, 0x81b3, 8)},    /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
        {QMI_FIXED_INTF(0x413c, 0x81b6, 8)},    /* Dell Wireless 5811e */
        {QMI_FIXED_INTF(0x413c, 0x81b6, 10)},   /* Dell Wireless 5811e */
+       {QMI_FIXED_INTF(0x413c, 0x81cc, 8)},    /* Dell Wireless 5816e */
        {QMI_FIXED_INTF(0x413c, 0x81d7, 0)},    /* Dell Wireless 5821e */
        {QMI_FIXED_INTF(0x413c, 0x81d7, 1)},    /* Dell Wireless 5821e preproduction config */
        {QMI_FIXED_INTF(0x413c, 0x81e0, 0)},    /* Dell Wireless 5821e with eSIM support*/
index 8f8d9883d3634427c61f937a8965dea84fa8e565..c8c873a613b66935239677ab7ba8d79d59228fd2 100644 (file)
@@ -6880,6 +6880,7 @@ static const struct usb_device_id rtl8152_table[] = {
        {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
        {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)},
        {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6)},
+       {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927)},
        {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x304f)},
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x3062)},
index 11f7224605133aa1ee1eede78a63c2dfc02fc201..ce07f52d89e7ab6f6df9e4141998d4dc912c15b9 100644 (file)
@@ -1243,9 +1243,11 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
                        break;
        } while (rq->vq->num_free);
        if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
-               u64_stats_update_begin(&rq->stats.syncp);
+               unsigned long flags;
+
+               flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
                rq->stats.kicks++;
-               u64_stats_update_end(&rq->stats.syncp);
+               u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
        }
 
        return !oom;
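
try_fill_recv() can run both from process context and from a context with interrupts disabled; on 32-bit SMP kernels a u64_stats writer opens a seqcount section, and a second update nested on top of an interrupted one can leave readers spinning or seeing torn values. The irqsave variant closes that window; a kernel-context sketch:

    struct demo_stats {
            u64 kicks;
            struct u64_stats_sync syncp;
    };

    /* Safe whether the caller is entered with irqs on or off */
    static void demo_count_kick(struct demo_stats *s)
    {
            unsigned long flags;

            flags = u64_stats_update_begin_irqsave(&s->syncp);
            s->kicks++;
            u64_stats_update_end_irqrestore(&s->syncp, flags);
    }
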
index b8a7b9ce32ba7a54d18fcadb326f27431428a02c..208da72673fc020f6d91e68fb7e69c597768b662 100644 (file)
@@ -32,7 +32,7 @@ enum cookie_values {
 };
 
 enum counter_values {
-       COUNTER_BITS_TOTAL = 2048,
+       COUNTER_BITS_TOTAL = 8192,
        COUNTER_REDUNDANT_BITS = BITS_PER_LONG,
        COUNTER_WINDOW_SIZE = COUNTER_BITS_TOTAL - COUNTER_REDUNDANT_BITS
 };
index 708dc61c974f7397db15e929caecded781f6dddf..626433690abb396deb80315856648bd1adda7276 100644 (file)
@@ -104,6 +104,7 @@ static struct noise_keypair *keypair_create(struct wg_peer *peer)
 
        if (unlikely(!keypair))
                return NULL;
+       spin_lock_init(&keypair->receiving_counter.lock);
        keypair->internal_id = atomic64_inc_return(&keypair_counter);
        keypair->entry.type = INDEX_HASHTABLE_KEYPAIR;
        keypair->entry.peer = peer;
@@ -358,25 +359,16 @@ out:
        memzero_explicit(output, BLAKE2S_HASH_SIZE + 1);
 }
 
-static void symmetric_key_init(struct noise_symmetric_key *key)
-{
-       spin_lock_init(&key->counter.receive.lock);
-       atomic64_set(&key->counter.counter, 0);
-       memset(key->counter.receive.backtrack, 0,
-              sizeof(key->counter.receive.backtrack));
-       key->birthdate = ktime_get_coarse_boottime_ns();
-       key->is_valid = true;
-}
-
 static void derive_keys(struct noise_symmetric_key *first_dst,
                        struct noise_symmetric_key *second_dst,
                        const u8 chaining_key[NOISE_HASH_LEN])
 {
+       u64 birthdate = ktime_get_coarse_boottime_ns();
        kdf(first_dst->key, second_dst->key, NULL, NULL,
            NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, 0,
            chaining_key);
-       symmetric_key_init(first_dst);
-       symmetric_key_init(second_dst);
+       first_dst->birthdate = second_dst->birthdate = birthdate;
+       first_dst->is_valid = second_dst->is_valid = true;
 }
 
 static bool __must_check mix_dh(u8 chaining_key[NOISE_HASH_LEN],
@@ -715,6 +707,7 @@ wg_noise_handshake_consume_response(struct message_handshake_response *src,
        u8 e[NOISE_PUBLIC_KEY_LEN];
        u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN];
        u8 static_private[NOISE_PUBLIC_KEY_LEN];
+       u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN];
 
        down_read(&wg->static_identity.lock);
 
@@ -733,6 +726,8 @@ wg_noise_handshake_consume_response(struct message_handshake_response *src,
        memcpy(chaining_key, handshake->chaining_key, NOISE_HASH_LEN);
        memcpy(ephemeral_private, handshake->ephemeral_private,
               NOISE_PUBLIC_KEY_LEN);
+       memcpy(preshared_key, handshake->preshared_key,
+              NOISE_SYMMETRIC_KEY_LEN);
        up_read(&handshake->lock);
 
        if (state != HANDSHAKE_CREATED_INITIATION)
@@ -750,7 +745,7 @@ wg_noise_handshake_consume_response(struct message_handshake_response *src,
                goto fail;
 
        /* psk */
-       mix_psk(chaining_key, hash, key, handshake->preshared_key);
+       mix_psk(chaining_key, hash, key, preshared_key);
 
        /* {} */
        if (!message_decrypt(NULL, src->encrypted_nothing,
@@ -783,6 +778,7 @@ out:
        memzero_explicit(chaining_key, NOISE_HASH_LEN);
        memzero_explicit(ephemeral_private, NOISE_PUBLIC_KEY_LEN);
        memzero_explicit(static_private, NOISE_PUBLIC_KEY_LEN);
+       memzero_explicit(preshared_key, NOISE_SYMMETRIC_KEY_LEN);
        up_read(&wg->static_identity.lock);
        return ret_peer;
 }
index f532d59d3f19afa09ba360d34c2eb234025af63b..c527253dba80e8c3f7328656e01dac7dc330846a 100644 (file)
 #include <linux/mutex.h>
 #include <linux/kref.h>
 
-union noise_counter {
-       struct {
-               u64 counter;
-               unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG];
-               spinlock_t lock;
-       } receive;
-       atomic64_t counter;
+struct noise_replay_counter {
+       u64 counter;
+       spinlock_t lock;
+       unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG];
 };
 
 struct noise_symmetric_key {
        u8 key[NOISE_SYMMETRIC_KEY_LEN];
-       union noise_counter counter;
        u64 birthdate;
        bool is_valid;
 };
@@ -34,7 +30,9 @@ struct noise_symmetric_key {
 struct noise_keypair {
        struct index_hashtable_entry entry;
        struct noise_symmetric_key sending;
+       atomic64_t sending_counter;
        struct noise_symmetric_key receiving;
+       struct noise_replay_counter receiving_counter;
        __le32 remote_index;
        bool i_am_the_initiator;
        struct kref refcount;
index 5c964fcb994ec5f44e69d5fa987369af99afe40f..71b8e80b58e121d2cb46882bac905522c205146f 100644 (file)
@@ -35,8 +35,10 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
                if (multicore) {
                        queue->worker = wg_packet_percpu_multicore_worker_alloc(
                                function, queue);
-                       if (!queue->worker)
+                       if (!queue->worker) {
+                               ptr_ring_cleanup(&queue->ring, NULL);
                                return -ENOMEM;
+                       }
                } else {
                        INIT_WORK(&queue->work, function);
                }
index 3432232afe061317ce85cf344fccff2078811547..c58df439dbbe09bf50656707a7d6cd86171886a7 100644 (file)
@@ -87,12 +87,20 @@ static inline bool wg_check_packet_protocol(struct sk_buff *skb)
        return real_protocol && skb->protocol == real_protocol;
 }
 
-static inline void wg_reset_packet(struct sk_buff *skb)
+static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating)
 {
+       u8 l4_hash = skb->l4_hash;
+       u8 sw_hash = skb->sw_hash;
+       u32 hash = skb->hash;
        skb_scrub_packet(skb, true);
        memset(&skb->headers_start, 0,
               offsetof(struct sk_buff, headers_end) -
                       offsetof(struct sk_buff, headers_start));
+       if (encapsulating) {
+               skb->l4_hash = l4_hash;
+               skb->sw_hash = sw_hash;
+               skb->hash = hash;
+       }
        skb->queue_mapping = 0;
        skb->nohdr = 0;
        skb->peeked = 0;
index da3b782ab7d31df11e381529b144bcc494234a38..91438144e4f7a7909f33690e15ca39103cab6061 100644 (file)
@@ -226,40 +226,39 @@ void wg_packet_handshake_receive_worker(struct work_struct *work)
 static void keep_key_fresh(struct wg_peer *peer)
 {
        struct noise_keypair *keypair;
-       bool send = false;
+       bool send;
 
        if (peer->sent_lastminute_handshake)
                return;
 
        rcu_read_lock_bh();
        keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
-       if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) &&
-           keypair->i_am_the_initiator &&
-           unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
-                       REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT)))
-               send = true;
+       send = keypair && READ_ONCE(keypair->sending.is_valid) &&
+              keypair->i_am_the_initiator &&
+              wg_birthdate_has_expired(keypair->sending.birthdate,
+                       REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT);
        rcu_read_unlock_bh();
 
-       if (send) {
+       if (unlikely(send)) {
                peer->sent_lastminute_handshake = true;
                wg_packet_send_queued_handshake_initiation(peer, false);
        }
 }
 
-static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key)
+static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
 {
        struct scatterlist sg[MAX_SKB_FRAGS + 8];
        struct sk_buff *trailer;
        unsigned int offset;
        int num_frags;
 
-       if (unlikely(!key))
+       if (unlikely(!keypair))
                return false;
 
-       if (unlikely(!READ_ONCE(key->is_valid) ||
-                 wg_birthdate_has_expired(key->birthdate, REJECT_AFTER_TIME) ||
-                 key->counter.receive.counter >= REJECT_AFTER_MESSAGES)) {
-               WRITE_ONCE(key->is_valid, false);
+       if (unlikely(!READ_ONCE(keypair->receiving.is_valid) ||
+                 wg_birthdate_has_expired(keypair->receiving.birthdate, REJECT_AFTER_TIME) ||
+                 keypair->receiving_counter.counter >= REJECT_AFTER_MESSAGES)) {
+               WRITE_ONCE(keypair->receiving.is_valid, false);
                return false;
        }
 
@@ -284,7 +283,7 @@ static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key)
 
        if (!chacha20poly1305_decrypt_sg_inplace(sg, skb->len, NULL, 0,
                                                 PACKET_CB(skb)->nonce,
-                                                key->key))
+                                                keypair->receiving.key))
                return false;
 
        /* Another ugly situation of pushing and pulling the header so as to
@@ -299,41 +298,41 @@ static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key)
 }
 
 /* This is RFC6479, a replay detection bitmap algorithm that avoids bitshifts */
-static bool counter_validate(union noise_counter *counter, u64 their_counter)
+static bool counter_validate(struct noise_replay_counter *counter, u64 their_counter)
 {
        unsigned long index, index_current, top, i;
        bool ret = false;
 
-       spin_lock_bh(&counter->receive.lock);
+       spin_lock_bh(&counter->lock);
 
-       if (unlikely(counter->receive.counter >= REJECT_AFTER_MESSAGES + 1 ||
+       if (unlikely(counter->counter >= REJECT_AFTER_MESSAGES + 1 ||
                     their_counter >= REJECT_AFTER_MESSAGES))
                goto out;
 
        ++their_counter;
 
        if (unlikely((COUNTER_WINDOW_SIZE + their_counter) <
-                    counter->receive.counter))
+                    counter->counter))
                goto out;
 
        index = their_counter >> ilog2(BITS_PER_LONG);
 
-       if (likely(their_counter > counter->receive.counter)) {
-               index_current = counter->receive.counter >> ilog2(BITS_PER_LONG);
+       if (likely(their_counter > counter->counter)) {
+               index_current = counter->counter >> ilog2(BITS_PER_LONG);
                top = min_t(unsigned long, index - index_current,
                            COUNTER_BITS_TOTAL / BITS_PER_LONG);
                for (i = 1; i <= top; ++i)
-                       counter->receive.backtrack[(i + index_current) &
+                       counter->backtrack[(i + index_current) &
                                ((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0;
-               counter->receive.counter = their_counter;
+               counter->counter = their_counter;
        }
 
        index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1;
        ret = !test_and_set_bit(their_counter & (BITS_PER_LONG - 1),
-                               &counter->receive.backtrack[index]);
+                               &counter->backtrack[index]);
 
 out:
-       spin_unlock_bh(&counter->receive.lock);
+       spin_unlock_bh(&counter->lock);
        return ret;
 }
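
Since this hunk only renames fields, the algorithm itself is easy to miss. A minimal userspace sketch of the RFC6479 idea, independent of the kernel types above: the window is a power-of-two array of words, advancing it zeroes whole words rather than shifting bits, and a set-bit test catches replays. The REJECT_AFTER_MESSAGES cap enforced by the kernel version is omitted here for brevity.

#include <stdbool.h>
#include <stdint.h>

#define WORD_BITS    64
#define WINDOW_WORDS 16                         /* must be a power of two */
#define WINDOW_BITS  (WORD_BITS * WINDOW_WORDS)

struct replay_window {
        uint64_t max_seq;                       /* highest accepted + 1 */
        uint64_t bitmap[WINDOW_WORDS];
};

/* Returns true if seq is fresh (and marks it seen); false on replay/stale. */
static bool replay_check(struct replay_window *w, uint64_t seq)
{
        uint64_t idx, cur_idx, top, i;

        ++seq;                                  /* work 1-based, like the driver */
        if (seq + WINDOW_BITS < w->max_seq)
                return false;                   /* older than the window */

        idx = seq / WORD_BITS;
        if (seq > w->max_seq) {                 /* advance: zero whole words */
                cur_idx = w->max_seq / WORD_BITS;
                top = idx - cur_idx;
                if (top > WINDOW_WORDS)
                        top = WINDOW_WORDS;
                for (i = 1; i <= top; ++i)
                        w->bitmap[(cur_idx + i) % WINDOW_WORDS] = 0;
                w->max_seq = seq;
        }
        idx %= WINDOW_WORDS;
        if (w->bitmap[idx] & (1ULL << (seq % WORD_BITS)))
                return false;                   /* already seen: replay */
        w->bitmap[idx] |= 1ULL << (seq % WORD_BITS);
        return true;
}
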
 
@@ -393,13 +392,11 @@ static void wg_packet_consume_data_done(struct wg_peer *peer,
                len = ntohs(ip_hdr(skb)->tot_len);
                if (unlikely(len < sizeof(struct iphdr)))
                        goto dishonest_packet_size;
-               if (INET_ECN_is_ce(PACKET_CB(skb)->ds))
-                       IP_ECN_set_ce(ip_hdr(skb));
+               INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ip_hdr(skb)->tos);
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                len = ntohs(ipv6_hdr(skb)->payload_len) +
                      sizeof(struct ipv6hdr);
-               if (INET_ECN_is_ce(PACKET_CB(skb)->ds))
-                       IP6_ECN_set_ce(skb, ipv6_hdr(skb));
+               INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ipv6_get_dsfield(ipv6_hdr(skb)));
        } else {
                goto dishonest_packet_type;
        }
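
The two open-coded "copy CE to the inner header" branches become INET_ECN_decapsulate(), which also covers the case where the outer header signals congestion but the inner packet is not ECN-capable; the helper asks the caller to drop in that case, and its return value is ignored here just as the old code ignored that situation. A simplified model of the RFC 6040 decision follows, as a sketch rather than the kernel's actual signature, and ignoring RFC 6040's ECT(1) propagation rule.

#include <stdbool.h>

/* ECN codepoints: 00 not-ECT, 01 ECT(1), 10 ECT(0), 11 CE. */
enum ecn { NOT_ECT = 0, ECT_1 = 1, ECT_0 = 2, CE = 3 };

struct decap_result {
        enum ecn inner;         /* codepoint for the decapsulated header */
        bool drop;              /* congestion mark would be lost: drop */
};

static struct decap_result ecn_decapsulate(enum ecn outer, enum ecn inner)
{
        struct decap_result r = { .inner = inner, .drop = false };

        if (outer == CE) {
                if (inner == NOT_ECT)
                        r.drop = true;  /* inner can't carry the CE mark */
                else
                        r.inner = CE;   /* propagate congestion signal */
        }
        return r;
}
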
@@ -475,19 +472,19 @@ int wg_packet_rx_poll(struct napi_struct *napi, int budget)
                if (unlikely(state != PACKET_STATE_CRYPTED))
                        goto next;
 
-               if (unlikely(!counter_validate(&keypair->receiving.counter,
+               if (unlikely(!counter_validate(&keypair->receiving_counter,
                                               PACKET_CB(skb)->nonce))) {
                        net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n",
                                            peer->device->dev->name,
                                            PACKET_CB(skb)->nonce,
-                                           keypair->receiving.counter.receive.counter);
+                                           keypair->receiving_counter.counter);
                        goto next;
                }
 
                if (unlikely(wg_socket_endpoint_from_skb(&endpoint, skb)))
                        goto next;
 
-               wg_reset_packet(skb);
+               wg_reset_packet(skb, false);
                wg_packet_consume_data_done(peer, skb, &endpoint);
                free = false;
 
@@ -514,10 +511,12 @@ void wg_packet_decrypt_worker(struct work_struct *work)
        struct sk_buff *skb;
 
        while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
-               enum packet_state state = likely(decrypt_packet(skb,
-                               &PACKET_CB(skb)->keypair->receiving)) ?
+               enum packet_state state =
+                       likely(decrypt_packet(skb, PACKET_CB(skb)->keypair)) ?
                                PACKET_STATE_CRYPTED : PACKET_STATE_DEAD;
                wg_queue_enqueue_per_peer_napi(skb, state);
+               if (need_resched())
+                       cond_resched();
        }
 }
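
The same two-line yield appears in three workers in this series (the decrypt worker above, and the tx and encrypt workers further down): draining a full ptr_ring can keep a worker on the CPU long enough to starve other tasks on non-preemptible kernels. The shape of the pattern, with stand-in helpers rather than real ring APIs:

#include <linux/sched.h>

extern void *consume_item(void);        /* stand-in for ptr_ring_consume_bh() */
extern void process_item(void *item);   /* stand-in for the real work */

static void example_worker(void)
{
        void *item;

        while ((item = consume_item()) != NULL) {
                process_item(item);
                /*
                 * cond_resched() checks need_resched() itself, but testing
                 * the flag first keeps the common no-reschedule case to a
                 * cheap inline test instead of a function call.
                 */
                if (need_resched())
                        cond_resched();
        }
}
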
 
index f4fbb9072ed733c4da71dd04836c249b37f3d48d..ec3c156bf91bab2fa386394f4708c3d2ab6cde05 100644 (file)
@@ -6,18 +6,24 @@
 #ifdef DEBUG
 bool __init wg_packet_counter_selftest(void)
 {
+       struct noise_replay_counter *counter;
        unsigned int test_num = 0, i;
-       union noise_counter counter;
        bool success = true;
 
-#define T_INIT do {                                               \
-               memset(&counter, 0, sizeof(union noise_counter)); \
-               spin_lock_init(&counter.receive.lock);            \
+       counter = kmalloc(sizeof(*counter), GFP_KERNEL);
+       if (unlikely(!counter)) {
+               pr_err("nonce counter self-test malloc: FAIL\n");
+               return false;
+       }
+
+#define T_INIT do {                                    \
+               memset(counter, 0, sizeof(*counter));  \
+               spin_lock_init(&counter->lock);        \
        } while (0)
 #define T_LIM (COUNTER_WINDOW_SIZE + 1)
 #define T(n, v) do {                                                  \
                ++test_num;                                           \
-               if (counter_validate(&counter, n) != (v)) {           \
+               if (counter_validate(counter, n) != (v)) {            \
                        pr_err("nonce counter self-test %u: FAIL\n",  \
                               test_num);                             \
                        success = false;                              \
@@ -99,6 +105,7 @@ bool __init wg_packet_counter_selftest(void)
 
        if (success)
                pr_info("nonce counter self-tests: pass\n");
+       kfree(counter);
        return success;
 }
 #endif
index bcd6462e454017841bf86a1973869e83097a2826..007cd4457c5f67be970866335908b4b8c020d67f 100644 (file)
@@ -120,9 +120,9 @@ bool __init wg_ratelimiter_selftest(void)
        enum { TRIALS_BEFORE_GIVING_UP = 5000 };
        bool success = false;
        int test = 0, trials;
-       struct sk_buff *skb4, *skb6;
+       struct sk_buff *skb4, *skb6 = NULL;
        struct iphdr *hdr4;
-       struct ipv6hdr *hdr6;
+       struct ipv6hdr *hdr6 = NULL;
 
        if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN))
                return true;
index 7348c10cbae3db54bfcb31f23c2753185735f876..f74b9341ab0fe23ff046c31a0728bbfcb16ba40e 100644 (file)
@@ -124,20 +124,17 @@ void wg_packet_send_handshake_cookie(struct wg_device *wg,
 static void keep_key_fresh(struct wg_peer *peer)
 {
        struct noise_keypair *keypair;
-       bool send = false;
+       bool send;
 
        rcu_read_lock_bh();
        keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
-       if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) &&
-           (unlikely(atomic64_read(&keypair->sending.counter.counter) >
-                     REKEY_AFTER_MESSAGES) ||
-            (keypair->i_am_the_initiator &&
-             unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
-                                               REKEY_AFTER_TIME)))))
-               send = true;
+       send = keypair && READ_ONCE(keypair->sending.is_valid) &&
+              (atomic64_read(&keypair->sending_counter) > REKEY_AFTER_MESSAGES ||
+               (keypair->i_am_the_initiator &&
+                wg_birthdate_has_expired(keypair->sending.birthdate, REKEY_AFTER_TIME)));
        rcu_read_unlock_bh();
 
-       if (send)
+       if (unlikely(send))
                wg_packet_send_queued_handshake_initiation(peer, false);
 }
 
@@ -170,6 +167,11 @@ static bool encrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
        struct sk_buff *trailer;
        int num_frags;
 
+       /* Force hash calculation before encryption so that flow analysis is
+        * consistent over the inner packet.
+        */
+       skb_get_hash(skb);
+
        /* Calculate lengths. */
        padding_len = calculate_skb_padding(skb);
        trailer_len = padding_len + noise_encrypted_len(0);
@@ -281,6 +283,8 @@ void wg_packet_tx_worker(struct work_struct *work)
 
                wg_noise_keypair_put(keypair, false);
                wg_peer_put(peer);
+               if (need_resched())
+                       cond_resched();
        }
 }
 
@@ -296,7 +300,7 @@ void wg_packet_encrypt_worker(struct work_struct *work)
                skb_list_walk_safe(first, skb, next) {
                        if (likely(encrypt_packet(skb,
                                        PACKET_CB(first)->keypair))) {
-                               wg_reset_packet(skb);
+                               wg_reset_packet(skb, true);
                        } else {
                                state = PACKET_STATE_DEAD;
                                break;
@@ -304,7 +308,8 @@ void wg_packet_encrypt_worker(struct work_struct *work)
                }
                wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first,
                                          state);
-
+               if (need_resched())
+                       cond_resched();
        }
 }
 
@@ -344,7 +349,6 @@ void wg_packet_purge_staged_packets(struct wg_peer *peer)
 
 void wg_packet_send_staged_packets(struct wg_peer *peer)
 {
-       struct noise_symmetric_key *key;
        struct noise_keypair *keypair;
        struct sk_buff_head packets;
        struct sk_buff *skb;
@@ -364,10 +368,9 @@ void wg_packet_send_staged_packets(struct wg_peer *peer)
        rcu_read_unlock_bh();
        if (unlikely(!keypair))
                goto out_nokey;
-       key = &keypair->sending;
-       if (unlikely(!READ_ONCE(key->is_valid)))
+       if (unlikely(!READ_ONCE(keypair->sending.is_valid)))
                goto out_nokey;
-       if (unlikely(wg_birthdate_has_expired(key->birthdate,
+       if (unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
                                              REJECT_AFTER_TIME)))
                goto out_invalid;
 
@@ -382,7 +385,7 @@ void wg_packet_send_staged_packets(struct wg_peer *peer)
                 */
                PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0, ip_hdr(skb), skb);
                PACKET_CB(skb)->nonce =
-                               atomic64_inc_return(&key->counter.counter) - 1;
+                               atomic64_inc_return(&keypair->sending_counter) - 1;
                if (unlikely(PACKET_CB(skb)->nonce >= REJECT_AFTER_MESSAGES))
                        goto out_invalid;
        }
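
The nonce assignment above is the entire uniqueness argument for the data channel: an atomic post-increment hands each packet a fresh counter value, and when the space runs out the key is retired rather than a nonce ever being reused. The same rule in C11 atomics, as a sketch; the limit shown is illustrative (the driver's REJECT_AFTER_MESSAGES sits just below U64_MAX, leaving room for the replay window):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define REJECT_AFTER_MESSAGES ((uint64_t)1 << 60)       /* illustrative */

struct send_key {
        _Atomic uint64_t counter;
        _Atomic bool is_valid;
};

/* Returns false (and retires the key) once the nonce space is spent. */
static bool reserve_nonce(struct send_key *k, uint64_t *nonce)
{
        *nonce = atomic_fetch_add(&k->counter, 1);
        if (*nonce >= REJECT_AFTER_MESSAGES) {
                atomic_store(&k->is_valid, false);
                return false;
        }
        return true;
}
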
@@ -394,7 +397,7 @@ void wg_packet_send_staged_packets(struct wg_peer *peer)
        return;
 
 out_invalid:
-       WRITE_ONCE(key->is_valid, false);
+       WRITE_ONCE(keypair->sending.is_valid, false);
 out_nokey:
        wg_noise_keypair_put(keypair, false);
 
index b0d6541582d312eeed1a5a25123cb27380cd038f..f9018027fc133db48e7d20972690c7e52f4942f5 100644 (file)
@@ -76,12 +76,6 @@ static int send4(struct wg_device *wg, struct sk_buff *skb,
                        net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
                                            wg->dev->name, &endpoint->addr, ret);
                        goto err;
-               } else if (unlikely(rt->dst.dev == skb->dev)) {
-                       ip_rt_put(rt);
-                       ret = -ELOOP;
-                       net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n",
-                                           wg->dev->name, &endpoint->addr);
-                       goto err;
                }
                if (cache)
                        dst_cache_set_ip4(cache, &rt->dst, fl.saddr);
@@ -149,12 +143,6 @@ static int send6(struct wg_device *wg, struct sk_buff *skb,
                        net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
                                            wg->dev->name, &endpoint->addr, ret);
                        goto err;
-               } else if (unlikely(dst->dev == skb->dev)) {
-                       dst_release(dst);
-                       ret = -ELOOP;
-                       net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n",
-                                           wg->dev->name, &endpoint->addr);
-                       goto err;
                }
                if (cache)
                        dst_cache_set_ip6(cache, dst, &fl.saddr);
index 6744c0281ffb82e94d4a2763c29571c851d5f1c8..29971c25dba448740f5e8ec159d09bcd53c2e295 100644 (file)
@@ -1092,6 +1092,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                        iwl_trans->cfg = &iwl_ax101_cfg_quz_hr;
                else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
                        iwl_trans->cfg = &iwl_ax201_cfg_quz_hr;
+               else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0)
+                       iwl_trans->cfg = &iwl_ax1650s_cfg_quz_hr;
+               else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0)
+                       iwl_trans->cfg = &iwl_ax1650i_cfg_quz_hr;
        }
 
 #endif
index a1d69f9b2d4a469acf00503e04f88fc07e9bc528..0b9ca6d20ffadc48e36d15185f2272f23f6024fd 100644 (file)
@@ -173,8 +173,10 @@ static int st21nfca_tm_send_atr_res(struct nfc_hci_dev *hdev,
                memcpy(atr_res->gbi, atr_req->gbi, gb_len);
                r = nfc_set_remote_general_bytes(hdev->ndev, atr_res->gbi,
                                                  gb_len);
-               if (r < 0)
+               if (r < 0) {
+                       kfree_skb(skb);
                        return r;
+               }
        }
 
        info->dep_info.curr_nfc_dep_pni = 0;
index 91c1bd659947eb259292f01c4f3b86e2ec70cb7f..f3c037f5a9ba058f2f811fee80b6c41bb3b7fdf1 100644 (file)
@@ -1110,7 +1110,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
                  * Don't treat an error as fatal, as we potentially already
                  * have an NGUID or EUI-64.
                  */
-               if (status > 0)
+               if (status > 0 && !(status & NVME_SC_DNR))
                        status = 0;
                goto free_data;
        }
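
For context on this one-liner: a positive return from the Identify path is an NVMe status code, and controllers set the Do-Not-Retry bit when a failure is permanent. The old code squashed every positive status; the fix lets DNR-flagged errors propagate. A sketch of the check, assuming the kernel's 16-bit status encoding where NVME_SC_DNR is 0x4000:

#define NVME_SC_DNR 0x4000      /* Do Not Retry, per the NVMe spec */

/* Squash soft errors only; hard (DNR) errors must propagate. */
static int filter_identify_status(int status)
{
        if (status > 0 && !(status & NVME_SC_DNR))
                return 0;       /* soft failure: fall back to NGUID/EUI-64 */
        return status;
}
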
@@ -3642,6 +3642,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
        return;
  out_put_disk:
+       /* prevent double queue cleanup */
+       ns->disk->queue = NULL;
        put_disk(ns->disk);
  out_unlink_ns:
        mutex_lock(&ctrl->subsys->lock);
index 4e79e412b276c2140b5bca6f9f118e7621f1295e..cc46e250fcac22a075ab964a880e272b2f498433 100644 (file)
@@ -973,9 +973,13 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 
 static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
 {
-       if (++nvmeq->cq_head == nvmeq->q_depth) {
+       u16 tmp = nvmeq->cq_head + 1;
+
+       if (tmp == nvmeq->q_depth) {
                nvmeq->cq_head = 0;
                nvmeq->cq_phase ^= 1;
+       } else {
+               nvmeq->cq_head = tmp;
        }
 }
 
@@ -985,6 +989,11 @@ static inline int nvme_process_cq(struct nvme_queue *nvmeq)
 
        while (nvme_cqe_pending(nvmeq)) {
                found++;
+               /*
+                * load-load control dependency between phase and the rest of
+                * the cqe requires a full read memory barrier
+                */
+               dma_rmb();
                nvme_handle_cqe(nvmeq, nvmeq->cq_head);
                nvme_update_cq_head(nvmeq);
        }
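
The dma_rmb() comment is terse, so spelled out: each completion entry carries a phase bit that the device flips on every lap of the ring, which is how the host tells a freshly written entry from last lap's stale one, and the barrier keeps the CPU from loading the entry's payload before the phase bit that validates it. A sketch with an illustrative entry layout (not the spec's exact CQE; kernel context assumed for READ_ONCE() and dma_rmb()), also mirroring the new wrap logic that never publishes an out-of-range head:

#include <linux/types.h>

struct example_cqe {
        u32 result;
        u16 sq_head;
        u16 status;                     /* bit 0: phase, written by the device */
};

struct example_cq {
        struct example_cqe *ring;
        u16 depth, head;
        u8 phase;                       /* expected phase for this lap */
};

extern void example_handle(struct example_cqe *cqe);

static bool example_cqe_pending(struct example_cq *q)
{
        return (READ_ONCE(q->ring[q->head].status) & 1) == q->phase;
}

static void example_process_cq(struct example_cq *q)
{
        while (example_cqe_pending(q)) {
                u16 next = q->head + 1;

                /* Order the phase-bit load before the payload loads. */
                dma_rmb();
                example_handle(&q->ring[q->head]);

                if (next == q->depth) {         /* wrap: flip phase */
                        q->head = 0;
                        q->phase ^= 1;
                } else {
                        q->head = next;         /* never out of range */
                }
        }
}
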
@@ -1373,16 +1382,19 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
 
 /*
  * Called only on a device that has been disabled and after all other threads
- * that can check this device's completion queues have synced. This is the
- * last chance for the driver to see a natural completion before
- * nvme_cancel_request() terminates all incomplete requests.
+ * that can check this device's completion queues have synced, except
+ * nvme_poll(). This is the last chance for the driver to see a natural
+ * completion before nvme_cancel_request() terminates all incomplete requests.
  */
 static void nvme_reap_pending_cqes(struct nvme_dev *dev)
 {
        int i;
 
-       for (i = dev->ctrl.queue_count - 1; i > 0; i--)
+       for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
+               spin_lock(&dev->queues[i].cq_poll_lock);
                nvme_process_cq(&dev->queues[i]);
+               spin_unlock(&dev->queues[i].cq_poll_lock);
+       }
 }
 
 static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
index 390e92f2d8d1fcd53f4bd0b9bffdfe1cbd161e1f..b761c1f72f6728c8f2978c23027363021435a23a 100644 (file)
@@ -30,6 +30,22 @@ void pci_ats_init(struct pci_dev *dev)
        dev->ats_cap = pos;
 }
 
+/**
+ * pci_ats_supported - check if the device can use ATS
+ * @dev: the PCI device
+ *
+ * Returns true if the device supports ATS and is allowed to use it, false
+ * otherwise.
+ */
+bool pci_ats_supported(struct pci_dev *dev)
+{
+       if (!dev->ats_cap)
+               return false;
+
+       return (dev->untrusted == 0);
+}
+EXPORT_SYMBOL_GPL(pci_ats_supported);
+
 /**
  * pci_enable_ats - enable the ATS capability
  * @dev: the PCI device
@@ -42,7 +58,7 @@ int pci_enable_ats(struct pci_dev *dev, int ps)
        u16 ctrl;
        struct pci_dev *pdev;
 
-       if (!dev->ats_cap)
+       if (!pci_ats_supported(dev))
                return -EINVAL;
 
        if (WARN_ON(dev->ats_enabled))
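
The new helper gives IOMMU drivers a single place to ask "may this device use ATS?" instead of poking at ats_cap directly, and folds in the dev->untrusted policy (set for devices behind externally facing ports). A sketch of a typical caller; the function name and the page-shift choice are illustrative, not taken from any driver:

#include <linux/pci.h>
#include <linux/pci-ats.h>

static void try_enable_ats(struct pci_dev *pdev)
{
        /* Capability present and the device is trusted? */
        if (!pci_ats_supported(pdev))
                return;

        /* PAGE_SHIFT as the smallest invalidate granule is typical. */
        if (pci_enable_ats(pdev, PAGE_SHIFT))
                pci_info(pdev, "failed to enable ATS\n");
}
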
index 3708d43b75085a29d1809e352a6bbda6b484e702..393011a05b48a640da7f70fdef3394b5026a552d 100644 (file)
@@ -815,6 +815,13 @@ static const struct of_device_id qusb2_phy_of_match_table[] = {
        }, {
                .compatible     = "qcom,msm8998-qusb2-phy",
                .data           = &msm8998_phy_cfg,
+       }, {
+               /*
+                * Deprecated. Only here to support legacy device
+                * trees that didn't include "qcom,qusb2-v2-phy".
+                */
+               .compatible     = "qcom,sdm845-qusb2-phy",
+               .data           = &qusb2_v2_phy_cfg,
        }, {
                .compatible     = "qcom,qusb2-v2-phy",
                .data           = &qusb2_v2_phy_cfg,
index d998e65c89c800e12ebf9c0dae8b3735ade4b8fe..a52a9bf13b758d99c95786a8e9e8d1cbe2fe15bf 100644 (file)
@@ -160,18 +160,11 @@ static int qcom_snps_hsphy_power_on(struct phy *phy)
        ret = regulator_bulk_enable(VREG_NUM, priv->vregs);
        if (ret)
                return ret;
-       ret = clk_bulk_prepare_enable(priv->num_clks, priv->clks);
-       if (ret)
-               goto err_disable_regulator;
+
        qcom_snps_hsphy_disable_hv_interrupts(priv);
        qcom_snps_hsphy_exit_retention(priv);
 
        return 0;
-
-err_disable_regulator:
-       regulator_bulk_disable(VREG_NUM, priv->vregs);
-
-       return ret;
 }
 
 static int qcom_snps_hsphy_power_off(struct phy *phy)
@@ -180,7 +173,6 @@ static int qcom_snps_hsphy_power_off(struct phy *phy)
 
        qcom_snps_hsphy_enter_retention(priv);
        qcom_snps_hsphy_enable_hv_interrupts(priv);
-       clk_bulk_disable_unprepare(priv->num_clks, priv->clks);
        regulator_bulk_disable(VREG_NUM, priv->vregs);
 
        return 0;
@@ -266,21 +258,39 @@ static int qcom_snps_hsphy_init(struct phy *phy)
        struct hsphy_priv *priv = phy_get_drvdata(phy);
        int ret;
 
-       ret = qcom_snps_hsphy_reset(priv);
+       ret = clk_bulk_prepare_enable(priv->num_clks, priv->clks);
        if (ret)
                return ret;
 
+       ret = qcom_snps_hsphy_reset(priv);
+       if (ret)
+               goto disable_clocks;
+
        qcom_snps_hsphy_init_sequence(priv);
 
        ret = qcom_snps_hsphy_por_reset(priv);
        if (ret)
-               return ret;
+               goto disable_clocks;
+
+       return 0;
+
+disable_clocks:
+       clk_bulk_disable_unprepare(priv->num_clks, priv->clks);
+       return ret;
+}
+
+static int qcom_snps_hsphy_exit(struct phy *phy)
+{
+       struct hsphy_priv *priv = phy_get_drvdata(phy);
+
+       clk_bulk_disable_unprepare(priv->num_clks, priv->clks);
 
        return 0;
 }
 
 static const struct phy_ops qcom_snps_hsphy_ops = {
        .init = qcom_snps_hsphy_init,
+       .exit = qcom_snps_hsphy_exit,
        .power_on = qcom_snps_hsphy_power_on,
        .power_off = qcom_snps_hsphy_power_off,
        .set_mode = qcom_snps_hsphy_set_mode,
index 47a4ccd9fed43d01930a474e94dcdb17039a8c35..f579a6593f370de9af6de670886b1d8298ff40ff 100644 (file)
@@ -1435,7 +1435,7 @@ static const char * const sd2_groups[] = {
 static const char * const i2c0_groups[] = {
        "uart0_rx_mfp",
        "uart0_tx_mfp",
-       "i2c0_mfp_mfp",
+       "i2c0_mfp",
 };
 
 static const char * const i2c1_groups[] = {
index b409642f168d6abb0f88c4f457442f5203f7465b..9b821c9cbd16a2f699a997e79bdd239abad18923 100644 (file)
@@ -1286,6 +1286,7 @@ static const struct gpio_chip byt_gpio_chip = {
        .direction_output       = byt_gpio_direction_output,
        .get                    = byt_gpio_get,
        .set                    = byt_gpio_set,
+       .set_config             = gpiochip_generic_config,
        .dbg_show               = byt_gpio_dbg_show,
 };
 
index 4c74fdde576d0b836e0738c953f94a34e4827629..1093a6105d40cc2d53eca053e51a7c1bc77027b9 100644 (file)
@@ -1479,11 +1479,15 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
        struct chv_pinctrl *pctrl = gpiochip_get_data(gc);
        struct irq_chip *chip = irq_desc_get_chip(desc);
        unsigned long pending;
+       unsigned long flags;
        u32 intr_line;
 
        chained_irq_enter(chip, desc);
 
+       raw_spin_lock_irqsave(&chv_lock, flags);
        pending = readl(pctrl->regs + CHV_INTSTAT);
+       raw_spin_unlock_irqrestore(&chv_lock, flags);
+
        for_each_set_bit(intr_line, &pending, pctrl->community->nirqs) {
                unsigned int irq, offset;
 
index 330c8f077b73aa5db918f50f8ae79c2a7f9ee80c..4d7a86a5a37b0239c0f7871875f6251846cd4335 100644 (file)
 
 #include "pinctrl-intel.h"
 
-#define SPT_PAD_OWN    0x020
-#define SPT_PADCFGLOCK 0x0a0
-#define SPT_HOSTSW_OWN 0x0d0
-#define SPT_GPI_IS     0x100
-#define SPT_GPI_IE     0x120
+#define SPT_PAD_OWN            0x020
+#define SPT_H_PADCFGLOCK       0x090
+#define SPT_LP_PADCFGLOCK      0x0a0
+#define SPT_HOSTSW_OWN         0x0d0
+#define SPT_GPI_IS             0x100
+#define SPT_GPI_IE             0x120
 
 #define SPT_COMMUNITY(b, s, e)                         \
        {                                               \
                .barno = (b),                           \
                .padown_offset = SPT_PAD_OWN,           \
-               .padcfglock_offset = SPT_PADCFGLOCK,    \
+               .padcfglock_offset = SPT_LP_PADCFGLOCK, \
                .hostown_offset = SPT_HOSTSW_OWN,       \
                .is_offset = SPT_GPI_IS,                \
                .ie_offset = SPT_GPI_IE,                \
@@ -47,7 +48,7 @@
        {                                               \
                .barno = (b),                           \
                .padown_offset = SPT_PAD_OWN,           \
-               .padcfglock_offset = SPT_PADCFGLOCK,    \
+               .padcfglock_offset = SPT_H_PADCFGLOCK,  \
                .hostown_offset = SPT_HOSTSW_OWN,       \
                .is_offset = SPT_GPI_IS,                \
                .ie_offset = SPT_GPI_IE,                \
index 3853ec3a2a8e683481e1b9e864c61c8a22f34d74..ee305f14040043f33a2e2c31486c6c9da33756f2 100644 (file)
@@ -164,8 +164,6 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
        case MTK_PIN_CONFIG_PU_ADV:
        case MTK_PIN_CONFIG_PD_ADV:
                if (hw->soc->adv_pull_get) {
-                       bool pullup;
-
                        pullup = param == MTK_PIN_CONFIG_PU_ADV;
                        err = hw->soc->adv_pull_get(hw, desc, pullup, &ret);
                } else
index 9a398a211d30a4db31ae07b6db09f8512019fb07..85858c1d56d02ebaadec8511877993d858ab2cb5 100644 (file)
@@ -697,7 +697,7 @@ static void msm_gpio_update_dual_edge_pos(struct msm_pinctrl *pctrl,
 
                pol = msm_readl_intr_cfg(pctrl, g);
                pol ^= BIT(g->intr_polarity_bit);
-               msm_writel_intr_cfg(val, pctrl, g);
+               msm_writel_intr_cfg(pol, pctrl, g);
 
                val2 = msm_readl_io(pctrl, g) & BIT(g->in_bit);
                intstat = msm_readl_intr_status(pctrl, g);
@@ -1034,6 +1034,29 @@ static void msm_gpio_irq_relres(struct irq_data *d)
        module_put(gc->owner);
 }
 
+static int msm_gpio_irq_set_affinity(struct irq_data *d,
+                               const struct cpumask *dest, bool force)
+{
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+
+       if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
+               return irq_chip_set_affinity_parent(d, dest, force);
+
+       return 0;
+}
+
+static int msm_gpio_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+
+       if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
+               return irq_chip_set_vcpu_affinity_parent(d, vcpu_info);
+
+       return 0;
+}
+
 static void msm_gpio_irq_handler(struct irq_desc *desc)
 {
        struct gpio_chip *gc = irq_desc_get_handler_data(desc);
@@ -1132,6 +1155,8 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
        pctrl->irq_chip.irq_set_wake = msm_gpio_irq_set_wake;
        pctrl->irq_chip.irq_request_resources = msm_gpio_irq_reqres;
        pctrl->irq_chip.irq_release_resources = msm_gpio_irq_relres;
+       pctrl->irq_chip.irq_set_affinity = msm_gpio_irq_set_affinity;
+       pctrl->irq_chip.irq_set_vcpu_affinity = msm_gpio_irq_set_vcpu_affinity;
 
        np = of_parse_phandle(pctrl->dev->of_node, "wakeup-parent", 0);
        if (np) {
index b7f2c00db5e1e423f7ed4d3d7927da0685c29f18..9c4af76a9956ed567752f6920ec8db3671b2ab35 100644 (file)
@@ -52,28 +52,15 @@ static int cros_ec_sensorhub_register(struct device *dev,
        int sensor_type[MOTIONSENSE_TYPE_MAX] = { 0 };
        struct cros_ec_command *msg = sensorhub->msg;
        struct cros_ec_dev *ec = sensorhub->ec;
-       int ret, i, sensor_num;
+       int ret, i;
        char *name;
 
-       sensor_num = cros_ec_get_sensor_count(ec);
-       if (sensor_num < 0) {
-               dev_err(dev,
-                       "Unable to retrieve sensor information (err:%d)\n",
-                       sensor_num);
-               return sensor_num;
-       }
-
-       sensorhub->sensor_num = sensor_num;
-       if (sensor_num == 0) {
-               dev_err(dev, "Zero sensors reported.\n");
-               return -EINVAL;
-       }
 
        msg->version = 1;
        msg->insize = sizeof(struct ec_response_motion_sense);
        msg->outsize = sizeof(struct ec_params_motion_sense);
 
-       for (i = 0; i < sensor_num; i++) {
+       for (i = 0; i < sensorhub->sensor_num; i++) {
                sensorhub->params->cmd = MOTIONSENSE_CMD_INFO;
                sensorhub->params->info.sensor_num = i;
 
@@ -140,8 +127,7 @@ static int cros_ec_sensorhub_probe(struct platform_device *pdev)
        struct cros_ec_dev *ec = dev_get_drvdata(dev->parent);
        struct cros_ec_sensorhub *data;
        struct cros_ec_command *msg;
-       int ret;
-       int i;
+       int ret, i, sensor_num;
 
        msg = devm_kzalloc(dev, sizeof(struct cros_ec_command) +
                           max((u16)sizeof(struct ec_params_motion_sense),
@@ -166,10 +152,52 @@ static int cros_ec_sensorhub_probe(struct platform_device *pdev)
        dev_set_drvdata(dev, data);
 
        /* Check whether this EC is a sensor hub. */
-       if (cros_ec_check_features(data->ec, EC_FEATURE_MOTION_SENSE)) {
+       if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE)) {
+               sensor_num = cros_ec_get_sensor_count(ec);
+               if (sensor_num < 0) {
+                       dev_err(dev,
+                               "Unable to retrieve sensor information (err:%d)\n",
+                               sensor_num);
+                       return sensor_num;
+               }
+               if (sensor_num == 0) {
+                       dev_err(dev, "Zero sensors reported.\n");
+                       return -EINVAL;
+               }
+               data->sensor_num = sensor_num;
+
+               /*
+                * Prepare the ring handler before enumerating the
+                * sensors.
+                */
+               if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) {
+                       ret = cros_ec_sensorhub_ring_allocate(data);
+                       if (ret)
+                               return ret;
+               }
+
+               /* Enumerate the sensors. */
                ret = cros_ec_sensorhub_register(dev, data);
                if (ret)
                        return ret;
+
+               /*
+                * When the EC does not have a FIFO, the sensors will query
+                * their data themselves via sysfs or a software trigger.
+                */
+               if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) {
+                       ret = cros_ec_sensorhub_ring_add(data);
+                       if (ret)
+                               return ret;
+                       /*
+                        * The msg and its data are not under the control of the
+                        * ring handler.
+                        */
+                       return devm_add_action_or_reset(dev,
+                                       cros_ec_sensorhub_ring_remove,
+                                       data);
+               }
+
        } else {
                /*
                 * If the device has sensors but does not claim to
@@ -184,22 +212,6 @@ static int cros_ec_sensorhub_probe(struct platform_device *pdev)
                }
        }
 
-       /*
-        * If the EC does not have a FIFO, the sensors will query their data
-        * themselves via sysfs or a software trigger.
-        */
-       if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) {
-               ret = cros_ec_sensorhub_ring_add(data);
-               if (ret)
-                       return ret;
-               /*
-                * The msg and its data is not under the control of the ring
-                * handler.
-                */
-               return devm_add_action_or_reset(dev,
-                                               cros_ec_sensorhub_ring_remove,
-                                               data);
-       }
 
        return 0;
 }
index c48e5b38a4417d27daee2efd6c42a71fd17b80f9..24e48d96ed7663f3f8fda74b7ace2b71c93181f1 100644 (file)
@@ -957,17 +957,15 @@ static int cros_ec_sensorhub_event(struct notifier_block *nb,
 }
 
 /**
- * cros_ec_sensorhub_ring_add() - Add the FIFO functionality if the EC
- *                               supports it.
+ * cros_ec_sensorhub_ring_allocate() - Prepare the FIFO functionality if the EC
+ *                                    supports it.
  *
  * @sensorhub : Sensor Hub object.
  *
  * Return: 0 on success.
  */
-int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub)
+int cros_ec_sensorhub_ring_allocate(struct cros_ec_sensorhub *sensorhub)
 {
-       struct cros_ec_dev *ec = sensorhub->ec;
-       int ret;
        int fifo_info_length =
                sizeof(struct ec_response_motion_sense_fifo_info) +
                sizeof(u16) * sensorhub->sensor_num;
@@ -978,6 +976,49 @@ int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub)
        if (!sensorhub->fifo_info)
                return -ENOMEM;
 
+       /*
+        * Allocate the callback area based on the number of sensors.
+        */
+       sensorhub->push_data = devm_kcalloc(sensorhub->dev,
+                       sensorhub->sensor_num,
+                       sizeof(*sensorhub->push_data),
+                       GFP_KERNEL);
+       if (!sensorhub->push_data)
+               return -ENOMEM;
+
+       sensorhub->tight_timestamps = cros_ec_check_features(
+                       sensorhub->ec,
+                       EC_FEATURE_MOTION_SENSE_TIGHT_TIMESTAMPS);
+
+       if (sensorhub->tight_timestamps) {
+               sensorhub->batch_state = devm_kcalloc(sensorhub->dev,
+                               sensorhub->sensor_num,
+                               sizeof(*sensorhub->batch_state),
+                               GFP_KERNEL);
+               if (!sensorhub->batch_state)
+                       return -ENOMEM;
+       }
+
+       return 0;
+}
+
+/**
+ * cros_ec_sensorhub_ring_add() - Add the FIFO functionality if the EC
+ *                               supports it.
+ *
+ * @sensorhub : Sensor Hub object.
+ *
+ * Return: 0 on success.
+ */
+int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub)
+{
+       struct cros_ec_dev *ec = sensorhub->ec;
+       int ret;
+       int fifo_info_length =
+               sizeof(struct ec_response_motion_sense_fifo_info) +
+               sizeof(u16) * sensorhub->sensor_num;
+
        /* Retrieve FIFO information */
        sensorhub->msg->version = 2;
        sensorhub->params->cmd = MOTIONSENSE_CMD_FIFO_INFO;
@@ -998,31 +1039,9 @@ int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub)
        if (!sensorhub->ring)
                return -ENOMEM;
 
-       /*
-        * Allocate the callback area based on the number of sensors.
-        */
-       sensorhub->push_data = devm_kcalloc(
-                       sensorhub->dev, sensorhub->sensor_num,
-                       sizeof(*sensorhub->push_data),
-                       GFP_KERNEL);
-       if (!sensorhub->push_data)
-               return -ENOMEM;
-
        sensorhub->fifo_timestamp[CROS_EC_SENSOR_LAST_TS] =
                cros_ec_get_time_ns();
 
-       sensorhub->tight_timestamps = cros_ec_check_features(
-                       ec, EC_FEATURE_MOTION_SENSE_TIGHT_TIMESTAMPS);
-
-       if (sensorhub->tight_timestamps) {
-               sensorhub->batch_state = devm_kcalloc(sensorhub->dev,
-                               sensorhub->sensor_num,
-                               sizeof(*sensorhub->batch_state),
-                               GFP_KERNEL);
-               if (!sensorhub->batch_state)
-                       return -ENOMEM;
-       }
-
        /* Register the notifier that will act as a top half interrupt. */
        sensorhub->notifier.notifier_call = cros_ec_sensorhub_event;
        ret = blocking_notifier_chain_register(&ec->ec_dev->event_notifier,
index 6f12747a359a286a8a5fcfd33440264e75e17de9..c4404d9c1de4ff3fd1eac24991de546a04f09778 100644 (file)
@@ -515,9 +515,33 @@ static struct asus_wmi_driver asus_nb_wmi_driver = {
        .detect_quirks = asus_nb_wmi_quirks,
 };
 
+static const struct dmi_system_id asus_nb_wmi_blacklist[] __initconst = {
+       {
+               /*
+                * asus-nb-wmi adds no functionality. The T100TA has a detachable
+                * USB kbd, so no hotkeys, and it has no WMI rfkill; and loading
+                * asus-nb-wmi causes the camera LED to turn on and _stay_ on.
+                */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"),
+               },
+       },
+       {
+               /* The Asus T200TA has the same issue as the T100TA */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T200TA"),
+               },
+       },
+       {} /* Terminating entry */
+};
 
 static int __init asus_nb_wmi_init(void)
 {
+       if (dmi_check_system(asus_nb_wmi_blacklist))
+               return -ENODEV;
+
        return asus_wmi_register_driver(&asus_nb_wmi_driver);
 }
 
index b96d172eb2c15e04471cbbe80cd3da17b1c01061..12d5ab7e1f5d8eb3cfdf2c97ea87a003715e6e5a 100644 (file)
@@ -53,7 +53,7 @@ static int uncore_max_entries __read_mostly;
 /* Storage for uncore data for all instances */
 static struct uncore_data *uncore_instances;
 /* Root of the all uncore sysfs kobjs */
-struct kobject *uncore_root_kobj;
+static struct kobject *uncore_root_kobj;
 /* Stores the CPU mask of the target CPUs to use during uncore read/write */
 static cpumask_t uncore_cpu_mask;
 /* CPU online callback register instance */
index d2a5d4c367152553799a3b152f4c7e334bc8f179..7c8bdab078cf5e56ed330055f984e96242475f7f 100644 (file)
@@ -255,7 +255,7 @@ static const struct pmc_bit_map *ext_cnp_pfear_map[] = {
 };
 
 static const struct pmc_bit_map icl_pfear_map[] = {
-       /* Ice Lake generation onwards only */
+       /* Ice Lake and Jasper Lake generation onwards only */
        {"RES_65",              BIT(0)},
        {"RES_66",              BIT(1)},
        {"RES_67",              BIT(2)},
@@ -274,7 +274,7 @@ static const struct pmc_bit_map *ext_icl_pfear_map[] = {
 };
 
 static const struct pmc_bit_map tgl_pfear_map[] = {
-       /* Tiger Lake, Elkhart Lake and Jasper Lake generation onwards only */
+       /* Tiger Lake and Elkhart Lake generation onwards only */
        {"PSF9",                BIT(0)},
        {"RES_66",              BIT(1)},
        {"RES_67",              BIT(2)},
@@ -692,7 +692,6 @@ static void pmc_core_lpm_display(struct pmc_dev *pmcdev, struct device *dev,
        kfree(lpm_regs);
 }
 
-#if IS_ENABLED(CONFIG_DEBUG_FS)
 static bool slps0_dbg_latch;
 
 static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
@@ -1133,15 +1132,6 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
                                    &pmc_core_substate_l_sts_regs_fops);
        }
 }
-#else
-static inline void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
-{
-}
-
-static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
-{
-}
-#endif /* CONFIG_DEBUG_FS */
 
 static const struct x86_cpu_id intel_pmc_core_ids[] = {
        X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L,           &spt_reg_map),
@@ -1156,7 +1146,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
        X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,         &tgl_reg_map),
        X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,           &tgl_reg_map),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT,        &tgl_reg_map),
-       X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L,      &tgl_reg_map),
+       X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L,      &icl_reg_map),
        {}
 };
 
@@ -1260,13 +1250,11 @@ static int pmc_core_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-
 static bool warn_on_s0ix_failures;
 module_param(warn_on_s0ix_failures, bool, 0644);
 MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix failures");
 
-static int pmc_core_suspend(struct device *dev)
+static __maybe_unused int pmc_core_suspend(struct device *dev)
 {
        struct pmc_dev *pmcdev = dev_get_drvdata(dev);
 
@@ -1318,7 +1306,7 @@ static inline bool pmc_core_is_s0ix_failed(struct pmc_dev *pmcdev)
        return false;
 }
 
-static int pmc_core_resume(struct device *dev)
+static __maybe_unused int pmc_core_resume(struct device *dev)
 {
        struct pmc_dev *pmcdev = dev_get_drvdata(dev);
        const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
@@ -1348,8 +1336,6 @@ static int pmc_core_resume(struct device *dev)
        return 0;
 }
 
-#endif
-
 static const struct dev_pm_ops pmc_core_pm_ops = {
        SET_LATE_SYSTEM_SLEEP_PM_OPS(pmc_core_suspend, pmc_core_resume)
 };
index 0d50b2402abe5c5d81402b75b4fd2ba16d7be421..5eae55d8022665960ab96ff0fec8e83c680b1a00 100644 (file)
@@ -282,9 +282,7 @@ struct pmc_dev {
        u32 base_addr;
        void __iomem *regbase;
        const struct pmc_reg_map *map;
-#if IS_ENABLED(CONFIG_DEBUG_FS)
        struct dentry *dbgfs_dir;
-#endif /* CONFIG_DEBUG_FS */
        int pmc_xram_read_bit;
        struct mutex lock; /* generic mutex lock for PMC Core */
 
index 946ac2dc08ae7c3e2691b47d5f09594368440144..cc4f9cba68563c137c1c71785e8d6c26479ef2c6 100644 (file)
@@ -522,8 +522,8 @@ static int mshw0011_probe(struct i2c_client *client)
        strlcpy(board_info.type, "MSHW0011-bat0", I2C_NAME_SIZE);
 
        bat0 = i2c_acpi_new_device(dev, 1, &board_info);
-       if (!bat0)
-               return -ENOMEM;
+       if (IS_ERR(bat0))
+               return PTR_ERR(bat0);
 
        data->bat0 = bat0;
        i2c_set_clientdata(bat0, data);
index 8eaadbaf8ffa5281d6db62f9f9152c783e0c69c8..0f704484ae1d6b7a4d6fc7abb08fb3f268d1dce1 100644 (file)
@@ -9548,7 +9548,7 @@ static ssize_t tpacpi_battery_store(int what,
                if (!battery_info.batteries[battery].start_support)
                        return -ENODEV;
                /* valid values are [0, 99] */
-               if (value < 0 || value > 99)
+               if (value > 99)
                        return -EINVAL;
                if (value > battery_info.batteries[battery].charge_stop)
                        return -EINVAL;
index 601cbb282f54317e823096adcaec309b891dbed1..54a2546bb93bf847b06ea5377053076306fab47b 100644 (file)
@@ -23,7 +23,7 @@ struct xiaomi_wmi {
        unsigned int key_code;
 };
 
-int xiaomi_wmi_probe(struct wmi_device *wdev, const void *context)
+static int xiaomi_wmi_probe(struct wmi_device *wdev, const void *context)
 {
        struct xiaomi_wmi *data;
 
@@ -48,7 +48,7 @@ int xiaomi_wmi_probe(struct wmi_device *wdev, const void *context)
        return input_register_device(data->input_dev);
 }
 
-void xiaomi_wmi_notify(struct wmi_device *wdev, union acpi_object *dummy)
+static void xiaomi_wmi_notify(struct wmi_device *wdev, union acpi_object *dummy)
 {
        struct xiaomi_wmi *data;
 
index 8155f59ece38d08eba6dbb8079ff880f6b482ec5..10af330153b5ec179abc473f43f84661b825b3fc 100644 (file)
@@ -877,6 +877,11 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
                                rmcd_error("pinned %ld out of %ld pages",
                                           pinned, nr_pages);
                        ret = -EFAULT;
+                       /*
+                        * Set nr_pages up to mean "how many pages to unpin" in
+                        * the error handler:
+                        */
+                       nr_pages = pinned;
                        goto err_pg;
                }
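
The added assignment is the classic partial-pin fix: pin_user_pages() may legitimately return fewer pages than requested, and the error path must release what was actually pinned, not what was asked for. The pattern in isolation, as a sketch with assumed parameters (kernel context):

#include <linux/mm.h>

static long pin_range(unsigned long addr, long want, struct page **pages)
{
        long pinned = pin_user_pages(addr, want, FOLL_WRITE, pages, NULL);

        if (pinned < 0)
                return pinned;          /* nothing pinned, nothing to undo */
        if (pinned < want) {
                /* Short pin: release exactly what we got, then fail. */
                unpin_user_pages(pages, pinned);
                return -EFAULT;
        }
        return 0;
}
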
 
index c340505150b685a4048bb2a62da5c717e4abbfe3..7486f6e4e613ce15e2df9bdd05503dc47ab0dd93 100644 (file)
@@ -5754,10 +5754,6 @@ static DECLARE_DELAYED_WORK(regulator_init_complete_work,
 
 static int __init regulator_init_complete(void)
 {
-       int delay = driver_deferred_probe_timeout;
-
-       if (delay < 0)
-               delay = 0;
        /*
         * Since DT doesn't provide an idiomatic mechanism for
         * enabling full constraints and since it's much more natural
@@ -5768,17 +5764,18 @@ static int __init regulator_init_complete(void)
                has_full_constraints = true;
 
        /*
-        * If driver_deferred_probe_timeout is set, we punt
-        * completion for that many seconds since systems like
-        * distros will load many drivers from userspace so consumers
-        * might not always be ready yet, this is particularly an
-        * issue with laptops where this might bounce the display off
-        * then on.  Ideally we'd get a notification from userspace
-        * when this happens but we don't so just wait a bit and hope
-        * we waited long enough.  It'd be better if we'd only do
-        * this on systems that need it.
+        * We punt completion for an arbitrary amount of time since
+        * systems like distros will load many drivers from userspace
+        * so consumers might not always be ready yet; this is
+        * particularly an issue with laptops where this might bounce
+        * the display off then on.  Ideally we'd get a notification
+        * from userspace when this happens but we don't so just wait
+        * a bit and hope we waited long enough.  It'd be better if
+        * we'd only do this on systems that need it, and a kernel
+        * command line option might be useful.
         */
-       schedule_delayed_work(&regulator_init_complete_work, delay * HZ);
+       schedule_delayed_work(&regulator_init_complete_work,
+                             msecs_to_jiffies(30000));
 
        return 0;
 }
index c75112ee7b97af26a01238305fdb1b3f9045e057..c7fade836d833f87f46ca4f626a92f05e72954c7 100644 (file)
@@ -521,8 +521,10 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
                                   ISM_NR_DMBS);
-       if (!ism->smcd)
+       if (!ism->smcd) {
+               ret = -ENOMEM;
                goto err_resource;
+       }
 
        ism->smcd->priv = ism;
        ret = ism_dev_init(ism);
index f7689461c242cf3f75c99380fe15e92fb9b30c0f..569966bdc5138eef07d6bfcc90fe2c8d94b352c5 100644 (file)
@@ -6717,17 +6717,17 @@ int qeth_stop(struct net_device *dev)
                unsigned int i;
 
                /* Quiesce the NAPI instances: */
-               qeth_for_each_output_queue(card, queue, i) {
+               qeth_for_each_output_queue(card, queue, i)
                        napi_disable(&queue->napi);
-                       del_timer_sync(&queue->timer);
-               }
 
                /* Stop .ndo_start_xmit, might still access queue->napi. */
                netif_tx_disable(dev);
 
-               /* Queues may get re-allocated, so remove the NAPIs here. */
-               qeth_for_each_output_queue(card, queue, i)
+               qeth_for_each_output_queue(card, queue, i) {
+                       del_timer_sync(&queue->timer);
+                       /* Queues may get re-allocated, so remove the NAPIs. */
                        netif_napi_del(&queue->napi);
+               }
        } else {
                netif_tx_disable(dev);
        }
index 7da9e060b27063b0948b05e9ad836b0581a43c4a..635f6f9cffc4029173af9c2a07a3021cfb1521d3 100644 (file)
@@ -3640,6 +3640,11 @@ static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *tgt)
        struct ibmvfc_host *vhost = tgt->vhost;
        struct ibmvfc_event *evt;
 
+       if (!vhost->logged_in) {
+               ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+               return;
+       }
+
        if (vhost->discovery_threads >= disc_threads)
                return;
 
index 7f66a778320994908bbc101e344b9840895c5e35..59f0f1030c54aaf244c287932a5d29f082e449bd 100644 (file)
@@ -2320,16 +2320,12 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 static int ibmvscsi_remove(struct vio_dev *vdev)
 {
        struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
-       unsigned long flags;
 
        srp_remove_host(hostdata->host);
        scsi_remove_host(hostdata->host);
 
        purge_requests(hostdata, DID_ERROR);
-
-       spin_lock_irqsave(hostdata->host->host_lock, flags);
        release_event_pool(&hostdata->pool, hostdata);
-       spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 
        ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
                                        max_events);
index 97cabd7e001480092fe650acce5e53483cfe0f37..2c9e5ac24692de8ed5bf9d3d16b0c3bd4c36d17b 100644 (file)
@@ -1850,9 +1850,6 @@ qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
                return -EINVAL;
        }
 
-       ql_log(ql_log_info, vha, 0x70d6,
-           "port speed:%d\n", ha->link_data_rate);
-
        return scnprintf(buf, PAGE_SIZE, "%s\n", spd[ha->link_data_rate]);
 }
 
@@ -3031,11 +3028,11 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
            test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
                msleep(1000);
 
-       qla_nvme_delete(vha);
 
        qla24xx_disable_vp(vha);
        qla2x00_wait_for_sess_deletion(vha);
 
+       qla_nvme_delete(vha);
        vha->flags.delete_progress = 1;
 
        qlt_remove_target(ha, vha);
index 4ed90437e8c42e9f5c33200774a78e25bbcc7fc5..d6c991bd1bde9bc024c4c907589b756b2b5c7ad5 100644 (file)
@@ -3153,7 +3153,7 @@ qla24xx_abort_command(srb_t *sp)
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
            "Entered %s.\n", __func__);
 
-       if (vha->flags.qpairs_available && sp->qpair)
+       if (sp->qpair)
                req = sp->qpair->req;
        else
                return QLA_FUNCTION_FAILED;
index d190db5ea7d9e33eae1ef649982fa49ad26b1aaa..1d9a4866f9a74259381d924b62f52dd91540e738 100644 (file)
@@ -3732,6 +3732,13 @@ qla2x00_remove_one(struct pci_dev *pdev)
        }
        qla2x00_wait_for_hba_ready(base_vha);
 
+       /*
+        * If the UNLOADING flag is already set, return early and let the
+        * code path that set it first finish the unload.
+        */
+       if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
+               return;
+
        if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
            IS_QLA28XX(ha)) {
                if (ha->flags.fw_started)
@@ -3750,15 +3757,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
 
        qla2x00_wait_for_sess_deletion(base_vha);
 
-       /*
-        * if UNLOAD flag is already set, then continue unload,
-        * where it was set first.
-        */
-       if (test_bit(UNLOADING, &base_vha->dpc_flags))
-               return;
-
-       set_bit(UNLOADING, &base_vha->dpc_flags);
-
        qla_nvme_delete(base_vha);
 
        dma_free_coherent(&ha->pdev->dev,
@@ -4864,6 +4862,9 @@ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
        struct qla_work_evt *e;
        uint8_t bail;
 
+       if (test_bit(UNLOADING, &vha->dpc_flags))
+               return NULL;
+
        QLA_VHA_MARK_BUSY(vha, bail);
        if (bail)
                return NULL;
@@ -6628,13 +6629,6 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
        struct pci_dev *pdev = ha->pdev;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
-       /*
-        * if UNLOAD flag is already set, then continue unload,
-        * where it was set first.
-        */
-       if (test_bit(UNLOADING, &base_vha->dpc_flags))
-               return;
-
        ql_log(ql_log_warn, base_vha, 0x015b,
            "Disabling adapter.\n");
 
@@ -6645,9 +6639,14 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
                return;
        }
 
-       qla2x00_wait_for_sess_deletion(base_vha);
+       /*
+        * If the UNLOADING flag is already set, return early and let the
+        * code path that set it first finish the unload.
+        */
+       if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
+               return;
 
-       set_bit(UNLOADING, &base_vha->dpc_flags);
+       qla2x00_wait_for_sess_deletion(base_vha);
 
        qla2x00_delete_all_vps(ha, base_vha);
 
index 47835c4b4ee05016420f07f0bf63c015879e5009..06c260f6cdae3f43fc8de4ced5d5b98a0d36bac9 100644 (file)
@@ -2284,6 +2284,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
                switch (oldstate) {
                case SDEV_RUNNING:
                case SDEV_CREATED_BLOCK:
+               case SDEV_QUIESCE:
                case SDEV_OFFLINE:
                        break;
                default:
index 3717eea37ecb386fd772e6dd90182b3674d41ceb..5f0ad8b32e3af5900b571ae3ffa828f5e85eb6fb 100644 (file)
@@ -80,6 +80,10 @@ static int scsi_dev_type_resume(struct device *dev,
        dev_dbg(dev, "scsi resume: %d\n", err);
 
        if (err == 0) {
+               bool was_runtime_suspended;
+
+               was_runtime_suspended = pm_runtime_suspended(dev);
+
                pm_runtime_disable(dev);
                err = pm_runtime_set_active(dev);
                pm_runtime_enable(dev);
@@ -93,8 +97,10 @@ static int scsi_dev_type_resume(struct device *dev,
                 */
                if (!err && scsi_is_sdev_device(dev)) {
                        struct scsi_device *sdev = to_scsi_device(dev);
-
-                       blk_set_runtime_active(sdev->request_queue);
+                       if (was_runtime_suspended)
+                               blk_post_runtime_resume(sdev->request_queue, 0);
+                       else
+                               blk_set_runtime_active(sdev->request_queue);
                }
        }
 
index db37144ae98c69a8ae0cb20ca8695c5150e53346..87ee9f767b7aff873a3e4284a91abaf90ac31ad2 100644 (file)
@@ -351,7 +351,9 @@ int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
                spin_unlock_irqrestore(&client->lock, flags);
        }
 
-       mbox_send_message(client->chan, pkt);
+       err = mbox_send_message(client->chan, pkt);
+       if (err < 0)
+               return err;
        /* We can send the next packet immediately, so just call txdone. */
        mbox_client_txdone(client->chan, 0);
 
index 8e0575fcb4c8c72c2efad3bcfe79e880816a94df..67325fbaf7605af34a8fc7a28991e402f73df0a9 100644 (file)
@@ -925,6 +925,10 @@ do_map_region(const struct gasket_dev *gasket_dev, struct vm_area_struct *vma,
                gasket_get_bar_index(gasket_dev,
                                     (vma->vm_pgoff << PAGE_SHIFT) +
                                     driver_desc->legacy_mmap_address_offset);
+
+       if (bar_index < 0)
+               return DO_MAP_REGION_INVALID;
+
        phys_base = gasket_dev->bar_data[bar_index].phys_base + phys_offset;
        while (mapped_bytes < map_length) {
                /*
index 55c51143bb0940998b19396a56b39cba3bbd7447..4ffb334cd5cde56f5011692dc4fa83ae1ca27bda 100644 (file)
@@ -537,9 +537,9 @@ static void gb_tty_set_termios(struct tty_struct *tty,
        }
 
        if (C_CRTSCTS(tty) && C_BAUD(tty) != B0)
-               newline.flow_control |= GB_SERIAL_AUTO_RTSCTS_EN;
+               newline.flow_control = GB_SERIAL_AUTO_RTSCTS_EN;
        else
-               newline.flow_control &= ~GB_SERIAL_AUTO_RTSCTS_EN;
+               newline.flow_control = 0;
 
        if (memcmp(&gb_tty->line_coding, &newline, sizeof(newline))) {
                memcpy(&gb_tty->line_coding, &newline, sizeof(newline));
index 4b25a3a314edbc23af483024335e89251834e388..ed404355ea4c6f9978c941a953a014dd21f4f4fe 100644 (file)
@@ -130,17 +130,24 @@ static int ad2s1210_config_write(struct ad2s1210_state *st, u8 data)
 static int ad2s1210_config_read(struct ad2s1210_state *st,
                                unsigned char address)
 {
-       struct spi_transfer xfer = {
-               .len = 2,
-               .rx_buf = st->rx,
-               .tx_buf = st->tx,
+       struct spi_transfer xfers[] = {
+               {
+                       .len = 1,
+                       .rx_buf = &st->rx[0],
+                       .tx_buf = &st->tx[0],
+                       .cs_change = 1,
+               }, {
+                       .len = 1,
+                       .rx_buf = &st->rx[1],
+                       .tx_buf = &st->tx[1],
+               },
        };
        int ret = 0;
 
        ad2s1210_set_mode(MOD_CONFIG, st);
        st->tx[0] = address | AD2S1210_MSB_IS_HIGH;
        st->tx[1] = AD2S1210_REG_FAULT;
-       ret = spi_sync_transfer(st->sdev, &xfer, 1);
+       ret = spi_sync_transfer(st->sdev, xfers, 2);
        if (ret < 0)
                return ret;
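
The fix splits one two-byte transfer into two one-byte transfers with cs_change = 1 on the first; the change implies the AD2S1210 latches a register address only when chip select deasserts between the address byte and the data byte, so a single two-byte transfer read back garbage. The general shape of such an address-then-read exchange, as a sketch with illustrative names:

#include <linux/spi/spi.h>

static int reg_read_cs_toggle(struct spi_device *spi, u8 addr, u8 *val)
{
        u8 tx0 = addr, dummy = 0;
        struct spi_transfer xfers[] = {
                {
                        .tx_buf = &tx0,
                        .len = 1,
                        .cs_change = 1, /* deassert CS after the address */
                }, {
                        .tx_buf = &dummy,
                        .rx_buf = val,
                        .len = 1,
                },
        };

        return spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
}
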
 
index 7b00d7069e21971963216d645885118f368d4e98..358d7b2f4ad189fdade4f35def7ac1e99f60a003 100644 (file)
@@ -298,7 +298,6 @@ static int kp2000_pcie_probe(struct pci_dev *pdev,
 {
        int err = 0;
        struct kp2000_device *pcard;
-       int rv;
        unsigned long reg_bar_phys_addr;
        unsigned long reg_bar_phys_len;
        unsigned long dma_bar_phys_addr;
@@ -445,11 +444,11 @@ static int kp2000_pcie_probe(struct pci_dev *pdev,
        if (err < 0)
                goto err_release_dma;
 
-       rv = request_irq(pcard->pdev->irq, kp2000_irq_handler, IRQF_SHARED,
-                        pcard->name, pcard);
-       if (rv) {
+       err = request_irq(pcard->pdev->irq, kp2000_irq_handler, IRQF_SHARED,
+                         pcard->name, pcard);
+       if (err) {
                dev_err(&pcard->pdev->dev,
-                       "%s: failed to request_irq: %d\n", __func__, rv);
+                       "%s: failed to request_irq: %d\n", __func__, err);
                goto err_disable_msi;
        }
 
index 87a6dac4890dc4ffe79d31e222379bfa7846fd35..ab6f39175d992ab07cfb46572e5f2f76f319b5ac 100644 (file)
@@ -30,5 +30,4 @@ Now the TODOs:
 
 Please send any patches to:
 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Wolfram Sang <wsa@the-dreams.de>
 Linux Driver Project Developer List <driverdev-devel@linuxdriverproject.org>
index 6e1e50048651ed184b0868f7f55b3e329c80ca61..9aa14331affd636730ac0cbe824fffd72b7dd056 100644 (file)
@@ -57,8 +57,10 @@ static int send_scan_req(struct wfx_vif *wvif,
        wvif->scan_abort = false;
        reinit_completion(&wvif->scan_complete);
        timeout = hif_scan(wvif, req, start_idx, i - start_idx);
-       if (timeout < 0)
+       if (timeout < 0) {
+               wfx_tx_unlock(wvif->wdev);
                return timeout;
+       }
        ret = wait_for_completion_timeout(&wvif->scan_complete, timeout);
        if (req->channels[start_idx]->max_power != wvif->vif->bss_conf.txpower)
                hif_set_output_power(wvif, wvif->vif->bss_conf.txpower);
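The added wfx_tx_unlock() balances a lock taken earlier in send_scan_req(), so the early error return no longer leaks it. The general shape of the rule, sketched here with a mutex:

	static int locked_op_sketch(struct mutex *lock, int (*op)(void))
	{
		int ret;

		mutex_lock(lock);
		ret = op();
		if (ret < 0) {
			mutex_unlock(lock);	/* every exit path releases the lock */
			return ret;
		}
		/* ... more work under the lock ... */
		mutex_unlock(lock);
		return 0;
	}
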
index 51ffd5c002dee2da58281ce0e1c2032dac1ad9e2..1c181d31f4c872d0d18290668842654382c6a593 100644 (file)
@@ -432,7 +432,7 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
                                target_to_linux_sector(dev, cmd->t_task_lba),
                                target_to_linux_sector(dev,
                                        sbc_get_write_same_sectors(cmd)),
-                               GFP_KERNEL, false);
+                               GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
        if (ret)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
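blkdev_issue_zeroout() takes a flags word where an older bool parameter used to be, so the literal false quietly meant "no flags" and allowed the zeroout to be satisfied by unmapping. BLKDEV_ZERO_NOUNMAP forces written zeroes, which is what a WRITE SAME without UNMAP requires here. The corrected call shape:

	ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
				   GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
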
index 594b724bbf7976fce43c3f380a5b63a6d9969cd8..264a822c0bfac681c3e0b3da281f8b561204a36d 100644 (file)
@@ -3350,6 +3350,7 @@ static void target_tmr_work(struct work_struct *work)
 
        cmd->se_tfo->queue_tm_rsp(cmd);
 
+       transport_lun_remove_cmd(cmd);
        transport_cmd_check_stop_to_fabric(cmd);
        return;
 
index 3d084cec136f552a1048df38be78b6c0e6dfb573..50c7534ba31e9ac65b03d55d7c42de93dcec1341 100644 (file)
@@ -182,6 +182,9 @@ static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status)
                return ret;
 
        ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
+       if (ret)
+               return ret;
+
        if (val & ROUTER_CS_26_ONS)
                return -EOPNOTSUPP;
 
index 31b7e1b0374916744f3ec867c49d07b0d0dc00dd..d1b27b0522a3cde22e9ecb1ebc943a5a743c102f 100644 (file)
@@ -88,7 +88,7 @@ config HVC_DCC
 
 config HVC_RISCV_SBI
        bool "RISC-V SBI console support"
-       depends on RISCV_SBI
+       depends on RISCV_SBI_V01
        select HVC_DRIVER
        help
          This enables support for console output via RISC-V SBI calls, which
index 0aea76cd67ff2e72aaea9d339180badaa2f39d4b..adf9e80e7dc9e45fa956fe8940f2f5c20709c530 100644 (file)
@@ -86,7 +86,7 @@ config SERIAL_EARLYCON_ARM_SEMIHOST
 
 config SERIAL_EARLYCON_RISCV_SBI
        bool "Early console using RISC-V SBI"
-       depends on RISCV_SBI
+       depends on RISCV_SBI_V01
        select SERIAL_CORE
        select SERIAL_CORE_CONSOLE
        select SERIAL_EARLYCON
index ed0aa5c0d9b79829f26e501bab29a033369534e0..5674da2b76f0f23b4681f181aed82aab347abc98 100644 (file)
@@ -843,10 +843,8 @@ static int bcm_uart_probe(struct platform_device *pdev)
        if (IS_ERR(clk) && pdev->dev.of_node)
                clk = of_clk_get(pdev->dev.of_node, 0);
 
-       if (IS_ERR(clk)) {
-               clk_put(clk);
+       if (IS_ERR(clk))
                return -ENODEV;
-       }
 
        port->iotype = UPIO_MEM;
        port->irq = res_irq->start;
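The deleted clk_put() was being handed an IS_ERR() pointer, which is not a valid clock handle; only a successfully acquired clk may be put. Sketch of the pairing:

	struct clk *clk = clk_get(dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* nothing acquired, nothing to put */

	/* ... use the clock ... */
	clk_put(clk);
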
index 13eadcb8aec4e1554cca30ad399577cf1dace2ae..0b5110dad051085639bc5550f63a6ce52e720d32 100644 (file)
@@ -883,6 +883,7 @@ console_initcall(sifive_console_init);
 
 static void __ssp_add_console_port(struct sifive_serial_port *ssp)
 {
+       spin_lock_init(&ssp->port.lock);
        sifive_serial_console_ports[ssp->port.line] = ssp;
 }
 
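The one-liner matters because the early console path can take the port lock before normal uart registration would have initialized it. The general rule, sketched:

	struct my_console_port {
		spinlock_t lock;	/* illustrative structure */
	};

	static void my_console_port_setup(struct my_console_port *p)
	{
		spin_lock_init(&p->lock);	/* must run before any spin_lock() */
	}
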
index ac137b6a1dc124afc0aa8e0b13f0584121a34ecc..35e9e8faf8de90f927dfc23851e8387337103b9f 100644 (file)
@@ -1459,6 +1459,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
                cdns_uart_uart_driver.nr = CDNS_UART_NR_PORTS;
 #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
                cdns_uart_uart_driver.cons = &cdns_uart_console;
+               cdns_uart_console.index = id;
 #endif
 
                rc = uart_register_driver(&cdns_uart_uart_driver);
index e5ffed795e4cf4bc142c53c1717775cfb3a1a6d2..48a8199f7845dff3d4cefd892db27257962d18a6 100644 (file)
@@ -365,9 +365,14 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows)
        return uniscr;
 }
 
+static void vc_uniscr_free(struct uni_screen *uniscr)
+{
+       vfree(uniscr);
+}
+
 static void vc_uniscr_set(struct vc_data *vc, struct uni_screen *new_uniscr)
 {
-       vfree(vc->vc_uni_screen);
+       vc_uniscr_free(vc->vc_uni_screen);
        vc->vc_uni_screen = new_uniscr;
 }
 
@@ -1230,7 +1235,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
        err = resize_screen(vc, new_cols, new_rows, user);
        if (err) {
                kfree(newscreen);
-               kfree(new_uniscr);
+               vc_uniscr_free(new_uniscr);
                return err;
        }
 
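The second hunk is the substantive fix: new_uniscr comes from vc_uniscr_alloc(), which (per the matching vfree) is vmalloc-backed, so releasing it with kfree() was an allocator mismatch. Wrapping the free in vc_uniscr_free() keeps every call site paired correctly. The rule in miniature:

	void *buf = vmalloc(size);

	if (!buf)
		return -ENOMEM;
	/* ... */
	vfree(buf);	/* vmalloc pairs with vfree, kmalloc with kfree; never mix */
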
index 372460ea4df9afacd43be9236e5fa6b06176623a..4d43f3b28309d36a95de8968911b0028e8ef5771 100644 (file)
@@ -82,7 +82,7 @@ static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
  * @ptr: address of device controller register to be read and changed
  * @mask: bits requested to clear
  */
-void cdns3_clear_register_bit(void __iomem *ptr, u32 mask)
+static void cdns3_clear_register_bit(void __iomem *ptr, u32 mask)
 {
        mask = readl(ptr) & ~mask;
        writel(mask, ptr);
@@ -137,7 +137,7 @@ struct usb_request *cdns3_next_request(struct list_head *list)
  *
  * Returns buffer or NULL if no buffers in list
  */
-struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list)
+static struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list)
 {
        return list_first_entry_or_null(list, struct cdns3_aligned_buf, list);
 }
@@ -148,7 +148,7 @@ struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list)
  *
  * Returns request or NULL if no requests in list
  */
-struct cdns3_request *cdns3_next_priv_request(struct list_head *list)
+static struct cdns3_request *cdns3_next_priv_request(struct list_head *list)
 {
        return list_first_entry_or_null(list, struct cdns3_request, list);
 }
@@ -190,7 +190,7 @@ dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep,
        return priv_ep->trb_pool_dma + offset;
 }
 
-int cdns3_ring_size(struct cdns3_endpoint *priv_ep)
+static int cdns3_ring_size(struct cdns3_endpoint *priv_ep)
 {
        switch (priv_ep->type) {
        case USB_ENDPOINT_XFER_ISOC:
@@ -345,7 +345,7 @@ static void cdns3_ep_inc_deq(struct cdns3_endpoint *priv_ep)
        cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs);
 }
 
-void cdns3_move_deq_to_next_trb(struct cdns3_request *priv_req)
+static void cdns3_move_deq_to_next_trb(struct cdns3_request *priv_req)
 {
        struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
        int current_trb = priv_req->start_trb;
@@ -511,7 +511,7 @@ static void cdns3_wa2_descmiss_copy_data(struct cdns3_endpoint *priv_ep,
        }
 }
 
-struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev,
+static struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev,
                                              struct cdns3_endpoint *priv_ep,
                                              struct cdns3_request *priv_req)
 {
@@ -551,7 +551,7 @@ struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev,
        return &priv_req->request;
 }
 
-int cdns3_wa2_gadget_ep_queue(struct cdns3_device *priv_dev,
+static int cdns3_wa2_gadget_ep_queue(struct cdns3_device *priv_dev,
                              struct cdns3_endpoint *priv_ep,
                              struct cdns3_request *priv_req)
 {
@@ -836,7 +836,7 @@ void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
                cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
 }
 
-void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep)
+static void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep)
 {
        /* Workaround for stale data address in TRB */
        if (priv_ep->wa1_set) {
@@ -1904,7 +1904,7 @@ static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev,
        return 0;
 }
 
-void cdns3_stream_ep_reconfig(struct cdns3_device *priv_dev,
+static void cdns3_stream_ep_reconfig(struct cdns3_device *priv_dev,
                              struct cdns3_endpoint *priv_ep)
 {
        if (!priv_ep->use_streams || priv_dev->gadget.speed < USB_SPEED_SUPER)
@@ -1925,7 +1925,7 @@ void cdns3_stream_ep_reconfig(struct cdns3_device *priv_dev,
                               EP_CFG_TDL_CHK | EP_CFG_SID_CHK);
 }
 
-void cdns3_configure_dmult(struct cdns3_device *priv_dev,
+static void cdns3_configure_dmult(struct cdns3_device *priv_dev,
                           struct cdns3_endpoint *priv_ep)
 {
        struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
@@ -2548,7 +2548,7 @@ found:
        link_trb = priv_req->trb;
 
        /* Update ring only if removed request is on pending_req_list list */
-       if (req_on_hw_ring) {
+       if (req_on_hw_ring && link_trb) {
                link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma +
                        ((priv_req->end_trb + 1) * TRB_SIZE));
                link_trb->control = (link_trb->control & TRB_CYCLE) |
index af648ba6544d840c1fd2712f7332582c8a6f65b8..46105457e1caaee8aa723729a9f3b070caad62d5 100644 (file)
@@ -114,7 +114,7 @@ static int ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
                        hw_write_id_reg(ci, HS_PHY_GENCONFIG_2,
                                        HS_PHY_ULPI_TX_PKT_EN_CLR_FIX, 0);
 
-               if (!IS_ERR(ci->platdata->vbus_extcon.edev)) {
+               if (!IS_ERR(ci->platdata->vbus_extcon.edev) || ci->role_switch) {
                        hw_write_id_reg(ci, HS_PHY_GENCONFIG_2,
                                        HS_PHY_SESS_VLD_CTRL_EN,
                                        HS_PHY_SESS_VLD_CTRL_EN);
index 6833c918abcee7b46bf076f70d6fcce8a740ba7c..d93d94d7ff50605afddfb35afc360d5f2c88ff62 100644 (file)
@@ -217,6 +217,7 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
 {
        struct usb_memory *usbm = NULL;
        struct usb_dev_state *ps = file->private_data;
+       struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus);
        size_t size = vma->vm_end - vma->vm_start;
        void *mem;
        unsigned long flags;
@@ -250,11 +251,19 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
        usbm->vma_use_count = 1;
        INIT_LIST_HEAD(&usbm->memlist);
 
-       if (remap_pfn_range(vma, vma->vm_start,
-                       virt_to_phys(usbm->mem) >> PAGE_SHIFT,
-                       size, vma->vm_page_prot) < 0) {
-               dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
-               return -EAGAIN;
+       if (hcd->localmem_pool || !hcd_uses_dma(hcd)) {
+               if (remap_pfn_range(vma, vma->vm_start,
+                                   virt_to_phys(usbm->mem) >> PAGE_SHIFT,
+                                   size, vma->vm_page_prot) < 0) {
+                       dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
+                       return -EAGAIN;
+               }
+       } else {
+               if (dma_mmap_coherent(hcd->self.sysdev, vma, mem, dma_handle,
+                                     size)) {
+                       dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
+                       return -EAGAIN;
+               }
        }
 
        vma->vm_flags |= VM_IO;
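usbfs buffers may come from dma_alloc_coherent(), whose kernel virtual address is not generally virt_to_phys()-translatable; such buffers must be handed to userspace with dma_mmap_coherent(), while remap_pfn_range() stays correct for local-memory pools and PIO-only host controllers. A hedged sketch of the coherent path (allocation shown for context):

	dma_addr_t dma_handle;
	void *mem = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);

	if (!mem)
		return -ENOMEM;
	/* later, in the fops .mmap handler: */
	if (dma_mmap_coherent(dev, vma, mem, dma_handle, size))
		return -EAGAIN;
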
index 2b6565c06c237d43f22493a43be39272f0355aef..fc748c731832d8eea2ab7b5428887f5c103be86b 100644 (file)
@@ -39,6 +39,7 @@
 
 #define USB_VENDOR_GENESYS_LOGIC               0x05e3
 #define USB_VENDOR_SMSC                                0x0424
+#define USB_PRODUCT_USB5534B                   0x5534
 #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND       0x01
 #define HUB_QUIRK_DISABLE_AUTOSUSPEND          0x02
 
@@ -5621,8 +5622,11 @@ out_hdev_lock:
 }
 
 static const struct usb_device_id hub_id_table[] = {
-    { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS,
+    { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+                   | USB_DEVICE_ID_MATCH_PRODUCT
+                   | USB_DEVICE_ID_MATCH_INT_CLASS,
       .idVendor = USB_VENDOR_SMSC,
+      .idProduct = USB_PRODUCT_USB5534B,
       .bInterfaceClass = USB_CLASS_HUB,
       .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
     { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
index a48678a0c83ac0d0253067e82966c1bc592240a4..6197938dcc2d8f104efbd8e923bbc6e3d0b94c7b 100644 (file)
@@ -1144,11 +1144,11 @@ void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr,
 
        if (usb_endpoint_out(epaddr)) {
                ep = dev->ep_out[epnum];
-               if (reset_hardware)
+               if (reset_hardware && epnum != 0)
                        dev->ep_out[epnum] = NULL;
        } else {
                ep = dev->ep_in[epnum];
-               if (reset_hardware)
+               if (reset_hardware && epnum != 0)
                        dev->ep_in[epnum] = NULL;
        }
        if (ep) {
index 206caa0ea1c6733d690c087229fac91acd3e55db..7a2304565a73262bd1d8ae3e6cd5be611d1044e0 100644 (file)
@@ -4,6 +4,7 @@ config USB_DWC3
        tristate "DesignWare USB3 DRD Core Support"
        depends on (USB || USB_GADGET) && HAS_DMA
        select USB_XHCI_PLATFORM if USB_XHCI_HCD
+       select USB_ROLE_SWITCH if USB_DWC3_DUAL_ROLE
        help
          Say Y or M here if your system has a Dual Role SuperSpeed
          USB controller based on the DesignWare USB3 IP Core.
index 7051611229c983d7ec63f819df8c88bd846e2a48..b67372737dc9b23e37d00b4970bec7ea15c037d2 100644 (file)
@@ -114,6 +114,7 @@ static const struct property_entry dwc3_pci_intel_properties[] = {
 
 static const struct property_entry dwc3_pci_mrfld_properties[] = {
        PROPERTY_ENTRY_STRING("dr_mode", "otg"),
+       PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
        PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
        {}
 };
index 00746c2848c060fd1eaa66b9c95612ffe74a9cfd..585cb3deea7ad915093181c51ecd39302be77b17 100644 (file)
@@ -2483,9 +2483,6 @@ static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
        for_each_sg(sg, s, pending, i) {
                trb = &dep->trb_pool[dep->trb_dequeue];
 
-               if (trb->ctrl & DWC3_TRB_CTRL_HWO)
-                       break;
-
                req->sg = sg_next(s);
                req->num_pending_sgs--;
 
index 32b637e3e1fa2b5bdccb0ba9f001a96246f751f5..6a9aa4413d64b5043f928f3369eee465c34b79f7 100644 (file)
@@ -260,6 +260,9 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item,
        char *name;
        int ret;
 
+       if (strlen(page) < len)
+               return -EOVERFLOW;
+
        name = kstrdup(page, GFP_KERNEL);
        if (!name)
                return -ENOMEM;
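strlen(page) < len can only hold if the buffer contains an embedded NUL; kstrdup() would then produce a string shorter than the declared length, and the later name[len - 1] newline trim (in the omitted context) would land out of bounds. The guard in isolation:

	if (strlen(page) < len)
		return -EOVERFLOW;	/* embedded '\0' in the write buffer */
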
index dd81fd538cb89c78543f724c99db8e1fd76a3b06..a748ed0842e8af66e330fa68e8f4dd83fd920451 100644 (file)
@@ -300,8 +300,10 @@ static int audio_bind(struct usb_composite_dev *cdev)
                struct usb_descriptor_header *usb_desc;
 
                usb_desc = usb_otg_descriptor_alloc(cdev->gadget);
-               if (!usb_desc)
+               if (!usb_desc) {
+                       status = -ENOMEM;
                        goto fail;
+               }
                usb_otg_descriptor_init(cdev->gadget, usb_desc);
                otg_desc[0] = usb_desc;
                otg_desc[1] = NULL;
index 8d7a556ece30843108c1a797ccf494da4e9e8a57..563363aba48f0599f00c0d260c1797bd7860d67e 100644 (file)
@@ -179,8 +179,10 @@ static int cdc_bind(struct usb_composite_dev *cdev)
                struct usb_descriptor_header *usb_desc;
 
                usb_desc = usb_otg_descriptor_alloc(gadget);
-               if (!usb_desc)
+               if (!usb_desc) {
+                       status = -ENOMEM;
                        goto fail1;
+               }
                usb_otg_descriptor_init(gadget, usb_desc);
                otg_desc[0] = usb_desc;
                otg_desc[1] = NULL;
index aa0de9e35afaf78e2dd64562b8a54a887e593d77..3afddd3bea6e7197954fdaeedaf41034d9f59345 100644 (file)
@@ -1361,7 +1361,6 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
 
        req->buf = dev->rbuf;
        req->context = NULL;
-       value = -EOPNOTSUPP;
        switch (ctrl->bRequest) {
 
        case USB_REQ_GET_DESCRIPTOR:
@@ -1784,7 +1783,7 @@ static ssize_t
 dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
 {
        struct dev_data         *dev = fd->private_data;
-       ssize_t                 value = len, length = len;
+       ssize_t                 value, length = len;
        unsigned                total;
        u32                     tag;
        char                    *kbuf;
index c61e71ba7045a3e8a996e95be49775a02b6fa46c..0f1b45e3abd1a1ead7b2776be10a2a5747960136 100644 (file)
@@ -156,8 +156,10 @@ static int gncm_bind(struct usb_composite_dev *cdev)
                struct usb_descriptor_header *usb_desc;
 
                usb_desc = usb_otg_descriptor_alloc(gadget);
-               if (!usb_desc)
+               if (!usb_desc) {
+                       status = -ENOMEM;
                        goto fail;
+               }
                usb_otg_descriptor_init(gadget, usb_desc);
                otg_desc[0] = usb_desc;
                otg_desc[1] = NULL;
index ca7d95bf7397e15364206cc473b712a92e7dd862..e01e366d89cd585cff3252c88e87cbe5691149d7 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/compiler.h>
+#include <linux/ctype.h>
 #include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/kref.h>
@@ -123,8 +124,6 @@ static void raw_event_queue_destroy(struct raw_event_queue *queue)
 
 struct raw_dev;
 
-#define USB_RAW_MAX_ENDPOINTS 32
-
 enum ep_state {
        STATE_EP_DISABLED,
        STATE_EP_ENABLED,
@@ -134,6 +133,7 @@ struct raw_ep {
        struct raw_dev          *dev;
        enum ep_state           state;
        struct usb_ep           *ep;
+       u8                      addr;
        struct usb_request      *req;
        bool                    urb_queued;
        bool                    disabling;
@@ -168,7 +168,8 @@ struct raw_dev {
        bool                            ep0_out_pending;
        bool                            ep0_urb_queued;
        ssize_t                         ep0_status;
-       struct raw_ep                   eps[USB_RAW_MAX_ENDPOINTS];
+       struct raw_ep                   eps[USB_RAW_EPS_NUM_MAX];
+       int                             eps_num;
 
        struct completion               ep0_done;
        struct raw_event_queue          queue;
@@ -202,8 +203,8 @@ static void dev_free(struct kref *kref)
                usb_ep_free_request(dev->gadget->ep0, dev->req);
        }
        raw_event_queue_destroy(&dev->queue);
-       for (i = 0; i < USB_RAW_MAX_ENDPOINTS; i++) {
-               if (dev->eps[i].state != STATE_EP_ENABLED)
+       for (i = 0; i < dev->eps_num; i++) {
+               if (dev->eps[i].state == STATE_EP_DISABLED)
                        continue;
                usb_ep_disable(dev->eps[i].ep);
                usb_ep_free_request(dev->eps[i].ep, dev->eps[i].req);
@@ -249,12 +250,26 @@ static void gadget_ep0_complete(struct usb_ep *ep, struct usb_request *req)
        complete(&dev->ep0_done);
 }
 
+static u8 get_ep_addr(const char *name)
+{
+       /* If the endpoint has a fixed function (named e.g. "ep12out-bulk"),
+        * parse the endpoint address from its name. We deliberately use the
+        * deprecated simple_strtoul() function here, as the number isn't
+        * followed by '\0' or '\n'.

+        */
+       if (isdigit(name[2]))
+               return simple_strtoul(&name[2], NULL, 10);
+       /* Otherwise the endpoint is configurable (named e.g. "ep-a"). */
+       return USB_RAW_EP_ADDR_ANY;
+}
+
 static int gadget_bind(struct usb_gadget *gadget,
                        struct usb_gadget_driver *driver)
 {
-       int ret = 0;
+       int ret = 0, i = 0;
        struct raw_dev *dev = container_of(driver, struct raw_dev, driver);
        struct usb_request *req;
+       struct usb_ep *ep;
        unsigned long flags;
 
        if (strcmp(gadget->name, dev->udc_name) != 0)
@@ -273,6 +288,13 @@ static int gadget_bind(struct usb_gadget *gadget,
        dev->req->context = dev;
        dev->req->complete = gadget_ep0_complete;
        dev->gadget = gadget;
+       gadget_for_each_ep(ep, dev->gadget) {
+               dev->eps[i].ep = ep;
+               dev->eps[i].addr = get_ep_addr(ep->name);
+               dev->eps[i].state = STATE_EP_DISABLED;
+               i++;
+       }
+       dev->eps_num = i;
        spin_unlock_irqrestore(&dev->lock, flags);
 
        /* Matches kref_put() in gadget_unbind(). */
@@ -555,7 +577,7 @@ static void *raw_alloc_io_data(struct usb_raw_ep_io *io, void __user *ptr,
 
        if (copy_from_user(io, ptr, sizeof(*io)))
                return ERR_PTR(-EFAULT);
-       if (io->ep >= USB_RAW_MAX_ENDPOINTS)
+       if (io->ep >= USB_RAW_EPS_NUM_MAX)
                return ERR_PTR(-EINVAL);
        if (!usb_raw_io_flags_valid(io->flags))
                return ERR_PTR(-EINVAL);
@@ -669,43 +691,61 @@ static int raw_ioctl_ep0_read(struct raw_dev *dev, unsigned long value)
        if (IS_ERR(data))
                return PTR_ERR(data);
        ret = raw_process_ep0_io(dev, &io, data, false);
-       if (ret)
+       if (ret < 0)
                goto free;
 
        length = min(io.length, (unsigned int)ret);
        if (copy_to_user((void __user *)(value + sizeof(io)), data, length))
                ret = -EFAULT;
+       else
+               ret = length;
 free:
        kfree(data);
        return ret;
 }
 
-static bool check_ep_caps(struct usb_ep *ep,
-                               struct usb_endpoint_descriptor *desc)
+static int raw_ioctl_ep0_stall(struct raw_dev *dev, unsigned long value)
 {
-       switch (usb_endpoint_type(desc)) {
-       case USB_ENDPOINT_XFER_ISOC:
-               if (!ep->caps.type_iso)
-                       return false;
-               break;
-       case USB_ENDPOINT_XFER_BULK:
-               if (!ep->caps.type_bulk)
-                       return false;
-               break;
-       case USB_ENDPOINT_XFER_INT:
-               if (!ep->caps.type_int)
-                       return false;
-               break;
-       default:
-               return false;
+       int ret = 0;
+       unsigned long flags;
+
+       if (value)
+               return -EINVAL;
+       spin_lock_irqsave(&dev->lock, flags);
+       if (dev->state != STATE_DEV_RUNNING) {
+               dev_dbg(dev->dev, "fail, device is not running\n");
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+       if (!dev->gadget) {
+               dev_dbg(dev->dev, "fail, gadget is not bound\n");
+               ret = -EBUSY;
+               goto out_unlock;
+       }
+       if (dev->ep0_urb_queued) {
+               dev_dbg(&dev->gadget->dev, "fail, urb already queued\n");
+               ret = -EBUSY;
+               goto out_unlock;
+       }
+       if (!dev->ep0_in_pending && !dev->ep0_out_pending) {
+               dev_dbg(&dev->gadget->dev, "fail, no request pending\n");
+               ret = -EBUSY;
+               goto out_unlock;
        }
 
-       if (usb_endpoint_dir_in(desc) && !ep->caps.dir_in)
-               return false;
-       if (usb_endpoint_dir_out(desc) && !ep->caps.dir_out)
-               return false;
+       ret = usb_ep_set_halt(dev->gadget->ep0);
+       if (ret < 0)
+               dev_err(&dev->gadget->dev,
+                               "fail, usb_ep_set_halt returned %d\n", ret);
+
+       if (dev->ep0_in_pending)
+               dev->ep0_in_pending = false;
+       else
+               dev->ep0_out_pending = false;
 
-       return true;
+out_unlock:
+       spin_unlock_irqrestore(&dev->lock, flags);
+       return ret;
 }
 
 static int raw_ioctl_ep_enable(struct raw_dev *dev, unsigned long value)
@@ -713,7 +753,7 @@ static int raw_ioctl_ep_enable(struct raw_dev *dev, unsigned long value)
        int ret = 0, i;
        unsigned long flags;
        struct usb_endpoint_descriptor *desc;
-       struct usb_ep *ep = NULL;
+       struct raw_ep *ep;
 
        desc = memdup_user((void __user *)value, sizeof(*desc));
        if (IS_ERR(desc))
@@ -741,41 +781,32 @@ static int raw_ioctl_ep_enable(struct raw_dev *dev, unsigned long value)
                goto out_free;
        }
 
-       for (i = 0; i < USB_RAW_MAX_ENDPOINTS; i++) {
-               if (dev->eps[i].state == STATE_EP_ENABLED)
+       for (i = 0; i < dev->eps_num; i++) {
+               ep = &dev->eps[i];
+               if (ep->state != STATE_EP_DISABLED)
                        continue;
-               break;
-       }
-       if (i == USB_RAW_MAX_ENDPOINTS) {
-               dev_dbg(&dev->gadget->dev,
-                               "fail, no device endpoints available\n");
-               ret = -EBUSY;
-               goto out_free;
-       }
-
-       gadget_for_each_ep(ep, dev->gadget) {
-               if (ep->enabled)
+               if (ep->addr != usb_endpoint_num(desc) &&
+                               ep->addr != USB_RAW_EP_ADDR_ANY)
                        continue;
-               if (!check_ep_caps(ep, desc))
+               if (!usb_gadget_ep_match_desc(dev->gadget, ep->ep, desc, NULL))
                        continue;
-               ep->desc = desc;
-               ret = usb_ep_enable(ep);
+               ep->ep->desc = desc;
+               ret = usb_ep_enable(ep->ep);
                if (ret < 0) {
                        dev_err(&dev->gadget->dev,
                                "fail, usb_ep_enable returned %d\n", ret);
                        goto out_free;
                }
-               dev->eps[i].req = usb_ep_alloc_request(ep, GFP_ATOMIC);
-               if (!dev->eps[i].req) {
+               ep->req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC);
+               if (!ep->req) {
                        dev_err(&dev->gadget->dev,
                                "fail, usb_ep_alloc_request failed\n");
-                       usb_ep_disable(ep);
+                       usb_ep_disable(ep->ep);
                        ret = -ENOMEM;
                        goto out_free;
                }
-               dev->eps[i].ep = ep;
-               dev->eps[i].state = STATE_EP_ENABLED;
-               ep->driver_data = &dev->eps[i];
+               ep->state = STATE_EP_ENABLED;
+               ep->ep->driver_data = ep;
                ret = i;
                goto out_unlock;
        }
@@ -794,10 +825,6 @@ static int raw_ioctl_ep_disable(struct raw_dev *dev, unsigned long value)
 {
        int ret = 0, i = value;
        unsigned long flags;
-       const void *desc;
-
-       if (i < 0 || i >= USB_RAW_MAX_ENDPOINTS)
-               return -EINVAL;
 
        spin_lock_irqsave(&dev->lock, flags);
        if (dev->state != STATE_DEV_RUNNING) {
@@ -810,7 +837,12 @@ static int raw_ioctl_ep_disable(struct raw_dev *dev, unsigned long value)
                ret = -EBUSY;
                goto out_unlock;
        }
-       if (dev->eps[i].state != STATE_EP_ENABLED) {
+       if (i < 0 || i >= dev->eps_num) {
+               dev_dbg(dev->dev, "fail, invalid endpoint\n");
+               ret = -EBUSY;
+               goto out_unlock;
+       }
+       if (dev->eps[i].state == STATE_EP_DISABLED) {
                dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
                ret = -EINVAL;
                goto out_unlock;
@@ -834,10 +866,8 @@ static int raw_ioctl_ep_disable(struct raw_dev *dev, unsigned long value)
 
        spin_lock_irqsave(&dev->lock, flags);
        usb_ep_free_request(dev->eps[i].ep, dev->eps[i].req);
-       desc = dev->eps[i].ep->desc;
-       dev->eps[i].ep = NULL;
+       kfree(dev->eps[i].ep->desc);
        dev->eps[i].state = STATE_EP_DISABLED;
-       kfree(desc);
        dev->eps[i].disabling = false;
 
 out_unlock:
@@ -845,6 +875,74 @@ out_unlock:
        return ret;
 }
 
+static int raw_ioctl_ep_set_clear_halt_wedge(struct raw_dev *dev,
+               unsigned long value, bool set, bool halt)
+{
+       int ret = 0, i = value;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->lock, flags);
+       if (dev->state != STATE_DEV_RUNNING) {
+               dev_dbg(dev->dev, "fail, device is not running\n");
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+       if (!dev->gadget) {
+               dev_dbg(dev->dev, "fail, gadget is not bound\n");
+               ret = -EBUSY;
+               goto out_unlock;
+       }
+       if (i < 0 || i >= dev->eps_num) {
+               dev_dbg(dev->dev, "fail, invalid endpoint\n");
+               ret = -EBUSY;
+               goto out_unlock;
+       }
+       if (dev->eps[i].state == STATE_EP_DISABLED) {
+               dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+       if (dev->eps[i].disabling) {
+               dev_dbg(&dev->gadget->dev,
+                               "fail, disable is in progress\n");
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+       if (dev->eps[i].urb_queued) {
+               dev_dbg(&dev->gadget->dev,
+                               "fail, waiting for urb completion\n");
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+       if (usb_endpoint_xfer_isoc(dev->eps[i].ep->desc)) {
+               dev_dbg(&dev->gadget->dev,
+                               "fail, can't halt/wedge ISO endpoint\n");
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       if (set && halt) {
+               ret = usb_ep_set_halt(dev->eps[i].ep);
+               if (ret < 0)
+                       dev_err(&dev->gadget->dev,
+                               "fail, usb_ep_set_halt returned %d\n", ret);
+       } else if (!set && halt) {
+               ret = usb_ep_clear_halt(dev->eps[i].ep);
+               if (ret < 0)
+                       dev_err(&dev->gadget->dev,
+                               "fail, usb_ep_clear_halt returned %d\n", ret);
+       } else if (set && !halt) {
+               ret = usb_ep_set_wedge(dev->eps[i].ep);
+               if (ret < 0)
+                       dev_err(&dev->gadget->dev,
+                               "fail, usb_ep_set_wedge returned %d\n", ret);
+       }
+
+out_unlock:
+       spin_unlock_irqrestore(&dev->lock, flags);
+       return ret;
+}
+
 static void gadget_ep_complete(struct usb_ep *ep, struct usb_request *req)
 {
        struct raw_ep *r_ep = (struct raw_ep *)ep->driver_data;
@@ -866,7 +964,7 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
 {
        int ret = 0;
        unsigned long flags;
-       struct raw_ep *ep = &dev->eps[io->ep];
+       struct raw_ep *ep;
        DECLARE_COMPLETION_ONSTACK(done);
 
        spin_lock_irqsave(&dev->lock, flags);
@@ -880,6 +978,12 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
                ret = -EBUSY;
                goto out_unlock;
        }
+       if (io->ep >= dev->eps_num) {
+               dev_dbg(&dev->gadget->dev, "fail, invalid endpoint\n");
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+       ep = &dev->eps[io->ep];
        if (ep->state != STATE_EP_ENABLED) {
                dev_dbg(&dev->gadget->dev, "fail, endpoint is not enabled\n");
                ret = -EBUSY;
@@ -964,12 +1068,14 @@ static int raw_ioctl_ep_read(struct raw_dev *dev, unsigned long value)
        if (IS_ERR(data))
                return PTR_ERR(data);
        ret = raw_process_ep_io(dev, &io, data, false);
-       if (ret)
+       if (ret < 0)
                goto free;
 
        length = min(io.length, (unsigned int)ret);
        if (copy_to_user((void __user *)(value + sizeof(io)), data, length))
                ret = -EFAULT;
+       else
+               ret = length;
 free:
        kfree(data);
        return ret;
@@ -1023,6 +1129,71 @@ out_unlock:
        return ret;
 }
 
+static void fill_ep_caps(struct usb_ep_caps *caps,
+                               struct usb_raw_ep_caps *raw_caps)
+{
+       raw_caps->type_control = caps->type_control;
+       raw_caps->type_iso = caps->type_iso;
+       raw_caps->type_bulk = caps->type_bulk;
+       raw_caps->type_int = caps->type_int;
+       raw_caps->dir_in = caps->dir_in;
+       raw_caps->dir_out = caps->dir_out;
+}
+
+static void fill_ep_limits(struct usb_ep *ep, struct usb_raw_ep_limits *limits)
+{
+       limits->maxpacket_limit = ep->maxpacket_limit;
+       limits->max_streams = ep->max_streams;
+}
+
+static int raw_ioctl_eps_info(struct raw_dev *dev, unsigned long value)
+{
+       int ret = 0, i;
+       unsigned long flags;
+       struct usb_raw_eps_info *info;
+       struct raw_ep *ep;
+
+       info = kmalloc(sizeof(*info), GFP_KERNEL);
+       if (!info) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       spin_lock_irqsave(&dev->lock, flags);
+       if (dev->state != STATE_DEV_RUNNING) {
+               dev_dbg(dev->dev, "fail, device is not running\n");
+               ret = -EINVAL;
+               spin_unlock_irqrestore(&dev->lock, flags);
+               goto out_free;
+       }
+       if (!dev->gadget) {
+               dev_dbg(dev->dev, "fail, gadget is not bound\n");
+               ret = -EBUSY;
+               spin_unlock_irqrestore(&dev->lock, flags);
+               goto out_free;
+       }
+
+       memset(info, 0, sizeof(*info));
+       for (i = 0; i < dev->eps_num; i++) {
+               ep = &dev->eps[i];
+               strscpy(&info->eps[i].name[0], ep->ep->name,
+                               USB_RAW_EP_NAME_MAX);
+               info->eps[i].addr = ep->addr;
+               fill_ep_caps(&ep->ep->caps, &info->eps[i].caps);
+               fill_ep_limits(ep->ep, &info->eps[i].limits);
+       }
+       ret = dev->eps_num;
+       spin_unlock_irqrestore(&dev->lock, flags);
+
+       if (copy_to_user((void __user *)value, info, sizeof(*info)))
+               ret = -EFAULT;
+
+out_free:
+       kfree(info);
+out:
+       return ret;
+}
+
 static long raw_ioctl(struct file *fd, unsigned int cmd, unsigned long value)
 {
        struct raw_dev *dev = fd->private_data;
@@ -1065,6 +1236,24 @@ static long raw_ioctl(struct file *fd, unsigned int cmd, unsigned long value)
        case USB_RAW_IOCTL_VBUS_DRAW:
                ret = raw_ioctl_vbus_draw(dev, value);
                break;
+       case USB_RAW_IOCTL_EPS_INFO:
+               ret = raw_ioctl_eps_info(dev, value);
+               break;
+       case USB_RAW_IOCTL_EP0_STALL:
+               ret = raw_ioctl_ep0_stall(dev, value);
+               break;
+       case USB_RAW_IOCTL_EP_SET_HALT:
+               ret = raw_ioctl_ep_set_clear_halt_wedge(
+                                       dev, value, true, true);
+               break;
+       case USB_RAW_IOCTL_EP_CLEAR_HALT:
+               ret = raw_ioctl_ep_set_clear_halt_wedge(
+                                       dev, value, false, true);
+               break;
+       case USB_RAW_IOCTL_EP_SET_WEDGE:
+               ret = raw_ioctl_ep_set_clear_halt_wedge(
+                                       dev, value, true, false);
+               break;
        default:
                ret = -EINVAL;
        }
index 22200341c8ec3a93f73d0132187616b98519a79b..b771a854e29c36f20c54aa847ed971df953aaebe 100644 (file)
@@ -185,7 +185,7 @@ static int regs_dbg_release(struct inode *inode, struct file *file)
        return 0;
 }
 
-const struct file_operations queue_dbg_fops = {
+static const struct file_operations queue_dbg_fops = {
        .owner          = THIS_MODULE,
        .open           = queue_dbg_open,
        .llseek         = no_llseek,
@@ -193,7 +193,7 @@ const struct file_operations queue_dbg_fops = {
        .release        = queue_dbg_release,
 };
 
-const struct file_operations regs_dbg_fops = {
+static const struct file_operations regs_dbg_fops = {
        .owner          = THIS_MODULE,
        .open           = regs_dbg_open,
        .llseek         = generic_file_llseek,
index a8273b589456b930be62c403d79a683025ed2c81..5af0fe9c61d759fb24e8f20cc4d86d95452de4d5 100644 (file)
@@ -2647,6 +2647,8 @@ net2272_plat_probe(struct platform_device *pdev)
  err_req:
        release_mem_region(base, len);
  err:
+       kfree(dev);
+
        return ret;
 }
 
index 52a6add961f443fef3fdf49e4e8aa8dd0d8049b9..dfabc54cdc279049a611a21a9f46cf429fee800c 100644 (file)
@@ -3840,11 +3840,11 @@ static int __maybe_unused tegra_xudc_suspend(struct device *dev)
 
        flush_work(&xudc->usb_role_sw_work);
 
-       /* Forcibly disconnect before powergating. */
-       tegra_xudc_device_mode_off(xudc);
-
-       if (!pm_runtime_status_suspended(dev))
+       if (!pm_runtime_status_suspended(dev)) {
+               /* Forcibly disconnect before powergating. */
+               tegra_xudc_device_mode_off(xudc);
                tegra_xudc_powergate(xudc);
+       }
 
        pm_runtime_disable(dev);
 
index 1d4f6f85f0febd1d967c6619047156c9607cc2cb..ea460b9682d5ff4619634ac5ddd6c40ee4ec084a 100644 (file)
@@ -362,6 +362,7 @@ static int xhci_plat_remove(struct platform_device *dev)
        struct clk *reg_clk = xhci->reg_clk;
        struct usb_hcd *shared_hcd = xhci->shared_hcd;
 
+       pm_runtime_get_sync(&dev->dev);
        xhci->xhc_state |= XHCI_STATE_REMOVING;
 
        usb_remove_hcd(shared_hcd);
@@ -375,8 +376,9 @@ static int xhci_plat_remove(struct platform_device *dev)
        clk_disable_unprepare(reg_clk);
        usb_put_hcd(hcd);
 
-       pm_runtime_set_suspended(&dev->dev);
        pm_runtime_disable(&dev->dev);
+       pm_runtime_put_noidle(&dev->dev);
+       pm_runtime_set_suspended(&dev->dev);
 
        return 0;
 }
index 0fda0c0f4d31fce5cc555dabceda120ee12fa9c4..2c255d0620b054d789e6a5b8a044205debd4bbab 100644 (file)
@@ -3433,8 +3433,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                        /* New sg entry */
                        --num_sgs;
                        sent_len -= block_len;
-                       if (num_sgs != 0) {
-                               sg = sg_next(sg);
+                       sg = sg_next(sg);
+                       if (num_sgs != 0 && sg) {
                                block_len = sg_dma_len(sg);
                                addr = (u64) sg_dma_address(sg);
                                addr += sent_len;
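sg_next() returns NULL at the end of a scatterlist, so the cursor is now advanced first and tested before any sg_dma_len()/sg_dma_address() dereference. The safe-advance shape:

	while (num_sgs) {
		/* ... consume the current entry ... */
		num_sgs--;
		sg = sg_next(sg);
		if (!sg)
			break;		/* end of chain: stop before dereferencing */
		block_len = sg_dma_len(sg);
	}
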
index c96e5dab0a480fea8a8fb8e6ad17c69e50b71a6d..fdeade6254aeccad2bb13da9ac7dbce4edaa23b6 100644 (file)
@@ -276,7 +276,7 @@ static const struct file_operations mtu3_ep_fops = {
        .release = single_release,
 };
 
-static struct debugfs_reg32 mtu3_prb_regs[] = {
+static const struct debugfs_reg32 mtu3_prb_regs[] = {
        dump_prb_reg("enable", U3D_SSUSB_PRB_CTRL0),
        dump_prb_reg("byte-sell", U3D_SSUSB_PRB_CTRL1),
        dump_prb_reg("byte-selh", U3D_SSUSB_PRB_CTRL2),
@@ -349,7 +349,7 @@ static const struct file_operations mtu3_probe_fops = {
 static void mtu3_debugfs_create_prb_files(struct mtu3 *mtu)
 {
        struct ssusb_mtk *ssusb = mtu->ssusb;
-       struct debugfs_reg32 *regs;
+       const struct debugfs_reg32 *regs;
        struct dentry *dir_prb;
        int i;
 
index bfebf1f2e991cf0c00793e1c517b30fb8c66a60e..9a7e655d528011e57d9319be026e0914f8ddbf8b 100644 (file)
@@ -377,7 +377,7 @@ static int twl6030_usb_probe(struct platform_device *pdev)
        if (status < 0) {
                dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
                        twl->irq1, status);
-               return status;
+               goto err_put_regulator;
        }
 
        status = request_threaded_irq(twl->irq2, NULL, twl6030_usb_irq,
@@ -386,8 +386,7 @@ static int twl6030_usb_probe(struct platform_device *pdev)
        if (status < 0) {
                dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
                        twl->irq2, status);
-               free_irq(twl->irq1, twl);
-               return status;
+               goto err_free_irq1;
        }
 
        twl->asleep = 0;
@@ -396,6 +395,13 @@ static int twl6030_usb_probe(struct platform_device *pdev)
        dev_info(&pdev->dev, "Initialized TWL6030 USB module\n");
 
        return 0;
+
+err_free_irq1:
+       free_irq(twl->irq1, twl);
+err_put_regulator:
+       regulator_put(twl->usb3v3);
+
+       return status;
 }
 
 static int twl6030_usb_remove(struct platform_device *pdev)
index ffd984142171747c6f45e17936bf507b7e4c70b2..d63072fee099ce062757ebb9aa25bd25ba901f51 100644 (file)
@@ -1138,8 +1138,8 @@ static void garmin_read_process(struct garmin_data *garmin_data_p,
                   send it directly to the tty port */
                if (garmin_data_p->flags & FLAGS_QUEUING) {
                        pkt_add(garmin_data_p, data, data_length);
-               } else if (bulk_data ||
-                          getLayerId(data) == GARMIN_LAYERID_APPL) {
+               } else if (bulk_data || (data_length >= sizeof(u32) &&
+                               getLayerId(data) == GARMIN_LAYERID_APPL)) {
 
                        spin_lock_irqsave(&garmin_data_p->lock, flags);
                        garmin_data_p->flags |= APP_RESP_SEEN;
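getLayerId() reads a 32-bit header field out of the packet, so the caller must first establish that at least sizeof(u32) bytes actually arrived; short reads were previously passed through unchecked. A hedged sketch of such a guard (helper name is illustrative):

	static u32 layer_id_or_zero(const u8 *data, size_t len)
	{
		__le32 id;

		if (len < sizeof(id))
			return 0;	/* too short to carry a layer id */
		memcpy(&id, data, sizeof(id));
		return le32_to_cpu(id);
	}
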
index 613f91add03da189c0fd5334cbccbbf720060079..ce0401d3137f1341929b3c3027175ac11938ca2f 100644 (file)
@@ -173,6 +173,7 @@ static const struct usb_device_id id_table[] = {
        {DEVICE_SWI(0x413c, 0x81b3)},   /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
        {DEVICE_SWI(0x413c, 0x81b5)},   /* Dell Wireless 5811e QDL */
        {DEVICE_SWI(0x413c, 0x81b6)},   /* Dell Wireless 5811e QDL */
+       {DEVICE_SWI(0x413c, 0x81cc)},   /* Dell Wireless 5816e */
        {DEVICE_SWI(0x413c, 0x81cf)},   /* Dell Wireless 5819 */
        {DEVICE_SWI(0x413c, 0x81d0)},   /* Dell Wireless 5819 */
        {DEVICE_SWI(0x413c, 0x81d1)},   /* Dell Wireless 5818 */
index 1b23741036ee8e913f6a8e508e754a94d78fb19e..37157ed9a881a358968d85803428f95786628fc5 100644 (file)
  * and don't forget to CC: the USB development list <linux-usb@vger.kernel.org>
  */
 
+/* Reported-by: Julian Groß <julian.g@posteo.de> */
+UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999,
+               "LaCie",
+               "2Big Quadra USB3",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_REPORT_OPCODES),
+
 /*
  * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
  * commands in UAS mode.  Observed with the 1.28 firmware; are there others?
index f5c5e0aef66f069e428e49724eb3e9dabd2ab17a..c22e5c4bbf1a9e1bf25ed0fa310198b836b5b7c6 100644 (file)
@@ -63,6 +63,7 @@ enum {
 #define PMC_USB_ALTMODE_DP_MODE_SHIFT  8
 
 /* TBT specific Mode Data bits */
+#define PMC_USB_ALTMODE_HPD_HIGH       BIT(14)
 #define PMC_USB_ALTMODE_TBT_TYPE       BIT(17)
 #define PMC_USB_ALTMODE_CABLE_TYPE     BIT(18)
 #define PMC_USB_ALTMODE_ACTIVE_LINK    BIT(20)
@@ -74,8 +75,8 @@ enum {
 #define PMC_USB_ALTMODE_TBT_GEN(_g_)   (((_g_) & GENMASK(1, 0)) << 28)
 
 /* Display HPD Request bits */
+#define PMC_USB_DP_HPD_LVL             BIT(4)
 #define PMC_USB_DP_HPD_IRQ             BIT(5)
-#define PMC_USB_DP_HPD_LVL             BIT(6)
 
 struct pmc_usb;
 
@@ -157,6 +158,9 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state)
        req.mode_data |= (state->mode - TYPEC_STATE_MODAL) <<
                         PMC_USB_ALTMODE_DP_MODE_SHIFT;
 
+       if (data->status & DP_STATUS_HPD_STATE)
+               req.mode_data |= PMC_USB_ALTMODE_HPD_HIGH;
+
        return pmc_usb_command(port, (void *)&req, sizeof(req));
 }
 
@@ -298,11 +302,11 @@ static int pmc_usb_register_port(struct pmc_usb *pmc, int index,
        struct typec_mux_desc mux_desc = { };
        int ret;
 
-       ret = fwnode_property_read_u8(fwnode, "usb2-port", &port->usb2_port);
+       ret = fwnode_property_read_u8(fwnode, "usb2-port-number", &port->usb2_port);
        if (ret)
                return ret;
 
-       ret = fwnode_property_read_u8(fwnode, "usb3-port", &port->usb3_port);
+       ret = fwnode_property_read_u8(fwnode, "usb3-port-number", &port->usb3_port);
        if (ret)
                return ret;
 
index 7957d2d41fc4b4cbc71c81441d612b3b7a47cbdc..01c456f7c1f7bda2df1c592c6056b54585b40ba6 100644 (file)
@@ -89,15 +89,14 @@ static struct vdpasim *dev_to_sim(struct device *dev)
 static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
 {
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
-       int ret;
 
-       ret = vringh_init_iotlb(&vq->vring, vdpasim_features,
-                               VDPASIM_QUEUE_MAX, false,
-                               (struct vring_desc *)(uintptr_t)vq->desc_addr,
-                               (struct vring_avail *)
-                               (uintptr_t)vq->driver_addr,
-                               (struct vring_used *)
-                               (uintptr_t)vq->device_addr);
+       vringh_init_iotlb(&vq->vring, vdpasim_features,
+                         VDPASIM_QUEUE_MAX, false,
+                         (struct vring_desc *)(uintptr_t)vq->desc_addr,
+                         (struct vring_avail *)
+                         (uintptr_t)vq->driver_addr,
+                         (struct vring_used *)
+                         (uintptr_t)vq->device_addr);
 }
 
 static void vdpasim_vq_reset(struct vdpasim_virtqueue *vq)
index 85b32c3252829de7fb42b5fcb3c6ebde6e989555..cc1d64765ce791ee48e14960620bd039028fffc2 100644 (file)
@@ -342,8 +342,8 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
        vma = find_vma_intersection(mm, vaddr, vaddr + 1);
 
        if (vma && vma->vm_flags & VM_PFNMAP) {
-               *pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
-               if (is_invalid_reserved_pfn(*pfn))
+               if (!follow_pfn(vma, vaddr, pfn) &&
+                   is_invalid_reserved_pfn(*pfn))
                        ret = 0;
        }
 done:
@@ -555,7 +555,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
                        continue;
                }
 
-               remote_vaddr = dma->vaddr + iova - dma->iova;
+               remote_vaddr = dma->vaddr + (iova - dma->iova);
                ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i],
                                             do_accounting);
                if (ret)
@@ -2345,10 +2345,10 @@ static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
        vaddr = dma->vaddr + offset;
 
        if (write)
-               *copied = __copy_to_user((void __user *)vaddr, data,
+               *copied = copy_to_user((void __user *)vaddr, data,
                                         count) ? 0 : count;
        else
-               *copied = __copy_from_user(data, (void __user *)vaddr,
+               *copied = copy_from_user(data, (void __user *)vaddr,
                                           count) ? 0 : count;
        if (kthread)
                unuse_mm(mm);
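The __-prefixed copy helpers skip the access_ok() range check and rely on the caller having validated the user pointer; since vaddr here derives from tracked DMA-mapping state rather than a directly checked user request, the checked variants are the conservative choice. The distinction:

	/* copy_to_user():   validates the user range, then copies.
	 * __copy_to_user(): copies only; the caller must already have checked. */
	if (copy_to_user(uptr, kbuf, count))
		return -EFAULT;
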
index d450e16c5c254cf470d8f4fae55dd1936e7ecb8d..21a59b598ed87312685f60185a72374ee2f3fc9a 100644 (file)
@@ -730,7 +730,7 @@ static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
        if (!map)
                return NULL;
 
-       return (void *)(uintptr_t)(map->addr + addr - map->start);
+       return (void __user *)(uintptr_t)(map->addr + addr - map->start);
 }
 
 /* Can we switch to this memory table? */
@@ -869,7 +869,7 @@ static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
  * not happen in this case.
  */
 static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
-                                           void *addr, unsigned int size,
+                                           void __user *addr, unsigned int size,
                                            int type)
 {
        void __user *uaddr = vhost_vq_meta_fetch(vq,
index e36aaf9ba7bd9d29008fb23b4bf87b44a1b4e1bd..fb4e944c4d0d77d18c7aa6ebccecd66e33d0e289 100644 (file)
@@ -181,14 +181,14 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
                        break;
                }
 
-               vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
-               added = true;
-
-               /* Deliver to monitoring devices all correctly transmitted
-                * packets.
+               /* Deliver to monitoring devices all packets that we
+                * will transmit.
                 */
                virtio_transport_deliver_tap_pkt(pkt);
 
+               vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
+               added = true;
+
                pkt->off += payload_len;
                total_len += payload_len;
 
@@ -196,6 +196,12 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
                 * to send it with the next available buffer.
                 */
                if (pkt->off < pkt->len) {
+                       /* We are queueing the same virtio_vsock_pkt to handle
+                        * the remaining bytes, and we want to deliver it
+                        * to monitoring devices in the next iteration.
+                        */
+                       pkt->tap_delivered = false;
+
                        spin_lock_bh(&vsock->send_pkt_list_lock);
                        list_add(&pkt->list, &vsock->send_pkt_list);
                        spin_unlock_bh(&vsock->send_pkt_list_lock);
@@ -543,6 +549,11 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
                mutex_unlock(&vq->mutex);
        }
 
+       /* Some packets may have been queued before the device was started,
+        * let's kick the send worker to send them.
+        */
+       vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
+
        mutex_unlock(&vsock->dev.mutex);
        return 0;
 
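The reordering in the first hunk matters because vhost_add_used() publishes the buffer back to the guest, after which the packet may be recycled before a monitoring device ever sees it; tap delivery therefore has to come first. The required ordering, restated:

	/* 1. mirror to tap/monitoring listeners while pkt is still ours */
	virtio_transport_deliver_tap_pkt(pkt);
	/* 2. only then publish the used descriptor to the guest */
	vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
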
index a587767b6ae15a549a57052f389e57381c9599f4..37d1bba57b00c5cd48d80ab85ef9e1788912a889 100644 (file)
@@ -32,9 +32,8 @@ void afs_fileserver_probe_result(struct afs_call *call)
        struct afs_server *server = call->server;
        unsigned int server_index = call->server_index;
        unsigned int index = call->addr_ix;
-       unsigned int rtt = UINT_MAX;
+       unsigned int rtt_us = 0;
        bool have_result = false;
-       u64 _rtt;
        int ret = call->error;
 
        _enter("%pU,%u", &server->uuid, index);
@@ -93,15 +92,9 @@ responded:
                }
        }
 
-       /* Get the RTT and scale it to fit into a 32-bit value that represents
-        * over a minute of time so that we can access it with one instruction
-        * on a 32-bit system.
-        */
-       _rtt = rxrpc_kernel_get_rtt(call->net->socket, call->rxcall);
-       _rtt /= 64;
-       rtt = (_rtt > UINT_MAX) ? UINT_MAX : _rtt;
-       if (rtt < server->probe.rtt) {
-               server->probe.rtt = rtt;
+       rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall);
+       if (rtt_us < server->probe.rtt) {
+               server->probe.rtt = rtt_us;
                alist->preferred = index;
                have_result = true;
        }
@@ -113,8 +106,7 @@ out:
        spin_unlock(&server->probe_lock);
 
        _debug("probe [%u][%u] %pISpc rtt=%u ret=%d",
-              server_index, index, &alist->addrs[index].transport,
-              (unsigned int)rtt, ret);
+              server_index, index, &alist->addrs[index].transport, rtt_us, ret);
 
        have_result |= afs_fs_probe_done(server);
        if (have_result)
index 68fc46634346a13270497bbcf87cfb0d99340d02..d2b3798c1932f54dfbbc5fcd2fae0e50ac3d83d1 100644 (file)
@@ -385,8 +385,6 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
                ASSERTCMP(req->offset, <=, PAGE_SIZE);
                if (req->offset == PAGE_SIZE) {
                        req->offset = 0;
-                       if (req->page_done)
-                               req->page_done(req);
                        req->index++;
                        if (req->remain > 0)
                                goto begin_page;
@@ -440,11 +438,13 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
                if (req->offset < PAGE_SIZE)
                        zero_user_segment(req->pages[req->index],
                                          req->offset, PAGE_SIZE);
-               if (req->page_done)
-                       req->page_done(req);
                req->offset = 0;
        }
 
+       if (req->page_done)
+               for (req->index = 0; req->index < req->nr_pages; req->index++)
+                       req->page_done(req);
+
        _leave(" = 0 [done]");
        return 0;
 }
index 858498cc1b05224bc52c5879ae559ed2f4fc4072..e3aa013c21779241e399014b504a3978e5fe7c35 100644 (file)
@@ -31,10 +31,9 @@ void afs_vlserver_probe_result(struct afs_call *call)
        struct afs_addr_list *alist = call->alist;
        struct afs_vlserver *server = call->vlserver;
        unsigned int server_index = call->server_index;
+       unsigned int rtt_us = 0;
        unsigned int index = call->addr_ix;
-       unsigned int rtt = UINT_MAX;
        bool have_result = false;
-       u64 _rtt;
        int ret = call->error;
 
        _enter("%s,%u,%u,%d,%d", server->name, server_index, index, ret, call->abort_code);
@@ -93,15 +92,9 @@ responded:
                }
        }
 
-       /* Get the RTT and scale it to fit into a 32-bit value that represents
-        * over a minute of time so that we can access it with one instruction
-        * on a 32-bit system.
-        */
-       _rtt = rxrpc_kernel_get_rtt(call->net->socket, call->rxcall);
-       _rtt /= 64;
-       rtt = (_rtt > UINT_MAX) ? UINT_MAX : _rtt;
-       if (rtt < server->probe.rtt) {
-               server->probe.rtt = rtt;
+       rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall);
+       if (rtt_us < server->probe.rtt) {
+               server->probe.rtt = rtt_us;
                alist->preferred = index;
                have_result = true;
        }
@@ -113,8 +106,7 @@ out:
        spin_unlock(&server->probe_lock);
 
        _debug("probe [%u][%u] %pISpc rtt=%u ret=%d",
-              server_index, index, &alist->addrs[index].transport,
-              (unsigned int)rtt, ret);
+              server_index, index, &alist->addrs[index].transport, rtt_us, ret);
 
        have_result |= afs_vl_probe_done(server);
        if (have_result) {
index b5b45c57e1b1d3f94db891811b2f8e9cb457c837..fe413e7a5cf4297878672a81d0048b3ef6383aef 100644 (file)
@@ -497,8 +497,6 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
                ASSERTCMP(req->offset, <=, PAGE_SIZE);
                if (req->offset == PAGE_SIZE) {
                        req->offset = 0;
-                       if (req->page_done)
-                               req->page_done(req);
                        req->index++;
                        if (req->remain > 0)
                                goto begin_page;
@@ -556,11 +554,13 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
                if (req->offset < PAGE_SIZE)
                        zero_user_segment(req->pages[req->index],
                                          req->offset, PAGE_SIZE);
-               if (req->page_done)
-                       req->page_done(req);
                req->offset = 0;
        }
 
+       if (req->page_done)
+               for (req->index = 0; req->index < req->nr_pages; req->index++)
+                       req->page_done(req);
+
        _leave(" = 0 [done]");
        return 0;
 }
index 13f25e241ac46cbd2f5ffa23de45e60a035a0c1a..25d489bc9453330697b70af11bfa7c077883321d 100644 (file)
@@ -1733,7 +1733,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
                    (!regset->active || regset->active(t->task, regset) > 0)) {
                        int ret;
                        size_t size = regset_size(t->task, regset);
-                       void *data = kmalloc(size, GFP_KERNEL);
+                       void *data = kzalloc(size, GFP_KERNEL);
                        if (unlikely(!data))
                                return 0;
                        ret = regset->get(t->task, regset,
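Switching to kzalloc() closes an infoleak: regset->get() may fill only part of the buffer, and the untouched tail of a kmalloc() allocation would carry stale kernel heap contents into the written core file. The rule in miniature:

	/* Zero any buffer whose full length is later copied out wholesale. */
	void *data = kzalloc(size, GFP_KERNEL);

	if (unlikely(!data))
		return -ENOMEM;
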
index 9c380e7edf629155af9a351873f4b7c615888bfb..0cc02577577bc98ddc3a68f53be308f4a6fcf62c 100644 (file)
@@ -391,7 +391,7 @@ static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
        struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct prelim_ref *ref = NULL;
-       struct prelim_ref target = {0};
+       struct prelim_ref target = {};
        int result;
 
        target.parent = bytenr;
index 47f66c6a7d7fca39d6d55b7338f2685ac6b0155e..696f47103cfc93b9234d1bb9e27f06be49a1495c 100644 (file)
@@ -916,7 +916,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
-               goto out;
+               goto out_put_group;
        }
 
        /*
@@ -954,7 +954,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                ret = btrfs_orphan_add(trans, BTRFS_I(inode));
                if (ret) {
                        btrfs_add_delayed_iput(inode);
-                       goto out;
+                       goto out_put_group;
                }
                clear_nlink(inode);
                /* One for the block groups ref */
@@ -977,13 +977,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 
        ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
        if (ret < 0)
-               goto out;
+               goto out_put_group;
        if (ret > 0)
                btrfs_release_path(path);
        if (ret == 0) {
                ret = btrfs_del_item(trans, tree_root, path);
                if (ret)
-                       goto out;
+                       goto out_put_group;
                btrfs_release_path(path);
        }
 
@@ -1102,9 +1102,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 
        ret = remove_block_group_free_space(trans, block_group);
        if (ret)
-               goto out;
+               goto out_put_group;
 
-       btrfs_put_block_group(block_group);
+       /* Once for the block groups rbtree */
        btrfs_put_block_group(block_group);
 
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
@@ -1127,6 +1127,10 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                /* once for the tree */
                free_extent_map(em);
        }
+
+out_put_group:
+       /* Once for the lookup reference */
+       btrfs_put_block_group(block_group);
 out:
        if (remove_rsv)
                btrfs_delayed_refs_rsv_release(fs_info, 1);
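
The relabeled gotos above route every post-lookup failure through out_put_group, so the reference taken when the block group was looked up is dropped exactly once on both success and error. A small sketch of the cascading-cleanup-label idiom the fix relies on (invented names):

    #include <stdio.h>

    struct resource { int refs; };

    static void res_put(struct resource *r)
    {
            if (--r->refs == 0)
                    printf("freed\n");
    }

    /* Every failure after the reference is taken jumps to out_put,
     * which drops it and falls through to the common cleanup. */
    static int do_work(struct resource *r, int fail)
    {
            int ret = 0;

            r->refs++;                      /* lookup reference */
            if (fail) {
                    ret = -1;
                    goto out_put;           /* not a bare "goto out" */
            }
            /* ... main work ... */
    out_put:
            res_put(r);                     /* once for the lookup */
            /* cleanup that must run regardless goes here */
            return ret;
    }

    int main(void)
    {
            struct resource r = { .refs = 1 };

            do_work(&r, 1);
            res_put(&r);                    /* prints "freed" */
            return 0;
    }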
@@ -1288,11 +1292,15 @@ static bool clean_pinned_extents(struct btrfs_trans_handle *trans,
        if (ret)
                goto err;
        mutex_unlock(&fs_info->unused_bg_unpin_mutex);
+       if (prev_trans)
+               btrfs_put_transaction(prev_trans);
 
        return true;
 
 err:
        mutex_unlock(&fs_info->unused_bg_unpin_mutex);
+       if (prev_trans)
+               btrfs_put_transaction(prev_trans);
        btrfs_dec_block_group_ro(bg);
        return false;
 }
index 21a15776dac421273316740a4d1741045a1134e6..353228d62f5a1232bea3363d3f702aa9b4ee842b 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 
 #ifndef BTRFS_DISCARD_H
 #define BTRFS_DISCARD_H
index a6cb5cbbdb9f56a849cacceba0b96b3ee825cb97..d10c7be10f3b80158201c81707c116e9b9ab9e42 100644 (file)
@@ -2036,9 +2036,6 @@ void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
                for (i = 0; i < ret; i++)
                        btrfs_drop_and_free_fs_root(fs_info, gang[i]);
        }
-
-       if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
-               btrfs_free_log_root_tree(NULL, fs_info);
 }
 
 static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
@@ -3888,7 +3885,7 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
        spin_unlock(&fs_info->fs_roots_radix_lock);
 
        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
-               btrfs_free_log(NULL, root);
+               ASSERT(root->log_root == NULL);
                if (root->reloc_root) {
                        btrfs_put_root(root->reloc_root);
                        root->reloc_root = NULL;
@@ -4211,6 +4208,36 @@ static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
        up_write(&fs_info->cleanup_work_sem);
 }
 
+static void btrfs_drop_all_logs(struct btrfs_fs_info *fs_info)
+{
+       struct btrfs_root *gang[8];
+       u64 root_objectid = 0;
+       int ret;
+
+       spin_lock(&fs_info->fs_roots_radix_lock);
+       while ((ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
+                                            (void **)gang, root_objectid,
+                                            ARRAY_SIZE(gang))) != 0) {
+               int i;
+
+               for (i = 0; i < ret; i++)
+                       gang[i] = btrfs_grab_root(gang[i]);
+               spin_unlock(&fs_info->fs_roots_radix_lock);
+
+               for (i = 0; i < ret; i++) {
+                       if (!gang[i])
+                               continue;
+                       root_objectid = gang[i]->root_key.objectid;
+                       btrfs_free_log(NULL, gang[i]);
+                       btrfs_put_root(gang[i]);
+               }
+               root_objectid++;
+               spin_lock(&fs_info->fs_roots_radix_lock);
+       }
+       spin_unlock(&fs_info->fs_roots_radix_lock);
+       btrfs_free_log_root_tree(NULL, fs_info);
+}
+
 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
 {
        struct btrfs_ordered_extent *ordered;
@@ -4603,6 +4630,7 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
        btrfs_destroy_delayed_inodes(fs_info);
        btrfs_assert_delayed_root_empty(fs_info);
        btrfs_destroy_all_delalloc_inodes(fs_info);
+       btrfs_drop_all_logs(fs_info);
        mutex_unlock(&fs_info->transaction_kthread_mutex);
 
        return 0;
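
btrfs_drop_all_logs() above follows the standard radix-tree gang-lookup walk: fetch a batch and pin it under the lock, drop the lock for the real work, then resume from just past the last processed index. A userspace sketch of that iteration shape over a plain array (reference counting elided; not the kernel API):

    #include <pthread.h>
    #include <stdio.h>

    #define NENTRIES 20
    #define BATCH 8

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int done[NENTRIES];

    /* Stand-in for radix_tree_gang_lookup(): report up to max live
     * entries at or after start. */
    static int gang_lookup(int start, int *out, int max)
    {
            int n = 0;

            for (int i = start; i < NENTRIES && n < max; i++)
                    out[n++] = i;
            return n;
    }

    int main(void)
    {
            int gang[BATCH], next = 0, ret;

            pthread_mutex_lock(&lock);
            while ((ret = gang_lookup(next, gang, BATCH)) != 0) {
                    pthread_mutex_unlock(&lock);
                    for (int i = 0; i < ret; i++) {
                            next = gang[i];     /* remember position */
                            done[gang[i]] = 1;  /* the "free log" work */
                    }
                    next++;                     /* resume past the last */
                    pthread_mutex_lock(&lock);
            }
            pthread_mutex_unlock(&lock);
            for (int i = 0; i < NENTRIES; i++)
                    printf("%d", done[i]);      /* all 1s */
            printf("\n");
            return 0;
    }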
index d35936c934ab0dacc8623b800c3843fc09a7cca3..03bc7134e8cbf4282d3b872b5544b4b7750ea6fc 100644 (file)
@@ -4559,6 +4559,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
                if (IS_ERR(fs_root)) {
                        err = PTR_ERR(fs_root);
                        list_add_tail(&reloc_root->root_list, &reloc_roots);
+                       btrfs_end_transaction(trans);
                        goto out_unset;
                }
 
index 8cede6eb9843c73f432325dbfd98af132c2563ba..2d5498136e5ef82798ece5a9230e3b72204b3513 100644 (file)
@@ -662,10 +662,19 @@ again:
        }
 
 got_it:
-       btrfs_record_root_in_trans(h, root);
-
        if (!current->journal_info)
                current->journal_info = h;
+
+       /*
+        * btrfs_record_root_in_trans() needs to alloc new extents, and may
+        * call btrfs_join_transaction() while we're also starting a
+        * transaction.
+        *
+        * Thus it needs to be called after current->journal_info is
+        * initialized, or we can deadlock.
+        */
+       btrfs_record_root_in_trans(h, root);
+
        return h;
 
 join_fail:
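
The reordering above exists because btrfs_record_root_in_trans() can re-enter btrfs_join_transaction(), which keys off current->journal_info; publishing the handle first lets the nested call join instead of deadlocking. A toy sketch of publish-before-reentry (invented names, single-threaded):

    #include <stdio.h>

    static _Thread_local void *journal_info;   /* per-task marker */

    /* Stand-in for btrfs_record_root_in_trans(): may try to join the
     * transaction the caller is still setting up. */
    static void record_root(void)
    {
            if (journal_info)
                    printf("nested call joined the transaction\n");
            else
                    printf("no marker: nested call would deadlock\n");
    }

    static void start_transaction(void)
    {
            static int handle;

            journal_info = &handle;    /* publish the marker first ... */
            record_root();             /* ... then run re-entrant work */
            journal_info = NULL;
    }

    int main(void)
    {
            start_transaction();
            return 0;
    }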
index ec36a7c6ba3de86b65b54739d71ecc47a5e42c4b..02ebdd9edc193512ba58b29002d0d53e86fa4510 100644 (file)
@@ -4226,6 +4226,9 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
        const u64 ino = btrfs_ino(inode);
        struct btrfs_path *dst_path = NULL;
        bool dropped_extents = false;
+       u64 truncate_offset = i_size;
+       struct extent_buffer *leaf;
+       int slot;
        int ins_nr = 0;
        int start_slot;
        int ret;
@@ -4240,9 +4243,43 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
        if (ret < 0)
                goto out;
 
+       /*
+        * We must check if there is a prealloc extent that starts before the
+        * i_size and crosses the i_size boundary. This is to ensure later we
+        * truncate down to the end of that extent and not to the i_size, as
+        * otherwise we end up losing part of the prealloc extent after a log
+        * replay and with an implicit hole if there is another prealloc extent
+        * that starts at an offset beyond i_size.
+        */
+       ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
+       if (ret < 0)
+               goto out;
+
+       if (ret == 0) {
+               struct btrfs_file_extent_item *ei;
+
+               leaf = path->nodes[0];
+               slot = path->slots[0];
+               ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
+
+               if (btrfs_file_extent_type(leaf, ei) ==
+                   BTRFS_FILE_EXTENT_PREALLOC) {
+                       u64 extent_end;
+
+                       btrfs_item_key_to_cpu(leaf, &key, slot);
+                       extent_end = key.offset +
+                               btrfs_file_extent_num_bytes(leaf, ei);
+
+                       if (extent_end > i_size)
+                               truncate_offset = extent_end;
+               }
+       } else {
+               ret = 0;
+       }
+
        while (true) {
-               struct extent_buffer *leaf = path->nodes[0];
-               int slot = path->slots[0];
+               leaf = path->nodes[0];
+               slot = path->slots[0];
 
                if (slot >= btrfs_header_nritems(leaf)) {
                        if (ins_nr > 0) {
@@ -4280,7 +4317,7 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
                                ret = btrfs_truncate_inode_items(trans,
                                                         root->log_root,
                                                         &inode->vfs_inode,
-                                                        i_size,
+                                                        truncate_offset,
                                                         BTRFS_EXTENT_DATA_KEY);
                        } while (ret == -EAGAIN);
                        if (ret)
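
A worked example of the truncate_offset logic added above, with made-up offsets: when a prealloc extent starts below i_size but ends beyond it, the truncation point moves to the extent's end so its tail survives log replay.

    #include <stdio.h>

    int main(void)
    {
            unsigned long long i_size = 8192;
            unsigned long long key_offset = 4096;    /* extent start  */
            unsigned long long num_bytes = 12288;    /* extent length */
            unsigned long long truncate_offset = i_size;
            unsigned long long extent_end = key_offset + num_bytes;

            if (extent_end > i_size)
                    truncate_offset = extent_end;    /* 16384, not 8192 */

            printf("truncate down to %llu\n", truncate_offset);
            return 0;
    }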
index 1dc97f2d62013059ac66399907f38eaf9409ec30..e7726f5f1241c23a92c1e486045478c274ba9c6c 100644 (file)
@@ -60,9 +60,9 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
        object = container_of(op->op.object, struct cachefiles_object, fscache);
        spin_lock(&object->work_lock);
        list_add_tail(&monitor->op_link, &op->to_do);
+       fscache_enqueue_retrieval(op);
        spin_unlock(&object->work_lock);
 
-       fscache_enqueue_retrieval(op);
        fscache_put_retrieval(op);
        return 0;
 }
@@ -398,7 +398,7 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
        struct inode *inode;
        sector_t block;
        unsigned shift;
-       int ret;
+       int ret, ret2;
 
        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);
@@ -430,8 +430,8 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
        block = page->index;
        block <<= shift;
 
-       ret = bmap(inode, &block);
-       ASSERT(ret < 0);
+       ret2 = bmap(inode, &block);
+       ASSERT(ret2 == 0);
 
        _debug("%llx -> %llx",
               (unsigned long long) (page->index << shift),
@@ -739,8 +739,8 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
                block = page->index;
                block <<= shift;
 
-               ret = bmap(inode, &block);
-               ASSERT(!ret);
+               ret2 = bmap(inode, &block);
+               ASSERT(ret2 == 0);
 
                _debug("%llx -> %llx",
                       (unsigned long long) (page->index << shift),
index 185db76300b31c2d71a68fa6065752620d32689d..f1acde6fb9a6181985e878e981a977719c8c7f68 100644 (file)
@@ -2749,7 +2749,7 @@ int ceph_try_get_caps(struct inode *inode, int need, int want,
 
        ret = try_get_cap_refs(inode, need, want, 0, flags, got);
        /* three special error codes */
-       if (ret == -EAGAIN || ret == -EFBIG || ret == -EAGAIN)
+       if (ret == -EAGAIN || ret == -EFBIG || ret == -ESTALE)
                ret = 0;
        return ret;
 }
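
The one-liner above fixes a classic copy-paste bug: the third comparison repeated -EAGAIN, so -ESTALE was never treated as one of the three special codes and leaked out to callers; static analyzers usually flag the duplicated subexpression. The corrected check in isolation:

    #include <errno.h>
    #include <stdio.h>

    static int classify(int ret)
    {
            /* the three special error codes collapse to 0 */
            if (ret == -EAGAIN || ret == -EFBIG || ret == -ESTALE)
                    return 0;
            return ret;
    }

    int main(void)
    {
            printf("%d\n", classify(-ESTALE));   /* 0 after the fix */
            printf("%d\n", classify(-EIO));      /* passed through  */
            return 0;
    }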
@@ -3746,6 +3746,7 @@ retry:
                WARN_ON(1);
                tsession = NULL;
                target = -1;
+               mutex_lock(&session->s_mutex);
        }
        goto retry;
 
@@ -3990,7 +3991,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
                        __ceph_queue_cap_release(session, cap);
                        spin_unlock(&session->s_cap_lock);
                }
-               goto done;
+               goto flush_cap_releases;
        }
 
        /* these will work even if we don't have a cap yet */
index 481ac97b4d25bd89d3d0390ad032c0a47220c2ee..dcaed75de9e6a095fb72b53c04c9567feb3c9545 100644 (file)
@@ -271,7 +271,7 @@ void ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
                                    &congestion_kb_fops);
 
        snprintf(name, sizeof(name), "../../bdi/%s",
-                dev_name(fsc->sb->s_bdi->dev));
+                bdi_dev_name(fsc->sb->s_bdi));
        fsc->debugfs_bdi =
                debugfs_create_symlink("bdi",
                                       fsc->client->debugfs_dir,
index 486f91f9685b1748622a0c9ced523cc97344d163..7c63abf5bea91f5bded40475e2460f240f08ca2c 100644 (file)
@@ -3251,8 +3251,7 @@ static void handle_session(struct ceph_mds_session *session,
        void *end = p + msg->front.iov_len;
        struct ceph_mds_session_head *h;
        u32 op;
-       u64 seq;
-       unsigned long features = 0;
+       u64 seq, features = 0;
        int wake = 0;
        bool blacklisted = false;
 
@@ -3271,9 +3270,8 @@ static void handle_session(struct ceph_mds_session *session,
                        goto bad;
                /* version >= 3, feature bits */
                ceph_decode_32_safe(&p, end, len, bad);
-               ceph_decode_need(&p, end, len, bad);
-               memcpy(&features, p, min_t(size_t, len, sizeof(features)));
-               p += len;
+               ceph_decode_64_safe(&p, end, features, bad);
+               p += len - sizeof(features);
        }
 
        mutex_lock(&mdsc->mutex);
index de56dee60540bae9aad7bf45c995a529d43dfd0b..19507e2fdb57fe70526fb01485b81a4413fff47a 100644 (file)
@@ -159,8 +159,8 @@ static struct inode *lookup_quotarealm_inode(struct ceph_mds_client *mdsc,
        }
 
        if (IS_ERR(in)) {
-               pr_warn("Can't lookup inode %llx (err: %ld)\n",
-                       realm->ino, PTR_ERR(in));
+               dout("Can't lookup inode %llx (err: %ld)\n",
+                    realm->ino, PTR_ERR(in));
                qri->timeout = jiffies + msecs_to_jiffies(60 * 1000); /* XXX */
        } else {
                qri->timeout = 0;
index 182b864b3075bdd1f0b079f9047c8a0f4a5d796d..5014a82391ff92987517a43cc7188cba7670bd32 100644 (file)
@@ -2152,8 +2152,8 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
                        }
                }
 
+               kref_put(&wdata2->refcount, cifs_writedata_release);
                if (rc) {
-                       kref_put(&wdata2->refcount, cifs_writedata_release);
                        if (is_retryable_error(rc))
                                continue;
                        i += nr_pages;
index 0b1528edebcf710bcce1e4fda34384fb95046692..75ddce8ef456dbed90f1d069e99e13aad995df97 100644 (file)
@@ -4060,7 +4060,7 @@ cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
                         * than it negotiated since it will refuse the read
                         * then.
                         */
-                       if ((tcon->ses) && !(tcon->ses->capabilities &
+                       if (!(tcon->ses->capabilities &
                                tcon->ses->server->vals->cap_large_files)) {
                                current_read_size = min_t(uint,
                                        current_read_size, CIFSMaxBufSize);
index 390d2b15ef6ef9d7014e90069cdad3d2880806d3..5d2965a2373054a4c288a9b6b688a05865bd3e4b 100644 (file)
@@ -730,7 +730,7 @@ static __u64 simple_hashstr(const char *str)
  * cifs_backup_query_path_info - SMB1 fallback code to get ino
  *
  * Fallback code to get file metadata when we don't have access to
- * @full_path (EACCESS) and have backup creds.
+ * @full_path (EACCES) and have backup creds.
  *
  * @data will be set to search info result buffer
  * @resp_buf will be set to cifs resp buf and needs to be freed with
index cf7b7e1d5bd7254fe174ebf22d7a02aeb67bcf35..cb733652ecca6cd0679b57acf3db93efdd4751d4 100644 (file)
@@ -1519,6 +1519,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
                spin_lock(&configfs_dirent_lock);
                configfs_detach_rollback(dentry);
                spin_unlock(&configfs_dirent_lock);
+               config_item_put(parent_item);
                return -EINTR;
        }
        frag->frag_dead = true;
index 408418e6aa13120224517139c17b5acc3e282842..478a0d810136adff4dd62ca1013baff51227df5c 100644 (file)
@@ -788,6 +788,14 @@ void do_coredump(const kernel_siginfo_t *siginfo)
        if (displaced)
                put_files_struct(displaced);
        if (!dump_interrupted()) {
+               /*
+                * umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would
+                * have this set to NULL.
+                */
+               if (!cprm.file) {
+                       pr_info("Core dump to |%s disabled\n", cn.corename);
+                       goto close_fail;
+               }
                file_start_write(cprm.file);
                core_dumped = binfmt->core_dump(&cprm);
                file_end_write(cprm.file);
index 8c596641a72b09733920dd12643194e2bd4b3ab1..12eebcdea9c8a2a39c9dbf079dd41d8e67648fec 100644 (file)
@@ -1171,6 +1171,10 @@ static inline bool chain_epi_lockless(struct epitem *epi)
 {
        struct eventpoll *ep = epi->ep;
 
+       /* Fast preliminary check */
+       if (epi->next != EP_UNACTIVE_PTR)
+               return false;
+
        /* Check that the same epi has not been just chained from another CPU */
        if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR)
                return false;
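
The added early return is the usual contention-avoidance trick: a plain racy load is cheap and keeps the cacheline shared, while an unconditional cmpxchg takes it exclusive even when it is bound to fail; the cmpxchg still arbitrates the race. A C11-atomics sketch of the same shape (EP_UNACTIVE_PTR modeled as a sentinel):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define UNACTIVE ((void *)-1)   /* stand-in for EP_UNACTIVE_PTR */

    static bool try_chain(_Atomic(void *) *next)
    {
            /* fast preliminary check: racy but conservative */
            if (atomic_load_explicit(next, memory_order_relaxed) != UNACTIVE)
                    return false;

            void *expected = UNACTIVE;
            /* only the winner of this cmpxchg chains the entry */
            return atomic_compare_exchange_strong(next, &expected, NULL);
    }

    int main(void)
    {
            _Atomic(void *) next = UNACTIVE;

            printf("%d\n", try_chain(&next));   /* 1: chained       */
            printf("%d\n", try_chain(&next));   /* 0: taken already */
            return 0;
    }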
@@ -1237,16 +1241,12 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
         * chained in ep->ovflist and requeued later on.
         */
        if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) {
-               if (epi->next == EP_UNACTIVE_PTR &&
-                   chain_epi_lockless(epi))
+               if (chain_epi_lockless(epi))
+                       ep_pm_stay_awake_rcu(epi);
+       } else if (!ep_is_linked(epi)) {
+               /* In the usual case, add event to ready list. */
+               if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist))
                        ep_pm_stay_awake_rcu(epi);
-               goto out_unlock;
-       }
-
-       /* If this file is already in the ready list we exit soon */
-       if (!ep_is_linked(epi) &&
-           list_add_tail_lockless(&epi->rdllink, &ep->rdllist)) {
-               ep_pm_stay_awake_rcu(epi);
        }
 
        /*
@@ -1822,7 +1822,6 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 {
        int res = 0, eavail, timed_out = 0;
        u64 slack = 0;
-       bool waiter = false;
        wait_queue_entry_t wait;
        ktime_t expires, *to = NULL;
 
@@ -1867,55 +1866,75 @@ fetch_events:
         */
        ep_reset_busy_poll_napi_id(ep);
 
-       /*
-        * We don't have any available event to return to the caller.  We need
-        * to sleep here, and we will be woken by ep_poll_callback() when events
-        * become available.
-        */
-       if (!waiter) {
-               waiter = true;
-               init_waitqueue_entry(&wait, current);
+       do {
+               /*
+                * Internally init_wait() uses autoremove_wake_function(),
+                * thus the wait entry is removed from the wait queue on
+                * each wakeup. Why is this important? In case of several
+                * waiters each new wakeup will hit the next waiter, giving
+                * it the chance to harvest new events. Otherwise a wakeup
+                * can be lost. This is also good performance-wise, because
+                * on the normal wakeup path there is no need to call
+                * __remove_wait_queue() explicitly, thus ep->lock is not
+                * taken, which would halt event delivery.
+                */
+               init_wait(&wait);
 
                write_lock_irq(&ep->lock);
-               __add_wait_queue_exclusive(&ep->wq, &wait);
-               write_unlock_irq(&ep->lock);
-       }
-
-       for (;;) {
                /*
-                * We don't want to sleep if the ep_poll_callback() sends us
-                * a wakeup in between. That's why we set the task state
-                * to TASK_INTERRUPTIBLE before doing the checks.
+                * Barrierless variant: waitqueue_active() is called under
+                * the same lock on the wakeup (ep_poll_callback()) side,
+                * so it is safe to avoid an explicit barrier.
                 */
-               set_current_state(TASK_INTERRUPTIBLE);
+               __set_current_state(TASK_INTERRUPTIBLE);
+
                /*
-                * Always short-circuit for fatal signals to allow
-                * threads to make a timely exit without the chance of
-                * finding more events available and fetching
-                * repeatedly.
+                * Do the final check under the lock. ep_scan_ready_list()
+                * plays with two lists (->rdllist and ->ovflist) and there
+                * is always a race when both lists are empty for a short
+                * period of time although events are pending, so the lock
+                * is important.
                 */
-               if (fatal_signal_pending(current)) {
-                       res = -EINTR;
-                       break;
+               eavail = ep_events_available(ep);
+               if (!eavail) {
+                       if (signal_pending(current))
+                               res = -EINTR;
+                       else
+                               __add_wait_queue_exclusive(&ep->wq, &wait);
                }
+               write_unlock_irq(&ep->lock);
 
-               eavail = ep_events_available(ep);
-               if (eavail)
+               if (eavail || res)
                        break;
-               if (signal_pending(current)) {
-                       res = -EINTR;
-                       break;
-               }
 
                if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS)) {
                        timed_out = 1;
                        break;
                }
-       }
+
+               /* We were woken up, thus go and try to harvest some events */
+               eavail = 1;
+
+       } while (0);
 
        __set_current_state(TASK_RUNNING);
 
+       if (!list_empty_careful(&wait.entry)) {
+               write_lock_irq(&ep->lock);
+               __remove_wait_queue(&ep->wq, &wait);
+               write_unlock_irq(&ep->lock);
+       }
+
 send_events:
+       if (fatal_signal_pending(current)) {
+               /*
+                * Always short-circuit for fatal signals to allow
+                * threads to make a timely exit without the chance of
+                * finding more events available and fetching
+                * repeatedly.
+                */
+               res = -EINTR;
+       }
        /*
         * Try to transfer events to user space. In case we get 0 events and
         * there's still timeout left over, we go trying again in search of
@@ -1925,12 +1944,6 @@ send_events:
            !(res = ep_send_events(ep, events, maxevents)) && !timed_out)
                goto fetch_events;
 
-       if (waiter) {
-               write_lock_irq(&ep->lock);
-               __remove_wait_queue(&ep->wq, &wait);
-               write_unlock_irq(&ep->lock);
-       }
-
        return res;
 }
 
index 06b4c550af5d9cc52a7a85ec433c7b7c1c410b54..2c465119affccb8dd5bd8086784e2cc4158a6cb7 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1317,6 +1317,8 @@ int flush_old_exec(struct linux_binprm * bprm)
         */
        set_mm_exe_file(bprm->mm, bprm->file);
 
+       would_dump(bprm, bprm->file);
+
        /*
         * Release all of the old mmap stuff
         */
@@ -1876,8 +1878,6 @@ static int __do_execve_file(int fd, struct filename *filename,
        if (retval < 0)
                goto out;
 
-       would_dump(bprm, bprm->file);
-
        retval = exec_binprm(bprm);
        if (retval < 0)
                goto out;
index 4f76764165cf6f63a99c5e192ab33685e8e987b7..c9db8eb0cfc3ee267d5e3b051974ceae8de861b5 100644 (file)
@@ -348,12 +348,13 @@ out:
 }
 
 const struct file_operations exfat_file_operations = {
-       .llseek      = generic_file_llseek,
-       .read_iter   = generic_file_read_iter,
-       .write_iter  = generic_file_write_iter,
-       .mmap        = generic_file_mmap,
-       .fsync       = generic_file_fsync,
-       .splice_read = generic_file_splice_read,
+       .llseek         = generic_file_llseek,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = generic_file_write_iter,
+       .mmap           = generic_file_mmap,
+       .fsync          = generic_file_fsync,
+       .splice_read    = generic_file_splice_read,
+       .splice_write   = iter_file_splice_write,
 };
 
 const struct inode_operations exfat_file_inode_operations = {
index b72d782568b811ece222db0dd3575fd8bf2c958a..a2659a8a68a14273eeff16dd467d771ebe038b43 100644 (file)
@@ -692,6 +692,7 @@ static int exfat_find(struct inode *dir, struct qstr *qname,
                        exfat_fs_error(sb,
                                "non-zero size file starts with zero cluster (size : %llu, p_dir : %u, entry : 0x%08x)",
                                i_size_read(dir), ei->dir.dir, ei->entry);
+                       kfree(es);
                        return -EIO;
                }
 
index 0565d5539d57c6678c81997228dc3b8116eb864c..a846ff555656cd876e77ae014b35e192b5ccd183 100644 (file)
@@ -203,6 +203,12 @@ enum {
        Opt_errors,
        Opt_discard,
        Opt_time_offset,
+
+       /* Deprecated options */
+       Opt_utf8,
+       Opt_debug,
+       Opt_namecase,
+       Opt_codepage,
 };
 
 static const struct constant_table exfat_param_enums[] = {
@@ -223,6 +229,14 @@ static const struct fs_parameter_spec exfat_parameters[] = {
        fsparam_enum("errors",                  Opt_errors, exfat_param_enums),
        fsparam_flag("discard",                 Opt_discard),
        fsparam_s32("time_offset",              Opt_time_offset),
+       __fsparam(NULL, "utf8",                 Opt_utf8, fs_param_deprecated,
+                 NULL),
+       __fsparam(NULL, "debug",                Opt_debug, fs_param_deprecated,
+                 NULL),
+       __fsparam(fs_param_is_u32, "namecase",  Opt_namecase,
+                 fs_param_deprecated, NULL),
+       __fsparam(fs_param_is_u32, "codepage",  Opt_codepage,
+                 fs_param_deprecated, NULL),
        {}
 };
 
@@ -278,6 +292,11 @@ static int exfat_parse_param(struct fs_context *fc, struct fs_parameter *param)
                        return -EINVAL;
                opts->time_offset = result.int_32;
                break;
+       case Opt_utf8:
+       case Opt_debug:
+       case Opt_namecase:
+       case Opt_codepage:
+               break;
        default:
                return -EINVAL;
        }
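
The new entries above register the old exfat options as fs_param_deprecated, so mounts that still pass them succeed (the values are simply ignored) instead of failing with -EINVAL. A plain-C sketch of accept-and-ignore parsing (invented helper, not the fs_context API):

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for the deprecated-option handling: the option is still
     * recognized, so old mount command lines keep working, but it does
     * nothing. */
    static int parse_param(const char *name)
    {
            static const char *const deprecated[] = {
                    "utf8", "debug", "namecase", "codepage",
            };

            for (size_t i = 0; i < sizeof(deprecated) / sizeof(*deprecated); i++)
                    if (strcmp(name, deprecated[i]) == 0) {
                            fprintf(stderr, "exfat: '%s' is deprecated\n", name);
                            return 0;        /* accepted, ignored */
                    }
            return -22;                      /* unknown option: -EINVAL */
    }

    int main(void)
    {
            printf("%d\n", parse_param("utf8"));     /* 0   */
            printf("%d\n", parse_param("bogus"));    /* -22 */
            return 0;
    }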
index 91eb4381cae5b79e2c8ee6831706b99261a584f2..ad2dbf6e492451f79da28e88f73a383e241074e3 100644 (file)
@@ -722,7 +722,7 @@ enum {
 #define EXT4_MAX_BLOCK_FILE_PHYS       0xFFFFFFFF
 
 /* Max logical block we can support */
-#define EXT4_MAX_LOGICAL_BLOCK         0xFFFFFFFF
+#define EXT4_MAX_LOGICAL_BLOCK         0xFFFFFFFE
 
 /*
  * Structure of an inode on the disk
index f2b577b315a09371210b180934f3c0019134756e..2b4b94542e34d35ab206da7429b7b7ff918399ed 100644 (file)
@@ -4832,6 +4832,28 @@ static const struct iomap_ops ext4_iomap_xattr_ops = {
        .iomap_begin            = ext4_iomap_xattr_begin,
 };
 
+static int ext4_fiemap_check_ranges(struct inode *inode, u64 start, u64 *len)
+{
+       u64 maxbytes;
+
+       if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+               maxbytes = inode->i_sb->s_maxbytes;
+       else
+               maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
+
+       if (*len == 0)
+               return -EINVAL;
+       if (start > maxbytes)
+               return -EFBIG;
+
+       /*
+        * Shrink request scope to what the fs can actually handle.
+        */
+       if (*len > maxbytes || (maxbytes - *len) < start)
+               *len = maxbytes - start;
+       return 0;
+}
+
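
Note the clamp above never computes start + len, which could wrap in u64; it compares len against maxbytes and maxbytes - len against start instead. A tiny numeric example of the overflow-safe form:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t maxbytes = 1ULL << 32;
            uint64_t start = maxbytes - 10;
            uint64_t len = UINT64_MAX;          /* start + len would wrap */

            if (len > maxbytes || (maxbytes - len) < start)
                    len = maxbytes - start;     /* clamp to what fs handles */

            printf("len clamped to %" PRIu64 "\n", len);   /* 10 */
            return 0;
    }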
 static int _ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                        __u64 start, __u64 len, bool from_es_cache)
 {
@@ -4852,6 +4874,15 @@ static int _ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
        if (fiemap_check_flags(fieinfo, ext4_fiemap_flags))
                return -EBADR;
 
+       /*
+        * For bitmap files the maximum size limit could be smaller than
+        * s_maxbytes, so check len here manually instead of just relying on the
+        * generic check.
+        */
+       error = ext4_fiemap_check_ranges(inode, start, &len);
+       if (error)
+               return error;
+
        if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
                fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR;
                error = iomap_fiemap(inode, fieinfo, start, len,
index bfc1281fc4cbcbecca95cc2582690a02eb22e70f..0746532ba463d04f815e07e41775832bdc90dea9 100644 (file)
@@ -733,29 +733,6 @@ static void ext4_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
                fa->fsx_projid = from_kprojid(&init_user_ns, ei->i_projid);
 }
 
-/* copied from fs/ioctl.c */
-static int fiemap_check_ranges(struct super_block *sb,
-                              u64 start, u64 len, u64 *new_len)
-{
-       u64 maxbytes = (u64) sb->s_maxbytes;
-
-       *new_len = len;
-
-       if (len == 0)
-               return -EINVAL;
-
-       if (start > maxbytes)
-               return -EFBIG;
-
-       /*
-        * Shrink request scope to what the fs can actually handle.
-        */
-       if (len > maxbytes || (maxbytes - len) < start)
-               *new_len = maxbytes - start;
-
-       return 0;
-}
-
 /* So that the fiemap access checks can't overflow on 32 bit machines. */
 #define FIEMAP_MAX_EXTENTS     (UINT_MAX / sizeof(struct fiemap_extent))
 
@@ -765,8 +742,6 @@ static int ext4_ioctl_get_es_cache(struct file *filp, unsigned long arg)
        struct fiemap __user *ufiemap = (struct fiemap __user *) arg;
        struct fiemap_extent_info fieinfo = { 0, };
        struct inode *inode = file_inode(filp);
-       struct super_block *sb = inode->i_sb;
-       u64 len;
        int error;
 
        if (copy_from_user(&fiemap, ufiemap, sizeof(fiemap)))
@@ -775,11 +750,6 @@ static int ext4_ioctl_get_es_cache(struct file *filp, unsigned long arg)
        if (fiemap.fm_extent_count > FIEMAP_MAX_EXTENTS)
                return -EINVAL;
 
-       error = fiemap_check_ranges(sb, fiemap.fm_start, fiemap.fm_length,
-                                   &len);
-       if (error)
-               return error;
-
        fieinfo.fi_flags = fiemap.fm_flags;
        fieinfo.fi_extents_max = fiemap.fm_extent_count;
        fieinfo.fi_extents_start = ufiemap->fm_extents;
@@ -792,7 +762,8 @@ static int ext4_ioctl_get_es_cache(struct file *filp, unsigned long arg)
        if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
                filemap_write_and_wait(inode->i_mapping);
 
-       error = ext4_get_es_cache(inode, &fieinfo, fiemap.fm_start, len);
+       error = ext4_get_es_cache(inode, &fieinfo, fiemap.fm_start,
+                       fiemap.fm_length);
        fiemap.fm_flags = fieinfo.fi_flags;
        fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
        if (copy_to_user(ufiemap, &fiemap, sizeof(fiemap)))
index c8a4e4c86e55c8cce4f38b95d7c94e6d0e3d0613..abb8b7081d7a44e1f82cd1c862d25dfb63c8bb40 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -70,7 +70,7 @@ static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
  */
 static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
 {
-       unsigned int cpy, set;
+       size_t cpy, set;
 
        BUG_ON(nfdt->max_fds < ofdt->max_fds);
 
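
The unsigned int -> size_t change above matters on 64-bit: with a huge max_fds, max_fds * sizeof(struct file *) no longer fits in unsigned int and the copy length silently truncates, so only part of the fd table would be copied. Illustrative arithmetic (numbers invented):

    #include <stdio.h>

    int main(void)
    {
            unsigned int max_fds = 600u * 1024 * 1024;          /* ~629M fds  */
            size_t bytes = (size_t)max_fds * sizeof(void *);    /* 5033164800 */
            unsigned int truncated = (unsigned int)bytes;       /* wraps      */

            printf("full copy length: %zu bytes\n", bytes);
            printf("as unsigned int:  %u bytes\n", truncated);  /* 738197504 */
            return 0;
    }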
index 936a8ec6b48e464bddba050b8691c95df390e196..6306eaae378b266ff1ae049f7151c5586d0575e7 100644 (file)
@@ -528,10 +528,12 @@ lower_metapath:
 
                /* Advance in metadata tree. */
                (mp->mp_list[hgt])++;
-               if (mp->mp_list[hgt] >= sdp->sd_inptrs) {
-                       if (!hgt)
+               if (hgt) {
+                       if (mp->mp_list[hgt] >= sdp->sd_inptrs)
+                               goto lower_metapath;
+               } else {
+                       if (mp->mp_list[hgt] >= sdp->sd_diptrs)
                                break;
-                       goto lower_metapath;
                }
 
 fill_up_metapath:
@@ -876,10 +878,9 @@ static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
                                        ret = -ENOENT;
                                        goto unlock;
                                } else {
-                                       /* report a hole */
                                        iomap->offset = pos;
                                        iomap->length = length;
-                                       goto do_alloc;
+                                       goto hole_found;
                                }
                        }
                        iomap->length = size;
@@ -933,8 +934,6 @@ unlock:
        return ret;
 
 do_alloc:
-       iomap->addr = IOMAP_NULL_ADDR;
-       iomap->type = IOMAP_HOLE;
        if (flags & IOMAP_REPORT) {
                if (pos >= size)
                        ret = -ENOENT;
@@ -956,6 +955,9 @@ do_alloc:
                if (pos < size && height == ip->i_height)
                        ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
        }
+hole_found:
+       iomap->addr = IOMAP_NULL_ADDR;
+       iomap->type = IOMAP_HOLE;
        goto out;
 }
 
index 29f9b6684b7480083f0057b9074dc02cb7eb4d7b..bf70e3b14938e0698557c44ba414078f8f6b986e 100644 (file)
@@ -613,7 +613,7 @@ __acquires(&gl->gl_lockref.lock)
                                fs_err(sdp, "Error %d syncing glock \n", ret);
                                gfs2_dump_glock(NULL, gl, true);
                        }
-                       return;
+                       goto skip_inval;
                }
        }
        if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) {
@@ -633,6 +633,7 @@ __acquires(&gl->gl_lockref.lock)
                clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
        }
 
+skip_inval:
        gfs2_glock_hold(gl);
        /*
         * Check for an error encountered since we called go_sync and go_inval.
@@ -722,9 +723,6 @@ __acquires(&gl->gl_lockref.lock)
                        goto out_unlock;
                if (nonblock)
                        goto out_sched;
-               smp_mb();
-               if (atomic_read(&gl->gl_revokes) != 0)
-                       goto out_sched;
                set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
                GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
                gl->gl_target = gl->gl_demote_state;
index 70b2d3a1e86683ae91e26109e1f88c8923f87a25..5acd3ce30759b65622f2a9aa5e6f9e8613e7f9dc 100644 (file)
@@ -622,7 +622,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
                                error = finish_no_open(file, NULL);
                }
                gfs2_glock_dq_uninit(ghs);
-               return error;
+               goto fail;
        } else if (error != -ENOENT) {
                goto fail_gunlock;
        }
@@ -764,9 +764,11 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
                error = finish_open(file, dentry, gfs2_open_common);
        }
        gfs2_glock_dq_uninit(ghs);
+       gfs2_qa_put(ip);
        gfs2_glock_dq_uninit(ghs + 1);
        clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
        gfs2_glock_put(io_gl);
+       gfs2_qa_put(dip);
        return error;
 
 fail_gunlock3:
@@ -776,7 +778,6 @@ fail_gunlock2:
        clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
        gfs2_glock_put(io_gl);
 fail_free_inode:
-       gfs2_qa_put(ip);
        if (ip->i_gl) {
                glock_clear_object(ip->i_gl, ip);
                gfs2_glock_put(ip->i_gl);
@@ -1005,7 +1006,7 @@ out_gunlock:
 out_child:
        gfs2_glock_dq(ghs);
 out_parent:
-       gfs2_qa_put(ip);
+       gfs2_qa_put(dip);
        gfs2_holder_uninit(ghs);
        gfs2_holder_uninit(ghs + 1);
        return error;
index 3a75843ae580ff8ccbcc7f7121e7b93b9b45cf51..0644e58c6191b3335bfb2313912c01bbfddbb28f 100644 (file)
@@ -669,13 +669,13 @@ void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
        struct buffer_head *bh = bd->bd_bh;
        struct gfs2_glock *gl = bd->bd_gl;
 
+       sdp->sd_log_num_revoke++;
+       if (atomic_inc_return(&gl->gl_revokes) == 1)
+               gfs2_glock_hold(gl);
        bh->b_private = NULL;
        bd->bd_blkno = bh->b_blocknr;
        gfs2_remove_from_ail(bd); /* drops ref on bh */
        bd->bd_bh = NULL;
-       sdp->sd_log_num_revoke++;
-       if (atomic_inc_return(&gl->gl_revokes) == 1)
-               gfs2_glock_hold(gl);
        set_bit(GLF_LFLUSH, &gl->gl_flags);
        list_add(&bd->bd_list, &sdp->sd_log_revokes);
 }
@@ -1131,6 +1131,10 @@ int gfs2_logd(void *data)
 
        while (!kthread_should_stop()) {
 
+               if (gfs2_withdrawn(sdp)) {
+                       msleep_interruptible(HZ);
+                       continue;
+               }
                /* Check for errors writing to the journal */
                if (sdp->sd_log_error) {
                        gfs2_lm(sdp,
@@ -1139,6 +1143,7 @@ int gfs2_logd(void *data)
                                "prevent further damage.\n",
                                sdp->sd_fsname, sdp->sd_log_error);
                        gfs2_withdraw(sdp);
+                       continue;
                }
 
                did_flush = false;
index 5ea96757afc48ee8f65bbcdfbfe4e83ffe7399b2..cb2a11b458c66beeb96e7b1fe9fc850f29f54fe8 100644 (file)
@@ -263,7 +263,7 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
        struct super_block *sb = sdp->sd_vfs;
        struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
 
-       bio->bi_iter.bi_sector = blkno << (sb->s_blocksize_bits - 9);
+       bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
        bio_set_dev(bio, sb->s_bdev);
        bio->bi_end_io = end_io;
        bio->bi_private = sdp;
@@ -509,12 +509,12 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
        unsigned int bsize = sdp->sd_sb.sb_bsize, off;
        unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
        unsigned int shift = PAGE_SHIFT - bsize_shift;
-       unsigned int readahead_blocks = BIO_MAX_PAGES << shift;
+       unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift;
        struct gfs2_journal_extent *je;
        int sz, ret = 0;
        struct bio *bio = NULL;
        struct page *page = NULL;
-       bool bio_chained = false, done = false;
+       bool done = false;
        errseq_t since;
 
        memset(head, 0, sizeof(*head));
@@ -537,30 +537,30 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
                                off = 0;
                        }
 
-                       if (!bio || (bio_chained && !off)) {
-                               /* start new bio */
-                       } else {
-                               sz = bio_add_page(bio, page, bsize, off);
-                               if (sz == bsize)
-                                       goto block_added;
+                       if (bio && (off || block < blocks_submitted + max_blocks)) {
+                               sector_t sector = dblock << sdp->sd_fsb2bb_shift;
+
+                               if (bio_end_sector(bio) == sector) {
+                                       sz = bio_add_page(bio, page, bsize, off);
+                                       if (sz == bsize)
+                                               goto block_added;
+                               }
                                if (off) {
                                        unsigned int blocks =
                                                (PAGE_SIZE - off) >> bsize_shift;
 
                                        bio = gfs2_chain_bio(bio, blocks);
-                                       bio_chained = true;
                                        goto add_block_to_new_bio;
                                }
                        }
 
                        if (bio) {
-                               blocks_submitted = block + 1;
+                               blocks_submitted = block;
                                submit_bio(bio);
                        }
 
                        bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
                        bio->bi_opf = REQ_OP_READ;
-                       bio_chained = false;
 add_block_to_new_bio:
                        sz = bio_add_page(bio, page, bsize, off);
                        BUG_ON(sz != bsize);
@@ -568,7 +568,7 @@ block_added:
                        off += bsize;
                        if (off == PAGE_SIZE)
                                page = NULL;
-                       if (blocks_submitted < blocks_read + readahead_blocks) {
+                       if (blocks_submitted <= blocks_read + max_blocks) {
                                /* Keep at least one bio in flight */
                                continue;
                        }
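
The reworked loop above only appends a page to the current bio when the next block is physically contiguous with it, i.e. bio_end_sector(bio) equals the block's first sector; otherwise the bio is submitted and a new one started. The contiguity arithmetic, with invented numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned int fsb2bb_shift = 3;   /* 4K fs block = 8 sectors */
            unsigned long long bio_start = 1000, bio_sectors = 24;
            unsigned long long bio_end = bio_start + bio_sectors;
            unsigned long long dblock = 128; /* next fs block number */
            unsigned long long sector = dblock << fsb2bb_shift;

            if (sector == bio_end)           /* 1024 == 1024 */
                    printf("merge into the current bio\n");
            else
                    printf("submit and start a new bio\n");
            return 0;
    }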
index 4b72abcf83b29a2a1a49ebe0bf512508147bdd07..9856cc2e079504f9b9a8f6a7a0dabd77f43d9d1c 100644 (file)
@@ -252,7 +252,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
        int num = 0;
 
        if (unlikely(gfs2_withdrawn(sdp)) &&
-           (!sdp->sd_jdesc || (blkno != sdp->sd_jdesc->jd_no_addr))) {
+           (!sdp->sd_jdesc || gl != sdp->sd_jinode_gl)) {
                *bhp = NULL;
                return -EIO;
        }
index cc0c4b5800be93b658b82bbb4b4eab09bbece428..8259fef3f9863be547ef4241a37a46283668a710 100644 (file)
@@ -1051,8 +1051,7 @@ int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
        u32 x;
        int error = 0;
 
-       if (capable(CAP_SYS_RESOURCE) ||
-           sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
+       if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
                return 0;
 
        error = gfs2_quota_hold(ip, uid, gid);
@@ -1125,7 +1124,7 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
        int found;
 
        if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
-               goto out;
+               return;
 
        for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
                struct gfs2_quota_data *qd;
@@ -1162,7 +1161,6 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
                        qd_unlock(qda[x]);
        }
 
-out:
        gfs2_quota_unhold(ip);
 }
 
@@ -1210,9 +1208,6 @@ int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
        if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
                return 0;
 
-        if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
-                return 0;
-
        for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
                qd = ip->i_qadata->qa_qd[x];
 
@@ -1270,7 +1265,9 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
        if (ip->i_diskflags & GFS2_DIF_SYSTEM)
                return;
 
-       BUG_ON(ip->i_qadata->qa_ref <= 0);
+       if (gfs2_assert_withdraw(sdp, ip->i_qadata &&
+                                ip->i_qadata->qa_ref > 0))
+               return;
        for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
                qd = ip->i_qadata->qa_qd[x];
 
index 7f9ca8ef40fc4867454d97d37f8072e36dc1dbaa..21ada332d55571021bda02fb55be74da43952388 100644 (file)
@@ -44,7 +44,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip,
        int ret;
 
        ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
-       if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
+       if (capable(CAP_SYS_RESOURCE) ||
+           sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
                return 0;
        ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
        if (ret)
index 37fc41632aa21a5156de24962ddc136f2feda792..956fced0a8ec2c47ed5eed4fe334f38cb674a47d 100644 (file)
@@ -1404,7 +1404,6 @@ out:
        if (ip->i_qadata)
                gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
        gfs2_rs_delete(ip, NULL);
-       gfs2_qa_put(ip);
        gfs2_ordered_del_inode(ip);
        clear_inode(inode);
        gfs2_dir_hash_inval(ip);
index 9b64d40ab379391ba3a668974ec9954910824b18..aa087a5675af6ccafb6b30e9298f3d23cc8e2e76 100644 (file)
@@ -119,6 +119,12 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
        if (!sb_rdonly(sdp->sd_vfs))
                ret = gfs2_make_fs_ro(sdp);
 
+       if (sdp->sd_lockstruct.ls_ops->lm_lock == NULL) { /* lock_nolock */
+               if (!ret)
+                       ret = -EIO;
+               clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);
+               goto skip_recovery;
+       }
        /*
         * Drop the glock for our journal so another node can recover it.
         */
@@ -159,10 +165,6 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
                wait_on_bit(&gl->gl_flags, GLF_FREEING, TASK_UNINTERRUPTIBLE);
        }
 
-       if (sdp->sd_lockstruct.ls_ops->lm_lock == NULL) { /* lock_nolock */
-               clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);
-               goto skip_recovery;
-       }
        /*
         * Dequeue the "live" glock, but keep a reference so it's never freed.
         */
index c687f57fb65157602e0b2d4e993b0bb087094db5..bb25e3997d418a79df509d99a36ffa3aeb311980 100644 (file)
@@ -524,6 +524,7 @@ enum {
        REQ_F_OVERFLOW_BIT,
        REQ_F_POLLED_BIT,
        REQ_F_BUFFER_SELECTED_BIT,
+       REQ_F_NO_FILE_TABLE_BIT,
 
        /* not a real bit, just to check we're not overflowing the space */
        __REQ_F_LAST_BIT,
@@ -577,6 +578,8 @@ enum {
        REQ_F_POLLED            = BIT(REQ_F_POLLED_BIT),
        /* buffer already selected */
        REQ_F_BUFFER_SELECTED   = BIT(REQ_F_BUFFER_SELECTED_BIT),
+       /* doesn't need file table for this request */
+       REQ_F_NO_FILE_TABLE     = BIT(REQ_F_NO_FILE_TABLE_BIT),
 };
 
 struct async_poll {
@@ -616,6 +619,8 @@ struct io_kiocb {
        bool                            needs_fixed_file;
        u8                              opcode;
 
+       u16                             buf_index;
+
        struct io_ring_ctx      *ctx;
        struct list_head        list;
        unsigned int            flags;
@@ -677,8 +682,6 @@ struct io_op_def {
        unsigned                needs_mm : 1;
        /* needs req->file assigned */
        unsigned                needs_file : 1;
-       /* needs req->file assigned IFF fd is >= 0 */
-       unsigned                fd_non_neg : 1;
        /* hash wq insertion if file is a regular file */
        unsigned                hash_reg_file : 1;
        /* unbound wq insertion if file is a non-regular file */
@@ -781,8 +784,6 @@ static const struct io_op_def io_op_defs[] = {
                .needs_file             = 1,
        },
        [IORING_OP_OPENAT] = {
-               .needs_file             = 1,
-               .fd_non_neg             = 1,
                .file_table             = 1,
                .needs_fs               = 1,
        },
@@ -796,9 +797,8 @@ static const struct io_op_def io_op_defs[] = {
        },
        [IORING_OP_STATX] = {
                .needs_mm               = 1,
-               .needs_file             = 1,
-               .fd_non_neg             = 1,
                .needs_fs               = 1,
+               .file_table             = 1,
        },
        [IORING_OP_READ] = {
                .needs_mm               = 1,
@@ -833,8 +833,6 @@ static const struct io_op_def io_op_defs[] = {
                .buffer_select          = 1,
        },
        [IORING_OP_OPENAT2] = {
-               .needs_file             = 1,
-               .fd_non_neg             = 1,
                .file_table             = 1,
                .needs_fs               = 1,
        },
@@ -928,6 +926,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
                goto err;
 
        ctx->flags = p->flags;
+       init_waitqueue_head(&ctx->sqo_wait);
        init_waitqueue_head(&ctx->cq_wait);
        INIT_LIST_HEAD(&ctx->cq_overflow_list);
        init_completion(&ctx->completions[0]);
@@ -1291,7 +1290,7 @@ static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
        struct io_kiocb *req;
 
        req = ctx->fallback_req;
-       if (!test_and_set_bit_lock(0, (unsigned long *) ctx->fallback_req))
+       if (!test_and_set_bit_lock(0, (unsigned long *) &ctx->fallback_req))
                return req;
 
        return NULL;
@@ -1378,7 +1377,7 @@ static void __io_free_req(struct io_kiocb *req)
        if (likely(!io_is_fallback_req(req)))
                kmem_cache_free(req_cachep, req);
        else
-               clear_bit_unlock(0, (unsigned long *) req->ctx->fallback_req);
+               clear_bit_unlock(0, (unsigned long *) &req->ctx->fallback_req);
 }
 
 struct req_batch {
@@ -1398,10 +1397,6 @@ static void io_free_req_many(struct io_ring_ctx *ctx, struct req_batch *rb)
                for (i = 0; i < rb->to_free; i++) {
                        struct io_kiocb *req = rb->reqs[i];
 
-                       if (req->flags & REQ_F_FIXED_FILE) {
-                               req->file = NULL;
-                               percpu_ref_put(req->fixed_file_refs);
-                       }
                        if (req->flags & REQ_F_INFLIGHT)
                                inflight++;
                        __io_req_aux_free(req);
@@ -1674,7 +1669,7 @@ static inline bool io_req_multi_free(struct req_batch *rb, struct io_kiocb *req)
        if ((req->flags & REQ_F_LINK_HEAD) || io_is_fallback_req(req))
                return false;
 
-       if (!(req->flags & REQ_F_FIXED_FILE) || req->io)
+       if (req->file || req->io)
                rb->need_iter++;
 
        rb->reqs[rb->to_free++] = req;
@@ -2034,7 +2029,7 @@ static struct file *__io_file_get(struct io_submit_state *state, int fd)
  * any file. For now, just ensure that anything potentially problematic is done
  * inline.
  */
-static bool io_file_supports_async(struct file *file)
+static bool io_file_supports_async(struct file *file, int rw)
 {
        umode_t mode = file_inode(file)->i_mode;
 
@@ -2043,7 +2038,13 @@ static bool io_file_supports_async(struct file *file)
        if (S_ISREG(mode) && file->f_op != &io_uring_fops)
                return true;
 
-       return false;
+       if (!(file->f_mode & FMODE_NOWAIT))
+               return false;
+
+       if (rw == READ)
+               return file->f_op->read_iter != NULL;
+
+       return file->f_op->write_iter != NULL;
 }
 
 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
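
The new rw argument above makes the nonblocking-capability check directional: beyond FMODE_NOWAIT, the file must actually provide an iter-based handler for the direction being issued. A userspace mock of the decision (struct layout and names invented):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define FMODE_NOWAIT 0x1u

    struct file_ops {
            int (*read_iter)(void);
            int (*write_iter)(void);
    };

    struct file {
            unsigned int f_mode;
            const struct file_ops *f_op;
    };

    enum { MY_READ, MY_WRITE };

    static bool supports_async(const struct file *f, int rw)
    {
            if (!(f->f_mode & FMODE_NOWAIT))
                    return false;
            return rw == MY_READ ? f->f_op->read_iter != NULL
                                 : f->f_op->write_iter != NULL;
    }

    static int dummy_read(void) { return 0; }

    int main(void)
    {
            static const struct file_ops ops = { .read_iter = dummy_read };
            struct file f = { .f_mode = FMODE_NOWAIT, .f_op = &ops };

            printf("read:  %d\n", supports_async(&f, MY_READ));   /* 1 */
            printf("write: %d\n", supports_async(&f, MY_WRITE));  /* 0 */
            return 0;
    }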
@@ -2102,9 +2103,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 
        req->rw.addr = READ_ONCE(sqe->addr);
        req->rw.len = READ_ONCE(sqe->len);
-       /* we own ->private, reuse it for the buffer index  / buffer ID */
-       req->rw.kiocb.private = (void *) (unsigned long)
-                                       READ_ONCE(sqe->buf_index);
+       req->buf_index = READ_ONCE(sqe->buf_index);
        return 0;
 }
 
@@ -2147,7 +2146,7 @@ static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
        struct io_ring_ctx *ctx = req->ctx;
        size_t len = req->rw.len;
        struct io_mapped_ubuf *imu;
-       unsigned index, buf_index;
+       u16 index, buf_index;
        size_t offset;
        u64 buf_addr;
 
@@ -2155,7 +2154,7 @@ static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
        if (unlikely(!ctx->user_bufs))
                return -EFAULT;
 
-       buf_index = (unsigned long) req->rw.kiocb.private;
+       buf_index = req->buf_index;
        if (unlikely(buf_index >= ctx->nr_user_bufs))
                return -EFAULT;
 
@@ -2271,10 +2270,10 @@ static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
                                        bool needs_lock)
 {
        struct io_buffer *kbuf;
-       int bgid;
+       u16 bgid;
 
        kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
-       bgid = (int) (unsigned long) req->rw.kiocb.private;
+       bgid = req->buf_index;
        kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
        if (IS_ERR(kbuf))
                return kbuf;
@@ -2365,7 +2364,7 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
        }
 
        /* buffer index only valid with fixed read/write, or buffer select  */
-       if (req->rw.kiocb.private && !(req->flags & REQ_F_BUFFER_SELECT))
+       if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
                return -EINVAL;
 
        if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
@@ -2571,7 +2570,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock)
         * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
         * we know to async punt it even if it was opened O_NONBLOCK
         */
-       if (force_nonblock && !io_file_supports_async(req->file))
+       if (force_nonblock && !io_file_supports_async(req->file, READ))
                goto copy_iov;
 
        iov_count = iov_iter_count(&iter);
@@ -2594,7 +2593,8 @@ copy_iov:
                        if (ret)
                                goto out_free;
                        /* any defer here is final, must blocking retry */
-                       if (!(req->flags & REQ_F_NOWAIT))
+                       if (!(req->flags & REQ_F_NOWAIT) &&
+                           !file_can_poll(req->file))
                                req->flags |= REQ_F_MUST_PUNT;
                        return -EAGAIN;
                }
@@ -2662,7 +2662,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock)
         * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
         * we know to async punt it even if it was opened O_NONBLOCK
         */
-       if (force_nonblock && !io_file_supports_async(req->file))
+       if (force_nonblock && !io_file_supports_async(req->file, WRITE))
                goto copy_iov;
 
        /* file path doesn't support NOWAIT for non-direct_IO */
@@ -2716,7 +2716,8 @@ copy_iov:
                        if (ret)
                                goto out_free;
                        /* any defer here is final, must blocking retry */
-                       req->flags |= REQ_F_MUST_PUNT;
+                       if (!file_can_poll(req->file))
+                               req->flags |= REQ_F_MUST_PUNT;
                        return -EAGAIN;
                }
        }
@@ -2756,15 +2757,6 @@ static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        return 0;
 }
 
-static bool io_splice_punt(struct file *file)
-{
-       if (get_pipe_info(file))
-               return false;
-       if (!io_file_supports_async(file))
-               return true;
-       return !(file->f_flags & O_NONBLOCK);
-}
-
 static int io_splice(struct io_kiocb *req, bool force_nonblock)
 {
        struct io_splice *sp = &req->splice;
@@ -2772,19 +2764,16 @@ static int io_splice(struct io_kiocb *req, bool force_nonblock)
        struct file *out = sp->file_out;
        unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
        loff_t *poff_in, *poff_out;
-       long ret;
+       long ret = 0;
 
-       if (force_nonblock) {
-               if (io_splice_punt(in) || io_splice_punt(out))
-                       return -EAGAIN;
-               flags |= SPLICE_F_NONBLOCK;
-       }
+       if (force_nonblock)
+               return -EAGAIN;
 
        poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
        poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
-       ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
-       if (force_nonblock && ret == -EAGAIN)
-               return -EAGAIN;
+
+       if (sp->len)
+               ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
 
        io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
        req->flags &= ~REQ_F_NEED_CLEANUP;
@@ -3355,8 +3344,12 @@ static int io_statx(struct io_kiocb *req, bool force_nonblock)
        struct kstat stat;
        int ret;
 
-       if (force_nonblock)
+       if (force_nonblock) {
+               /* only need file table for an actual valid fd */
+               if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
+                       req->flags |= REQ_F_NO_FILE_TABLE;
                return -EAGAIN;
+       }
 
        if (vfs_stat_set_lookup_flags(&lookup_flags, ctx->how.flags))
                return -EINVAL;
@@ -3502,7 +3495,7 @@ static void io_sync_file_range_finish(struct io_wq_work **workptr)
        if (io_req_cancelled(req))
                return;
        __io_sync_file_range(req);
-       io_put_req(req); /* put submission ref */
+       io_steal_work(req, workptr);
 }
 
 static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
@@ -4142,12 +4135,14 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
        req->result = mask;
        init_task_work(&req->task_work, func);
        /*
-        * If this fails, then the task is exiting. Punt to one of the io-wq
-        * threads to ensure the work gets run, we can't always rely on exit
-        * cancelation taking care of this.
+        * If this fails, then the task is exiting. When a task exits, the
+        * work gets canceled, so just cancel this request as well instead
+        * of executing it. We can't safely execute it anyway, as we may not
+        * have the state needed for it.
         */
        ret = task_work_add(tsk, &req->task_work, true);
        if (unlikely(ret)) {
+               WRITE_ONCE(poll->canceled, true);
                tsk = io_wq_get_task(req->ctx->io_wq);
                task_work_add(tsk, &req->task_work, true);
        }
@@ -5015,15 +5010,16 @@ static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        int ret;
 
        /* Still need defer if there is pending req in defer list. */
-       if (!req_need_defer(req) && list_empty(&ctx->defer_list))
+       if (!req_need_defer(req) && list_empty_careful(&ctx->defer_list))
                return 0;
 
-       if (!req->io && io_alloc_async_ctx(req))
-               return -EAGAIN;
-
-       ret = io_req_defer_prep(req, sqe);
-       if (ret < 0)
-               return ret;
+       if (!req->io) {
+               if (io_alloc_async_ctx(req))
+                       return -EAGAIN;
+               ret = io_req_defer_prep(req, sqe);
+               if (ret < 0)
+                       return ret;
+       }
 
        spin_lock_irq(&ctx->completion_lock);
        if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
@@ -5310,7 +5306,8 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
        if (ret)
                return ret;
 
-       if (ctx->flags & IORING_SETUP_IOPOLL) {
+       /* If the op doesn't have a file, we're not polling for it */
+       if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
                const bool in_async = io_wq_current_is_worker();
 
                if (req->result == -EAGAIN)
@@ -5364,15 +5361,6 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
        io_steal_work(req, workptr);
 }
 
-static int io_req_needs_file(struct io_kiocb *req, int fd)
-{
-       if (!io_op_defs[req->opcode].needs_file)
-               return 0;
-       if ((fd == -1 || fd == AT_FDCWD) && io_op_defs[req->opcode].fd_non_neg)
-               return 0;
-       return 1;
-}
-
 static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
                                              int index)
 {
@@ -5410,14 +5398,11 @@ static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
 }
 
 static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
-                          int fd, unsigned int flags)
+                          int fd)
 {
        bool fixed;
 
-       if (!io_req_needs_file(req, fd))
-               return 0;
-
-       fixed = (flags & IOSQE_FIXED_FILE);
+       fixed = (req->flags & REQ_F_FIXED_FILE) != 0;
        if (unlikely(!fixed && req->needs_fixed_file))
                return -EBADF;
 
@@ -5429,7 +5414,7 @@ static int io_grab_files(struct io_kiocb *req)
        int ret = -EBADF;
        struct io_ring_ctx *ctx = req->ctx;
 
-       if (req->work.files)
+       if (req->work.files || (req->flags & REQ_F_NO_FILE_TABLE))
                return 0;
        if (!ctx->ring_file)
                return -EBADF;
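
io_statx() sets REQ_F_NO_FILE_TABLE when the descriptor is -1 or AT_FDCWD, and io_grab_files() now short-circuits on that flag, so requests that never dereference the file table do not pin current->files. A minimal sketch of the two halves together; the flag value and helpers are illustrative stand-ins:

/* Hedged sketch of the REQ_F_NO_FILE_TABLE short-circuit; the flag
 * value and helpers are illustrative, not the kernel's definitions. */
#include <fcntl.h>
#include <stdio.h>

#define REQ_F_NO_FILE_TABLE (1u << 0)

struct req { unsigned flags; int dfd; };

static void statx_prep_sketch(struct req *req)
{
        /* only a real fd needs the submitting task's file table */
        if (req->dfd == -1 || req->dfd == AT_FDCWD)
                req->flags |= REQ_F_NO_FILE_TABLE;
}

static int grab_files_sketch(struct req *req)
{
        if (req->flags & REQ_F_NO_FILE_TABLE)
                return 0;       /* nothing to grab */
        printf("pinning current->files for fd %d\n", req->dfd);
        return 0;
}

int main(void)
{
        struct req req = { .flags = 0, .dfd = AT_FDCWD };

        statx_prep_sketch(&req);
        return grab_files_sketch(&req);
}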
@@ -5623,9 +5608,15 @@ fail_req:
                        io_double_put_req(req);
                }
        } else if (req->flags & REQ_F_FORCE_ASYNC) {
-               ret = io_req_defer_prep(req, sqe);
-               if (unlikely(ret < 0))
-                       goto fail_req;
+               if (!req->io) {
+                       ret = -EAGAIN;
+                       if (io_alloc_async_ctx(req))
+                               goto fail_req;
+                       ret = io_req_defer_prep(req, sqe);
+                       if (unlikely(ret < 0))
+                               goto fail_req;
+               }
+
                /*
                 * Never try inline submit if IOSQE_ASYNC is set; go straight
                 * to async execution.
@@ -5794,7 +5785,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
                       struct io_submit_state *state, bool async)
 {
        unsigned int sqe_flags;
-       int id, fd;
+       int id;
 
        /*
         * All io needs to record the previous position; if LINK or DRAIN,
@@ -5846,8 +5837,10 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
                                        IOSQE_ASYNC | IOSQE_FIXED_FILE |
                                        IOSQE_BUFFER_SELECT | IOSQE_IO_LINK);
 
-       fd = READ_ONCE(sqe->fd);
-       return io_req_set_file(state, req, fd, sqe_flags);
+       if (!io_op_defs[req->opcode].needs_file)
+               return 0;
+
+       return io_req_set_file(state, req, READ_ONCE(sqe->fd));
 }
 
 static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
@@ -6039,6 +6032,7 @@ static int io_sq_thread(void *data)
                                finish_wait(&ctx->sqo_wait, &wait);
 
                                ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
+                               ret = 0;
                                continue;
                        }
                        finish_wait(&ctx->sqo_wait, &wait);
@@ -6852,7 +6846,6 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
 {
        int ret;
 
-       init_waitqueue_head(&ctx->sqo_wait);
        mmgrab(current->mm);
        ctx->sqo_mm = current->mm;
 
@@ -7327,7 +7320,7 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
         * it could cause shutdown to hang.
         */
        while (ctx->sqo_thread && !wq_has_sleeper(&ctx->sqo_wait))
-               cpu_relax();
+               cond_resched();
 
        io_kill_timeouts(ctx);
        io_poll_remove_all(ctx);
@@ -7356,11 +7349,9 @@ static int io_uring_release(struct inode *inode, struct file *file)
 static void io_uring_cancel_files(struct io_ring_ctx *ctx,
                                  struct files_struct *files)
 {
-       struct io_kiocb *req;
-       DEFINE_WAIT(wait);
-
        while (!list_empty_careful(&ctx->inflight_list)) {
-               struct io_kiocb *cancel_req = NULL;
+               struct io_kiocb *cancel_req = NULL, *req;
+               DEFINE_WAIT(wait);
 
                spin_lock_irq(&ctx->inflight_lock);
                list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
@@ -7400,6 +7391,7 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
                         */
                        if (refcount_sub_and_test(2, &cancel_req->refs)) {
                                io_put_req(cancel_req);
+                               finish_wait(&ctx->inflight_wait, &wait);
                                continue;
                        }
                }
@@ -7407,8 +7399,8 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
                io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
                io_put_req(cancel_req);
                schedule();
+               finish_wait(&ctx->inflight_wait, &wait);
        }
-       finish_wait(&ctx->inflight_wait, &wait);
 }
 
 static int io_uring_flush(struct file *file, void *data)
@@ -7757,7 +7749,8 @@ err:
        return ret;
 }
 
-static int io_uring_create(unsigned entries, struct io_uring_params *p)
+static int io_uring_create(unsigned entries, struct io_uring_params *p,
+                          struct io_uring_params __user *params)
 {
        struct user_struct *user = NULL;
        struct io_ring_ctx *ctx;
@@ -7849,6 +7842,14 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
        p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
        p->cq_off.cqes = offsetof(struct io_rings, cqes);
 
+       p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
+                       IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
+                       IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL;
+
+       if (copy_to_user(params, p, sizeof(*p))) {
+               ret = -EFAULT;
+               goto err;
+       }
        /*
         * Install ring fd as the very last thing, so we don't risk someone
         * having closed it before we finish setup
@@ -7857,9 +7858,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
        if (ret < 0)
                goto err;
 
-       p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
-                       IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
-                       IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL;
        trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
        return ret;
 err:
@@ -7875,7 +7873,6 @@ err:
 static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
 {
        struct io_uring_params p;
-       long ret;
        int i;
 
        if (copy_from_user(&p, params, sizeof(p)))
@@ -7890,14 +7887,7 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
                        IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ))
                return -EINVAL;
 
-       ret = io_uring_create(entries, &p);
-       if (ret < 0)
-               return ret;
-
-       if (copy_to_user(params, &p, sizeof(p)))
-               return -EFAULT;
-
-       return ret;
+       return io_uring_create(entries, &p, params);
 }
 
 SYSCALL_DEFINE2(io_uring_setup, u32, entries,
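
io_uring_create() now fills in p->features and copies the params back to userspace before installing the ring fd; once the fd is visible, another thread could already be using or closing it, so publishing it has to stay the very last step. A hedged standalone sketch of that ordering (copy_to_user_stub and the params struct are stand-ins):

/* Hedged sketch of the setup ordering fix: report parameters first,
 * publish the fd last. All names here are illustrative stand-ins. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

struct params { unsigned features; };

static int copy_to_user_stub(struct params *dst, const struct params *src)
{
        memcpy(dst, src, sizeof(*dst));
        return 0;       /* 0 on success, like copy_to_user() */
}

static int create_sketch(struct params *uparams)
{
        struct params p = { .features = 0x3f };
        int fd = 3;     /* pretend fd */

        if (copy_to_user_stub(uparams, &p))
                return -EFAULT;         /* fail before the fd exists */

        /* install the fd as the very last step */
        return fd;
}

int main(void)
{
        struct params up;
        int fd = create_sketch(&up);

        printf("fd=%d features=%#x\n", fd, up.features);
        return 0;
}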
index 282d45be6f453b0ad4ff084d00bc80864157dd25..5e80b40bc1b5cab63aa4472a2616aa06d6169a45 100644 (file)
@@ -55,6 +55,7 @@ EXPORT_SYMBOL(vfs_ioctl);
 static int ioctl_fibmap(struct file *filp, int __user *p)
 {
        struct inode *inode = file_inode(filp);
+       struct super_block *sb = inode->i_sb;
        int error, ur_block;
        sector_t block;
 
@@ -71,6 +72,13 @@ static int ioctl_fibmap(struct file *filp, int __user *p)
        block = ur_block;
        error = bmap(inode, &block);
 
+       if (block > INT_MAX) {
+               error = -ERANGE;
+               pr_warn_ratelimited("[%s/%d] FS: %s File: %pD4 would truncate fibmap result\n",
+                                   current->comm, task_pid_nr(current),
+                                   sb->s_id, filp);
+       }
+
        if (error)
                ur_block = 0;
        else
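
FIBMAP hands the block number back through a user int, so anything above INT_MAX used to truncate silently; this hunk turns that into -ERANGE plus a rate-limited warning (and the iomap hunk below drops its old WARN for the same case, now handled here). A minimal sketch of the guard, with bmap folded into the caller:

/* Hedged sketch of the FIBMAP truncation guard; fibmap_sketch stands
 * in for ioctl_fibmap() with bmap() folded into the caller. */
#include <errno.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

static int fibmap_sketch(sector_t block, int *out)
{
        if (block > INT_MAX) {
                fprintf(stderr, "would truncate fibmap result\n");
                return -ERANGE; /* refuse instead of truncating */
        }
        *out = (int)block;
        return 0;
}

int main(void)
{
        int out = 0;

        printf("%d\n", fibmap_sketch((sector_t)INT_MAX + 1, &out));
        printf("%d %d\n", fibmap_sketch(42, &out), out);
        return 0;
}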
index bccf305ea9ce26d1f34ac9f10157af448c1f3174..d55e8f491a5e51d1d45c0053db38a41af883eb4f 100644 (file)
@@ -117,10 +117,7 @@ iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length,
 
        if (iomap->type == IOMAP_MAPPED) {
                addr = (pos - iomap->offset + iomap->addr) >> inode->i_blkbits;
-               if (addr > INT_MAX)
-                       WARN(1, "would truncate bmap result\n");
-               else
-                       *bno = addr;
+               *bno = addr;
        }
        return 0;
 }
index 1abf126c2df45b7e6d5815745db756db6b4659d0..a60df88efc4049f9f24ca90e96fa4e951f7f2eff 100644 (file)
@@ -118,8 +118,6 @@ void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int
 
        nfss->fscache_key = NULL;
        nfss->fscache = NULL;
-       if (!(nfss->options & NFS_OPTION_FSCACHE))
-               return;
        if (!uniq) {
                uniq = "";
                ulen = 1;
@@ -188,7 +186,8 @@ void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int
        /* create a cache index for looking up filehandles */
        nfss->fscache = fscache_acquire_cookie(nfss->nfs_client->fscache,
                                               &nfs_fscache_super_index_def,
-                                              key, sizeof(*key) + ulen,
+                                              &key->key,
+                                              sizeof(key->key) + ulen,
                                               NULL, 0,
                                               nfss, 0, true);
        dfprintk(FSCACHE, "NFS: get superblock cookie (0x%p/0x%p)\n",
@@ -226,6 +225,19 @@ void nfs_fscache_release_super_cookie(struct super_block *sb)
        }
 }
 
+static void nfs_fscache_update_auxdata(struct nfs_fscache_inode_auxdata *auxdata,
+                                 struct nfs_inode *nfsi)
+{
+       memset(auxdata, 0, sizeof(*auxdata));
+       auxdata->mtime_sec  = nfsi->vfs_inode.i_mtime.tv_sec;
+       auxdata->mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
+       auxdata->ctime_sec  = nfsi->vfs_inode.i_ctime.tv_sec;
+       auxdata->ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
+
+       if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
+               auxdata->change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
+}
+
 /*
  * Initialise the per-inode cache cookie pointer for an NFS inode.
  */
@@ -239,14 +251,7 @@ void nfs_fscache_init_inode(struct inode *inode)
        if (!(nfss->fscache && S_ISREG(inode->i_mode)))
                return;
 
-       memset(&auxdata, 0, sizeof(auxdata));
-       auxdata.mtime_sec  = nfsi->vfs_inode.i_mtime.tv_sec;
-       auxdata.mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
-       auxdata.ctime_sec  = nfsi->vfs_inode.i_ctime.tv_sec;
-       auxdata.ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
-
-       if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
-               auxdata.change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
+       nfs_fscache_update_auxdata(&auxdata, nfsi);
 
        nfsi->fscache = fscache_acquire_cookie(NFS_SB(inode->i_sb)->fscache,
                                               &nfs_fscache_inode_object_def,
@@ -266,11 +271,7 @@ void nfs_fscache_clear_inode(struct inode *inode)
 
        dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n", nfsi, cookie);
 
-       memset(&auxdata, 0, sizeof(auxdata));
-       auxdata.mtime_sec  = nfsi->vfs_inode.i_mtime.tv_sec;
-       auxdata.mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
-       auxdata.ctime_sec  = nfsi->vfs_inode.i_ctime.tv_sec;
-       auxdata.ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
+       nfs_fscache_update_auxdata(&auxdata, nfsi);
        fscache_relinquish_cookie(cookie, &auxdata, false);
        nfsi->fscache = NULL;
 }
@@ -310,11 +311,7 @@ void nfs_fscache_open_file(struct inode *inode, struct file *filp)
        if (!fscache_cookie_valid(cookie))
                return;
 
-       memset(&auxdata, 0, sizeof(auxdata));
-       auxdata.mtime_sec  = nfsi->vfs_inode.i_mtime.tv_sec;
-       auxdata.mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
-       auxdata.ctime_sec  = nfsi->vfs_inode.i_ctime.tv_sec;
-       auxdata.ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
+       nfs_fscache_update_auxdata(&auxdata, nfsi);
 
        if (inode_is_open_for_write(inode)) {
                dfprintk(FSCACHE, "NFS: nfsi 0x%p disabling cache\n", nfsi);
index 35c8cb2d76372087d29326ebfd66ec5ee4ed76d9..dda5c3e65d8d690bfc49333fbd8191d0dd897e83 100644 (file)
@@ -30,6 +30,7 @@
 #define encode_dirpath_sz      (1 + XDR_QUADLEN(MNTPATHLEN))
 #define MNT_status_sz          (1)
 #define MNT_fhandle_sz         XDR_QUADLEN(NFS2_FHSIZE)
+#define MNT_fhandlev3_sz       XDR_QUADLEN(NFS3_FHSIZE)
 #define MNT_authflav3_sz       (1 + NFS_MAX_SECFLAVORS)
 
 /*
@@ -37,7 +38,7 @@
  */
 #define MNT_enc_dirpath_sz     encode_dirpath_sz
 #define MNT_dec_mountres_sz    (MNT_status_sz + MNT_fhandle_sz)
-#define MNT_dec_mountres3_sz   (MNT_status_sz + MNT_fhandle_sz + \
+#define MNT_dec_mountres3_sz   (MNT_status_sz + MNT_fhandlev3_sz + \
                                 MNT_authflav3_sz)
 
 /*
index c5c3fc6e6c600b427c991d4be3a83879bbd2b48c..26c94b32d6f49e15f2c4e4a50d219f6a8592e034 100644 (file)
@@ -253,37 +253,45 @@ int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
 
 int nfs3_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 {
-       struct posix_acl *alloc = NULL, *dfacl = NULL;
+       struct posix_acl *orig = acl, *dfacl = NULL, *alloc;
        int status;
 
        if (S_ISDIR(inode->i_mode)) {
                switch(type) {
                case ACL_TYPE_ACCESS:
-                       alloc = dfacl = get_acl(inode, ACL_TYPE_DEFAULT);
+                       alloc = get_acl(inode, ACL_TYPE_DEFAULT);
                        if (IS_ERR(alloc))
                                goto fail;
+                       dfacl = alloc;
                        break;
 
                case ACL_TYPE_DEFAULT:
-                       dfacl = acl;
-                       alloc = acl = get_acl(inode, ACL_TYPE_ACCESS);
+                       alloc = get_acl(inode, ACL_TYPE_ACCESS);
                        if (IS_ERR(alloc))
                                goto fail;
+                       dfacl = acl;
+                       acl = alloc;
                        break;
                }
        }
 
        if (acl == NULL) {
-               alloc = acl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
+               alloc = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
                if (IS_ERR(alloc))
                        goto fail;
+               acl = alloc;
        }
        status = __nfs3_proc_setacls(inode, acl, dfacl);
-       posix_acl_release(alloc);
+out:
+       if (acl != orig)
+               posix_acl_release(acl);
+       if (dfacl != orig)
+               posix_acl_release(dfacl);
        return status;
 
 fail:
-       return PTR_ERR(alloc);
+       status = PTR_ERR(alloc);
+       goto out;
 }
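
The nfs3_set_acl() rework keeps the caller's original pointer in orig and releases acl/dfacl only when they differ from it, so every path, including the error path that now jumps to out, frees exactly the ACLs the function itself allocated. A hedged sketch of that release-only-what-you-allocated pattern, with refcounts modeled as plain integers:

/* Hedged sketch of the release-only-what-you-allocated pattern; the
 * refcounting here is modeled with plain integers, not kernel code. */
#include <stdio.h>

struct acl { int refs; };

static void put_acl(struct acl *a) { if (a) a->refs--; }

static void set_acl_sketch(struct acl *orig, struct acl *acl,
                           struct acl *dfacl)
{
        /* ... apply acl and dfacl ... */
        if (acl != orig)
                put_acl(acl);   /* we allocated it, we drop it */
        if (dfacl != orig)
                put_acl(dfacl);
}

int main(void)
{
        struct acl caller = { .refs = 1 }, alloc = { .refs = 1 };

        set_acl_sketch(&caller, &caller, &alloc);
        printf("caller=%d alloc=%d\n", caller.refs, alloc.refs); /* 1 0 */
        return 0;
}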
 
 const struct xattr_handler *nfs3_xattr_handlers[] = {
index 512afb1c786773761691ed6820b0f2268113d4e2..9056f3dd380e54e1a6d9c97aeba2c91748ba7ed0 100644 (file)
@@ -6347,7 +6347,7 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
                .rpc_client = server->client,
                .rpc_message = &msg,
                .callback_ops = &nfs4_delegreturn_ops,
-               .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF | RPC_TASK_TIMEOUT,
+               .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
        };
        int status = 0;
 
@@ -7891,6 +7891,7 @@ static void
 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
 {
        struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp;
+       struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp;
        struct nfs_client *clp = args->client;
 
        switch (task->tk_status) {
@@ -7899,6 +7900,12 @@ nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
                nfs4_schedule_session_recovery(clp->cl_session,
                                task->tk_status);
        }
+       if (args->dir == NFS4_CDFC4_FORE_OR_BOTH &&
+                       res->dir != NFS4_CDFS4_BOTH) {
+               rpc_task_close_connection(task);
+               if (args->retries++ < MAX_BIND_CONN_TO_SESSION_RETRIES)
+                       rpc_restart_call(task);
+       }
 }
 
 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = {
@@ -7921,6 +7928,7 @@ int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt,
        struct nfs41_bind_conn_to_session_args args = {
                .client = clp,
                .dir = NFS4_CDFC4_FORE_OR_BOTH,
+               .retries = 0,
        };
        struct nfs41_bind_conn_to_session_res res;
        struct rpc_message msg = {
@@ -9191,8 +9199,7 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
        nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0);
 
        task = rpc_run_task(&task_setup_data);
-       if (IS_ERR(task))
-               return ERR_CAST(task);
+
        status = rpc_wait_for_completion_task(task);
        if (status != 0)
                goto out;
index ac93715c05a49ba4fdd81e5b7b2c3a7a249212d7..a8dc25ce48bba8543234b2bb5fbef8999db76bb5 100644 (file)
@@ -734,9 +734,9 @@ nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
                state = new;
                state->owner = owner;
                atomic_inc(&owner->so_count);
-               list_add_rcu(&state->inode_states, &nfsi->open_states);
                ihold(inode);
                state->inode = inode;
+               list_add_rcu(&state->inode_states, &nfsi->open_states);
                spin_unlock(&inode->i_lock);
                /* Note: The reclaim code dictates that we add stateless
                 * and read-only stateids to the end of the list */
index f61f96603df78a1ef20aecb341cf4fc283549fb5..6ca421cbe19c9cadc9181af2b9f3a7a4d463e5d3 100644 (file)
@@ -752,7 +752,7 @@ int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
                .callback_ops = call_ops,
                .callback_data = hdr,
                .workqueue = nfsiod_workqueue,
-               .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF | flags,
+               .flags = RPC_TASK_ASYNC | flags,
        };
 
        hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how);
@@ -950,7 +950,8 @@ static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
                                        hdr->cred,
                                        NFS_PROTO(hdr->inode),
                                        desc->pg_rpc_callops,
-                                       desc->pg_ioflags, 0);
+                                       desc->pg_ioflags,
+                                       RPC_TASK_CRED_NOREF);
        return ret;
 }
 
index b8d78f3933651a2c042bf92ff3bebed471d6c69e..dd2e14f5875d852cefaf4dab5887b01b2f5cbbd9 100644 (file)
@@ -1332,13 +1332,15 @@ _pnfs_return_layout(struct inode *ino)
                        !valid_layout) {
                spin_unlock(&ino->i_lock);
                dprintk("NFS: %s no layout segments to return\n", __func__);
-               goto out_put_layout_hdr;
+               goto out_wait_layoutreturn;
        }
 
        send = pnfs_prepare_layoutreturn(lo, &stateid, &cred, NULL);
        spin_unlock(&ino->i_lock);
        if (send)
                status = pnfs_send_layoutreturn(lo, &stateid, &cred, IOMODE_ANY, true);
+out_wait_layoutreturn:
+       wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN, TASK_UNINTERRUPTIBLE);
 out_put_layout_hdr:
        pnfs_free_lseg_list(&tmp_list);
        pnfs_put_layout_hdr(lo);
@@ -1456,18 +1458,15 @@ retry:
        /* lo ref dropped in pnfs_roc_release() */
        layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &lc_cred, &iomode);
        /* If the creds don't match, we can't compound the layoutreturn */
-       if (!layoutreturn)
+       if (!layoutreturn || cred_fscmp(cred, lc_cred) != 0)
                goto out_noroc;
-       if (cred_fscmp(cred, lc_cred) != 0)
-               goto out_noroc_put_cred;
 
        roc = layoutreturn;
        pnfs_init_layoutreturn_args(args, lo, &stateid, iomode);
        res->lrs_present = 0;
        layoutreturn = false;
-
-out_noroc_put_cred:
        put_cred(lc_cred);
+
 out_noroc:
        spin_unlock(&ino->i_lock);
        rcu_read_unlock();
index e7ddbce48321dfcba588eba56e6f3495cf65226f..679767ac258d0c2ed703df26b8a0dffa0faf25a2 100644 (file)
@@ -536,7 +536,8 @@ pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
                        nfs_init_commit(data, NULL, NULL, cinfo);
                        nfs_initiate_commit(NFS_CLIENT(inode), data,
                                            NFS_PROTO(data->inode),
-                                           data->mds_ops, how, 0);
+                                           data->mds_ops, how,
+                                           RPC_TASK_CRED_NOREF);
                } else {
                        nfs_init_commit(data, NULL, data->lseg, cinfo);
                        initiate_commit(data, how);
index 59ef3b13ccca2521a996c776aea55363ae05bbe1..7a70287f21a2c1e3f1a13192db3795df8330d53d 100644 (file)
@@ -185,7 +185,7 @@ static int __nfs_list_for_each_server(struct list_head *head,
 
        rcu_read_lock();
        list_for_each_entry_rcu(server, head, client_link) {
-               if (!nfs_sb_active(server->super))
+               if (!(server->super && nfs_sb_active(server->super)))
                        continue;
                rcu_read_unlock();
                if (last)
@@ -1189,7 +1189,6 @@ static void nfs_get_cache_cookie(struct super_block *sb,
                        uniq = ctx->fscache_uniq;
                        ulen = strlen(ctx->fscache_uniq);
                }
-               return;
        }
 
        nfs_fscache_get_super_cookie(sb, uniq, ulen);
index df4b87c30ac9d6ad116dd4f948f5964bccf435ea..1e767f779c498c3b57db16d904b4fa25bdb183f1 100644 (file)
@@ -1695,7 +1695,7 @@ int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
                .callback_ops = call_ops,
                .callback_data = data,
                .workqueue = nfsiod_workqueue,
-               .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF | flags,
+               .flags = RPC_TASK_ASYNC | flags,
                .priority = priority,
        };
        /* Set up the initial task struct.  */
@@ -1813,7 +1813,7 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how,
        nfs_init_commit(data, head, NULL, cinfo);
        atomic_inc(&cinfo->mds->rpcs_out);
        return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
-                                  data->mds_ops, how, 0);
+                                  data->mds_ops, how, RPC_TASK_CRED_NOREF);
 }
 
 /*
index 5435a40f82bec95b140451057ca8415f36f5381e..c18459cea6f41867426c8aab8dedc5e2d4bf0cab 100644 (file)
@@ -520,7 +520,7 @@ static int fanotify_handle_event(struct fsnotify_group *group,
        BUILD_BUG_ON(FAN_OPEN_EXEC != FS_OPEN_EXEC);
        BUILD_BUG_ON(FAN_OPEN_EXEC_PERM != FS_OPEN_EXEC_PERM);
 
-       BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 20);
+       BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 19);
 
        mask = fanotify_group_event_mask(group, iter_info, mask, data,
                                         data_type);
index 8e4f1ace467c1a25db933e33bce9de47d6995a26..1de77f1a600b20eac9414fd2d4716396b97de101 100644 (file)
@@ -275,7 +275,6 @@ static ssize_t dlmfs_file_write(struct file *filp,
                                loff_t *ppos)
 {
        int bytes_left;
-       ssize_t writelen;
        char *lvb_buf;
        struct inode *inode = file_inode(filp);
 
@@ -285,32 +284,30 @@ static ssize_t dlmfs_file_write(struct file *filp,
        if (*ppos >= i_size_read(inode))
                return -ENOSPC;
 
+       /* don't write past the lvb */
+       if (count > i_size_read(inode) - *ppos)
+               count = i_size_read(inode) - *ppos;
+
        if (!count)
                return 0;
 
        if (!access_ok(buf, count))
                return -EFAULT;
 
-       /* don't write past the lvb */
-       if ((count + *ppos) > i_size_read(inode))
-               writelen = i_size_read(inode) - *ppos;
-       else
-               writelen = count - *ppos;
-
-       lvb_buf = kmalloc(writelen, GFP_NOFS);
+       lvb_buf = kmalloc(count, GFP_NOFS);
        if (!lvb_buf)
                return -ENOMEM;
 
-       bytes_left = copy_from_user(lvb_buf, buf, writelen);
-       writelen -= bytes_left;
-       if (writelen)
-               user_dlm_write_lvb(inode, lvb_buf, writelen);
+       bytes_left = copy_from_user(lvb_buf, buf, count);
+       count -= bytes_left;
+       if (count)
+               user_dlm_write_lvb(inode, lvb_buf, count);
 
        kfree(lvb_buf);
 
-       *ppos = *ppos + writelen;
-       mlog(0, "wrote %zd bytes\n", writelen);
-       return writelen;
+       *ppos = *ppos + count;
+       mlog(0, "wrote %zu bytes\n", count);
+       return count;
 }
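
The old dlmfs_file_write() computed writelen = count - *ppos, mixing a byte count with a file offset; the fix clamps count to the space left in the lvb before any allocation or copy. A small standalone sketch of the clamp (i_size and the copy are modeled with plain variables):

/* Hedged sketch of the dlmfs write clamp; i_size and the copy helper
 * are modeled with plain variables, not kernel calls. */
#include <stddef.h>
#include <stdio.h>

static size_t clamp_write(size_t count, long long pos, long long i_size)
{
        if (pos >= i_size)
                return 0;       /* the kernel returns -ENOSPC here */
        if (count > (size_t)(i_size - pos))
                count = (size_t)(i_size - pos); /* don't write past the lvb */
        return count;
}

int main(void)
{
        /* a 64-byte lvb, writing 100 bytes at offset 10 -> 54 bytes */
        printf("%zu\n", clamp_write(100, 10, 64));
        return 0;
}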
 
 static void dlmfs_init_once(void *foo)
index 475c61f53f0fe43b7b8fbd8cfa72c3b6102cbdeb..ed5c1078919ccbc44b6f4f099744b07593aa5ddc 100644 (file)
@@ -783,6 +783,9 @@ static struct ovl_fh *ovl_fid_to_fh(struct fid *fid, int buflen, int fh_type)
        if (fh_type != OVL_FILEID_V0)
                return ERR_PTR(-EINVAL);
 
+       if (buflen <= OVL_FH_WIRE_OFFSET)
+               return ERR_PTR(-EINVAL);
+
        fh = kzalloc(buflen, GFP_KERNEL);
        if (!fh)
                return ERR_PTR(-ENOMEM);
index b0d42ece4d7ccc470534576817278f8cdff0f08c..981f11ec51bc64a4c491fb0d6cfc98f306279480 100644 (file)
@@ -58,6 +58,24 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
                if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
                        attr->ia_valid &= ~ATTR_MODE;
 
+               /*
+                * We might have to translate ovl file into real file object
+                * once use cases emerge.  For now, simply don't let underlying
+                * filesystem rely on attr->ia_file.
+                */
+               attr->ia_valid &= ~ATTR_FILE;
+
+               /*
+                * If open(O_TRUNC) is done, VFS calls ->setattr with ATTR_OPEN
+                * set.  Overlayfs does not pass O_TRUNC flag to underlying
+                * filesystem during open -> do not pass ATTR_OPEN.  This
+                * disables optimization in fuse which assumes open(O_TRUNC)
+                * already set file size to 0.  But we never passed O_TRUNC to
+                * fuse.  So by clearing ATTR_OPEN, fuse will be forced to send
+                * setattr request to server.
+                */
+               attr->ia_valid &= ~ATTR_OPEN;
+
                inode_lock(upperdentry->d_inode);
                old_cred = ovl_override_creds(dentry->d_sb);
                err = notify_change(upperdentry, attr, NULL);
index 49f6d7ff21394f5e15e440ade00ba3363e27cf68..1106137c747a3aab8f8340751eb713d6d936797f 100644 (file)
@@ -261,14 +261,13 @@ static int propagate_one(struct mount *m)
        child = copy_tree(last_source, last_source->mnt.mnt_root, type);
        if (IS_ERR(child))
                return PTR_ERR(child);
+       read_seqlock_excl(&mount_lock);
        mnt_set_mountpoint(m, mp, child);
+       if (m->mnt_master != dest_master)
+               SET_MNT_MARK(m->mnt_master);
+       read_sequnlock_excl(&mount_lock);
        last_dest = m;
        last_source = child;
-       if (m->mnt_master != dest_master) {
-               read_seqlock_excl(&mount_lock);
-               SET_MNT_MARK(m->mnt_master);
-               read_sequnlock_excl(&mount_lock);
-       }
        hlist_add_head(&child->mnt_hash, list);
        return count_mounts(m->mnt_ns, child);
 }
index 4735defc46ee6a9912d4632f5d041f4d76371a30..4e53efbd621dbbae6818ba0520c9564c2d224c96 100644 (file)
@@ -1118,6 +1118,10 @@ long do_splice(struct file *in, loff_t __user *off_in,
        loff_t offset;
        long ret;
 
+       if (unlikely(!(in->f_mode & FMODE_READ) ||
+                    !(out->f_mode & FMODE_WRITE)))
+               return -EBADF;
+
        ipipe = get_pipe_info(in);
        opipe = get_pipe_info(out);
 
@@ -1125,12 +1129,6 @@ long do_splice(struct file *in, loff_t __user *off_in,
                if (off_in || off_out)
                        return -ESPIPE;
 
-               if (!(in->f_mode & FMODE_READ))
-                       return -EBADF;
-
-               if (!(out->f_mode & FMODE_WRITE))
-                       return -EBADF;
-
                /* Splicing to self would be fun, but... */
                if (ipipe == opipe)
                        return -EINVAL;
@@ -1153,9 +1151,6 @@ long do_splice(struct file *in, loff_t __user *off_in,
                        offset = out->f_pos;
                }
 
-               if (unlikely(!(out->f_mode & FMODE_WRITE)))
-                       return -EBADF;
-
                if (unlikely(out->f_flags & O_APPEND))
                        return -EINVAL;
 
@@ -1440,15 +1435,11 @@ SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in,
        error = -EBADF;
        in = fdget(fd_in);
        if (in.file) {
-               if (in.file->f_mode & FMODE_READ) {
-                       out = fdget(fd_out);
-                       if (out.file) {
-                               if (out.file->f_mode & FMODE_WRITE)
-                                       error = do_splice(in.file, off_in,
-                                                         out.file, off_out,
-                                                         len, flags);
-                               fdput(out);
-                       }
+               out = fdget(fd_out);
+               if (out.file) {
+                       error = do_splice(in.file, off_in, out.file, off_out,
+                                         len, flags);
+                       fdput(out);
                }
                fdput(in);
        }
@@ -1503,7 +1494,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
         * Check pipe occupancy without the inode lock first. This function
         * is speculative anyway, so missing one is ok.
         */
-       if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
+       if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage))
                return 0;
 
        ret = 0;
@@ -1770,6 +1761,10 @@ static long do_tee(struct file *in, struct file *out, size_t len,
        struct pipe_inode_info *opipe = get_pipe_info(out);
        int ret = -EINVAL;
 
+       if (unlikely(!(in->f_mode & FMODE_READ) ||
+                    !(out->f_mode & FMODE_WRITE)))
+               return -EBADF;
+
        /*
         * Duplicate the contents of ipipe to opipe without actually
         * copying the data.
@@ -1795,7 +1790,7 @@ static long do_tee(struct file *in, struct file *out, size_t len,
 
 SYSCALL_DEFINE4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags)
 {
-       struct fd in;
+       struct fd in, out;
        int error;
 
        if (unlikely(flags & ~SPLICE_F_ALL))
@@ -1807,14 +1802,10 @@ SYSCALL_DEFINE4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags)
        error = -EBADF;
        in = fdget(fdin);
        if (in.file) {
-               if (in.file->f_mode & FMODE_READ) {
-                       struct fd out = fdget(fdout);
-                       if (out.file) {
-                               if (out.file->f_mode & FMODE_WRITE)
-                                       error = do_tee(in.file, out.file,
-                                                       len, flags);
-                               fdput(out);
-                       }
+               out = fdget(fdout);
+               if (out.file) {
+                       error = do_tee(in.file, out.file, len, flags);
+                       fdput(out);
                }
                fdput(in);
        }
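
With the FMODE_READ/FMODE_WRITE checks hoisted into do_splice() and do_tee(), every caller, the syscalls here and io_uring's path above, gets the same -EBADF behavior, and the syscall wrappers shrink to plain fdget()/fdput() pairs. A minimal sketch of the centralized check (struct file and the mode bits are reduced stand-ins):

/* Hedged sketch of the centralized mode check in do_splice()/do_tee();
 * struct file and the mode bits are minimal stand-ins. */
#include <errno.h>
#include <stdio.h>

#define FMODE_READ  (1u << 0)
#define FMODE_WRITE (1u << 1)

struct file { unsigned f_mode; };

static long do_tee_sketch(struct file *in, struct file *out)
{
        /* one check for all callers: syscall and io_uring alike */
        if (!(in->f_mode & FMODE_READ) || !(out->f_mode & FMODE_WRITE))
                return -EBADF;
        return 0;
}

int main(void)
{
        struct file rd = { FMODE_READ }, wr = { FMODE_WRITE };

        printf("%ld\n", do_tee_sketch(&rd, &wr));       /* 0 */
        printf("%ld\n", do_tee_sketch(&wr, &rd));       /* -EBADF */
        return 0;
}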
index cd352530eca906eb64d722592c85e2fa3ef02886..a288cd60d2aed3f58f442a2a768c8ac785c015ec 100644 (file)
@@ -1302,8 +1302,8 @@ int get_tree_bdev(struct fs_context *fc,
        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (bdev->bd_fsfreeze_count > 0) {
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
-               blkdev_put(bdev, mode);
                warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
+               blkdev_put(bdev, mode);
                return -EBUSY;
        }
 
index 8cdbd53d780ca76af5d7fb1ee5d670decec07dc6..f985a3fbbb36a15e202a7b31c80dff518b5651cf 100644 (file)
@@ -79,13 +79,9 @@ int ubifs_prepare_auth_node(struct ubifs_info *c, void *node,
                             struct shash_desc *inhash)
 {
        struct ubifs_auth_node *auth = node;
-       u8 *hash;
+       u8 hash[UBIFS_HASH_ARR_SZ];
        int err;
 
-       hash = kmalloc(crypto_shash_descsize(c->hash_tfm), GFP_NOFS);
-       if (!hash)
-               return -ENOMEM;
-
        {
                SHASH_DESC_ON_STACK(hash_desc, c->hash_tfm);
 
@@ -94,21 +90,16 @@ int ubifs_prepare_auth_node(struct ubifs_info *c, void *node,
 
                err = crypto_shash_final(hash_desc, hash);
                if (err)
-                       goto out;
+                       return err;
        }
 
        err = ubifs_hash_calc_hmac(c, hash, auth->hmac);
        if (err)
-               goto out;
+               return err;
 
        auth->ch.node_type = UBIFS_AUTH_NODE;
        ubifs_prepare_node(c, auth, ubifs_auth_node_sz(c), 0);
-
-       err = 0;
-out:
-       kfree(hash);
-
-       return err;
+       return 0;
 }
 
 static struct shash_desc *ubifs_get_desc(const struct ubifs_info *c,
index 743928efffc124c5bd40af29eb30c394056af029..49fe062ce45ec2f0c675582d0c0bcf9ac3261995 100644 (file)
@@ -1375,7 +1375,6 @@ int ubifs_update_time(struct inode *inode, struct timespec64 *time,
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        struct ubifs_budget_req req = { .dirtied_ino = 1,
                        .dirtied_ino_d = ALIGN(ui->data_len, 8) };
-       int iflags = I_DIRTY_TIME;
        int err, release;
 
        if (!IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT))
@@ -1393,11 +1392,8 @@ int ubifs_update_time(struct inode *inode, struct timespec64 *time,
        if (flags & S_MTIME)
                inode->i_mtime = *time;
 
-       if (!(inode->i_sb->s_flags & SB_LAZYTIME))
-               iflags |= I_DIRTY_SYNC;
-
        release = ui->dirty;
-       __mark_inode_dirty(inode, iflags);
+       __mark_inode_dirty(inode, I_DIRTY_SYNC);
        mutex_unlock(&ui->ui_mutex);
        if (release)
                ubifs_release_budget(c, &req);
index b28ac4dfb4070aef4203cfee08fcb5e65777007f..01fcf79750472b085cc5f6a30cbf85bc1304e08f 100644 (file)
@@ -601,18 +601,12 @@ static int authenticate_sleb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
        struct ubifs_scan_node *snod;
        int n_nodes = 0;
        int err;
-       u8 *hash, *hmac;
+       u8 hash[UBIFS_HASH_ARR_SZ];
+       u8 hmac[UBIFS_HMAC_ARR_SZ];
 
        if (!ubifs_authenticated(c))
                return sleb->nodes_cnt;
 
-       hash = kmalloc(crypto_shash_descsize(c->hash_tfm), GFP_NOFS);
-       hmac = kmalloc(c->hmac_desc_len, GFP_NOFS);
-       if (!hash || !hmac) {
-               err = -ENOMEM;
-               goto out;
-       }
-
        list_for_each_entry(snod, &sleb->nodes, list) {
 
                n_nodes++;
@@ -662,9 +656,6 @@ static int authenticate_sleb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
                err = 0;
        }
 out:
-       kfree(hash);
-       kfree(hmac);
-
        return err ? err : n_nodes - n_not_auth;
 }
 
index 675e269893765a0d8060da04949144236ba48669..8fe03b4a0d2b038e99b4920e8539e1f5363e55d8 100644 (file)
@@ -164,7 +164,7 @@ static int vboxsf_fill_super(struct super_block *sb, struct fs_context *fc)
                goto fail_free;
        }
 
-       err = super_setup_bdi_name(sb, "vboxsf-%s.%d", fc->source, sbi->bdi_id);
+       err = super_setup_bdi_name(sb, "vboxsf-%d", sbi->bdi_id);
        if (err)
                goto fail_free;
 
index e13265e65871f4fc1f09e13bb410c3f44d2d6882..91608d9bfc6aad9b346d8e4622590e99339d8a6d 100644 (file)
@@ -876,6 +876,9 @@ int simple_xattr_set(struct simple_xattrs *xattrs, const char *name,
        struct simple_xattr *new_xattr = NULL;
        int err = 0;
 
+       if (removed_size)
+               *removed_size = -1;
+
        /* value == NULL means remove */
        if (value) {
                new_xattr = simple_xattr_alloc(value, size);
@@ -914,9 +917,6 @@ int simple_xattr_set(struct simple_xattrs *xattrs, const char *name,
                list_add(&new_xattr->list, &xattrs->head);
                xattr = NULL;
        }
-
-       if (removed_size)
-               *removed_size = -1;
 out:
        spin_unlock(&xattrs->lock);
        if (xattr) {
index 2388737395502101e308ec7c77b51b7f422dc194..5aa8705df87e730dcc454574ff5a133b0c1f939e 100644 (file)
@@ -48,7 +48,7 @@
   #ifdef CONFIG_NEED_MULTIPLE_NODES
     #define cpumask_of_node(node)      ((node) == 0 ? cpu_online_mask : cpu_none_mask)
   #else
-    #define cpumask_of_node(node)      ((void)node, cpu_online_mask)
+    #define cpumask_of_node(node)      ((void)(node), cpu_online_mask)
   #endif
 #endif
 #ifndef pcibus_to_node
index 99134d4f35eb7cc988e975992c1a2bd34ff6949d..320f8112a0f84e3bf3aebf248e374ce2be8d628d 100644 (file)
@@ -48,7 +48,7 @@ struct videomode;
  * @MODE_HSYNC: hsync out of range
  * @MODE_VSYNC: vsync out of range
  * @MODE_H_ILLEGAL: mode has illegal horizontal timings
- * @MODE_V_ILLEGAL: mode has illegal horizontal timings
+ * @MODE_V_ILLEGAL: mode has illegal vertical timings
  * @MODE_BAD_WIDTH: requires an unsupported linepitch
  * @MODE_NOMODE: no mode with a matching name
  * @MODE_NO_INTERLACE: interlaced mode not supported
index 26f0ecf401eaa5787a5eba474dd62942e629ed11..0bbfd647f5c6dec3c12c52c621a94cf57975ebc5 100644 (file)
@@ -65,6 +65,7 @@ struct amba_device {
        struct device           dev;
        struct resource         res;
        struct clk              *pclk;
+       struct device_dma_parameters dma_parms;
        unsigned int            periphid;
        unsigned int            cid;
        struct amba_cs_uci_id   uci;
index ee577a83cfe67a8af1ed3ed83ffa7ae345eb3787..7367150f962a333b7f689c84e3ab2223a7281b16 100644 (file)
@@ -219,6 +219,7 @@ struct backing_dev_info {
        wait_queue_head_t wb_waitq;
 
        struct device *dev;
+       char dev_name[64];
        struct device *owner;
 
        struct timer_list laptop_mode_wb_timer;
index f88197c1ffc2db0fcdbc5b9ab0c9d8b0f95fec43..c9ad5c3b7b4b263dadc3a54d4e58cf09eaa2973d 100644 (file)
@@ -505,13 +505,6 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi)
                                  (1 << WB_async_congested));
 }
 
-extern const char *bdi_unknown_name;
-
-static inline const char *bdi_dev_name(struct backing_dev_info *bdi)
-{
-       if (!bdi || !bdi->dev)
-               return bdi_unknown_name;
-       return dev_name(bdi->dev);
-}
+const char *bdi_dev_name(struct backing_dev_info *bdi);
 
 #endif /* _LINUX_BACKING_DEV_H */
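
bdi_dev_name() moves out of line, and struct backing_dev_info (earlier in this header group) gains a dev_name[64] buffer; presumably the new implementation snapshots the device name at registration so callers get a stable string even after the device is unregistered. A sketch built on that assumption, not on code shown in this patch:

/* Sketch of an out-of-line bdi_dev_name() built on the new dev_name[]
 * field; this is an assumption about the implementation, not code from
 * the patch itself. */
#include <stdio.h>

struct backing_dev_info {
        char dev_name[64];      /* assumed: snapshot taken at registration */
};

static const char bdi_unknown_name[] = "(unknown)";

static const char *bdi_dev_name_sketch(const struct backing_dev_info *bdi)
{
        if (!bdi || !bdi->dev_name[0])
                return bdi_unknown_name;
        return bdi->dev_name;   /* stays valid after the device is gone */
}

int main(void)
{
        struct backing_dev_info bdi = { .dev_name = "8:0" };

        puts(bdi_dev_name_sketch(&bdi));        /* 8:0 */
        puts(bdi_dev_name_sketch(NULL));        /* (unknown) */
        return 0;
}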
index 6462c54478726c3f70cbb5099471a61a7fa9fa50..f4b77018c625a2156842fedbd68063b000ce0e0c 100644 (file)
 #define BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN        (1 << 0)
 #define BCM54810_SHD_CLK_CTL                   0x3
 #define BCM54810_SHD_CLK_CTL_GTXCLK_EN         (1 << 9)
+#define BCM54810_SHD_SCR3_TRDDAPD              0x0100
 
 /* BCM54612E Registers */
 #define BCM54612E_EXP_SPARE0           (MII_BCM54XX_EXP_SEL_ETC + 0x34)
index 034b0a644efcc49ececa0fcc9b6c95ecfd4daee2..448c91bf543b7394bdbcde5c8b8458df131c4c33 100644 (file)
@@ -356,4 +356,10 @@ static inline void *offset_to_ptr(const int *off)
 /* &a[0] degrades to a pointer: a different type from an array */
 #define __must_be_array(a)     BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
 
+/*
+ * This is needed in functions which generate the stack canary, see
+ * arch/x86/kernel/smpboot.c::start_secondary() for an example.
+ */
+#define prevent_tail_call_optimization()       mb()
+
 #endif /* __LINUX_COMPILER_H */
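
prevent_tail_call_optimization() is a full barrier placed at the end of functions that establish the stack canary, so the compiler cannot turn their final call into a tail call that returns through a frame with a different canary. A hedged usage sketch, with mb() modeled as an empty asm memory clobber so it builds in userspace:

/* Hedged usage sketch: a compiler barrier at the end of a function
 * keeps the last call from being turned into a tail call. mb() is
 * modeled here with an empty asm memory clobber. */
#include <stdio.h>

#define prevent_tail_call_optimization() __asm__ volatile("" ::: "memory")

static void run_next_stage(void)
{
        puts("secondary CPU enters its idle loop");
}

static void start_secondary_sketch(void)
{
        /* imagine the stack canary being initialized here */
        run_next_stage();
        /* without this, the call above could become a tail call and
         * return through a frame with a mismatched canary */
        prevent_tail_call_optimization();
}

int main(void)
{
        start_secondary_sketch();
        return 0;
}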
index 4f005d95ce882bcc563c99194f8698d776f32fb3..8537e9282a658a5edcb06c928aff0b42e60f7284 100644 (file)
@@ -521,6 +521,15 @@ struct cper_sec_pcie {
        u8      aer_info[96];
 };
 
+/* Firmware Error Record Reference, UEFI v2.7 sec N.2.10  */
+struct cper_sec_fw_err_rec_ref {
+       u8 record_type;
+       u8 revision;
+       u8 reserved[6];
+       u64 record_identifier;
+       guid_t record_identifier_guid;
+};
+
 /* Reset to default packing */
 #pragma pack()
 
index fa35b52e0002e03a4a2476f2f854daade6e6742d..9a72214496e58931d8632f98d6e11b28a5e4939e 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #include <linux/fs.h>
-#include <linux/bpf-cgroup.h>
 
 #define DEVCG_ACC_MKNOD 1
 #define DEVCG_ACC_READ  2
 #define DEVCG_DEV_CHAR  2
 #define DEVCG_DEV_ALL   4  /* this represents all devices */
 
-#ifdef CONFIG_CGROUP_DEVICE
-int devcgroup_check_permission(short type, u32 major, u32 minor,
-                              short access);
-#else
-static inline int devcgroup_check_permission(short type, u32 major, u32 minor,
-                                            short access)
-{ return 0; }
-#endif
 
 #if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
+int devcgroup_check_permission(short type, u32 major, u32 minor,
+                              short access);
 static inline int devcgroup_inode_permission(struct inode *inode, int mask)
 {
        short type, access = 0;
@@ -61,6 +54,9 @@ static inline int devcgroup_inode_mknod(int mode, dev_t dev)
 }
 
 #else
+static inline int devcgroup_check_permission(short type, u32 major, u32 minor,
+                              short access)
+{ return 0; }
 static inline int devcgroup_inode_permission(struct inode *inode, int mask)
 { return 0; }
 static inline int devcgroup_inode_mknod(int mode, dev_t dev)
index 1ade486fc2bbcf99580c82c531ece8622b2077aa..57bcef6f988a2d237f8e986bc9cefd01c639340d 100644 (file)
@@ -329,13 +329,12 @@ struct dma_buf {
 
 /**
  * struct dma_buf_attach_ops - importer operations for an attachment
- * @move_notify: [optional] notification that the DMA-buf is moving
  *
  * Attachment operations implemented by the importer.
  */
 struct dma_buf_attach_ops {
        /**
-        * @move_notify
+        * @move_notify: [optional] notification that the DMA-buf is moving
         *
         * If this callback is provided the framework can avoid pinning the
         * backing store while mappings exists.
index 21065c04c4ac3bb380398bc45a6a7f28cdaa1d00..e1c03339918f99b2d48ccf245ec177baec04599d 100644 (file)
@@ -83,9 +83,9 @@ enum dma_transfer_direction {
 /**
  * Interleaved Transfer Request
  * ----------------------------
- * A chunk is collection of contiguous bytes to be transfered.
+ * A chunk is a collection of contiguous bytes to be transferred.
  * The gap(in bytes) between two chunks is called inter-chunk-gap(ICG).
- * ICGs may or maynot change between chunks.
+ * ICGs may or may not change between chunks.
  * A FRAME is the smallest series of contiguous {chunk,icg} pairs,
  *  that when repeated an integral number of times, specifies the transfer.
  * A transfer template is specification of a Frame, the number of times
@@ -341,13 +341,11 @@ struct dma_chan {
  * @chan: driver channel device
  * @device: sysfs device
  * @dev_id: parent dma_device dev_id
- * @idr_ref: reference count to gate release of dma_device dev_id
  */
 struct dma_chan_dev {
        struct dma_chan *chan;
        struct device device;
        int dev_id;
-       atomic_t *idr_ref;
 };
 
 /**
@@ -835,6 +833,8 @@ struct dma_device {
        int dev_id;
        struct device *dev;
        struct module *owner;
+       struct ida chan_ida;
+       struct mutex chan_mutex;        /* to protect chan_ida */
 
        u32 src_addr_widths;
        u32 dst_addr_widths;
@@ -1069,7 +1069,7 @@ static inline int dmaengine_terminate_all(struct dma_chan *chan)
  * dmaengine_synchronize() needs to be called before it is safe to free
  * any memory that is accessed by previously submitted descriptors or before
  * freeing any resources accessed from within the completion callback of any
- * perviously submitted descriptors.
+ * previously submitted descriptors.
  *
  * This function can be called from atomic context as well as from within a
  * complete callback of a descriptor submitted on the same channel.
@@ -1091,7 +1091,7 @@ static inline int dmaengine_terminate_async(struct dma_chan *chan)
  *
  * Synchronizes to the DMA channel termination to the current context. When this
  * function returns it is guaranteed that all transfers for previously issued
- * descriptors have stopped and and it is safe to free the memory assoicated
+ * descriptors have stopped and it is safe to free the memory associated
  * with them. Furthermore it is guaranteed that all complete callback functions
  * for a previously submitted descriptor have finished running and it is safe to
  * free resources accessed from within the complete callbacks.
index 251f1f783cdf70af1977b48a512f492daf0935e9..9430d01c0c3d339f9402935d0d164223070c2602 100644 (file)
@@ -1245,4 +1245,6 @@ struct linux_efi_memreserve {
 
 void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size);
 
+char *efi_systab_show_arch(char *str);
+
 #endif /* _LINUX_EFI_H */
index 3049a6c06d9e1151e4fe5317f737faee0a2137c7..b79fa9bb7359531f294dafb7550bd9fc0208987c 100644 (file)
@@ -47,8 +47,7 @@
  * Directory entry modification events - reported only to directory
  * where entry is modified and not to a watching parent.
  */
-#define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE | \
-                                FAN_DIR_MODIFY)
+#define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE)
 
 /* Events that can only be reported with data type FSNOTIFY_EVENT_INODE */
 #define FANOTIFY_INODE_EVENTS  (FANOTIFY_DIRENT_EVENTS | \
index 4f6f59b4f22a807e55e479468df1f1cb7068cfdc..45cc10cdf6ddd760aeadc92d255f65b132ed67cc 100644 (file)
@@ -983,7 +983,7 @@ struct file_handle {
        __u32 handle_bytes;
        int handle_type;
        /* file identifier */
-       unsigned char f_handle[0];
+       unsigned char f_handle[];
 };
 
 static inline struct file *get_file(struct file *f)
index db95244a62d44db0b7d921e4a3f66ec5cf2acb39..ab4bd15cbcdb33842a5d0eba53bf536c9558c8ed 100644 (file)
@@ -210,6 +210,29 @@ struct ftrace_ops {
 #endif
 };
 
+extern struct ftrace_ops __rcu *ftrace_ops_list;
+extern struct ftrace_ops ftrace_list_end;
+
+/*
+ * Traverse the ftrace_global_list, invoking all entries.  The reason that we
+ * can use rcu_dereference_raw_check() is that elements removed from this list
+ * are simply leaked, so there is no need to interact with a grace-period
+ * mechanism.  The rcu_dereference_raw_check() calls are needed to handle
+ * concurrent insertions into the ftrace_global_list.
+ *
+ * Silly Alpha and silly pointer-speculation compiler optimizations!
+ */
+#define do_for_each_ftrace_op(op, list)                        \
+       op = rcu_dereference_raw_check(list);                   \
+       do
+
+/*
+ * Optimized for just a single item in the list (as that is the normal case).
+ */
+#define while_for_each_ftrace_op(op)                           \
+       while (likely(op = rcu_dereference_raw_check((op)->next)) &&    \
+              unlikely((op) != &ftrace_list_end))
+
 /*
  * Type of the current tracing.
  */
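
The do_for_each_ftrace_op()/while_for_each_ftrace_op() pair now lives in the header so other tracing code can walk the ops list. A standalone sketch of how a caller iterates with them; the RCU accessors and likely()/unlikely() hints are reduced to plain loads here:

/* Hedged sketch of walking an ftrace_ops-style list with the paired
 * macros; rcu_dereference is reduced to a plain load for illustration. */
#include <stdio.h>

struct ftrace_ops { const char *name; struct ftrace_ops *next; };

static struct ftrace_ops ftrace_list_end = { "end", NULL };
static struct ftrace_ops op2 = { "op2", &ftrace_list_end };
static struct ftrace_ops op1 = { "op1", &op2 };
static struct ftrace_ops *ftrace_ops_list = &op1;

#define do_for_each_ftrace_op(op, list) \
        op = (list);                    \
        do

#define while_for_each_ftrace_op(op)    \
        while ((op = (op)->next) && (op) != &ftrace_list_end)

int main(void)
{
        struct ftrace_ops *op;

        do_for_each_ftrace_op(op, ftrace_ops_list) {
                puts(op->name);         /* visits op1, then op2 */
        } while_for_each_ftrace_op(op);

        return 0;
}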
index 62d216ff1097940c3d28626213d4e581589637ef..c230b4e70d759283100a4e496907c3245722d235 100644 (file)
@@ -17,9 +17,12 @@ enum host1x_class {
        HOST1X_CLASS_GR3D = 0x60,
 };
 
+struct host1x;
 struct host1x_client;
 struct iommu_group;
 
+u64 host1x_get_dma_mask(struct host1x *host1x);
+
 /**
  * struct host1x_client_ops - host1x client operations
  * @init: host1x client initialization code
index c5a977320f825958ca0b114491ba054e1730beb9..98ef73b7c8fd945b840b9225170d8661e195b707 100644 (file)
@@ -29,7 +29,7 @@ struct i2c_mux_core {
 
        int num_adapters;
        int max_adapters;
-       struct i2c_adapter *adapter[0];
+       struct i2c_adapter *adapter[];
 };
 
 struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent,
index 45d36ba4826bd8736b12375b3262f1f7980be50b..49d29054e6571175eb8dce9b908785d3b657544a 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * i2c.h - definitions for the Linux i2c bus interface
  * Copyright (C) 1995-2000 Simon G. Vogl
- * Copyright (C) 2013-2019 Wolfram Sang <wsa@the-dreams.de>
+ * Copyright (C) 2013-2019 Wolfram Sang <wsa@kernel.org>
  *
  * With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and
  * Frodo Looijaard <frodol@dds.nl>
index 16268ef1cbccc4a4355062eeed0b329a94a0c8d8..5d3e48d020339b0e33d4edf3f974ca5273dcf983 100644 (file)
@@ -2047,7 +2047,7 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
 }
 
 /* HE Operation defines */
-#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK           0x00000003
+#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK           0x00000007
 #define IEEE80211_HE_OPERATION_TWT_REQUIRED                    0x00000008
 #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK              0x00003ff0
 #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET            4
index 79f918c6e8c5b94ef75c5e6f285bc969160c4e2c..906da5fc06e0f4aa66320cdf854fa05c5caf7ad6 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * public include for LM8333 keypad driver - same license as driver
- * Copyright (C) 2012 Wolfram Sang, Pengutronix <w.sang@pengutronix.de>
+ * Copyright (C) 2012 Wolfram Sang, Pengutronix <kernel@pengutronix.de>
  */
 
 #ifndef _LM8333_H
index 980234ae0312259e938f0dc5c63dc4209fa7aed8..4100bd224f5c5211df76eb7450469634f068a026 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/iommu.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/dmar.h>
+#include <linux/ioasid.h>
 
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
@@ -42,6 +43,9 @@
 #define DMA_FL_PTE_PRESENT     BIT_ULL(0)
 #define DMA_FL_PTE_XD          BIT_ULL(63)
 
+#define ADDR_WIDTH_5LEVEL      (57)
+#define ADDR_WIDTH_4LEVEL      (48)
+
 #define CONTEXT_TT_MULTI_LEVEL 0
 #define CONTEXT_TT_DEV_IOTLB   1
 #define CONTEXT_TT_PASS_THROUGH 2
 #define ecap_smpwc(e)          (((e) >> 48) & 0x1)
 #define ecap_flts(e)           (((e) >> 47) & 0x1)
 #define ecap_slts(e)           (((e) >> 46) & 0x1)
+#define ecap_vcs(e)            (((e) >> 44) & 0x1)
 #define ecap_smts(e)           (((e) >> 43) & 0x1)
 #define ecap_dit(e)            ((e >> 41) & 0x1)
 #define ecap_pasid(e)          ((e >> 40) & 0x1)
 #define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
 #define ecap_sc_support(e)     ((e >> 7) & 0x1) /* Snooping Control */
 
+/* Virtual command interface capability */
+#define vccap_pasid(v)         (((v) & DMA_VCS_PAS)) /* PASID allocation */
+
 /* IOTLB_REG */
 #define DMA_TLB_FLUSH_GRANU_OFFSET  60
 #define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
 
 /* PRS_REG */
 #define DMA_PRS_PPR    ((u32)1)
+#define DMA_PRS_PRO    ((u32)2)
+
+#define DMA_VCS_PAS    ((u64)1)
 
 #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts)                    \
 do {                                                                   \
@@ -324,6 +335,8 @@ enum {
 
 #define QI_IWD_STATUS_DATA(d)  (((u64)d) << 32)
 #define QI_IWD_STATUS_WRITE    (((u64)1) << 5)
+#define QI_IWD_FENCE           (((u64)1) << 6)
+#define QI_IWD_PRQ_DRAIN       (((u64)1) << 7)
 
 #define QI_IOTLB_DID(did)      (((u64)did) << 16)
 #define QI_IOTLB_DR(dr)        (((u64)dr) << 7)
@@ -331,7 +344,7 @@ enum {
 #define QI_IOTLB_GRAN(gran)    (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
 #define QI_IOTLB_ADDR(addr)    (((u64)addr) & VTD_PAGE_MASK)
 #define QI_IOTLB_IH(ih)                (((u64)ih) << 6)
-#define QI_IOTLB_AM(am)                (((u8)am))
+#define QI_IOTLB_AM(am)                (((u8)am) & 0x3f)
 
 #define QI_CC_FM(fm)           (((u64)fm) << 48)
 #define QI_CC_SID(sid)         (((u64)sid) << 32)
@@ -350,16 +363,21 @@ enum {
 #define QI_PC_DID(did)         (((u64)did) << 16)
 #define QI_PC_GRAN(gran)       (((u64)gran) << 4)
 
-#define QI_PC_ALL_PASIDS       (QI_PC_TYPE | QI_PC_GRAN(0))
-#define QI_PC_PASID_SEL                (QI_PC_TYPE | QI_PC_GRAN(1))
+/* PASID cache invalidation granu */
+#define QI_PC_ALL_PASIDS       0
+#define QI_PC_PASID_SEL                1
 
 #define QI_EIOTLB_ADDR(addr)   ((u64)(addr) & VTD_PAGE_MASK)
 #define QI_EIOTLB_IH(ih)       (((u64)ih) << 6)
-#define QI_EIOTLB_AM(am)       (((u64)am))
+#define QI_EIOTLB_AM(am)       (((u64)am) & 0x3f)
 #define QI_EIOTLB_PASID(pasid)         (((u64)pasid) << 32)
 #define QI_EIOTLB_DID(did)     (((u64)did) << 16)
 #define QI_EIOTLB_GRAN(gran)   (((u64)gran) << 4)
 
+/* QI Dev-IOTLB inv granu */
+#define QI_DEV_IOTLB_GRAN_ALL          1
+#define QI_DEV_IOTLB_GRAN_PASID_SEL    0
+
 #define QI_DEV_EIOTLB_ADDR(a)  ((u64)(a) & VTD_PAGE_MASK)
 #define QI_DEV_EIOTLB_SIZE     (((u64)1) << 11)
 #define QI_DEV_EIOTLB_GLOB(g)  ((u64)g)
@@ -480,6 +498,23 @@ struct context_entry {
        u64 hi;
 };
 
+/* si_domain contains multiple devices */
+#define DOMAIN_FLAG_STATIC_IDENTITY            BIT(0)
+
+/*
+ * When VT-d works in the scalable mode, it allows DMA translation to
+ * happen through either first level or second level page table. This
+ * bit marks that the DMA translation for the domain goes through the
+ * first level page table, otherwise, it goes through the second level.
+ */
+#define DOMAIN_FLAG_USE_FIRST_LEVEL            BIT(1)
+
+/*
+ * Domain represents a virtual machine which demands iommu nested
+ * translation mode support.
+ */
+#define DOMAIN_FLAG_NESTING_MODE               BIT(2)
+
 struct dmar_domain {
        int     nid;                    /* node id */
 
@@ -529,6 +564,7 @@ struct intel_iommu {
        u64             reg_size; /* size of hw register set */
        u64             cap;
        u64             ecap;
+       u64             vccap;
        u32             gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
        raw_spinlock_t  register_lock; /* protect register handling */
        int             seq_id; /* sequence id of the iommu */
@@ -549,6 +585,8 @@ struct intel_iommu {
 #ifdef CONFIG_INTEL_IOMMU_SVM
        struct page_req_dsc *prq;
        unsigned char prq_name[16];    /* Name for PRQ interrupt */
+       struct completion prq_complete;
+       struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */
 #endif
        struct q_inval  *qi;            /* Queued invalidation info */
        u32 *iommu_state; /* Store iommu states between suspend and resume.*/
@@ -571,6 +609,7 @@ struct device_domain_info {
        struct list_head auxiliary_domains; /* auxiliary domains
                                             * attached to this device
                                             */
+       u32 segment;            /* PCI segment number */
        u8 bus;                 /* PCI bus number */
        u8 devfn;               /* PCI devfn number */
        u16 pfsid;              /* SRIOV physical function source ID */
@@ -595,6 +634,12 @@ static inline void __iommu_flush_cache(
                clflush_cache_range(addr, size);
 }
 
+/* Convert generic struct iommu_domain to private struct dmar_domain */
+static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
+{
+       return container_of(dom, struct dmar_domain, domain);
+}
+
 /*
  * 0: readable
  * 1: writable
@@ -653,9 +698,23 @@ extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
                          unsigned int size_order, u64 type);
 extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
                        u16 qdep, u64 addr, unsigned mask);
+
 void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
                     unsigned long npages, bool ih);
-extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
+
+void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+                             u32 pasid, u16 qdep, u64 addr,
+                             unsigned int size_order, u64 granu);
+void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu,
+                         int pasid);
+
+int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
+                  unsigned int count, unsigned long options);
+/*
+ * Options used in qi_submit_sync:
+ * QI_OPT_WAIT_DRAIN - Wait for PRQ drain completion, spec 6.5.2.8.
+ */
+#define QI_OPT_WAIT_DRAIN              BIT(0)
 
 extern int dmar_ir_support(void);
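
A minimal sketch (not part of this patch) of the reworked qi_submit_sync(): descriptors are now passed as an array with an explicit count, plus an options word, and QI_OPT_WAIT_DRAIN asks for the fence/drain behaviour on the trailing wait descriptor. The "iommu" and "did" values are assumed to come from the caller's context, and the real driver additionally sets the read/write drain bits based on capabilities:

static int example_flush_domain_iotlb(struct intel_iommu *iommu, u16 did)
{
	struct qi_desc desc = {};

	/* Domain-selective IOTLB invalidation for @did */
	desc.qw0 = QI_IOTLB_TYPE | QI_IOTLB_DID(did) |
		   QI_IOTLB_GRAN(DMA_TLB_DSI_FLUSH);

	/* One descriptor; drain in-flight page requests first. */
	return qi_submit_sync(iommu, &desc, 1, QI_OPT_WAIT_DRAIN);
}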
 
@@ -667,12 +726,19 @@ int for_each_device_domain(int (*fn)(struct device_domain_info *info,
 void iommu_flush_write_buffer(struct intel_iommu *iommu);
 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
 struct dmar_domain *find_domain(struct device *dev);
+struct device_domain_info *get_domain_info(struct device *dev);
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
 extern void intel_svm_check(struct intel_iommu *iommu);
 extern int intel_svm_enable_prq(struct intel_iommu *iommu);
 extern int intel_svm_finish_prq(struct intel_iommu *iommu);
-
+int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
+                         struct iommu_gpasid_bind_data *data);
+int intel_svm_unbind_gpasid(struct device *dev, int pasid);
+struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm,
+                                void *drvdata);
+void intel_svm_unbind(struct iommu_sva *handle);
+int intel_svm_get_pasid(struct iommu_sva *handle);
 struct svm_dev_ops;
 
 struct intel_svm_dev {
@@ -680,6 +746,8 @@ struct intel_svm_dev {
        struct rcu_head rcu;
        struct device *dev;
        struct svm_dev_ops *ops;
+       struct iommu_sva sva;
+       int pasid;
        int users;
        u16 did;
        u16 dev_iotlb:1;
@@ -689,9 +757,11 @@ struct intel_svm_dev {
 struct intel_svm {
        struct mmu_notifier notifier;
        struct mm_struct *mm;
+
        struct intel_iommu *iommu;
        int flags;
        int pasid;
+       int gpasid; /* In case the guest PASID differs from the host PASID */
        struct list_head devs;
        struct list_head list;
 };
index d7c403d0dd27d84ecdc8d8b094739e32b85968e2..c9e7e601950d6e08dc2cb91e18a410383484df87 100644 (file)
@@ -21,7 +21,6 @@ struct svm_dev_ops {
 #define SVM_REQ_EXEC   (1<<1)
 #define SVM_REQ_PRIV   (1<<0)
 
-
 /*
  * The SVM_FLAG_PRIVATE_PASID flag requests a PASID which is *not* the "main"
  * PASID for the current process. Even if a PASID already exists, a new one
@@ -44,90 +43,17 @@ struct svm_dev_ops {
  * do such IOTLB flushes automatically.
  */
 #define SVM_FLAG_SUPERVISOR_MODE       (1<<1)
-
-#ifdef CONFIG_INTEL_IOMMU_SVM
-
-/**
- * intel_svm_bind_mm() - Bind the current process to a PASID
- * @dev:       Device to be granted access
- * @pasid:     Address for allocated PASID
- * @flags:     Flags. Later for requesting supervisor mode, etc.
- * @ops:       Callbacks to device driver
- *
- * This function attempts to enable PASID support for the given device.
- * If the @pasid argument is non-%NULL, a PASID is allocated for access
- * to the MM of the current process.
- *
- * By using a %NULL value for the @pasid argument, this function can
- * be used to simply validate that PASID support is available for the
- * given device — i.e. that it is behind an IOMMU which has the
- * requisite support, and is enabled.
- *
- * Page faults are handled transparently by the IOMMU code, and there
- * should be no need for the device driver to be involved. If a page
- * fault cannot be handled (i.e. is an invalid address rather than
- * just needs paging in), then the page request will be completed by
- * the core IOMMU code with appropriate status, and the device itself
- * can then report the resulting fault to its driver via whatever
- * mechanism is appropriate.
- *
- * Multiple calls from the same process may result in the same PASID
- * being re-used. A reference count is kept.
- */
-extern int intel_svm_bind_mm(struct device *dev, int *pasid, int flags,
-                            struct svm_dev_ops *ops);
-
-/**
- * intel_svm_unbind_mm() - Unbind a specified PASID
- * @dev:       Device for which PASID was allocated
- * @pasid:     PASID value to be unbound
- *
- * This function allows a PASID to be retired when the device no
- * longer requires access to the address space of a given process.
- *
- * If the use count for the PASID in question reaches zero, the
- * PASID is revoked and may no longer be used by hardware.
- *
- * Device drivers are required to ensure that no access (including
- * page requests) is currently outstanding for the PASID in question,
- * before calling this function.
+/*
+ * The SVM_FLAG_GUEST_MODE flag is used when a PASID bind is for guest
+ * processes. Compared to the host bind, the primary differences are:
+ * 1. mm life cycle management
+ * 2. fault reporting
  */
-extern int intel_svm_unbind_mm(struct device *dev, int pasid);
-
-/**
- * intel_svm_is_pasid_valid() - check if pasid is valid
- * @dev:       Device for which PASID was allocated
- * @pasid:     PASID value to be checked
- *
- * This function checks if the specified pasid is still valid. A
- * valid pasid means the backing mm is still having a valid user.
- * For kernel callers init_mm is always valid. for other mm, if mm->mm_users
- * is non-zero, it is valid.
- *
- * returns -EINVAL if invalid pasid, 0 if pasid ref count is invalid
- * 1 if pasid is valid.
+#define SVM_FLAG_GUEST_MODE            (1<<2)
+/*
+ * The SVM_FLAG_GUEST_PASID flag is used when a guest has its own PASID space,
+ * which requires guest and host PASID translation in both directions.
  */
-extern int intel_svm_is_pasid_valid(struct device *dev, int pasid);
-
-#else /* CONFIG_INTEL_IOMMU_SVM */
-
-static inline int intel_svm_bind_mm(struct device *dev, int *pasid,
-                                   int flags, struct svm_dev_ops *ops)
-{
-       return -ENOSYS;
-}
-
-static inline int intel_svm_unbind_mm(struct device *dev, int pasid)
-{
-       BUG();
-}
-
-static inline int intel_svm_is_pasid_valid(struct device *dev, int pasid)
-{
-       return -EINVAL;
-}
-#endif /* CONFIG_INTEL_IOMMU_SVM */
-
-#define intel_svm_available(dev) (!intel_svm_bind_mm((dev), NULL, 0, NULL))
+#define SVM_FLAG_GUEST_PASID           (1<<3)
 
 #endif /* __INTEL_SVM_H__ */
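
A hedged sketch of the flow that replaces the removed intel_svm_bind_mm()/intel_svm_unbind_mm() API: drivers now go through the generic SVA entry points, which on VT-d land in intel_svm_bind()/intel_svm_unbind(). The device is assumed to be PASID-capable with SVA already enabled; the function name is illustrative:

static int example_enable_sva(struct device *dev)
{
	struct iommu_sva *sva;
	int pasid;

	/* Bind the current process's address space to the device */
	sva = iommu_sva_bind_device(dev, current->mm, NULL);
	if (IS_ERR(sva))
		return PTR_ERR(sva);

	pasid = iommu_sva_get_pasid(sva);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(sva);
		return -ENODEV;
	}

	/* ... program the PASID into the device and submit work ... */

	iommu_sva_unbind_device(sva);
	return 0;
}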
index 7cfd2dddb49da9634a5826048b0f797a13b889a9..4e3cd32f5b3773205c83b42441a9a6feb753ee38 100644 (file)
@@ -53,8 +53,6 @@ struct iommu_fault_event;
 
 typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
                        struct device *, unsigned long, int, void *);
-typedef int (*iommu_mm_exit_handler_t)(struct device *dev, struct iommu_sva *,
-                                      void *);
 typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);
 
 struct iommu_domain_geometry {
@@ -171,25 +169,6 @@ enum iommu_dev_features {
 
 #define IOMMU_PASID_INVALID    (-1U)
 
-/**
- * struct iommu_sva_ops - device driver callbacks for an SVA context
- *
- * @mm_exit: called when the mm is about to be torn down by exit_mmap. After
- *           @mm_exit returns, the device must not issue any more transaction
- *           with the PASID given as argument.
- *
- *           The @mm_exit handler is allowed to sleep. Be careful about the
- *           locks taken in @mm_exit, because they might lead to deadlocks if
- *           they are also held when dropping references to the mm. Consider the
- *           following call chain:
- *           mutex_lock(A); mmput(mm) -> exit_mm() -> @mm_exit() -> mutex_lock(A)
- *           Using mmput_async() prevents this scenario.
- *
- */
-struct iommu_sva_ops {
-       iommu_mm_exit_handler_t mm_exit;
-};
-
 #ifdef CONFIG_IOMMU_API
 
 /**
@@ -482,8 +461,6 @@ extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
 extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
 extern void generic_iommu_put_resv_regions(struct device *dev,
                                           struct list_head *list);
-extern int iommu_request_dm_for_dev(struct device *dev);
-extern int iommu_request_dma_domain_for_dev(struct device *dev);
 extern void iommu_set_default_passthrough(bool cmd_line);
 extern void iommu_set_default_translated(bool cmd_line);
 extern bool iommu_default_passthrough(void);
@@ -616,7 +593,6 @@ struct iommu_fwspec {
  */
 struct iommu_sva {
        struct device                   *dev;
-       const struct iommu_sva_ops      *ops;
 };
 
 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
@@ -664,8 +640,6 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev,
                                        struct mm_struct *mm,
                                        void *drvdata);
 void iommu_sva_unbind_device(struct iommu_sva *handle);
-int iommu_sva_set_ops(struct iommu_sva *handle,
-                     const struct iommu_sva_ops *ops);
 int iommu_sva_get_pasid(struct iommu_sva *handle);
 
 #else /* CONFIG_IOMMU_API */
@@ -804,16 +778,6 @@ static inline int iommu_get_group_resv_regions(struct iommu_group *group,
        return -ENODEV;
 }
 
-static inline int iommu_request_dm_for_dev(struct device *dev)
-{
-       return -ENODEV;
-}
-
-static inline int iommu_request_dma_domain_for_dev(struct device *dev)
-{
-       return -ENODEV;
-}
-
 static inline void iommu_set_default_passthrough(bool cmd_line)
 {
 }
@@ -1069,12 +1033,6 @@ static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
 {
 }
 
-static inline int iommu_sva_set_ops(struct iommu_sva *handle,
-                                   const struct iommu_sva_ops *ops)
-{
-       return -EINVAL;
-}
-
 static inline int iommu_sva_get_pasid(struct iommu_sva *handle)
 {
        return IOMMU_PASID_INVALID;
index 01276e3d01b920702642d789a830cd5d441319cb..131cc1527d689a8ee3acf2da61def9a4a900d7a9 100644 (file)
@@ -813,8 +813,11 @@ void kvm_flush_remote_tlbs(struct kvm *kvm);
 void kvm_reload_remote_mmus(struct kvm *kvm);
 
 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
+                                struct kvm_vcpu *except,
                                 unsigned long *vcpu_bitmap, cpumask_var_t tmp);
 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
+bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
+                                     struct kvm_vcpu *except);
 bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
                                unsigned long *vcpu_bitmap);
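
A one-line sketch of the new helper: raise a request on every vCPU except the initiator. KVM_REQ_TLB_FLUSH merely stands in for whatever request a caller would use, and "vcpu" is the vCPU to skip:

	kvm_make_all_cpus_request_except(kvm, KVM_REQ_TLB_FLUSH, vcpu);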
 
index 9cd4455528e507c9e5b05cf5c63e7e7a1e7ec25b..5616b2567aa7fba73b0c30bb276fc98df65551ae 100644 (file)
@@ -55,7 +55,7 @@ LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, struct linux_binprm *bprm)
 LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, struct linux_binprm *bprm)
 LSM_HOOK(int, 0, fs_context_dup, struct fs_context *fc,
         struct fs_context *src_sc)
-LSM_HOOK(int, 0, fs_context_parse_param, struct fs_context *fc,
+LSM_HOOK(int, -ENOPARAM, fs_context_parse_param, struct fs_context *fc,
         struct fs_parameter *param)
 LSM_HOOK(int, 0, sb_alloc_security, struct super_block *sb)
 LSM_HOOK(void, LSM_RET_VOID, sb_free_security, struct super_block *sb)
@@ -243,7 +243,7 @@ LSM_HOOK(int, -EINVAL, getprocattr, struct task_struct *p, char *name,
         char **value)
 LSM_HOOK(int, -EINVAL, setprocattr, const char *name, void *value, size_t size)
 LSM_HOOK(int, 0, ismaclabel, const char *name)
-LSM_HOOK(int, 0, secid_to_secctx, u32 secid, char **secdata,
+LSM_HOOK(int, -EOPNOTSUPP, secid_to_secctx, u32 secid, char **secdata,
         u32 *seclen)
 LSM_HOOK(int, 0, secctx_to_secid, const char *secdata, u32 seclen, u32 *secid)
 LSM_HOOK(void, LSM_RET_VOID, release_secctx, char *secdata, u32 seclen)
index d275c72c4f8efd09b9b3ccf8a8dffc6344d1a642..977edd3b7bd8934b9408b81ff5591a9a684b09f9 100644 (file)
@@ -783,6 +783,8 @@ static inline void memcg_memory_event(struct mem_cgroup *memcg,
                atomic_long_inc(&memcg->memory_events[event]);
                cgroup_file_notify(&memcg->events_file);
 
+               if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+                       break;
                if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
                        break;
        } while ((memcg = parent_mem_cgroup(memcg)) &&
index ad19960019653fceb12a220f291483246586f3f2..3d7c3c26eeb9a829b0e289746c77a980242ef82d 100644 (file)
@@ -53,9 +53,9 @@ enum mhi_callback {
  * @MHI_CHAIN: Linked transfer
  */
 enum mhi_flags {
-       MHI_EOB,
-       MHI_EOT,
-       MHI_CHAIN,
+       MHI_EOB = BIT(0),
+       MHI_EOT = BIT(1),
+       MHI_CHAIN = BIT(2),
 };
 
 /**
@@ -335,14 +335,15 @@ struct mhi_controller_config {
  * @syserr_worker: System error worker
  * @state_event: State change event
  * @status_cb: CB function to notify power states of the device (required)
- * @link_status: CB function to query link status of the device (required)
  * @wake_get: CB function to assert device wake (optional)
  * @wake_put: CB function to de-assert device wake (optional)
  * @wake_toggle: CB function to assert and de-assert device wake (optional)
  * @runtime_get: CB function to controller runtime resume (required)
- * @runtimet_put: CB function to decrement pm usage (required)
+ * @runtime_put: CB function to decrement pm usage (required)
  * @map_single: CB function to create TRE buffer
  * @unmap_single: CB function to destroy TRE buffer
+ * @read_reg: Read an MHI register via the physical link (required)
+ * @write_reg: Write an MHI register via the physical link (required)
  * @buffer_len: Bounce buffer length
  * @bounce_buf: Use of bounce buffer
  * @fbc_download: MHI host needs to do complete image transfer (optional)
@@ -417,7 +418,6 @@ struct mhi_controller {
 
        void (*status_cb)(struct mhi_controller *mhi_cntrl,
                          enum mhi_callback cb);
-       int (*link_status)(struct mhi_controller *mhi_cntrl);
        void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override);
        void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override);
        void (*wake_toggle)(struct mhi_controller *mhi_cntrl);
@@ -427,6 +427,10 @@ struct mhi_controller {
                          struct mhi_buf_info *buf);
        void (*unmap_single)(struct mhi_controller *mhi_cntrl,
                             struct mhi_buf_info *buf);
+       int (*read_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr,
+                       u32 *out);
+       void (*write_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr,
+                         u32 val);
 
        size_t buffer_len;
        bool bounce_buf;
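
For a memory-mapped controller the new accessors can be plain MMIO, as in this sketch (a controller behind a slower physical link would perform real link transactions instead; the function names are illustrative):

static int my_mhi_read_reg(struct mhi_controller *mhi_cntrl,
			   void __iomem *addr, u32 *out)
{
	*out = readl(addr);
	return 0;
}

static void my_mhi_write_reg(struct mhi_controller *mhi_cntrl,
			     void __iomem *addr, u32 val)
{
	writel(val, addr);
}

/* wired up before mhi_register_controller(): */
mhi_cntrl->read_reg = my_mhi_read_reg;
mhi_cntrl->write_reg = my_mhi_write_reg;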
index 6f8f79ef829b1829b9c19eba43857c4467d8fa9b..8397b6558dc7c086a93bba3735b8469ec22f4442 100644 (file)
@@ -213,6 +213,12 @@ enum mlx5_port_status {
        MLX5_PORT_DOWN      = 2,
 };
 
+enum mlx5_cmdif_state {
+       MLX5_CMDIF_STATE_UNINITIALIZED,
+       MLX5_CMDIF_STATE_UP,
+       MLX5_CMDIF_STATE_DOWN,
+};
+
 struct mlx5_cmd_first {
        __be32          data[4];
 };
@@ -258,6 +264,7 @@ struct mlx5_cmd_stats {
 struct mlx5_cmd {
        struct mlx5_nb    nb;
 
+       enum mlx5_cmdif_state   state;
        void           *cmd_alloc_buf;
        dma_addr_t      alloc_dma;
        int             alloc_size;
@@ -284,6 +291,7 @@ struct mlx5_cmd {
        struct semaphore sem;
        struct semaphore pages_sem;
        int     mode;
+       u16     allowed_opcode;
        struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
        struct dma_pool *pool;
        struct mlx5_cmd_debug dbg;
@@ -743,6 +751,7 @@ struct mlx5_cmd_work_ent {
        struct delayed_work     cb_timeout_work;
        void                   *context;
        int                     idx;
+       struct completion       handling;
        struct completion       done;
        struct mlx5_cmd        *cmd;
        struct work_struct      work;
@@ -874,10 +883,17 @@ mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
        return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
 }
 
+enum {
+       CMD_ALLOWED_OPCODE_ALL,
+};
+
 int mlx5_cmd_init(struct mlx5_core_dev *dev);
 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
+void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
+                       enum mlx5_cmdif_state cmdif_state);
 void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
+void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode);
 
 struct mlx5_async_ctx {
        struct mlx5_core_dev *dev;
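
A hedged sketch of how the opcode filter pairs with the new command-interface state: restrict the interface to a single opcode around a sensitive phase, then restore it. MLX5_CMD_OP_QUERY_ISSI is only an illustrative opcode here:

/* let exactly one opcode through while tearing down */
mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_QUERY_ISSI);
/* ... issue the permitted command ... */
mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);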
index 5a323422d783d076c01b41b2a9a1f4bbd7d1a6a5..f3fe7371855cec8769fbee500632d52c05cf4610 100644 (file)
@@ -782,6 +782,11 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
 
 extern void kvfree(const void *addr);
 
+/*
+ * Mapcount of a compound page as a whole; does not include mapped sub-pages.
+ *
+ * Must be called only for compound pages or any of their tail sub-pages.
+ */
 static inline int compound_mapcount(struct page *page)
 {
        VM_BUG_ON_PAGE(!PageCompound(page), page);
@@ -801,10 +806,16 @@ static inline void page_mapcount_reset(struct page *page)
 
 int __page_mapcount(struct page *page);
 
+/*
+ * Mapcount of a 0-order page; when called on a compound sub-page, it
+ * includes compound_mapcount().
+ *
+ * The result is undefined for pages which cannot be mapped into
+ * userspace, such as SLAB pages or other special page types; see
+ * page_has_type(). Such pages use this field in struct page differently.
+ */
 static inline int page_mapcount(struct page *page)
 {
-       VM_BUG_ON_PAGE(PageSlab(page), page);
-
        if (unlikely(PageCompound(page)))
                return __page_mapcount(page);
        return atomic_read(&page->_mapcount) + 1;
index fcc409de31a406af2c8418e29077f0f0abe68699..a28aa289afdca9bd5653764cfbc38f784e1c82ed 100644 (file)
@@ -10,7 +10,7 @@
 #include <net/netfilter/nf_conntrack_expect.h>
 #include <uapi/linux/netfilter/nf_conntrack_tuple_common.h>
 
-extern const char *const pptp_msg_name[];
+const char *pptp_msg_name(u_int16_t msg);
 
 /* state of the control session */
 enum pptp_ctrlsess_state {
index 440230488025199340c11cd06d8f76974b431a45..e5f3e7d8d3d59a4e4e558f82338f2de08f18c4de 100644 (file)
@@ -1317,11 +1317,13 @@ struct nfs41_impl_id {
        struct nfstime4                 date;
 };
 
+#define MAX_BIND_CONN_TO_SESSION_RETRIES 3
 struct nfs41_bind_conn_to_session_args {
        struct nfs_client               *client;
        struct nfs4_sessionid           sessionid;
        u32                             dir;
        bool                            use_conn_in_rdma_mode;
+       int                             retries;
 };
 
 struct nfs41_bind_conn_to_session_res {
index d08f0869f1213e3d5df4c65ad9259d280aac4342..f75c307f346de900ecfc6b3693c1ef2ba115f122 100644 (file)
@@ -6,11 +6,14 @@
 
 #ifdef CONFIG_PCI_ATS
 /* Address Translation Service */
+bool pci_ats_supported(struct pci_dev *dev);
 int pci_enable_ats(struct pci_dev *dev, int ps);
 void pci_disable_ats(struct pci_dev *dev);
 int pci_ats_queue_depth(struct pci_dev *dev);
 int pci_ats_page_aligned(struct pci_dev *dev);
 #else /* CONFIG_PCI_ATS */
+static inline bool pci_ats_supported(struct pci_dev *d)
+{ return false; }
 static inline int pci_enable_ats(struct pci_dev *d, int ps)
 { return -ENODEV; }
 static inline void pci_disable_ats(struct pci_dev *d) { }
index c588be843f61ba2c37a763253a7b4bba5077b729..0ecce6aa69d5e3a1ade4ef14b12b1141b5e4be69 100644 (file)
@@ -185,6 +185,7 @@ int cros_ec_sensorhub_register_push_data(struct cros_ec_sensorhub *sensorhub,
 void cros_ec_sensorhub_unregister_push_data(struct cros_ec_sensorhub *sensorhub,
                                            u8 sensor_num);
 
+int cros_ec_sensorhub_ring_allocate(struct cros_ec_sensorhub *sensorhub);
 int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub);
 void cros_ec_sensorhub_ring_remove(void *arg);
 int cros_ec_sensorhub_ring_fifo_enable(struct cros_ec_sensorhub *sensorhub,
index bdc35753ef7c1cf8488ef1b3079114a600fa8da1..77a2aada106dcb016833d77cc3b0842c35a4db4b 100644 (file)
@@ -25,6 +25,7 @@ struct platform_device {
        bool            id_auto;
        struct device   dev;
        u64             platform_dma_mask;
+       struct device_dma_parameters dma_parms;
        u32             num_resources;
        struct resource *resource;
 
index 121a7eda459351cf215fb497dbf4e22bdadf8f21..c602670bbffb64d5130a5b2d3011605dfea814d3 100644 (file)
@@ -105,10 +105,10 @@ struct ptp_system_timestamp {
  *            parameter func: the desired function to use.
  *            parameter chan: the function channel index to use.
  *
- * @do_work:  Request driver to perform auxiliary (periodic) operations
- *           Driver should return delay of the next auxiliary work scheduling
- *           time (>=0) or negative value in case further scheduling
- *           is not required.
+ * @do_aux_work:  Request driver to perform auxiliary (periodic) operations
+ *                Driver should return delay of the next auxiliary work
+ *                scheduling time (>=0) or negative value in case further
+ *                scheduling is not required.
  *
  * Drivers should embed their ptp_clock_info within a private
  * structure, obtaining a reference to it using container_of().
index 8a709f63c5e5713e2f695a28db8a7afd3e7f4aa9..ad31c9fb71584b4b5711f0cb8bb5bab3c39ad5bb 100644 (file)
@@ -187,6 +187,7 @@ static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
        dst->sg.data[which] = src->sg.data[which];
        dst->sg.data[which].length  = size;
        dst->sg.size               += size;
+       src->sg.size               -= size;
        src->sg.data[which].length -= size;
        src->sg.data[which].offset += size;
 }
index ca7e108248e211208c0d2317b3b28cbd8394660f..02e7a5863d289c7492a042367daac28730597cd8 100644 (file)
@@ -71,7 +71,13 @@ struct rpc_clnt {
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
        struct dentry           *cl_debugfs;    /* debugfs directory */
 #endif
-       struct rpc_xprt_iter    cl_xpi;
+       /* cl_work is only needed once cl_xpi is no longer used,
+        * and the two are of similar size.
+        */
+       union {
+               struct rpc_xprt_iter    cl_xpi;
+               struct work_struct      cl_work;
+       };
        const struct cred       *cl_cred;
 };
 
@@ -236,4 +242,9 @@ static inline int rpc_reply_expected(struct rpc_task *task)
                (task->tk_msg.rpc_proc->p_decode != NULL);
 }
 
+static inline void rpc_task_close_connection(struct rpc_task *task)
+{
+       if (task->tk_xprt)
+               xprt_force_disconnect(task->tk_xprt);
+}
 #endif /* _LINUX_SUNRPC_CLNT_H */
index 48c1b1674cbf0b3766076b4773de7056c1af707a..bc07e51f20d1c2b0f350cb25dae70504d90274f4 100644 (file)
@@ -21,6 +21,7 @@
 struct gss_ctx {
        struct gss_api_mech     *mech_type;
        void                    *internal_ctx_id;
+       unsigned int            slack, align;
 };
 
 #define GSS_C_NO_BUFFER                ((struct xdr_netobj) 0)
@@ -66,6 +67,7 @@ u32 gss_wrap(
 u32 gss_unwrap(
                struct gss_ctx          *ctx_id,
                int                     offset,
+               int                     len,
                struct xdr_buf          *inbuf);
 u32 gss_delete_sec_context(
                struct gss_ctx          **ctx_id);
@@ -126,6 +128,7 @@ struct gss_api_ops {
        u32 (*gss_unwrap)(
                        struct gss_ctx          *ctx_id,
                        int                     offset,
+                       int                     len,
                        struct xdr_buf          *buf);
        void (*gss_delete_sec_context)(
                        void                    *internal_ctx_id);
index c1d77dd8ed416399e92ca39c49d9a056cae3d544..e8f8ffe7448b27df4b6cf1dde20fa24e5981e77d 100644 (file)
@@ -83,7 +83,7 @@ struct gss_krb5_enctype {
        u32 (*encrypt_v2) (struct krb5_ctx *kctx, u32 offset,
                           struct xdr_buf *buf,
                           struct page **pages); /* v2 encryption function */
-       u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset,
+       u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset, u32 len,
                           struct xdr_buf *buf, u32 *headskip,
                           u32 *tailskip);      /* v2 decryption function */
 };
@@ -255,7 +255,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx_id, int offset,
                struct xdr_buf *outbuf, struct page **pages);
 
 u32
-gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset,
+gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, int len,
                struct xdr_buf *buf);
 
 
@@ -312,7 +312,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
                     struct page **pages);
 
 u32
-gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset,
+gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
                     struct xdr_buf *buf, u32 *plainoffset,
                     u32 *plainlen);
 
index 01bb41908c93e22a7db40daeb3dff15b11ce9be1..22c207b2425fc2b4ef915b0dc591e094257c5970 100644 (file)
@@ -184,6 +184,7 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p)
 extern void xdr_shift_buf(struct xdr_buf *, size_t);
 extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *);
 extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int);
+extern void xdr_buf_trim(struct xdr_buf *, unsigned int);
 extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
 extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
 
index 421c99c12291994ce9f023fbedbc9089c8a5690d..4f8159e90ce1783475bfe7ec4fa5e99ac42b11be 100644 (file)
@@ -78,47 +78,6 @@ struct tcp_sack_block {
 #define TCP_SACK_SEEN     (1 << 0)   /*1 = peer is SACK capable, */
 #define TCP_DSACK_SEEN    (1 << 2)   /*1 = DSACK was received from peer*/
 
-#if IS_ENABLED(CONFIG_MPTCP)
-struct mptcp_options_received {
-       u64     sndr_key;
-       u64     rcvr_key;
-       u64     data_ack;
-       u64     data_seq;
-       u32     subflow_seq;
-       u16     data_len;
-       u16     mp_capable : 1,
-               mp_join : 1,
-               dss : 1,
-               add_addr : 1,
-               rm_addr : 1,
-               family : 4,
-               echo : 1,
-               backup : 1;
-       u32     token;
-       u32     nonce;
-       u64     thmac;
-       u8      hmac[20];
-       u8      join_id;
-       u8      use_map:1,
-               dsn64:1,
-               data_fin:1,
-               use_ack:1,
-               ack64:1,
-               mpc_map:1,
-               __unused:2;
-       u8      addr_id;
-       u8      rm_id;
-       union {
-               struct in_addr  addr;
-#if IS_ENABLED(CONFIG_MPTCP_IPV6)
-               struct in6_addr addr6;
-#endif
-       };
-       u64     ahmac;
-       u16     port;
-};
-#endif
-
 struct tcp_options_received {
 /*     PAWS/RTTM data  */
        int     ts_recent_stamp;/* Time we stored ts_recent (for aging) */
@@ -136,9 +95,6 @@ struct tcp_options_received {
        u8      num_sacks;      /* Number of SACK blocks                */
        u16     user_mss;       /* mss requested by user in ioctl       */
        u16     mss_clamp;      /* Maximal mss, negotiated at connection setup */
-#if IS_ENABLED(CONFIG_MPTCP)
-       struct mptcp_options_received   mptcp;
-#endif
 };
 
 static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
@@ -148,13 +104,6 @@ static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
 #if IS_ENABLED(CONFIG_SMC)
        rx_opt->smc_ok = 0;
 #endif
-#if IS_ENABLED(CONFIG_MPTCP)
-       rx_opt->mptcp.mp_capable = 0;
-       rx_opt->mptcp.mp_join = 0;
-       rx_opt->mptcp.add_addr = 0;
-       rx_opt->mptcp.rm_addr = 0;
-       rx_opt->mptcp.dss = 0;
-#endif
 }
 
 /* This is the max number of SACKS that we'll generate and process. It's safe
index bd5fe0e907e8c1c219a496120d49a0fa42b8c736..a99e9b8e4e316b715d2f7b7cd28d8cf5fb4395b3 100644 (file)
@@ -66,7 +66,7 @@ struct tty_buffer {
        int read;
        int flags;
        /* Data points here */
-       unsigned long data[0];
+       unsigned long data[];
 };
 
 /* Values for .flags field of tty_buffer */
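
(The [0] to [] change is part of the treewide conversion from the GNU zero-length-array extension to C99 flexible array members, which lets compiler and fortify bounds checking reason about the trailing array.)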
index 0e215e6d0534aa06b335889f0c136c383b3b3933..454c2f6672d791722b2201918380f3265b904909 100644 (file)
@@ -68,19 +68,21 @@ enum uacce_q_state {
  * @uacce: pointer to uacce
  * @priv: private pointer
  * @wait: wait queue head
- * @list: index into uacce_mm
- * @uacce_mm: the corresponding mm
+ * @list: index into uacce queues list
  * @qfrs: pointer of qfr regions
  * @state: queue state machine
+ * @pasid: PASID associated with the mm
+ * @handle: iommu_sva handle returned by iommu_sva_bind_device()
  */
 struct uacce_queue {
        struct uacce_device *uacce;
        void *priv;
        wait_queue_head_t wait;
        struct list_head list;
-       struct uacce_mm *uacce_mm;
        struct uacce_qfile_region *qfrs[UACCE_MAX_REGION];
        enum uacce_q_state state;
+       int pasid;
+       struct iommu_sva *handle;
 };
 
 /**
@@ -96,8 +98,8 @@ struct uacce_queue {
  * @cdev: cdev of the uacce
  * @dev: dev of the uacce
  * @priv: private pointer of the uacce
- * @mm_list: list head of uacce_mm->list
- * @mm_lock: lock for mm_list
+ * @queues: list of queues
+ * @queues_lock: lock for queues list
  * @inode: core vfs
  */
 struct uacce_device {
@@ -112,27 +114,9 @@ struct uacce_device {
        struct cdev *cdev;
        struct device dev;
        void *priv;
-       struct list_head mm_list;
-       struct mutex mm_lock;
-       struct inode *inode;
-};
-
-/**
- * struct uacce_mm - keep track of queues bound to a process
- * @list: index into uacce_device
- * @queues: list of queues
- * @mm: the mm struct
- * @lock: protects the list of queues
- * @pasid: pasid of the uacce_mm
- * @handle: iommu_sva handle return from iommu_sva_bind_device
- */
-struct uacce_mm {
-       struct list_head list;
        struct list_head queues;
-       struct mm_struct *mm;
-       struct mutex lock;
-       int pasid;
-       struct iommu_sva *handle;
+       struct mutex queues_lock;
+       struct inode *inode;
 };
 
 #if IS_ENABLED(CONFIG_UACCE)
index 0d1fe9297ac67ad1043e6f3094aad9610a893d75..e8a924eeea3d01c86c40766445c5661c395bce6c 100644 (file)
@@ -3,6 +3,8 @@
 #define _LINUX_VIRTIO_NET_H
 
 #include <linux/if_vlan.h>
+#include <uapi/linux/tcp.h>
+#include <uapi/linux/udp.h>
 #include <uapi/linux/virtio_net.h>
 
 static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
@@ -28,17 +30,26 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
                                        bool little_endian)
 {
        unsigned int gso_type = 0;
+       unsigned int thlen = 0;
+       unsigned int p_off = 0;
+       unsigned int ip_proto;
 
        if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
                        gso_type = SKB_GSO_TCPV4;
+                       ip_proto = IPPROTO_TCP;
+                       thlen = sizeof(struct tcphdr);
                        break;
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        gso_type = SKB_GSO_TCPV6;
+                       ip_proto = IPPROTO_TCP;
+                       thlen = sizeof(struct tcphdr);
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
                        gso_type = SKB_GSO_UDP;
+                       ip_proto = IPPROTO_UDP;
+                       thlen = sizeof(struct udphdr);
                        break;
                default:
                        return -EINVAL;
@@ -57,16 +68,23 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
 
                if (!skb_partial_csum_set(skb, start, off))
                        return -EINVAL;
+
+               p_off = skb_transport_offset(skb) + thlen;
+               if (p_off > skb_headlen(skb))
+                       return -EINVAL;
        } else {
                /* gso packets without NEEDS_CSUM do not set transport_offset.
                 * probe and drop if does not match one of the above types.
                 */
                if (gso_type && skb->network_header) {
+                       struct flow_keys_basic keys;
+
                        if (!skb->protocol)
                                virtio_net_hdr_set_proto(skb, hdr);
 retry:
-                       skb_probe_transport_header(skb);
-                       if (!skb_transport_header_was_set(skb)) {
+                       if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
+                                                             NULL, 0, 0, 0,
+                                                             0)) {
                                /* UFO does not specify ipv4 or 6: try both */
                                if (gso_type & SKB_GSO_UDP &&
                                    skb->protocol == htons(ETH_P_IP)) {
@@ -75,18 +93,33 @@ retry:
                                }
                                return -EINVAL;
                        }
+
+                       p_off = keys.control.thoff + thlen;
+                       if (p_off > skb_headlen(skb) ||
+                           keys.basic.ip_proto != ip_proto)
+                               return -EINVAL;
+
+                       skb_set_transport_header(skb, keys.control.thoff);
+               } else if (gso_type) {
+                       p_off = thlen;
+                       if (p_off > skb_headlen(skb))
+                               return -EINVAL;
                }
        }
 
        if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size);
+               struct skb_shared_info *shinfo = skb_shinfo(skb);
 
-               skb_shinfo(skb)->gso_size = gso_size;
-               skb_shinfo(skb)->gso_type = gso_type;
+               /* Too-small packets are not really GSO ones. */
+               if (skb->len - p_off > gso_size) {
+                       shinfo->gso_size = gso_size;
+                       shinfo->gso_type = gso_type;
 
-               /* Header must be checked, and gso_segs computed. */
-               skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
-               skb_shinfo(skb)->gso_segs = 0;
+                       /* Header must be checked, and gso_segs computed. */
+                       shinfo->gso_type |= SKB_GSO_DODGY;
+                       shinfo->gso_segs = 0;
+               }
        }
 
        return 0;
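
Summed up, this hardening validates that the transport header plus the TCP/UDP header actually fits within the skb's linear data, cross-checks the IP protocol reported by the flow dissector against the claimed GSO type before trusting a probed header, and only marks a packet as GSO when its payload genuinely exceeds gso_size.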
index 71c81e0dc8f28c06f134f25279b01aa5d230f982..dc636b7271791eeab5eaa2e390684c9b23ff51bb 100644 (file)
@@ -48,6 +48,7 @@ struct virtio_vsock_pkt {
        u32 len;
        u32 off;
        bool reply;
+       bool tap_delivered;
 };
 
 struct virtio_vsock_pkt_info {
index c24d7643548ee927b4741f963a20e00925e755d9..124bd139886cb40ce14ab4d34ccae15cf5faefa2 100644 (file)
@@ -75,7 +75,8 @@ static inline void tcf_tm_dump(struct tcf_t *dtm, const struct tcf_t *stm)
 {
        dtm->install = jiffies_to_clock_t(jiffies - stm->install);
        dtm->lastuse = jiffies_to_clock_t(jiffies - stm->lastuse);
-       dtm->firstuse = jiffies_to_clock_t(jiffies - stm->firstuse);
+       dtm->firstuse = stm->firstuse ?
+               jiffies_to_clock_t(jiffies - stm->firstuse) : 0;
        dtm->expires = jiffies_to_clock_t(stm->expires);
 }
 
index 04e97bab6f28b9eff4d1efba213c667b9ad596af..ab988940bf0454d3c4c58670840316a910c31b21 100644 (file)
@@ -59,7 +59,7 @@ bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
 void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *);
 void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
                           struct sockaddr_rxrpc *);
-u64 rxrpc_kernel_get_rtt(struct socket *, struct rxrpc_call *);
+u32 rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *);
 int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
                               rxrpc_user_attach_call_t, unsigned long, gfp_t,
                               unsigned int);
index dd7026a000660e9890a50c9846d1027476be4fb6..0335bbd76552a1ab0b509ef051536c097c8a2615 100644 (file)
@@ -25,6 +25,7 @@ struct espintcp_ctx {
        struct espintcp_msg partial;
        void (*saved_data_ready)(struct sock *sk);
        void (*saved_write_space)(struct sock *sk);
+       void (*saved_destruct)(struct sock *sk);
        struct work_struct work;
        bool tx_running;
 };
index 3619c6acf60fa563f7915f6a03647b975eaa1ac8..efc8350b42fb30f9146e4f5df235214b2d4c84d3 100644 (file)
@@ -166,15 +166,18 @@ enum flow_action_mangle_base {
 enum flow_action_hw_stats_bit {
        FLOW_ACTION_HW_STATS_IMMEDIATE_BIT,
        FLOW_ACTION_HW_STATS_DELAYED_BIT,
+       FLOW_ACTION_HW_STATS_DISABLED_BIT,
 };
 
 enum flow_action_hw_stats {
-       FLOW_ACTION_HW_STATS_DISABLED = 0,
+       FLOW_ACTION_HW_STATS_DONT_CARE = 0,
        FLOW_ACTION_HW_STATS_IMMEDIATE =
                BIT(FLOW_ACTION_HW_STATS_IMMEDIATE_BIT),
        FLOW_ACTION_HW_STATS_DELAYED = BIT(FLOW_ACTION_HW_STATS_DELAYED_BIT),
        FLOW_ACTION_HW_STATS_ANY = FLOW_ACTION_HW_STATS_IMMEDIATE |
                                   FLOW_ACTION_HW_STATS_DELAYED,
+       FLOW_ACTION_HW_STATS_DISABLED =
+               BIT(FLOW_ACTION_HW_STATS_DISABLED_BIT),
 };
 
 typedef void (*action_destr)(void *priv);
@@ -325,7 +328,11 @@ __flow_action_hw_stats_check(const struct flow_action *action,
                return true;
        if (!flow_action_mixed_hw_stats_check(action, extack))
                return false;
+
        action_entry = flow_action_first_entry_get(action);
+       if (action_entry->hw_stats == FLOW_ACTION_HW_STATS_DONT_CARE)
+               return true;
+
        if (!check_allow_bit &&
            action_entry->hw_stats != FLOW_ACTION_HW_STATS_ANY) {
                NL_SET_ERR_MSG_MOD(extack, "Driver supports only default HW stats type \"any\"");
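
The distinction matters for drivers: FLOW_ACTION_HW_STATS_DONT_CARE keeps the value 0 for actions that never expressed a preference, and __flow_action_hw_stats_check() now accepts such actions outright, while an explicit request to disable stats gets its own FLOW_ACTION_HW_STATS_DISABLED bit.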
index c8e2bebd8d934a5560927509ecb81e5536e2f8e8..0f0d1efe06ddcd1bcd67000bde1d7b88f0c74153 100644 (file)
@@ -99,6 +99,20 @@ static inline int IP_ECN_set_ce(struct iphdr *iph)
        return 1;
 }
 
+static inline int IP_ECN_set_ect1(struct iphdr *iph)
+{
+       u32 check = (__force u32)iph->check;
+
+       if ((iph->tos & INET_ECN_MASK) != INET_ECN_ECT_0)
+               return 0;
+
+       check += (__force u16)htons(0x100);
+
+       iph->check = (__force __sum16)(check + (check>=0xFFFF));
+       iph->tos ^= INET_ECN_MASK;
+       return 1;
+}
+
 static inline void IP_ECN_clear(struct iphdr *iph)
 {
        iph->tos &= ~INET_ECN_MASK;
@@ -134,6 +148,22 @@ static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
        return 1;
 }
 
+static inline int IP6_ECN_set_ect1(struct sk_buff *skb, struct ipv6hdr *iph)
+{
+       __be32 from, to;
+
+       if ((ipv6_get_dsfield(iph) & INET_ECN_MASK) != INET_ECN_ECT_0)
+               return 0;
+
+       from = *(__be32 *)iph;
+       to = from ^ htonl(INET_ECN_MASK << 20);
+       *(__be32 *)iph = to;
+       if (skb->ip_summed == CHECKSUM_COMPLETE)
+               skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from),
+                                    (__force __wsum)to);
+       return 1;
+}
+
 static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner)
 {
        dscp &= ~INET_ECN_MASK;
@@ -159,6 +189,25 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
        return 0;
 }
 
+static inline int INET_ECN_set_ect1(struct sk_buff *skb)
+{
+       switch (skb->protocol) {
+       case cpu_to_be16(ETH_P_IP):
+               if (skb_network_header(skb) + sizeof(struct iphdr) <=
+                   skb_tail_pointer(skb))
+                       return IP_ECN_set_ect1(ip_hdr(skb));
+               break;
+
+       case cpu_to_be16(ETH_P_IPV6):
+               if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
+                   skb_tail_pointer(skb))
+                       return IP6_ECN_set_ect1(skb, ipv6_hdr(skb));
+               break;
+       }
+
+       return 0;
+}
+
 /*
  * RFC 6040 4.2
  *  To decapsulate the inner header at the tunnel egress, a compliant
@@ -208,8 +257,12 @@ static inline int INET_ECN_decapsulate(struct sk_buff *skb,
        int rc;
 
        rc = __INET_ECN_decapsulate(outer, inner, &set_ce);
-       if (!rc && set_ce)
-               INET_ECN_set_ce(skb);
+       if (!rc) {
+               if (set_ce)
+                       INET_ECN_set_ce(skb);
+               else if ((outer & INET_ECN_MASK) == INET_ECN_ECT_1)
+                       INET_ECN_set_ect1(skb);
+       }
 
        return rc;
 }
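
The net effect of the decapsulation change: when the outer header carried ECT(1) but no CE mark, the inner header is rewritten from ECT(0) to ECT(1) (the new set_ect1 helpers leave any other codepoint alone), so tunnels no longer erase the ECT(1) signal that L4S-style marking schemes depend on.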
index 80262d2980f5d8e24b93f573ce9858b126031ea4..1d98828c6649bfa137f86bea4c827d9e3be857e8 100644 (file)
@@ -203,6 +203,7 @@ struct fib6_info {
 struct rt6_info {
        struct dst_entry                dst;
        struct fib6_info __rcu          *from;
+       int                             sernum;
 
        struct rt6key                   rt6i_dst;
        struct rt6key                   rt6i_src;
@@ -291,6 +292,9 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt)
        struct fib6_info *from;
        u32 cookie = 0;
 
+       if (rt->sernum)
+               return rt->sernum;
+
        rcu_read_lock();
 
        from = rcu_dereference(rt->from);
index 59e0d4e99f94e70325e477456ed251dcf0e04b47..2ec062aaa9782b71cfd75279c8347d90cff14e81 100644 (file)
@@ -257,7 +257,6 @@ struct fib_dump_filter {
        u32                     table_id;
        /* filter_set is an optimization that an entry is set */
        bool                    filter_set;
-       bool                    dump_all_families;
        bool                    dump_routes;
        bool                    dump_exceptions;
        unsigned char           protocol;
@@ -448,6 +447,16 @@ static inline int fib_num_tclassid_users(struct net *net)
 #endif
 int fib_unmerge(struct net *net);
 
+static inline bool nhc_l3mdev_matches_dev(const struct fib_nh_common *nhc,
+                                          const struct net_device *dev)
+{
+       if (nhc->nhc_dev == dev ||
+           l3mdev_master_ifindex_rcu(nhc->nhc_dev) == dev->ifindex)
+               return true;
+
+       return false;
+}
+
 /* Exported by fib_semantics.c */
 int ip_fib_check_default(__be32 gw, struct net_device *dev);
 int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
@@ -480,6 +489,8 @@ void fib_nh_common_release(struct fib_nh_common *nhc);
 void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri);
 void fib_trie_init(void);
 struct fib_table *fib_trie_table(u32 id, struct fib_table *alias);
+bool fib_lookup_good_nhc(const struct fib_nh_common *nhc, int fib_flags,
+                        const struct flowi4 *flp);
 
 static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
 {
index 0e7c5471010bda14c7a17f096036cdf425b63fc3..3bce2019e4da97bde5581e0da049abd64c8ca739 100644 (file)
@@ -68,11 +68,8 @@ static inline bool rsk_is_mptcp(const struct request_sock *req)
        return tcp_rsk(req)->is_mptcp;
 }
 
-void mptcp_parse_option(const struct sk_buff *skb, const unsigned char *ptr,
-                       int opsize, struct tcp_options_received *opt_rx);
 bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
                       unsigned int *size, struct mptcp_out_options *opts);
-void mptcp_rcv_synsent(struct sock *sk);
 bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
                          struct mptcp_out_options *opts);
 bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
index ab96fb59131c148d5595350c9fad3e7594b28a68..8e001e049497f45e0b12807e24e0f330319bfb4a 100644 (file)
@@ -437,6 +437,13 @@ static inline int rt_genid_ipv4(const struct net *net)
        return atomic_read(&net->ipv4.rt_genid);
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
+static inline int rt_genid_ipv6(const struct net *net)
+{
+       return atomic_read(&net->ipv6.fib6_sernum);
+}
+#endif
+
 static inline void rt_genid_bump_ipv4(struct net *net)
 {
        atomic_inc(&net->ipv4.rt_genid);
index 9f551f3b69c65f962678b6b863a136066e5b14c3..90690e37a56f0ebf33b2cd8961d89ed5e55f66b2 100644 (file)
@@ -87,7 +87,7 @@ struct nf_conn {
        struct hlist_node       nat_bysource;
 #endif
        /* all members below initialized via memset */
-       u8 __nfct_init_offset[0];
+       struct { } __nfct_init_offset;
 
        /* If we were expected by an expectation, this will be it */
        struct nf_conn *master;
index 6bf69652f57df0a54c69c8c4d9e491409acf6d87..c54a7f707e5039ef7437f9b17a11d4cde530517f 100644 (file)
@@ -127,6 +127,7 @@ enum nf_flow_flags {
        NF_FLOW_HW_DYING,
        NF_FLOW_HW_DEAD,
        NF_FLOW_HW_REFRESH,
+       NF_FLOW_HW_PENDING,
 };
 
 enum flow_offload_type {
index c440ccc861fc70b13f565dbcb138768a562a3e53..8c9f1a7188591327710c112c0c8db8bbb9ca19fb 100644 (file)
@@ -70,6 +70,7 @@ struct nh_grp_entry {
 };
 
 struct nh_group {
+       struct nh_group         *spare; /* spare group for removals */
        u16                     num_nh;
        bool                    mpath;
        bool                    has_v4;
@@ -136,21 +137,20 @@ static inline unsigned int nexthop_num_path(const struct nexthop *nh)
 {
        unsigned int rc = 1;
 
-       if (nexthop_is_multipath(nh)) {
+       if (nh->is_group) {
                struct nh_group *nh_grp;
 
                nh_grp = rcu_dereference_rtnl(nh->nh_grp);
-               rc = nh_grp->num_nh;
+               if (nh_grp->mpath)
+                       rc = nh_grp->num_nh;
        }
 
        return rc;
 }
 
 static inline
-struct nexthop *nexthop_mpath_select(const struct nexthop *nh, int nhsel)
+struct nexthop *nexthop_mpath_select(const struct nh_group *nhg, int nhsel)
 {
-       const struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
-
        /* for_nexthops macros in fib_semantics.c grabs a pointer to
         * the nexthop before checking nhsel
         */
@@ -185,12 +185,14 @@ static inline bool nexthop_is_blackhole(const struct nexthop *nh)
 {
        const struct nh_info *nhi;
 
-       if (nexthop_is_multipath(nh)) {
-               if (nexthop_num_path(nh) > 1)
-                       return false;
-               nh = nexthop_mpath_select(nh, 0);
-               if (!nh)
+       if (nh->is_group) {
+               struct nh_group *nh_grp;
+
+               nh_grp = rcu_dereference_rtnl(nh->nh_grp);
+               if (nh_grp->num_nh > 1)
                        return false;
+
+               nh = nh_grp->nh_entries[0].nh;
        }
 
        nhi = rcu_dereference_rtnl(nh->nh_info);
@@ -216,16 +218,79 @@ struct fib_nh_common *nexthop_fib_nhc(struct nexthop *nh, int nhsel)
        BUILD_BUG_ON(offsetof(struct fib_nh, nh_common) != 0);
        BUILD_BUG_ON(offsetof(struct fib6_nh, nh_common) != 0);
 
-       if (nexthop_is_multipath(nh)) {
-               nh = nexthop_mpath_select(nh, nhsel);
-               if (!nh)
-                       return NULL;
+       if (nh->is_group) {
+               struct nh_group *nh_grp;
+
+               nh_grp = rcu_dereference_rtnl(nh->nh_grp);
+               if (nh_grp->mpath) {
+                       nh = nexthop_mpath_select(nh_grp, nhsel);
+                       if (!nh)
+                               return NULL;
+               }
        }
 
        nhi = rcu_dereference_rtnl(nh->nh_info);
        return &nhi->fib_nhc;
 }
 
+/* called from fib_table_lookup with rcu_read_lock held */
+static inline
+struct fib_nh_common *nexthop_get_nhc_lookup(const struct nexthop *nh,
+                                            int fib_flags,
+                                            const struct flowi4 *flp,
+                                            int *nhsel)
+{
+       struct nh_info *nhi;
+
+       if (nh->is_group) {
+               struct nh_group *nhg = rcu_dereference(nh->nh_grp);
+               int i;
+
+               for (i = 0; i < nhg->num_nh; i++) {
+                       struct nexthop *nhe = nhg->nh_entries[i].nh;
+
+                       nhi = rcu_dereference(nhe->nh_info);
+                       if (fib_lookup_good_nhc(&nhi->fib_nhc, fib_flags, flp)) {
+                               *nhsel = i;
+                               return &nhi->fib_nhc;
+                       }
+               }
+       } else {
+               nhi = rcu_dereference(nh->nh_info);
+               if (fib_lookup_good_nhc(&nhi->fib_nhc, fib_flags, flp)) {
+                       *nhsel = 0;
+                       return &nhi->fib_nhc;
+               }
+       }
+
+       return NULL;
+}
+
+static inline bool nexthop_uses_dev(const struct nexthop *nh,
+                                   const struct net_device *dev)
+{
+       struct nh_info *nhi;
+
+       if (nh->is_group) {
+               struct nh_group *nhg = rcu_dereference(nh->nh_grp);
+               int i;
+
+               for (i = 0; i < nhg->num_nh; i++) {
+                       struct nexthop *nhe = nhg->nh_entries[i].nh;
+
+                       nhi = rcu_dereference(nhe->nh_info);
+                       if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev))
+                               return true;
+               }
+       } else {
+               nhi = rcu_dereference(nh->nh_info);
+               if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev))
+                       return true;
+       }
+
+       return false;
+}
+
 static inline unsigned int fib_info_num_path(const struct fib_info *fi)
 {
        if (unlikely(fi->nh))
@@ -263,8 +328,11 @@ static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh)
 {
        struct nh_info *nhi;
 
-       if (nexthop_is_multipath(nh)) {
-               nh = nexthop_mpath_select(nh, 0);
+       if (nh->is_group) {
+               struct nh_group *nh_grp;
+
+               nh_grp = rcu_dereference_rtnl(nh->nh_grp);
+               nh = nexthop_mpath_select(nh_grp, 0);
                if (!nh)
                        return NULL;
        }
index 25d2ec4c8f00495cb8b25b0e7a24d1f808e50c90..8428aa6142655666715743e60bc74855f9f8d008 100644 (file)
@@ -407,6 +407,7 @@ struct tcf_block {
        struct mutex lock;
        struct list_head chain_list;
        u32 index; /* block index for shared blocks */
+       u32 classid; /* which class this block belongs to */
        refcount_t refcnt;
        struct net *net;
        struct Qdisc *q;
index dcf9a72eeaa6912202e8a1ca6cf800f7401bf517..6f8e60c6fbc746ea7ed2c2ddc97bffdbb7da4fc1 100644 (file)
@@ -1376,7 +1376,6 @@ static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
        rx_opt->num_sacks = 0;
 }
 
-u32 tcp_default_init_rwnd(u32 mss);
 void tcp_cwnd_restart(struct sock *sk, s32 delta);
 
 static inline void tcp_slow_start_after_idle_check(struct sock *sk)
@@ -1421,6 +1420,19 @@ static inline int tcp_full_space(const struct sock *sk)
        return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
 }
 
+/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
+ * If 87.5% (7/8) of the space has been consumed, we want to override
+ * the SO_RCVLOWAT constraint, since we are receiving skbs with a too
+ * small len/truesize ratio.
+ */
+static inline bool tcp_rmem_pressure(const struct sock *sk)
+{
+       int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
+       int threshold = rcvbuf - (rcvbuf >> 3);
+
+       return atomic_read(&sk->sk_rmem_alloc) > threshold;
+}
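
As a worked example, with sk_rcvbuf at 64 KiB the threshold is 65536 - (65536 >> 3) = 57344 bytes, so pressure is reported once more than 56 KiB (7/8 of the buffer) is allocated.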
+
 extern void tcp_openreq_init_rwin(struct request_sock *req,
                                  const struct sock *sk_listener,
                                  const struct dst_entry *dst);
index bf9eb482393322928e2c979c6169d2c8c1b22054..18cd4f418464d2c7fd26d6381d474e21dac60833 100644 (file)
@@ -135,6 +135,8 @@ struct tls_sw_context_tx {
        struct tls_rec *open_rec;
        struct list_head tx_list;
        atomic_t encrypt_pending;
+       /* protect crypto_wait with encrypt_pending */
+       spinlock_t encrypt_compl_lock;
        int async_notify;
        u8 async_capable:1;
 
@@ -155,6 +157,8 @@ struct tls_sw_context_rx {
        u8 async_capable:1;
        u8 decrypted:1;
        atomic_t decrypt_pending;
+       /* protect crypto_wait with decrypt_pending */
+       spinlock_t decrypt_compl_lock;
        bool async_notify;
 };
 
index 4b1f95e083070ae6b4ff81eb1b5fd2433a93e282..e7312ceb2794aa84b11e00eeba0b7ace586dc256 100644 (file)
@@ -143,14 +143,12 @@ void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb
                         __be16 df, __be16 src_port, __be16 dst_port,
                         bool xnet, bool nocheck);
 
-#if IS_ENABLED(CONFIG_IPV6)
 int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
                         struct sk_buff *skb,
                         struct net_device *dev, struct in6_addr *saddr,
                         struct in6_addr *daddr,
                         __u8 prio, __u8 ttl, __be32 label,
                         __be16 src_port, __be16 dst_port, bool nocheck);
-#endif
 
 void udp_tunnel_sock_release(struct socket *sock);
 
index 1b28ce1aba07bf3b8b71a7cf5b4979b47f573672..325fdaa3bb66313ceccff78719eb4e99715ac2ae 100644 (file)
@@ -88,7 +88,7 @@ struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,
 
 static inline void uobj_put_destroy(struct ib_uobject *uobj)
 {
-       rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
+       rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
 }
 
 static inline void uobj_put_read(struct ib_uobject *uobj)
index 6d6a3947c8b76c4cc08149d1cee9f99619af277c..efc8b613d486dc2bf460b3f0437d5f06a07dbf09 100644 (file)
@@ -502,6 +502,7 @@ struct ocelot {
        unsigned int                    num_stats;
 
        int                             shared_queue_sz;
+       int                             num_mact_rows;
 
        struct net_device               *hw_bridge_dev;
        u16                             bridge_mask;
index a36b7227a15ad5dee698a60d3c15d5fae63c04ee..334842daa90459c8f4aeced7a7c0fcbd42ffdf29 100644 (file)
@@ -61,6 +61,7 @@ struct snd_rawmidi_runtime {
        size_t avail_min;       /* min avail for wakeup */
        size_t avail;           /* max used buffer for wakeup */
        size_t xruns;           /* over/underruns counter */
+       int buffer_ref;         /* buffer reference count */
        /* misc */
        spinlock_t lock;
        wait_queue_head_t sleep;
index 1897822a9150659127631d4220702bd639beef5f..26d871f96e9407f9a0b96a58bdf317593ddeb6ca 100644 (file)
@@ -24,7 +24,7 @@
  *
  * @pid: Put 0 for global total, while positive pid for process total.
  *
- * @size: Virtual size of the allocation in bytes.
+ * @size: Size of the allocation in bytes.
  *
  */
 TRACE_EVENT(gpu_mem_total,
index 596e0a8034772b5bbdd29288c6f04544b2b4ad6b..132c3c778a43260745a75f3dc36e4eb3c706b61f 100644 (file)
@@ -692,11 +692,10 @@ TRACE_EVENT(xprtrdma_prepsend_failed,
 
 TRACE_EVENT(xprtrdma_post_send,
        TP_PROTO(
-               const struct rpcrdma_req *req,
-               int status
+               const struct rpcrdma_req *req
        ),
 
-       TP_ARGS(req, status),
+       TP_ARGS(req),
 
        TP_STRUCT__entry(
                __field(const void *, req)
@@ -705,7 +704,6 @@ TRACE_EVENT(xprtrdma_post_send,
                __field(unsigned int, client_id)
                __field(int, num_sge)
                __field(int, signaled)
-               __field(int, status)
        ),
 
        TP_fast_assign(
@@ -718,15 +716,13 @@ TRACE_EVENT(xprtrdma_post_send,
                __entry->sc = req->rl_sendctx;
                __entry->num_sge = req->rl_wr.num_sge;
                __entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
-               __entry->status = status;
        ),
 
-       TP_printk("task:%u@%u req=%p sc=%p (%d SGE%s) %sstatus=%d",
+       TP_printk("task:%u@%u req=%p sc=%p (%d SGE%s) %s",
                __entry->task_id, __entry->client_id,
                __entry->req, __entry->sc, __entry->num_sge,
                (__entry->num_sge == 1 ? "" : "s"),
-               (__entry->signaled ? "signaled " : ""),
-               __entry->status
+               (__entry->signaled ? "signaled" : "")
        )
 );
 
index 191fe447f9908c64bafad0ae55724136c75c08ea..ba9efdc848f970b1b05e9fd0a30091a37159029c 100644 (file)
@@ -1112,18 +1112,17 @@ TRACE_EVENT(rxrpc_rtt_tx,
 TRACE_EVENT(rxrpc_rtt_rx,
            TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
                     rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
-                    s64 rtt, u8 nr, s64 avg),
+                    u32 rtt, u32 rto),
 
-           TP_ARGS(call, why, send_serial, resp_serial, rtt, nr, avg),
+           TP_ARGS(call, why, send_serial, resp_serial, rtt, rto),
 
            TP_STRUCT__entry(
                    __field(unsigned int,               call            )
                    __field(enum rxrpc_rtt_rx_trace,    why             )
-                   __field(u8,                         nr              )
                    __field(rxrpc_serial_t,             send_serial     )
                    __field(rxrpc_serial_t,             resp_serial     )
-                   __field(s64,                        rtt             )
-                   __field(u64,                        avg             )
+                   __field(u32,                        rtt             )
+                   __field(u32,                        rto             )
                             ),
 
            TP_fast_assign(
@@ -1132,18 +1131,16 @@ TRACE_EVENT(rxrpc_rtt_rx,
                    __entry->send_serial = send_serial;
                    __entry->resp_serial = resp_serial;
                    __entry->rtt = rtt;
-                   __entry->nr = nr;
-                   __entry->avg = avg;
+                   __entry->rto = rto;
                           ),
 
-           TP_printk("c=%08x %s sr=%08x rr=%08x rtt=%lld nr=%u avg=%lld",
+           TP_printk("c=%08x %s sr=%08x rr=%08x rtt=%u rto=%u",
                      __entry->call,
                      __print_symbolic(__entry->why, rxrpc_rtt_rx_traces),
                      __entry->send_serial,
                      __entry->resp_serial,
                      __entry->rtt,
-                     __entry->nr,
-                     __entry->avg)
+                     __entry->rto)
            );
 
 TRACE_EVENT(rxrpc_timer,
@@ -1544,6 +1541,41 @@ TRACE_EVENT(rxrpc_notify_socket,
                      __entry->serial)
            );
 
+TRACE_EVENT(rxrpc_rx_discard_ack,
+           TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial,
+                    rxrpc_seq_t first_soft_ack, rxrpc_seq_t call_ackr_first,
+                    rxrpc_seq_t prev_pkt, rxrpc_seq_t call_ackr_prev),
+
+           TP_ARGS(debug_id, serial, first_soft_ack, call_ackr_first,
+                   prev_pkt, call_ackr_prev),
+
+           TP_STRUCT__entry(
+                   __field(unsigned int,       debug_id        )
+                   __field(rxrpc_serial_t,     serial          )
+                   __field(rxrpc_seq_t,        first_soft_ack)
+                   __field(rxrpc_seq_t,        call_ackr_first)
+                   __field(rxrpc_seq_t,        prev_pkt)
+                   __field(rxrpc_seq_t,        call_ackr_prev)
+                            ),
+
+           TP_fast_assign(
+                   __entry->debug_id           = debug_id;
+                   __entry->serial             = serial;
+                   __entry->first_soft_ack     = first_soft_ack;
+                   __entry->call_ackr_first    = call_ackr_first;
+                   __entry->prev_pkt           = prev_pkt;
+                   __entry->call_ackr_prev     = call_ackr_prev;
+                          ),
+
+           TP_printk("c=%08x r=%08x %08x<%08x %08x<%08x",
+                     __entry->debug_id,
+                     __entry->serial,
+                     __entry->first_soft_ack,
+                     __entry->call_ackr_first,
+                     __entry->prev_pkt,
+                     __entry->call_ackr_prev)
+           );
+
 #endif /* _TRACE_RXRPC_H */
 
 /* This part must be outside protection */
index 784814160197bb7f4daf94d1508d31393f7f7a91..9c66e59d859cb3559d32f0e87d75bf498c916b2c 100644 (file)
@@ -33,7 +33,7 @@ TRACE_EVENT(wbt_stat,
        ),
 
        TP_fast_assign(
-               strlcpy(__entry->name, dev_name(bdi->dev),
+               strlcpy(__entry->name, bdi_dev_name(bdi),
                        ARRAY_SIZE(__entry->name));
                __entry->rmean          = stat[0].mean;
                __entry->rmin           = stat[0].min;
@@ -68,7 +68,7 @@ TRACE_EVENT(wbt_lat,
        ),
 
        TP_fast_assign(
-               strlcpy(__entry->name, dev_name(bdi->dev),
+               strlcpy(__entry->name, bdi_dev_name(bdi),
                        ARRAY_SIZE(__entry->name));
                __entry->lat = div_u64(lat, 1000);
        ),
@@ -105,7 +105,7 @@ TRACE_EVENT(wbt_step,
        ),
 
        TP_fast_assign(
-               strlcpy(__entry->name, dev_name(bdi->dev),
+               strlcpy(__entry->name, bdi_dev_name(bdi),
                        ARRAY_SIZE(__entry->name));
                __entry->msg    = msg;
                __entry->step   = step;
@@ -141,7 +141,7 @@ TRACE_EVENT(wbt_timer,
        ),
 
        TP_fast_assign(
-               strlcpy(__entry->name, dev_name(bdi->dev),
+               strlcpy(__entry->name, bdi_dev_name(bdi),
                        ARRAY_SIZE(__entry->name));
                __entry->status         = status;
                __entry->step           = step;
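All four hunks in this file swap dev_name(bdi->dev) for bdi_dev_name(bdi), presumably so the trace events stay safe against a bdi whose device is already gone. A loose sketch of the helper's assumed behavior (the fallback string is a guess):

        static inline const char *bdi_dev_name_sketch(struct backing_dev_info *bdi)
        {
                /* assumed: never dereference a NULL bdi->dev */
                if (!bdi || !bdi->dev)
                        return "(unknown)";
                return dev_name(bdi->dev);
        }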
index 65f69723cbdc1cce661115352b098de49381783c..d28b4ce744d59579a8ef5e9e004a9210c3bfdab6 100644 (file)
@@ -346,6 +346,10 @@ struct drm_amdgpu_gem_userptr {
 #define AMDGPU_TILING_DCC_PITCH_MAX_MASK               0x3FFF
 #define AMDGPU_TILING_DCC_INDEPENDENT_64B_SHIFT                43
 #define AMDGPU_TILING_DCC_INDEPENDENT_64B_MASK         0x1
+#define AMDGPU_TILING_DCC_INDEPENDENT_128B_SHIFT       44
+#define AMDGPU_TILING_DCC_INDEPENDENT_128B_MASK                0x1
+#define AMDGPU_TILING_SCANOUT_SHIFT                    63
+#define AMDGPU_TILING_SCANOUT_MASK                     0x1
 
 /* Set/Get helpers for tiling flags. */
 #define AMDGPU_TILING_SET(field, value) \
index 7bbf1b65be10bf26c5a9309dbeddc34de8072272..f9b7fdd951e487e8075cbebfb7a587ecb7df3334 100644 (file)
@@ -73,7 +73,7 @@ struct bpf_insn {
 /* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
 struct bpf_lpm_trie_key {
        __u32   prefixlen;      /* up to 32 for AF_INET, 128 for AF_INET6 */
-       __u8    data[]; /* Arbitrary size */
+       __u8    data[0];        /* Arbitrary size */
 };
 
 struct bpf_cgroup_storage_key {
index e83954c69fff7fd256a0838a9ba493a6c37d44c3..f880d28311605cb941bbc53d94ff4921f88e35df 100644 (file)
@@ -45,13 +45,13 @@ struct dlm_lock_params {
        void __user *bastaddr;
        struct dlm_lksb __user *lksb;
        char lvb[DLM_USER_LVB_LEN];
-       char name[];
+       char name[0];
 };
 
 struct dlm_lspace_params {
        __u32 flags;
        __u32 minor;
-       char name[];
+       char name[0];
 };
 
 struct dlm_purge_params {
index dbc7092e04b5a46afa54a80b45e227d5a20dfc05..7f30393b92c3b4f26486964997f91ac8bca5ec4e 100644 (file)
@@ -39,6 +39,12 @@ struct dma_buf_sync {
 
 #define DMA_BUF_BASE           'b'
 #define DMA_BUF_IOCTL_SYNC     _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync)
+
+/* The 32/64-bitness of this uapi was botched in Android; there's no
+ * difference between them in the actual uapi, they're just different numbers.
+ */
 #define DMA_BUF_SET_NAME       _IOW(DMA_BUF_BASE, 1, const char *)
+#define DMA_BUF_SET_NAME_A     _IOW(DMA_BUF_BASE, 1, u32)
+#define DMA_BUF_SET_NAME_B     _IOW(DMA_BUF_BASE, 1, u64)
 
 #endif
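_IOW() folds sizeof(type) into the ioctl number, so _IOW(DMA_BUF_BASE, 1, const char *) evaluates differently for 32-bit and 64-bit userspace; the _A/_B aliases pin both concrete values. A hypothetical userspace probe, assuming the header is includable as-is:

        #include <linux/dma-buf.h>
        #include <stdio.h>

        int main(void)
        {
                /* the two numbers differ only in the encoded size field */
                printf("A=%#lx B=%#lx\n",
                       (unsigned long)DMA_BUF_SET_NAME_A,
                       (unsigned long)DMA_BUF_SET_NAME_B);
                return 0;
        }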
index 7a900b2377b603df04f61461888566aa125fc061..8c0bc24d5d95555754e6bfe56e0ba5543faa460d 100644 (file)
@@ -34,7 +34,7 @@ struct fiemap {
        __u32 fm_mapped_extents;/* number of extents that were mapped (out) */
        __u32 fm_extent_count;  /* size of fm_extents array (in) */
        __u32 fm_reserved;
-       struct fiemap_extent fm_extents[]; /* array of mapped extents (out) */
+       struct fiemap_extent fm_extents[0]; /* array of mapped extents (out) */
 };
 
 #define FIEMAP_MAX_OFFSET      (~0ULL)
index 991b2b7ada7a3cd88b4b44723e0b5757e4f3c4eb..8f24404ad04f14516af24523f32b9d07b5346c98 100644 (file)
@@ -119,8 +119,8 @@ enum hv_fcopy_op {
 
 struct hv_fcopy_hdr {
        __u32 operation;
-       uuid_le service_id0; /* currently unused */
-       uuid_le service_id1; /* currently unused */
+       __u8 service_id0[16]; /* currently unused */
+       __u8 service_id1[16]; /* currently unused */
 } __attribute__((packed));
 
 #define OVER_WRITE     0x1
index b122cfac71288c35e8c18c60744a7ff54d5c8b90..683878036d76d7ccdd6fe7a6e907a7977a40067b 100644 (file)
@@ -60,7 +60,7 @@ struct arc_rfc1201 {
        __u8  proto;            /* protocol ID field - varies           */
        __u8  split_flag;       /* for use with split packets           */
        __be16   sequence;      /* sequence number                      */
-       __u8  payload[];        /* space remaining in packet (504 bytes)*/
+       __u8  payload[0];       /* space remaining in packet (504 bytes)*/
 };
 #define RFC1201_HDR_SIZE 4
 
@@ -69,7 +69,7 @@ struct arc_rfc1201 {
  */
 struct arc_rfc1051 {
        __u8 proto;             /* ARC_P_RFC1051_ARP/RFC1051_IP */
-       __u8 payload[]; /* 507 bytes                    */
+       __u8 payload[0];        /* 507 bytes                    */
 };
 #define RFC1051_HDR_SIZE 1
 
@@ -80,7 +80,7 @@ struct arc_rfc1051 {
 struct arc_eth_encap {
        __u8 proto;             /* Always ARC_P_ETHER                   */
        struct ethhdr eth;      /* standard ethernet header (yuck!)     */
-       __u8 payload[]; /* 493 bytes                            */
+       __u8 payload[0];        /* 493 bytes                            */
 };
 #define ETH_ENCAP_HDR_SIZE 14
 
index 4ad3496e5c4311a4e3057a17b4c244d3bed9822f..e907b7091a4636bf5b23bac049de5c016beca9c4 100644 (file)
@@ -285,6 +285,11 @@ struct iommu_gpasid_bind_data_vtd {
        __u32 emt;
 };
 
+#define IOMMU_SVA_VTD_GPASID_MTS_MASK  (IOMMU_SVA_VTD_GPASID_CD | \
+                                        IOMMU_SVA_VTD_GPASID_EMTE | \
+                                        IOMMU_SVA_VTD_GPASID_PCD |  \
+                                        IOMMU_SVA_VTD_GPASID_PWT)
+
 /**
  * struct iommu_gpasid_bind_data - Information about device and guest PASID binding
  * @version:   Version of this data structure
index 98e29e7f54ace2f7ca13b1dfd5c337976b2192d9..00c08120f3ba60c2719458fd8903dd953744a1df 100644 (file)
@@ -57,7 +57,7 @@ struct mmc_ioc_cmd {
  */
 struct mmc_ioc_multi_cmd {
        __u64 num_of_cmds;
-       struct mmc_ioc_cmd cmds[];
+       struct mmc_ioc_cmd cmds[0];
 };
 
 #define MMC_IOC_CMD _IOWR(MMC_BLOCK_MAJOR, 0, struct mmc_ioc_cmd)
index 67e31f3291904199f12e48a4a6f2e0925cc33d81..66048cc5d7b361da368d006688c614df440ea51f 100644 (file)
@@ -29,12 +29,12 @@ struct net_dm_config_entry {
 
 struct net_dm_config_msg {
        __u32 entries;
-       struct net_dm_config_entry options[];
+       struct net_dm_config_entry options[0];
 };
 
 struct net_dm_alert_msg {
        __u32 entries;
-       struct net_dm_drop_point points[];
+       struct net_dm_drop_point points[0];
 };
 
 struct net_dm_user_msg {
index 73b26a280c4fd7e3db4bfde98b4daee70d699a7c..9acf757bc1f79328c8693b1edac37d1b36b8d225 100644 (file)
@@ -40,7 +40,7 @@ struct ebt_mac_wormhash_tuple {
 struct ebt_mac_wormhash {
        int table[257];
        int poolsize;
-       struct ebt_mac_wormhash_tuple pool[];
+       struct ebt_mac_wormhash_tuple pool[0];
 };
 
 #define ebt_mac_wormhash_size(x) ((x) ? sizeof(struct ebt_mac_wormhash) \
index ea375082b3ac7ab9fa6ea6919ce35cb0ff9a6588..0be685272eb180074999aaf13bfdebd302903765 100644 (file)
@@ -93,6 +93,64 @@ struct usb_raw_ep_io {
        __u8            data[0];
 };
 
+/* Maximum number of non-control endpoints in struct usb_raw_eps_info. */
+#define USB_RAW_EPS_NUM_MAX    30
+
+/* Maximum length of UDC endpoint name in struct usb_raw_ep_info. */
+#define USB_RAW_EP_NAME_MAX    16
+
+/* Used as addr in struct usb_raw_ep_info if endpoint accepts any address. */
+#define USB_RAW_EP_ADDR_ANY    0xff
+
+/*
+ * struct usb_raw_ep_caps - exposes endpoint capabilities from struct usb_ep
+ *     (technically from its member struct usb_ep_caps).
+ */
+struct usb_raw_ep_caps {
+       __u32   type_control    : 1;
+       __u32   type_iso        : 1;
+       __u32   type_bulk       : 1;
+       __u32   type_int        : 1;
+       __u32   dir_in          : 1;
+       __u32   dir_out         : 1;
+};
+
+/*
+ * struct usb_raw_ep_limits - exposes endpoint limits from struct usb_ep.
+ * @maxpacket_limit: Maximum packet size value supported by this endpoint.
+ * @max_streams: Maximum number of streams supported by this endpoint
+ *     (actual number is 2^n).
+ * @reserved: Empty, reserved for potential future extensions.
+ */
+struct usb_raw_ep_limits {
+       __u16   maxpacket_limit;
+       __u16   max_streams;
+       __u32   reserved;
+};
+
+/*
+ * struct usb_raw_ep_info - stores information about a gadget endpoint.
+ * @name: Name of the endpoint as it is defined in the UDC driver.
+ * @addr: Address of the endpoint that must be specified in the endpoint
+ *     descriptor passed to USB_RAW_IOCTL_EP_ENABLE ioctl.
+ * @caps: Endpoint capabilities.
+ * @limits: Endpoint limits.
+ */
+struct usb_raw_ep_info {
+       __u8                            name[USB_RAW_EP_NAME_MAX];
+       __u32                           addr;
+       struct usb_raw_ep_caps          caps;
+       struct usb_raw_ep_limits        limits;
+};
+
+/*
+ * struct usb_raw_eps_info - argument for USB_RAW_IOCTL_EPS_INFO ioctl.
+ * @eps: Structures that store information about non-control endpoints.
+ */
+struct usb_raw_eps_info {
+       struct usb_raw_ep_info  eps[USB_RAW_EPS_NUM_MAX];
+};
+
 /*
  * Initializes a Raw Gadget instance.
  * Accepts a pointer to the usb_raw_init struct as an argument.
@@ -115,37 +173,38 @@ struct usb_raw_ep_io {
 #define USB_RAW_IOCTL_EVENT_FETCH      _IOR('U', 2, struct usb_raw_event)
 
 /*
- * Queues an IN (OUT for READ) urb as a response to the last control request
- * received on endpoint 0, provided that was an IN (OUT for READ) request and
- * waits until the urb is completed. Copies received data to user for READ.
+ * Queues an IN (OUT for READ) request as a response to the last setup request
+ * received on endpoint 0 (provided that was an IN (OUT for READ) request), and
+ * waits until the request is completed. Copies received data to user for READ.
  * Accepts a pointer to the usb_raw_ep_io struct as an argument.
- * Returns length of trasferred data on success or negative error code on
+ * Returns length of transferred data on success or negative error code on
  * failure.
  */
 #define USB_RAW_IOCTL_EP0_WRITE                _IOW('U', 3, struct usb_raw_ep_io)
 #define USB_RAW_IOCTL_EP0_READ         _IOWR('U', 4, struct usb_raw_ep_io)
 
 /*
- * Finds an endpoint that supports the transfer type specified in the
- * descriptor and enables it.
- * Accepts a pointer to the usb_endpoint_descriptor struct as an argument.
+ * Finds an endpoint that satisfies the parameters specified in the provided
+ * descriptors (address, transfer type, etc.) and enables it.
+ * Accepts a pointer to the usb_raw_ep_descs struct as an argument.
  * Returns enabled endpoint handle on success or negative error code on failure.
  */
 #define USB_RAW_IOCTL_EP_ENABLE                _IOW('U', 5, struct usb_endpoint_descriptor)
 
-/* Disables specified endpoint.
+/*
+ * Disables specified endpoint.
  * Accepts endpoint handle as an argument.
  * Returns 0 on success or negative error code on failure.
  */
 #define USB_RAW_IOCTL_EP_DISABLE       _IOW('U', 6, __u32)
 
 /*
- * Queues an IN (OUT for READ) urb as a response to the last control request
- * received on endpoint usb_raw_ep_io.epprovided that was an IN (OUT for READ)
- * request and waits until the urb is completed. Copies received data to user
- * for READ.
+ * Queues an IN (OUT for READ) request as a response to the last setup request
+ * received on endpoint usb_raw_ep_io.ep (provided that was an IN (OUT for READ)
+ * request), and waits until the request is completed. Copies received data to
+ * user for READ.
  * Accepts a pointer to the usb_raw_ep_io struct as an argument.
- * Returns length of trasferred data on success or negative error code on
+ * Returns length of transferred data on success or negative error code on
  * failure.
  */
 #define USB_RAW_IOCTL_EP_WRITE         _IOW('U', 7, struct usb_raw_ep_io)
@@ -164,4 +223,27 @@ struct usb_raw_ep_io {
  */
 #define USB_RAW_IOCTL_VBUS_DRAW                _IOW('U', 10, __u32)
 
+/*
+ * Fills in the usb_raw_eps_info structure with information about non-control
+ * endpoints available for the currently connected UDC.
+ * Returns the number of available endpoints on success or negative error code
+ * on failure.
+ */
+#define USB_RAW_IOCTL_EPS_INFO         _IOR('U', 11, struct usb_raw_eps_info)
+
+/*
+ * Stalls a pending control request on endpoint 0.
+ * Returns 0 on success or negative error code on failure.
+ */
+#define USB_RAW_IOCTL_EP0_STALL                _IO('U', 12)
+
+/*
+ * Sets or clears halt or wedge status of the endpoint.
+ * Accepts endpoint handle as an argument.
+ * Returns 0 on success or negative error code on failure.
+ */
+#define USB_RAW_IOCTL_EP_SET_HALT      _IOW('U', 13, __u32)
+#define USB_RAW_IOCTL_EP_CLEAR_HALT    _IOW('U', 14, __u32)
+#define USB_RAW_IOCTL_EP_SET_WEDGE     _IOW('U', 15, __u32)
+
 #endif /* _UAPI__LINUX_USB_RAW_GADGET_H */
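A hypothetical userspace sequence for the new EPS_INFO ioctl, using only the structures and semantics documented above (fd setup and error handling elided; name may not be NUL-terminated at maximum length, hence the precision):

        struct usb_raw_eps_info info;
        int i, num;

        memset(&info, 0, sizeof(info));
        num = ioctl(fd, USB_RAW_IOCTL_EPS_INFO, &info);
        for (i = 0; i < num; i++)
                printf("ep '%.16s' addr=%u bulk=%d in=%d\n",
                       info.eps[i].name, info.eps[i].addr,
                       info.eps[i].caps.type_bulk, info.eps[i].caps.dir_in);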
index 5f3b9fec7b5f4491ad9f38beea7447a305ff4fb0..ff7cfdc6cb44dc98dde15e2e39ebebd80ec8f916 100644 (file)
@@ -304,7 +304,7 @@ enum xfrm_attr_type_t {
        XFRMA_PROTO,            /* __u8 */
        XFRMA_ADDRESS_FILTER,   /* struct xfrm_address_filter */
        XFRMA_PAD,
-       XFRMA_OFFLOAD_DEV,      /* struct xfrm_state_offload */
+       XFRMA_OFFLOAD_DEV,      /* struct xfrm_user_offload */
        XFRMA_SET_MARK,         /* __u32 */
        XFRMA_SET_MARK_MASK,    /* __u32 */
        XFRMA_IF_ID,            /* __u32 */
index 7f5930801f722164013fc143dff0fcb3568dfbdd..3ae65e93235cf77ffbae4a37fff9bbe35e600933 100644 (file)
@@ -209,7 +209,7 @@ struct fc_bsg_host_vendor {
        __u64 vendor_id;
 
        /* start of vendor command area */
-       __u32 vendor_cmd[];
+       __u32 vendor_cmd[0];
 };
 
 /* Response:
index 9e22ee8fbd75e2dbe2d80fa0952e0393e4ee01dd..74a5ac65644f3f825936f91134e14095e3f43851 100644 (file)
@@ -39,22 +39,6 @@ config TOOLS_SUPPORT_RELR
 config CC_HAS_ASM_INLINE
        def_bool $(success,echo 'void foo(void) { asm inline (""); }' | $(CC) -x c - -c -o /dev/null)
 
-config CC_HAS_WARN_MAYBE_UNINITIALIZED
-       def_bool $(cc-option,-Wmaybe-uninitialized)
-       help
-         GCC >= 4.7 supports this option.
-
-config CC_DISABLE_WARN_MAYBE_UNINITIALIZED
-       bool
-       depends on CC_HAS_WARN_MAYBE_UNINITIALIZED
-       default CC_IS_GCC && GCC_VERSION < 40900  # unreliable for GCC < 4.9
-       help
-         GCC's -Wmaybe-uninitialized is not reliable by definition.
-         Lots of false positive warnings are produced in some cases.
-
-         If this option is enabled, -Wno-maybe-uninitialzed is passed
-         to the compiler to suppress maybe-uninitialized warnings.
-
 config CONSTRUCTORS
        bool
        depends on !UML
@@ -1257,14 +1241,12 @@ config CC_OPTIMIZE_FOR_PERFORMANCE
 config CC_OPTIMIZE_FOR_PERFORMANCE_O3
        bool "Optimize more for performance (-O3)"
        depends on ARC
-       imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED  # avoid false positives
        help
          Choosing this option will pass "-O3" to your compiler to optimize
          the kernel yet more for performance.
 
 config CC_OPTIMIZE_FOR_SIZE
        bool "Optimize for size (-Os)"
-       imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED  # avoid false positives
        help
          Choosing this option will pass "-Os" to your compiler resulting
          in a smaller kernel.
@@ -2279,6 +2261,9 @@ config ASN1
 
 source "kernel/Kconfig.locks"
 
+config ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+       bool
+
 config ARCH_HAS_SYNC_CORE_BEFORE_USERMODE
        bool
 
index 8ec1be4d7d5121498e606ef522a7c4eeeef3e52b..7a38012e1af742124298ca2b299886076631f3df 100644 (file)
@@ -542,7 +542,7 @@ void __weak free_initrd_mem(unsigned long start, unsigned long end)
 }
 
 #ifdef CONFIG_KEXEC_CORE
-static bool kexec_free_initrd(void)
+static bool __init kexec_free_initrd(void)
 {
        unsigned long crashk_start = (unsigned long)__va(crashk_res.start);
        unsigned long crashk_end   = (unsigned long)__va(crashk_res.end);
index a48617f2e5e5bcf789998415c7ea37c8e97de55d..03371976d38729345991c874cf6ad57d567492f1 100644 (file)
@@ -257,6 +257,47 @@ static int __init loglevel(char *str)
 
 early_param("loglevel", loglevel);
 
+#ifdef CONFIG_BLK_DEV_INITRD
+static void * __init get_boot_config_from_initrd(u32 *_size, u32 *_csum)
+{
+       u32 size, csum;
+       char *data;
+       u32 *hdr;
+
+       if (!initrd_end)
+               return NULL;
+
+       data = (char *)initrd_end - BOOTCONFIG_MAGIC_LEN;
+       if (memcmp(data, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN))
+               return NULL;
+
+       hdr = (u32 *)(data - 8);
+       size = hdr[0];
+       csum = hdr[1];
+
+       data = ((void *)hdr) - size;
+       if ((unsigned long)data < initrd_start) {
+               pr_err("bootconfig size %d is greater than initrd size %ld\n",
+                       size, initrd_end - initrd_start);
+               return NULL;
+       }
+
+       /* Remove bootconfig from initramfs/initrd */
+       initrd_end = (unsigned long)data;
+       if (_size)
+               *_size = size;
+       if (_csum)
+               *_csum = csum;
+
+       return data;
+}
+#else
+static void * __init get_boot_config_from_initrd(u32 *_size, u32 *_csum)
+{
+       return NULL;
+}
+#endif
+
 #ifdef CONFIG_BOOT_CONFIG
 
 char xbc_namebuf[XBC_KEYLEN_MAX] __initdata;
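The initrd footer layout get_boot_config_from_initrd() assumes, reconstructed from the pointer arithmetic above (the 8 bytes before the magic are the two u32 reads, size then csum):

        initrd_start                                               initrd_end
        v                                                          v
        [ initrd image | bootconfig data (size bytes) | size | csum | magic ]
                        ^ data (= hdr - size)          ^ hdr        ^ initrd_end - BOOTCONFIG_MAGIC_LEN

Winding initrd_end back to data is what removes the bootconfig blob from the initrd before the normal unpacking runs.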
@@ -357,9 +398,11 @@ static void __init setup_boot_config(const char *cmdline)
        int pos;
        u32 size, csum;
        char *data, *copy;
-       u32 *hdr;
        int ret;
 
+       /* Cut out the bootconfig data even if we have no bootconfig option */
+       data = get_boot_config_from_initrd(&size, &csum);
+
        strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE);
        parse_args("bootconfig", tmp_cmdline, NULL, 0, 0, 0, NULL,
                   bootconfig_params);
@@ -367,16 +410,10 @@ static void __init setup_boot_config(const char *cmdline)
        if (!bootconfig_found)
                return;
 
-       if (!initrd_end)
-               goto not_found;
-
-       data = (char *)initrd_end - BOOTCONFIG_MAGIC_LEN;
-       if (memcmp(data, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN))
-               goto not_found;
-
-       hdr = (u32 *)(data - 8);
-       size = hdr[0];
-       csum = hdr[1];
+       if (!data) {
+               pr_err("'bootconfig' found on command line, but no bootconfig found\n");
+               return;
+       }
 
        if (size >= XBC_DATA_MAX) {
                pr_err("bootconfig size %d greater than max size %d\n",
@@ -384,10 +421,6 @@ static void __init setup_boot_config(const char *cmdline)
                return;
        }
 
-       data = ((void *)hdr) - size;
-       if ((unsigned long)data < initrd_start)
-               goto not_found;
-
        if (boot_config_checksum((unsigned char *)data, size) != csum) {
                pr_err("bootconfig checksum failed\n");
                return;
@@ -417,11 +450,15 @@ static void __init setup_boot_config(const char *cmdline)
                extra_init_args = xbc_make_cmdline("init");
        }
        return;
-not_found:
-       pr_err("'bootconfig' found on command line, but no bootconfig found\n");
 }
+
 #else
-#define setup_boot_config(cmdline)     do { } while (0)
+
+static void __init setup_boot_config(const char *cmdline)
+{
+       /* Remove bootconfig data from initrd */
+       get_boot_config_from_initrd(NULL, NULL);
+}
 
 static int __init warn_bootconfig(char *str)
 {
@@ -1001,6 +1038,8 @@ asmlinkage __visible void __init start_kernel(void)
 
        /* Do the rest non-__init'ed, we're now alive */
        arch_call_rest_init();
+
+       prevent_tail_call_optimization();
 }
 
 /* Call all constructor functions linked into the kernel. */
index dc8307bf2d74d3b550d2fac210dae09920ef0bcf..beff0cfcd1e874dd3cb03fd23c1a28fb67b373c2 100644 (file)
@@ -142,6 +142,7 @@ struct mqueue_inode_info {
 
        struct sigevent notify;
        struct pid *notify_owner;
+       u32 notify_self_exec_id;
        struct user_namespace *notify_user_ns;
        struct user_struct *user;       /* user who created, for accounting */
        struct sock *notify_sock;
@@ -773,28 +774,44 @@ static void __do_notify(struct mqueue_inode_info *info)
         * synchronously. */
        if (info->notify_owner &&
            info->attr.mq_curmsgs == 1) {
-               struct kernel_siginfo sig_i;
                switch (info->notify.sigev_notify) {
                case SIGEV_NONE:
                        break;
-               case SIGEV_SIGNAL:
-                       /* sends signal */
+               case SIGEV_SIGNAL: {
+                       struct kernel_siginfo sig_i;
+                       struct task_struct *task;
+
+                       /* do_mq_notify() accepts sigev_signo == 0, why?? */
+                       if (!info->notify.sigev_signo)
+                               break;
 
                        clear_siginfo(&sig_i);
                        sig_i.si_signo = info->notify.sigev_signo;
                        sig_i.si_errno = 0;
                        sig_i.si_code = SI_MESGQ;
                        sig_i.si_value = info->notify.sigev_value;
-                       /* map current pid/uid into info->owner's namespaces */
                        rcu_read_lock();
+                       /* map current pid/uid into info->owner's namespaces */
                        sig_i.si_pid = task_tgid_nr_ns(current,
                                                ns_of_pid(info->notify_owner));
-                       sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
+                       sig_i.si_uid = from_kuid_munged(info->notify_user_ns,
+                                               current_uid());
+                       /*
+                        * We can't use kill_pid_info(), this signal should
+                        * bypass check_kill_permission(). It is from kernel
+                        * but si_fromuser() can't know this.
+                        * We do check the self_exec_id, to avoid sending
+                        * signals to programs that don't expect them.
+                        */
+                       task = pid_task(info->notify_owner, PIDTYPE_TGID);
+                       if (task && task->self_exec_id ==
+                                               info->notify_self_exec_id) {
+                               do_send_sig_info(info->notify.sigev_signo,
+                                               &sig_i, task, PIDTYPE_TGID);
+                       }
                        rcu_read_unlock();
-
-                       kill_pid_info(info->notify.sigev_signo,
-                                     &sig_i, info->notify_owner);
                        break;
+               }
                case SIGEV_THREAD:
                        set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
                        netlink_sendskb(info->notify_sock, info->notify_cookie);
@@ -1383,6 +1400,7 @@ retry:
                        info->notify.sigev_signo = notification->sigev_signo;
                        info->notify.sigev_value = notification->sigev_value;
                        info->notify.sigev_notify = SIGEV_SIGNAL;
+                       info->notify_self_exec_id = current->self_exec_id;
                        break;
                }
 
index 7acccfded7cb034c9aeada477af8e0bf61302520..cfa0045e748d55a5e0b1fbd608fe477b8cf4c96f 100644 (file)
@@ -764,21 +764,21 @@ static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
                        total++;
        }
 
-       *new_pos = pos + 1;
+       ipc = NULL;
        if (total >= ids->in_use)
-               return NULL;
+               goto out;
 
        for (; pos < ipc_mni; pos++) {
                ipc = idr_find(&ids->ipcs_idr, pos);
                if (ipc != NULL) {
                        rcu_read_lock();
                        ipc_lock_object(ipc);
-                       return ipc;
+                       break;
                }
        }
-
-       /* Out of range - return NULL to terminate iteration */
-       return NULL;
+out:
+       *new_pos = pos + 1;
+       return ipc;
 }
 
 static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
index 95d77770353c9bb5af0311f5813471e11d70c86e..1d6120fd5ba687293cef2bda8902d9c97f64d22f 100644 (file)
@@ -486,7 +486,12 @@ static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
        if (!(map->map_flags & BPF_F_MMAPABLE))
                return -EINVAL;
 
-       return remap_vmalloc_range(vma, array_map_vmalloc_addr(array), pgoff);
+       if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
+           PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
+               return -EINVAL;
+
+       return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
+                                  vma->vm_pgoff + pgoff);
 }
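A worked instance of the new bounds check, with hypothetical numbers: for max_entries = 4 and elem_size = 8 on a 4K-page system, PAGE_ALIGN(4 * 8) = 4096, so:

        /* vm_pgoff = 0, requested length 8192:
         * 0 * PAGE_SIZE + 8192 > 4096  ->  -EINVAL
         * (previously such a request could map pages past the array)
         */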
 
 const struct bpf_map_ops array_map_ops = {
index 7626b802447128f4fc5ec789e24d9bb6d8e96a27..4e6dee19a668f09ebab03d535a79b663c2b42ba5 100644 (file)
@@ -623,9 +623,20 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
 
        mutex_lock(&map->freeze_mutex);
 
-       if ((vma->vm_flags & VM_WRITE) && map->frozen) {
-               err = -EPERM;
-               goto out;
+       if (vma->vm_flags & VM_WRITE) {
+               if (map->frozen) {
+                       err = -EPERM;
+                       goto out;
+               }
+               /* map is meant to be read-only, so do not allow mapping as
+                * writable, because it's possible to leak a writable page
+                * reference, allowing user-space to still modify it after
+                * freezing, while the verifier assumes contents do not change
+                */
+               if (map->map_flags & BPF_F_RDONLY_PROG) {
+                       err = -EACCES;
+                       goto out;
+               }
        }
 
        /* set default open/close callbacks */
@@ -1485,8 +1496,10 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
        if (err)
                goto free_value;
 
-       if (copy_to_user(uvalue, value, value_size) != 0)
+       if (copy_to_user(uvalue, value, value_size) != 0) {
+               err = -EFAULT;
                goto free_value;
+       }
 
        err = 0;
 
index fa1d8245b9257fe6f0f40dfca35702ebe7662000..efe14cf24bc6564e6ef878690cd0a1ffacae728f 100644 (file)
@@ -1168,14 +1168,14 @@ static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
         * but must be positive otherwise set to worse case bounds
         * and refine later from tnum.
         */
-       if (reg->s32_min_value > 0)
-               reg->smin_value = reg->s32_min_value;
-       else
-               reg->smin_value = 0;
-       if (reg->s32_max_value > 0)
+       if (reg->s32_min_value >= 0 && reg->s32_max_value >= 0)
                reg->smax_value = reg->s32_max_value;
        else
                reg->smax_value = U32_MAX;
+       if (reg->s32_min_value >= 0)
+               reg->smin_value = reg->s32_min_value;
+       else
+               reg->smin_value = 0;
 }
 
 static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
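A concrete case the reordered bounds logic fixes, with hypothetical register values:

        /* s32_min_value = -1, s32_max_value = 5:
         * old: s32_max_value > 0, so smax_value = 5; but the bit pattern
         *      of -1 read as an unsigned 64-bit value is 4294967295,
         *      making 5 an unsound upper bound.
         * new: both 32-bit bounds must be >= 0 before s32_max_value is
         *      trusted, so smax_value = U32_MAX and smin_value = 0 here.
         */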
@@ -4340,7 +4340,9 @@ static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
 
        if (ret_type != RET_INTEGER ||
            (func_id != BPF_FUNC_get_stack &&
-            func_id != BPF_FUNC_probe_read_str))
+            func_id != BPF_FUNC_probe_read_str &&
+            func_id != BPF_FUNC_probe_read_kernel_str &&
+            func_id != BPF_FUNC_probe_read_user_str))
                return;
 
        ret_reg->smax_value = meta->msize_max_value;
@@ -7059,6 +7061,23 @@ static int check_return_code(struct bpf_verifier_env *env)
                        return 0;
                range = tnum_const(0);
                break;
+       case BPF_PROG_TYPE_TRACING:
+               switch (env->prog->expected_attach_type) {
+               case BPF_TRACE_FENTRY:
+               case BPF_TRACE_FEXIT:
+                       range = tnum_const(0);
+                       break;
+               case BPF_TRACE_RAW_TP:
+               case BPF_MODIFY_RETURN:
+                       return 0;
+               default:
+                       return -ENOTSUPP;
+               }
+               break;
+       case BPF_PROG_TYPE_EXT:
+               /* freplace program can return anything as its return value
+                * depends on the to-be-replaced kernel func or bpf program.
+                */
        default:
                return 0;
        }
@@ -10409,22 +10428,13 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
 }
 #define SECURITY_PREFIX "security_"
 
-static int check_attach_modify_return(struct bpf_verifier_env *env)
+static int check_attach_modify_return(struct bpf_prog *prog, unsigned long addr)
 {
-       struct bpf_prog *prog = env->prog;
-       unsigned long addr = (unsigned long) prog->aux->trampoline->func.addr;
-
-       /* This is expected to be cleaned up in the future with the KRSI effort
-        * introducing the LSM_HOOK macro for cleaning up lsm_hooks.h.
-        */
        if (within_error_injection_list(addr) ||
            !strncmp(SECURITY_PREFIX, prog->aux->attach_func_name,
                     sizeof(SECURITY_PREFIX) - 1))
                return 0;
 
-       verbose(env, "fmod_ret attach_btf_id %u (%s) is not modifiable\n",
-               prog->aux->attach_btf_id, prog->aux->attach_func_name);
-
        return -EINVAL;
 }
 
@@ -10635,11 +10645,18 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
                                goto out;
                        }
                }
+
+               if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
+                       ret = check_attach_modify_return(prog, addr);
+                       if (ret)
+                               verbose(env, "%s() is not modifiable\n",
+                                       prog->aux->attach_func_name);
+               }
+
+               if (ret)
+                       goto out;
                tr->func.addr = (void *)addr;
                prog->aux->trampoline = tr;
-
-               if (prog->expected_attach_type == BPF_MODIFY_RETURN)
-                       ret = check_attach_modify_return(env);
 out:
                mutex_unlock(&tr->mutex);
                if (ret)
index 6f87352f8219cddbd96e3c439e88c3910c15fa7f..41ca996568dfbbfdc9d1e2dc7841ccd6ff819a56 100644 (file)
@@ -33,12 +33,9 @@ void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
                return;
 
        /*
-        * Paired with the one in cgroup_rstat_cpu_pop_updated().  Either we
-        * see NULL updated_next or they see our updated stat.
-        */
-       smp_mb();
-
-       /*
+        * Speculative already-on-list test. This may race, leading to
+        * temporary inaccuracies, which is fine.
+        *
         * Because @parent's updated_children is terminated with @parent
         * instead of NULL, we can tell whether @cgrp is on the list by
         * testing the next pointer for NULL.
@@ -134,13 +131,6 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
                *nextp = rstatc->updated_next;
                rstatc->updated_next = NULL;
 
-               /*
-                * Paired with the one in cgroup_rstat_cpu_updated().
-                * Either they see NULL updated_next or we see their
-                * updated stat.
-                */
-               smp_mb();
-
                return pos;
        }
 
index 8c700f881d920dfccaf531b7df23314f7bbf4120..48ed22774efaa6c3d9bc7738d29cdb15d29ff02d 100644 (file)
@@ -2486,11 +2486,11 @@ long do_fork(unsigned long clone_flags,
              int __user *child_tidptr)
 {
        struct kernel_clone_args args = {
-               .flags          = (clone_flags & ~CSIGNAL),
+               .flags          = (lower_32_bits(clone_flags) & ~CSIGNAL),
                .pidfd          = parent_tidptr,
                .child_tid      = child_tidptr,
                .parent_tid     = parent_tidptr,
-               .exit_signal    = (clone_flags & CSIGNAL),
+               .exit_signal    = (lower_32_bits(clone_flags) & CSIGNAL),
                .stack          = stack_start,
                .stack_size     = stack_size,
        };
@@ -2508,8 +2508,9 @@ long do_fork(unsigned long clone_flags,
 pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 {
        struct kernel_clone_args args = {
-               .flags          = ((flags | CLONE_VM | CLONE_UNTRACED) & ~CSIGNAL),
-               .exit_signal    = (flags & CSIGNAL),
+               .flags          = ((lower_32_bits(flags) | CLONE_VM |
+                                   CLONE_UNTRACED) & ~CSIGNAL),
+               .exit_signal    = (lower_32_bits(flags) & CSIGNAL),
                .stack          = (unsigned long)fn,
                .stack_size     = (unsigned long)arg,
        };
@@ -2570,11 +2571,11 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
 #endif
 {
        struct kernel_clone_args args = {
-               .flags          = (clone_flags & ~CSIGNAL),
+               .flags          = (lower_32_bits(clone_flags) & ~CSIGNAL),
                .pidfd          = parent_tidptr,
                .child_tid      = child_tidptr,
                .parent_tid     = parent_tidptr,
-               .exit_signal    = (clone_flags & CSIGNAL),
+               .exit_signal    = (lower_32_bits(clone_flags) & CSIGNAL),
                .stack          = newsp,
                .tls            = tls,
        };
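What lower_32_bits() guards against in all three call sites, with a made-up flag word:

        /* hypothetical 64-bit caller leaving garbage in the high half */
        unsigned long clone_flags = (1UL << 40) | CLONE_VM | SIGCHLD;

        /* lower_32_bits(clone_flags) == CLONE_VM | SIGCHLD: the legacy
         * clone()/fork() entry points only ever defined the low 32 bits,
         * so high bits must not leak into kernel_clone_args.flags.
         */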
index f50354202dbe52ca0652f7c20151b45894fd8840..8accc9722a815bf88d301d86a98d90bc80f92b7d 100644 (file)
@@ -740,8 +740,8 @@ static const struct file_operations kcov_fops = {
  * kcov_remote_handle() with KCOV_SUBSYSTEM_COMMON as the subsystem id and an
  * arbitrary 4-byte non-zero number as the instance id). This common handle
  * then gets saved into the task_struct of the process that issued the
- * KCOV_REMOTE_ENABLE ioctl. When this proccess issues system calls that spawn
- * kernel threads, the common handle must be retrived via kcov_common_handle()
+ * KCOV_REMOTE_ENABLE ioctl. When this process issues system calls that spawn
+ * kernel threads, the common handle must be retrieved via kcov_common_handle()
  * and passed to the spawned threads via custom annotations. Those kernel
  * threads must in turn be annotated with kcov_remote_start(common_handle) and
  * kcov_remote_stop(). All of the threads that are spawned by the same process
index 86aba8706b1654b83162fb2888916f2de013f6b0..30bd28d1d418cce6d785d1b65e68f429a253845d 100644 (file)
@@ -898,6 +898,13 @@ static int software_resume(void)
        error = freeze_processes();
        if (error)
                goto Close_Finish;
+
+       error = freeze_kernel_threads();
+       if (error) {
+               thaw_processes();
+               goto Close_Finish;
+       }
+
        error = load_image_and_restore();
        thaw_processes();
  Finish:
index a562df57a86e0cc82854bce06a6a4fcbefc083b6..239970b991c03d62177f4c1244decd63c657418d 100644 (file)
@@ -948,8 +948,8 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
        P(se.avg.util_est.enqueued);
 #endif
 #ifdef CONFIG_UCLAMP_TASK
-       __PS("uclamp.min", p->uclamp[UCLAMP_MIN].value);
-       __PS("uclamp.max", p->uclamp[UCLAMP_MAX].value);
+       __PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
+       __PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
        __PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
        __PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
 #endif
index 02f323b85b6d3a41441a8159ac294db0a9f89815..da3e5b54715b6753eaac360d7562f3deedb26210 100644 (file)
@@ -2908,7 +2908,7 @@ static void task_tick_numa(struct rq *rq, struct task_struct *curr)
        /*
         * We don't care about NUMA placement if we don't have memory.
         */
-       if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
+       if ((curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work)
                return;
 
        /*
@@ -4774,7 +4774,6 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
        struct rq *rq = rq_of(cfs_rq);
        struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
        struct sched_entity *se;
-       int enqueue = 1;
        long task_delta, idle_task_delta;
 
        se = cfs_rq->tg->se[cpu_of(rq)];
@@ -4798,26 +4797,44 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
        idle_task_delta = cfs_rq->idle_h_nr_running;
        for_each_sched_entity(se) {
                if (se->on_rq)
-                       enqueue = 0;
+                       break;
+               cfs_rq = cfs_rq_of(se);
+               enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
+
+               cfs_rq->h_nr_running += task_delta;
+               cfs_rq->idle_h_nr_running += idle_task_delta;
+
+               /* end evaluation on encountering a throttled cfs_rq */
+               if (cfs_rq_throttled(cfs_rq))
+                       goto unthrottle_throttle;
+       }
 
+       for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
-               if (enqueue) {
-                       enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
-               } else {
-                       update_load_avg(cfs_rq, se, 0);
-                       se_update_runnable(se);
-               }
+
+               update_load_avg(cfs_rq, se, UPDATE_TG);
+               se_update_runnable(se);
 
                cfs_rq->h_nr_running += task_delta;
                cfs_rq->idle_h_nr_running += idle_task_delta;
 
+
+               /* end evaluation on encountering a throttled cfs_rq */
                if (cfs_rq_throttled(cfs_rq))
-                       break;
+                       goto unthrottle_throttle;
+
+               /*
+                * One parent has been throttled and cfs_rq removed from the
+                * list. Add it back to not break the leaf list.
+                */
+               if (throttled_hierarchy(cfs_rq))
+                       list_add_leaf_cfs_rq(cfs_rq);
        }
 
-       if (!se)
-               add_nr_running(rq, task_delta);
+       /* At this point se is NULL and we are at root level */
+       add_nr_running(rq, task_delta);
 
+unthrottle_throttle:
        /*
         * The cfs_rq_throttled() breaks in the above iteration can result in
         * incomplete leaf list maintenance, resulting in triggering the
@@ -4826,7 +4843,8 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
 
-               list_add_leaf_cfs_rq(cfs_rq);
+               if (list_add_leaf_cfs_rq(cfs_rq))
+                       break;
        }
 
        assert_list_leaf_cfs_rq(rq);
@@ -5479,6 +5497,13 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                /* end evaluation on encountering a throttled cfs_rq */
                if (cfs_rq_throttled(cfs_rq))
                        goto enqueue_throttle;
+
+               /*
+                * One parent has been throttled and cfs_rq removed from the
+                * list. Add it back to not break the leaf list.
+                */
+               if (throttled_hierarchy(cfs_rq))
+                       list_add_leaf_cfs_rq(cfs_rq);
        }
 
 enqueue_throttle:
index 402eef84c859ac0b7356ca89f22446b00e0b757e..743647005f64e6aea6946f01e98c25b6851338c7 100644 (file)
@@ -466,7 +466,6 @@ config PROFILE_ANNOTATED_BRANCHES
 config PROFILE_ALL_BRANCHES
        bool "Profile all if conditionals" if !FORTIFY_SOURCE
        select TRACE_BRANCH_PROFILING
-       imply CC_DISABLE_WARN_MAYBE_UNINITIALIZED  # avoid false positives
        help
          This tracer profiles all branch conditions. Every if ()
          taken in the kernel is recorded whether it hit or miss.
index ca1796747a773baf4079405fcf0e0e93dd4cf050..a010edc37ee02577011f5b385399e9f5376a217f 100644 (file)
@@ -323,17 +323,15 @@ static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
 
 /*
  * Only limited trace_printk() conversion specifiers allowed:
- * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
+ * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pks %pus %s
  */
 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
           u64, arg2, u64, arg3)
 {
+       int i, mod[3] = {}, fmt_cnt = 0;
+       char buf[64], fmt_ptype;
+       void *unsafe_ptr = NULL;
        bool str_seen = false;
-       int mod[3] = {};
-       int fmt_cnt = 0;
-       u64 unsafe_addr;
-       char buf[64];
-       int i;
 
        /*
         * bpf_check()->check_func_arg()->check_stack_boundary()
@@ -359,40 +357,71 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
                if (fmt[i] == 'l') {
                        mod[fmt_cnt]++;
                        i++;
-               } else if (fmt[i] == 'p' || fmt[i] == 's') {
+               } else if (fmt[i] == 'p') {
                        mod[fmt_cnt]++;
+                       if ((fmt[i + 1] == 'k' ||
+                            fmt[i + 1] == 'u') &&
+                           fmt[i + 2] == 's') {
+                               fmt_ptype = fmt[i + 1];
+                               i += 2;
+                               goto fmt_str;
+                       }
+
                        /* disallow any further format extensions */
                        if (fmt[i + 1] != 0 &&
                            !isspace(fmt[i + 1]) &&
                            !ispunct(fmt[i + 1]))
                                return -EINVAL;
-                       fmt_cnt++;
-                       if (fmt[i] == 's') {
-                               if (str_seen)
-                                       /* allow only one '%s' per fmt string */
-                                       return -EINVAL;
-                               str_seen = true;
-
-                               switch (fmt_cnt) {
-                               case 1:
-                                       unsafe_addr = arg1;
-                                       arg1 = (long) buf;
-                                       break;
-                               case 2:
-                                       unsafe_addr = arg2;
-                                       arg2 = (long) buf;
-                                       break;
-                               case 3:
-                                       unsafe_addr = arg3;
-                                       arg3 = (long) buf;
-                                       break;
-                               }
-                               buf[0] = 0;
-                               strncpy_from_unsafe(buf,
-                                                   (void *) (long) unsafe_addr,
+
+                       goto fmt_next;
+               } else if (fmt[i] == 's') {
+                       mod[fmt_cnt]++;
+                       fmt_ptype = fmt[i];
+fmt_str:
+                       if (str_seen)
+                               /* allow only one '%s' per fmt string */
+                               return -EINVAL;
+                       str_seen = true;
+
+                       if (fmt[i + 1] != 0 &&
+                           !isspace(fmt[i + 1]) &&
+                           !ispunct(fmt[i + 1]))
+                               return -EINVAL;
+
+                       switch (fmt_cnt) {
+                       case 0:
+                               unsafe_ptr = (void *)(long)arg1;
+                               arg1 = (long)buf;
+                               break;
+                       case 1:
+                               unsafe_ptr = (void *)(long)arg2;
+                               arg2 = (long)buf;
+                               break;
+                       case 2:
+                               unsafe_ptr = (void *)(long)arg3;
+                               arg3 = (long)buf;
+                               break;
+                       }
+
+                       buf[0] = 0;
+                       switch (fmt_ptype) {
+                       case 's':
+#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+                               strncpy_from_unsafe(buf, unsafe_ptr,
                                                    sizeof(buf));
+                               break;
+#endif
+                       case 'k':
+                               strncpy_from_unsafe_strict(buf, unsafe_ptr,
+                                                          sizeof(buf));
+                               break;
+                       case 'u':
+                               strncpy_from_unsafe_user(buf,
+                                       (__force void __user *)unsafe_ptr,
+                                                        sizeof(buf));
+                               break;
                        }
-                       continue;
+                       goto fmt_next;
                }
 
                if (fmt[i] == 'l') {
@@ -403,6 +432,7 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
                if (fmt[i] != 'i' && fmt[i] != 'd' &&
                    fmt[i] != 'u' && fmt[i] != 'x')
                        return -EINVAL;
+fmt_next:
                fmt_cnt++;
        }
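With the new specifiers a program can request an explicitly kernel- or user-space string copy. A hypothetical BPF-side use; the pointer source is an assumption, only the format handling comes from the code above:

        char fmt[] = "comm: %pks\n";

        /* %pks -> strncpy_from_unsafe_strict(), %pus -> the _user variant;
         * plain %s keeps strncpy_from_unsafe() only on architectures with
         * non-overlapping address spaces (see the Kconfig hunk earlier).
         */
        bpf_trace_printk(fmt, sizeof(fmt), (unsigned long)task->comm);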
 
@@ -825,14 +855,16 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return &bpf_probe_read_user_proto;
        case BPF_FUNC_probe_read_kernel:
                return &bpf_probe_read_kernel_proto;
-       case BPF_FUNC_probe_read:
-               return &bpf_probe_read_compat_proto;
        case BPF_FUNC_probe_read_user_str:
                return &bpf_probe_read_user_str_proto;
        case BPF_FUNC_probe_read_kernel_str:
                return &bpf_probe_read_kernel_str_proto;
+#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+       case BPF_FUNC_probe_read:
+               return &bpf_probe_read_compat_proto;
        case BPF_FUNC_probe_read_str:
                return &bpf_probe_read_compat_str_proto;
+#endif
 #ifdef CONFIG_CGROUPS
        case BPF_FUNC_get_current_cgroup_id:
                return &bpf_get_current_cgroup_id_proto;
index 0456e0a3dab14374f540ed0473ec8fb1bc4af34d..382775edf6902d2da767d369954c245be48de546 100644 (file)
@@ -4,28 +4,6 @@
 
 #ifdef CONFIG_FUNCTION_TRACER
 
-/*
- * Traverse the ftrace_global_list, invoking all entries.  The reason that we
- * can use rcu_dereference_raw_check() is that elements removed from this list
- * are simply leaked, so there is no need to interact with a grace-period
- * mechanism.  The rcu_dereference_raw_check() calls are needed to handle
- * concurrent insertions into the ftrace_global_list.
- *
- * Silly Alpha and silly pointer-speculation compiler optimizations!
- */
-#define do_for_each_ftrace_op(op, list)                        \
-       op = rcu_dereference_raw_check(list);                   \
-       do
-
-/*
- * Optimized for just a single item in the list (as that is the normal case).
- */
-#define while_for_each_ftrace_op(op)                           \
-       while (likely(op = rcu_dereference_raw_check((op)->next)) &&    \
-              unlikely((op) != &ftrace_list_end))
-
-extern struct ftrace_ops __rcu *ftrace_ops_list;
-extern struct ftrace_ops ftrace_list_end;
 extern struct mutex ftrace_lock;
 extern struct ftrace_ops global_ops;
 
index 31c0fad4cb9e18910cd318abb4d76c4db09d592e..312d1a0ca3b60ca9077c253b5175dd50c625ada8 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/printk.h>
 #include <linux/string.h>
 #include <linux/sysfs.h>
+#include <linux/completion.h>
 
 static ulong delay = 100;
 static char test_mode[12] = "irq";
@@ -28,6 +29,8 @@ MODULE_PARM_DESC(delay, "Period in microseconds (100 us default)");
 MODULE_PARM_DESC(test_mode, "Mode of the test such as preempt, irq, or alternate (default irq)");
 MODULE_PARM_DESC(burst_size, "The size of a burst (default 1)");
 
+static struct completion done;
+
 #define MIN(x, y) ((x) < (y) ? (x) : (y))
 
 static void busy_wait(ulong time)
@@ -113,22 +116,47 @@ static int preemptirq_delay_run(void *data)
 
        for (i = 0; i < s; i++)
                (testfuncs[i])(i);
+
+       complete(&done);
+
+       set_current_state(TASK_INTERRUPTIBLE);
+       while (!kthread_should_stop()) {
+               schedule();
+               set_current_state(TASK_INTERRUPTIBLE);
+       }
+
+       __set_current_state(TASK_RUNNING);
+
        return 0;
 }
 
-static struct task_struct *preemptirq_start_test(void)
+static int preemptirq_run_test(void)
 {
+       struct task_struct *task;
        char task_name[50];
 
+       init_completion(&done);
+
        snprintf(task_name, sizeof(task_name), "%s_test", test_mode);
-       return kthread_run(preemptirq_delay_run, NULL, task_name);
+       task =  kthread_run(preemptirq_delay_run, NULL, task_name);
+       if (IS_ERR(task))
+               return PTR_ERR(task);
+       if (task) {
+               wait_for_completion(&done);
+               kthread_stop(task);
+       }
+       return 0;
 }
 
 
 static ssize_t trigger_store(struct kobject *kobj, struct kobj_attribute *attr,
                         const char *buf, size_t count)
 {
-       preemptirq_start_test();
+       ssize_t ret;
+
+       ret = preemptirq_run_test();
+       if (ret)
+               return ret;
        return count;
 }
 
@@ -148,11 +176,9 @@ static struct kobject *preemptirq_delay_kobj;
 
 static int __init preemptirq_delay_init(void)
 {
-       struct task_struct *test_task;
        int retval;
 
-       test_task = preemptirq_start_test();
-       retval = PTR_ERR_OR_ZERO(test_task);
+       retval = preemptirq_run_test();
        if (retval != 0)
                return retval;
 
index 6f0b42ceeb0029f1b7c2e8ac8469b1d0ba2daf8d..b8e1ca48be50f0ec8c6ccda334a425602cdc9849 100644 (file)
@@ -193,7 +193,7 @@ rb_event_length(struct ring_buffer_event *event)
        case RINGBUF_TYPE_DATA:
                return rb_event_data_length(event);
        default:
-               BUG();
+               WARN_ON_ONCE(1);
        }
        /* not hit */
        return 0;
@@ -249,7 +249,7 @@ rb_event_data(struct ring_buffer_event *event)
 {
        if (extended_time(event))
                event = skip_time_extend(event);
-       BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
+       WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
        /* If length is in len field, then array[0] has the data */
        if (event->type_len)
                return (void *)&event->array[0];
@@ -3727,7 +3727,7 @@ rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
                return;
 
        default:
-               BUG();
+               RB_WARN_ON(cpu_buffer, 1);
        }
        return;
 }
@@ -3757,7 +3757,7 @@ rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
                return;
 
        default:
-               BUG();
+               RB_WARN_ON(iter->cpu_buffer, 1);
        }
        return;
 }
@@ -4020,7 +4020,7 @@ rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
                return event;
 
        default:
-               BUG();
+               RB_WARN_ON(cpu_buffer, 1);
        }
 
        return NULL;
@@ -4034,7 +4034,6 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event;
        int nr_loops = 0;
-       bool failed = false;
 
        if (ts)
                *ts = 0;
@@ -4056,19 +4055,14 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
                return NULL;
 
        /*
-        * We repeat when a time extend is encountered or we hit
-        * the end of the page. Since the time extend is always attached
-        * to a data event, we should never loop more than three times.
-        * Once for going to next page, once on time extend, and
-        * finally once to get the event.
-        * We should never hit the following condition more than thrice,
-        * unless the buffer is very small, and there's a writer
-        * that is causing the reader to fail getting an event.
+        * As the writer can mess with what the iterator is trying
+        * to read, just give up if we fail to get an event after
+        * three tries. The iterator is not as reliable when reading
+        * the ring buffer with an active writer as the consumer is.
+        * Do not warn when three failures are reached.
         */
-       if (++nr_loops > 3) {
-               RB_WARN_ON(cpu_buffer, !failed);
+       if (++nr_loops > 3)
                return NULL;
-       }
 
        if (rb_per_cpu_empty(cpu_buffer))
                return NULL;
@@ -4079,10 +4073,8 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
        }
 
        event = rb_iter_head_event(iter);
-       if (!event) {
-               failed = true;
+       if (!event)
                goto again;
-       }
 
        switch (event->type_len) {
        case RINGBUF_TYPE_PADDING:
@@ -4117,7 +4109,7 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
                return event;
 
        default:
-               BUG();
+               RB_WARN_ON(cpu_buffer, 1);
        }
 
        return NULL;
index 8d2b988126250da9078230ffe31d6b77c493f2dc..29615f15a820b2f67d66af8084bfcd13936ed9c3 100644 (file)
@@ -947,7 +947,8 @@ int __trace_bputs(unsigned long ip, const char *str)
 EXPORT_SYMBOL_GPL(__trace_bputs);
 
 #ifdef CONFIG_TRACER_SNAPSHOT
-void tracing_snapshot_instance_cond(struct trace_array *tr, void *cond_data)
+static void tracing_snapshot_instance_cond(struct trace_array *tr,
+                                          void *cond_data)
 {
        struct tracer *tracer = tr->current_trace;
        unsigned long flags;
@@ -8525,6 +8526,19 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
         */
        allocate_snapshot = false;
 #endif
+
+       /*
+        * Because of some magic with the way alloc_percpu() works on
+        * x86_64, we need to synchronize the pgd of all the tables,
+        * otherwise the trace events that happen in x86_64 page fault
+        * handlers can't cope with alloc_percpu()'d memory being
+        * touched inside the page fault trace event. We also need to
+        * audit all other alloc_percpu() and vmalloc()
+        * calls in tracing, because something might get triggered within a
+        * page fault trace event!
+        */
+       vmalloc_sync_mappings();
+
        return 0;
 }
 
index 06d7feb5255f88d51e80ed144ee3ef41ccb8a783..9de29bb45a27f1aae98479ff60660db2b807b4f6 100644 (file)
@@ -95,24 +95,20 @@ trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
        struct xbc_node *anode;
        char buf[MAX_BUF_LEN];
        const char *val;
-       int ret;
+       int ret = 0;
 
-       kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN);
+       xbc_node_for_each_array_value(node, "probes", anode, val) {
+               kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN);
 
-       ret = kprobe_event_gen_cmd_start(&cmd, event, NULL);
-       if (ret)
-               return ret;
+               ret = kprobe_event_gen_cmd_start(&cmd, event, val);
+               if (ret)
+                       break;
 
-       xbc_node_for_each_array_value(node, "probes", anode, val) {
-               ret = kprobe_event_add_field(&cmd, val);
+               ret = kprobe_event_gen_cmd_end(&cmd);
                if (ret)
-                       return ret;
+                       pr_err("Failed to add probe: %s\n", buf);
        }
 
-       ret = kprobe_event_gen_cmd_end(&cmd);
-       if (ret)
-               pr_err("Failed to add probe: %s\n", buf);
-
        return ret;
 }
 #else
index d0568af4a0ef62001372988ba539058551934741..35989383ae1131e11e45a39e0a5333a57f4493e0 100644 (file)
@@ -453,7 +453,7 @@ static bool __within_notrace_func(unsigned long addr)
 
 static bool within_notrace_func(struct trace_kprobe *tk)
 {
-       unsigned long addr = addr = trace_kprobe_address(tk);
+       unsigned long addr = trace_kprobe_address(tk);
        char symname[KSYM_NAME_LEN], *p;
 
        if (!__within_notrace_func(addr))
@@ -940,6 +940,9 @@ EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
  * complete command or only the first part of it; in the latter case,
  * kprobe_event_add_fields() can be used to add more fields following this.
  *
+ * Unlike synth_event_gen_cmd_start(), @loc must be specified here; this
+ * function returns -EINVAL if @loc == NULL.
+ *
  * Return: 0 if successful, error otherwise.
  */
 int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
@@ -953,6 +956,9 @@ int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
        if (cmd->type != DYNEVENT_TYPE_KPROBE)
                return -EINVAL;
 
+       if (!loc)
+               return -EINVAL;
+
        if (kretprobe)
                snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name);
        else
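For context, the command-generation API used above is driven once per probe definition: init the buffer, start the command with an event name and probe location, then end it to register the event. A hedged usage sketch follows; the event name, location, and fields are made up, and the declarations are assumed to live in <linux/trace_events.h>:

#include <linux/trace_events.h>	/* dynevent_cmd, kprobe_event_* helpers */

#define MY_BUF_LEN 1024

static int add_sample_probe(void)
{
	struct dynevent_cmd cmd;
	static char buf[MY_BUF_LEN];
	int ret;

	kprobe_event_cmd_init(&cmd, buf, MY_BUF_LEN);

	/* Illustrative probe: event "myprobe" at do_sys_open() entry. */
	ret = kprobe_event_gen_cmd_start(&cmd, "myprobe", "do_sys_open",
					 "dfd=%ax", "filename=%dx");
	if (ret)
		return ret;

	return kprobe_event_gen_cmd_end(&cmd);	/* registers the event */
}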
index 7f255b5a8845aef24b048dfabaf07462b04c9fca..3474d6aa55d832ae0d8cb1658f3c417e3de92a01 100644 (file)
@@ -475,6 +475,12 @@ static void umh_clean_and_save_pid(struct subprocess_info *info)
 {
        struct umh_info *umh_info = info->data;
 
+       /* cleanup if umh_pipe_setup() was successful but exec failed */
+       if (info->pid && info->retval) {
+               fput(umh_info->pipe_to_umh);
+               fput(umh_info->pipe_from_umh);
+       }
+
        argv_free(info->argv);
        umh_info->pid = info->pid;
 }
@@ -544,6 +550,11 @@ EXPORT_SYMBOL_GPL(fork_usermode_blob);
  * Runs a user-space application.  The application is started
  * asynchronously if wait is not set, and runs as a child of system workqueues.
  * (ie. it runs with full root capabilities and optimized affinity).
+ *
+ * Note: a successful return value does not guarantee that the helper was
+ * called at all. You cannot rely on sub_info->{init,cleanup} being called
+ * even for UMH_WAIT_* wait modes, as STATIC_USERMODEHELPER_PATH="" turns
+ * all helpers into a successful no-op.
  */
 int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
 {
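Given that note, callers should read a zero return as "the request was accepted", not "the helper ran". A hedged sketch of the usual wrapper, with an illustrative path and arguments:

#include <linux/umh.h>

static int run_helper(void)
{
	static char *argv[] = { "/sbin/example-helper", "arg1", NULL };
	static char *envp[] = { "HOME=/", "PATH=/sbin:/bin", NULL };

	/*
	 * 0 means the exec was queued and (for UMH_WAIT_PROC) reaped,
	 * but with CONFIG_STATIC_USERMODEHELPER_PATH="" this is a
	 * successful no-op, per the comment above.
	 */
	return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
}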
index 48469c95d78e6da6c0656502c6d11b1216cef3c9..929211039bacd0c0d4ccd2aa7af0a9c69744132f 100644 (file)
@@ -60,18 +60,15 @@ config UBSAN_SANITIZE_ALL
          Enabling this option will get kernel image size increased
          significantly.
 
-config UBSAN_NO_ALIGNMENT
-       bool "Disable checking of pointers alignment"
-       default y if HAVE_EFFICIENT_UNALIGNED_ACCESS
+config UBSAN_ALIGNMENT
+       bool "Enable checks for pointers alignment"
+       default !HAVE_EFFICIENT_UNALIGNED_ACCESS
+       depends on !X86 || !COMPILE_TEST
        help
-         This option disables the check of unaligned memory accesses.
-         This option should be used when building allmodconfig.
-         Disabling this option on architectures that support unaligned
+         This option enables checking of unaligned memory accesses.
+         Enabling this option on architectures that support unaligned
          accesses may produce a lot of false positives.
 
-config UBSAN_ALIGNMENT
-       def_bool !UBSAN_NO_ALIGNMENT
-
 config TEST_UBSAN
        tristate "Module for testing for undefined behavior detection"
        depends on m
index 7a6430a7fca0070e0aa5e305eeb662fc6425e683..ccb2ffad8dcfab9beef23f4f2345390c0edd6de1 100644 (file)
@@ -93,7 +93,7 @@ static void kunit_print_ok_not_ok(void *test_or_suite,
         * representation.
         */
        if (suite)
-               pr_info("%s %zd - %s",
+               pr_info("%s %zd - %s\n",
                        kunit_status_to_string(is_ok),
                        test_number, description);
        else
index 2d9f520d2f275ca95562a01371a0bd33c9278505..6b1622f4d7c2536efc743e9ee9e00f710a3d63d3 100644 (file)
@@ -214,6 +214,7 @@ test_string(void)
 #define PTR_STR "ffff0123456789ab"
 #define PTR_VAL_NO_CRNG "(____ptrval____)"
 #define ZEROS "00000000"       /* hex 32 zero bits */
+#define ONES "ffffffff"                /* hex 32 one bits */
 
 static int __init
 plain_format(void)
@@ -245,6 +246,7 @@ plain_format(void)
 #define PTR_STR "456789ab"
 #define PTR_VAL_NO_CRNG "(ptrval)"
 #define ZEROS ""
+#define ONES ""
 
 static int __init
 plain_format(void)
@@ -330,14 +332,28 @@ test_hashed(const char *fmt, const void *p)
        test(buf, fmt, p);
 }
 
+/*
+ * NULL pointers aren't hashed.
+ */
 static void __init
 null_pointer(void)
 {
-       test_hashed("%p", NULL);
+       test(ZEROS "00000000", "%p", NULL);
        test(ZEROS "00000000", "%px", NULL);
        test("(null)", "%pE", NULL);
 }
 
+/*
+ * Error pointers aren't hashed.
+ */
+static void __init
+error_pointer(void)
+{
+       test(ONES "fffffff5", "%p", ERR_PTR(-11));
+       test(ONES "fffffff5", "%px", ERR_PTR(-11));
+       test("(efault)", "%pE", ERR_PTR(-11));
+}
+
 #define PTR_INVALID ((void *)0x000000ab)
 
 static void __init
@@ -649,6 +665,7 @@ test_pointer(void)
 {
        plain();
        null_pointer();
+       error_pointer();
        invalid_pointer();
        symbol_ptr();
        kernel_ptr();
index 7c488a1ce318cc52c375b0db7d9db2e353dfa836..7c47ad52ce2f7448cda5da17cb2bf25527b0eb12 100644 (file)
@@ -794,6 +794,13 @@ static char *ptr_to_id(char *buf, char *end, const void *ptr,
        unsigned long hashval;
        int ret;
 
+       /*
+        * Print the real pointer value for NULL and error pointers,
+        * as they are not actual addresses.
+        */
+       if (IS_ERR_OR_NULL(ptr))
+               return pointer_string(buf, end, ptr, spec);
+
        /* When debugging early boot use non-cryptographically secure hash. */
        if (unlikely(debug_boot_weak_hash)) {
                hashval = hash_long((unsigned long)ptr, 32);
@@ -2168,6 +2175,10 @@ char *fwnode_string(char *buf, char *end, struct fwnode_handle *fwnode,
  *             f full name
  *             P node name, including a possible unit address
  * - 'x' For printing the address. Equivalent to "%lx".
+ * - '[ku]s' For a BPF/tracing related format specifier, e.g. used from
+ *           bpf_trace_printk(), where the [ku] prefix specifies either
+ *           kernel (k) or user (u) memory to probe, and:
+ *              s a string, equivalent to "%s" on direct vsnprintf() use
  *
  * ** When making changes please also update:
  *     Documentation/core-api/printk-formats.rst
@@ -2251,6 +2262,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
                if (!IS_ERR(ptr))
                        break;
                return err_ptr(buf, end, ptr, spec);
+       case 'u':
+       case 'k':
+               switch (fmt[1]) {
+               case 's':
+                       return string(buf, end, ptr, spec);
+               default:
+                       return error_string(buf, end, "(einval)", spec);
+               }
        }
 
        /* default is to _not_ leak addresses, hash before printing */
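Two behaviors change together here: IS_ERR_OR_NULL() pointers now bypass the hashing in ptr_to_id() (so NULL prints as all zeros and ERR_PTR(-11) as fffffff5 on 32-bit, matching the new test cases), and %pks/%pus print a string probed from kernel or user memory for bpf_trace_printk(). A small illustrative sketch:

#include <linux/err.h>
#include <linux/printk.h>

static void demo_pointer_formats(void)
{
	pr_info("%p\n", NULL);		/* all zeros, no longer hashed */
	pr_info("%p\n", ERR_PTR(-11));	/* raw error value, not hashed */

	/* New specifier: probe and print a kernel-memory string. */
	pr_info("%pks\n", "a kernel string");
}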
index c81b4f3a7268458335635a6a01d93f09ba5469f4..efc5b83acd2df59499f80c450e2db3b6f2c176a5 100644 (file)
@@ -21,7 +21,7 @@ struct backing_dev_info noop_backing_dev_info = {
 EXPORT_SYMBOL_GPL(noop_backing_dev_info);
 
 static struct class *bdi_class;
-const char *bdi_unknown_name = "(unknown)";
+static const char *bdi_unknown_name = "(unknown)";
 
 /*
  * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
@@ -938,7 +938,8 @@ int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
        if (bdi->dev)   /* The driver needs to use separate queues per device */
                return 0;
 
-       dev = device_create_vargs(bdi_class, NULL, MKDEV(0, 0), bdi, fmt, args);
+       vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args);
+       dev = device_create(bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name);
        if (IS_ERR(dev))
                return PTR_ERR(dev);
 
@@ -1043,6 +1044,14 @@ void bdi_put(struct backing_dev_info *bdi)
 }
 EXPORT_SYMBOL(bdi_put);
 
+const char *bdi_dev_name(struct backing_dev_info *bdi)
+{
+       if (!bdi || !bdi->dev)
+               return bdi_unknown_name;
+       return bdi->dev_name;
+}
+EXPORT_SYMBOL_GPL(bdi_dev_name);
+
 static wait_queue_head_t congestion_wqh[2] = {
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
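The new bdi_dev_name() accessor gives callers one safe way to name a bdi, even for a NULL or not-yet-registered device. A hedged usage sketch:

#include <linux/backing-dev.h>
#include <linux/printk.h>

static void report_bdi(struct backing_dev_info *bdi)
{
	/* Returns "(unknown)" for a NULL or unregistered bdi. */
	pr_warn("writeback trouble on %s\n", bdi_dev_name(bdi));
}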
index 50681f0286ded05c26c0dc98660e4df5cd1ebecc..87a6a59fe667706255ddd356956f3ee6d6f19ca2 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1218,6 +1218,10 @@ retry:
        if (!vma_permits_fault(vma, fault_flags))
                return -EFAULT;
 
+       if ((fault_flags & FAULT_FLAG_KILLABLE) &&
+           fatal_signal_pending(current))
+               return -EINTR;
+
        ret = handle_mm_fault(vma, address, fault_flags);
        major |= ret & VM_FAULT_MAJOR;
        if (ret & VM_FAULT_ERROR) {
@@ -1230,11 +1234,9 @@ retry:
 
        if (ret & VM_FAULT_RETRY) {
                down_read(&mm->mmap_sem);
-               if (!(fault_flags & FAULT_FLAG_TRIED)) {
-                       *unlocked = true;
-                       fault_flags |= FAULT_FLAG_TRIED;
-                       goto retry;
-               }
+               *unlocked = true;
+               fault_flags |= FAULT_FLAG_TRIED;
+               goto retry;
        }
 
        if (tsk) {
index 08b43de2383b7b844b55d13e5814594c5cf7ecec..de3121848ddf3e7d053006c0edd0b1c00a5ebdaf 100644 (file)
@@ -1,23 +1,28 @@
 # SPDX-License-Identifier: GPL-2.0
 KASAN_SANITIZE := n
-UBSAN_SANITIZE_common.o := n
-UBSAN_SANITIZE_generic.o := n
-UBSAN_SANITIZE_generic_report.o := n
-UBSAN_SANITIZE_tags.o := n
+UBSAN_SANITIZE := n
 KCOV_INSTRUMENT := n
 
+# Disable ftrace to avoid recursion.
 CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_generic.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_generic_report.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_init.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_quarantine.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_report.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_tags.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_tags_report.o = $(CC_FLAGS_FTRACE)
 
 # Function splitter causes unnecessary splits in __asan_load1/__asan_store1
 # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
-
-CFLAGS_common.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
-CFLAGS_generic.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
-CFLAGS_generic_report.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
-CFLAGS_tags.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
+CFLAGS_common.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) -DDISABLE_BRANCH_PROFILING
+CFLAGS_generic.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) -DDISABLE_BRANCH_PROFILING
+CFLAGS_generic_report.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) -DDISABLE_BRANCH_PROFILING
+CFLAGS_init.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) -DDISABLE_BRANCH_PROFILING
+CFLAGS_quarantine.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) -DDISABLE_BRANCH_PROFILING
+CFLAGS_report.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) -DDISABLE_BRANCH_PROFILING
+CFLAGS_tags.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) -DDISABLE_BRANCH_PROFILING
+CFLAGS_tags_report.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) -DDISABLE_BRANCH_PROFILING
 
 obj-$(CONFIG_KASAN) := common.o init.o report.o
 obj-$(CONFIG_KASAN_GENERIC) += generic.o generic_report.o quarantine.o
index 56ff8885fe2ecb24a57dcccc329158558d2085a8..098a7dbaced664c33d5da40827d28aa3b9d75f6b 100644 (file)
@@ -15,7 +15,6 @@
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define DISABLE_BRANCH_PROFILING
 
 #include <linux/export.h>
 #include <linux/interrupt.h>
index e8f37199d885172cfb2ae948e7134520e07718ad..cfade6413528d2aa10359e3418e602f25c90d801 100644 (file)
@@ -212,8 +212,6 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
 asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
 void __asan_register_globals(struct kasan_global *globals, size_t size);
 void __asan_unregister_globals(struct kasan_global *globals, size_t size);
-void __asan_loadN(unsigned long addr, size_t size);
-void __asan_storeN(unsigned long addr, size_t size);
 void __asan_handle_no_return(void);
 void __asan_alloca_poison(unsigned long addr, size_t size);
 void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom);
@@ -228,6 +226,8 @@ void __asan_load8(unsigned long addr);
 void __asan_store8(unsigned long addr);
 void __asan_load16(unsigned long addr);
 void __asan_store16(unsigned long addr);
+void __asan_loadN(unsigned long addr, size_t size);
+void __asan_storeN(unsigned long addr, size_t size);
 
 void __asan_load1_noabort(unsigned long addr);
 void __asan_store1_noabort(unsigned long addr);
@@ -239,6 +239,21 @@ void __asan_load8_noabort(unsigned long addr);
 void __asan_store8_noabort(unsigned long addr);
 void __asan_load16_noabort(unsigned long addr);
 void __asan_store16_noabort(unsigned long addr);
+void __asan_loadN_noabort(unsigned long addr, size_t size);
+void __asan_storeN_noabort(unsigned long addr, size_t size);
+
+void __asan_report_load1_noabort(unsigned long addr);
+void __asan_report_store1_noabort(unsigned long addr);
+void __asan_report_load2_noabort(unsigned long addr);
+void __asan_report_store2_noabort(unsigned long addr);
+void __asan_report_load4_noabort(unsigned long addr);
+void __asan_report_store4_noabort(unsigned long addr);
+void __asan_report_load8_noabort(unsigned long addr);
+void __asan_report_store8_noabort(unsigned long addr);
+void __asan_report_load16_noabort(unsigned long addr);
+void __asan_report_store16_noabort(unsigned long addr);
+void __asan_report_load_n_noabort(unsigned long addr, size_t size);
+void __asan_report_store_n_noabort(unsigned long addr, size_t size);
 
 void __asan_set_shadow_00(const void *addr, size_t size);
 void __asan_set_shadow_f1(const void *addr, size_t size);
@@ -247,4 +262,19 @@ void __asan_set_shadow_f3(const void *addr, size_t size);
 void __asan_set_shadow_f5(const void *addr, size_t size);
 void __asan_set_shadow_f8(const void *addr, size_t size);
 
+void __hwasan_load1_noabort(unsigned long addr);
+void __hwasan_store1_noabort(unsigned long addr);
+void __hwasan_load2_noabort(unsigned long addr);
+void __hwasan_store2_noabort(unsigned long addr);
+void __hwasan_load4_noabort(unsigned long addr);
+void __hwasan_store4_noabort(unsigned long addr);
+void __hwasan_load8_noabort(unsigned long addr);
+void __hwasan_store8_noabort(unsigned long addr);
+void __hwasan_load16_noabort(unsigned long addr);
+void __hwasan_store16_noabort(unsigned long addr);
+void __hwasan_loadN_noabort(unsigned long addr, size_t size);
+void __hwasan_storeN_noabort(unsigned long addr, size_t size);
+
+void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size);
+
 #endif
index 25b7734e70137213729e8efa8607eac2b6c592dd..8a959fdd30e35df718705ade5c8d5268afe86c85 100644 (file)
@@ -12,7 +12,6 @@
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define DISABLE_BRANCH_PROFILING
 
 #include <linux/export.h>
 #include <linux/interrupt.h>
index 99d77ffb79c2b39b2cd1cc4dfd72c4bf1684f3d6..cd280afb246e2cd7ad61bbd06072fb9ed8db8517 100644 (file)
@@ -1692,6 +1692,7 @@ static void collapse_file(struct mm_struct *mm,
                if (page_has_private(page) &&
                    !try_to_release_page(page, GFP_KERNEL)) {
                        result = SCAN_PAGE_HAS_PRIVATE;
+                       putback_lru_page(page);
                        goto out_unlock;
                }
 
index 5beea03dd58ad872d679376ee2b9f82cbe68f0fb..a3b97f10396654b18faa4642dafc4975507da45b 100644 (file)
@@ -4990,19 +4990,22 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
        unsigned int size;
        int node;
        int __maybe_unused i;
+       long error = -ENOMEM;
 
        size = sizeof(struct mem_cgroup);
        size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
 
        memcg = kzalloc(size, GFP_KERNEL);
        if (!memcg)
-               return NULL;
+               return ERR_PTR(error);
 
        memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
                                 1, MEM_CGROUP_ID_MAX,
                                 GFP_KERNEL);
-       if (memcg->id.id < 0)
+       if (memcg->id.id < 0) {
+               error = memcg->id.id;
                goto fail;
+       }
 
        memcg->vmstats_local = alloc_percpu(struct memcg_vmstats_percpu);
        if (!memcg->vmstats_local)
@@ -5046,7 +5049,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
 fail:
        mem_cgroup_id_remove(memcg);
        __mem_cgroup_free(memcg);
-       return NULL;
+       return ERR_PTR(error);
 }
 
 static struct cgroup_subsys_state * __ref
@@ -5057,8 +5060,8 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
        long error = -ENOMEM;
 
        memcg = mem_cgroup_alloc();
-       if (!memcg)
-               return ERR_PTR(error);
+       if (IS_ERR(memcg))
+               return ERR_CAST(memcg);
 
        WRITE_ONCE(memcg->high, PAGE_COUNTER_MAX);
        memcg->soft_limit = PAGE_COUNTER_MAX;
@@ -5108,7 +5111,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 fail:
        mem_cgroup_id_remove(memcg);
        mem_cgroup_free(memcg);
-       return ERR_PTR(-ENOMEM);
+       return ERR_PTR(error);
 }
 
 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
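The allocation path now reports the precise errno (e.g. a negative id from idr_alloc()) instead of collapsing every failure to NULL and -ENOMEM, and the caller forwards it with ERR_CAST(). The idiom in isolation, as a generic sketch with invented widget types:

#include <linux/err.h>
#include <linux/kobject.h>
#include <linux/slab.h>

struct widget {
	struct kobject kobj;
	int id;
};

static struct widget *widget_alloc(void)
{
	struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return ERR_PTR(-ENOMEM);
	return w;
}

static struct kobject *widget_create(void)
{
	struct widget *w = widget_alloc();

	if (IS_ERR(w))
		return ERR_CAST(w);	/* forward the precise errno, new type */
	return &w->kobj;
}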
index c881abeba0bf2429fbb7bdbd990b46dbcd9b4531..6aa6ea6050684ec5c6ee9fa58ffe2841e26efd84 100644 (file)
@@ -794,7 +794,7 @@ out:
        if (locked && new_len > old_len)
                mm_populate(new_addr + old_len, new_len - old_len);
        userfaultfd_unmap_complete(mm, &uf_unmap_early);
-       mremap_userfaultfd_complete(&uf, addr, new_addr, old_len);
+       mremap_userfaultfd_complete(&uf, addr, ret, old_len);
        userfaultfd_unmap_complete(mm, &uf_unmap);
        return ret;
 }
index 69827d4fa0527dc1dbacdc1b0af711df0929d89f..13cc653122b73278afaeb6054539c619b164d11a 100644 (file)
@@ -1607,6 +1607,7 @@ void set_zone_contiguous(struct zone *zone)
                if (!__pageblock_pfn_to_page(block_start_pfn,
                                             block_end_pfn, zone))
                        return;
+               cond_resched();
        }
 
        /* We confirm that there is no hole */
@@ -2400,6 +2401,14 @@ static inline void boost_watermark(struct zone *zone)
 
        if (!watermark_boost_factor)
                return;
+       /*
+        * Don't bother in zones that are unlikely to produce results.
+        * On small machines, including kdump capture kernels running
+        * in a small area, boosting the watermark can cause an out of
+        * memory situation immediately.
+        */
+       if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
+               return;
 
        max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
                        watermark_boost_factor, 10000);
index d7e3bc649f4eb8e784c3406e22c6bf237d79ebde..7da7d7737dab3ddcdb14ee55e61b934c28bf8291 100644 (file)
@@ -80,6 +80,7 @@
 #include <linux/workqueue.h>
 #include <linux/kmemleak.h>
 #include <linux/sched.h>
+#include <linux/sched/mm.h>
 
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
@@ -1557,10 +1558,9 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
                                 gfp_t gfp)
 {
-       /* whitelisted flags that can be passed to the backing allocators */
-       gfp_t pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
-       bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
-       bool do_warn = !(gfp & __GFP_NOWARN);
+       gfp_t pcpu_gfp;
+       bool is_atomic;
+       bool do_warn;
        static int warn_limit = 10;
        struct pcpu_chunk *chunk, *next;
        const char *err;
@@ -1569,6 +1569,12 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
        void __percpu *ptr;
        size_t bits, bit_align;
 
+       gfp = current_gfp_context(gfp);
+       /* whitelisted flags that can be passed to the backing allocators */
+       pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
+       is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
+       do_warn = !(gfp & __GFP_NOWARN);
+
        /*
         * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE,
         * therefore alignment must be a minimum of that many bytes.
index 9bf44955c4f1e421f43905aec869e0cbcc80442c..b762450fc9f076841a410e8b51067d54beb569a6 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -551,15 +551,32 @@ static void print_section(char *level, char *text, u8 *addr,
        metadata_access_disable();
 }
 
+/*
+ * See comment in calculate_sizes().
+ */
+static inline bool freeptr_outside_object(struct kmem_cache *s)
+{
+       return s->offset >= s->inuse;
+}
+
+/*
+ * Return the offset of the end of the info block, which is inuse plus the
+ * free pointer when it does not overlap the object.
+ */
+static inline unsigned int get_info_end(struct kmem_cache *s)
+{
+       if (freeptr_outside_object(s))
+               return s->inuse + sizeof(void *);
+       else
+               return s->inuse;
+}
+
 static struct track *get_track(struct kmem_cache *s, void *object,
        enum track_item alloc)
 {
        struct track *p;
 
-       if (s->offset)
-               p = object + s->offset + sizeof(void *);
-       else
-               p = object + s->inuse;
+       p = object + get_info_end(s);
 
        return p + alloc;
 }
@@ -686,10 +703,7 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
                print_section(KERN_ERR, "Redzone ", p + s->object_size,
                        s->inuse - s->object_size);
 
-       if (s->offset)
-               off = s->offset + sizeof(void *);
-       else
-               off = s->inuse;
+       off = get_info_end(s);
 
        if (s->flags & SLAB_STORE_USER)
                off += 2 * sizeof(struct track);
@@ -782,7 +796,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
  * object address
  *     Bytes of the object to be managed.
  *     If the freepointer may overlay the object then the free
- *     pointer is the first word of the object.
+ *     pointer is at the middle of the object.
  *
  *     Poisoning uses 0x6b (POISON_FREE) and the last byte is
  *     0xa5 (POISON_END)
@@ -816,11 +830,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
 
 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
 {
-       unsigned long off = s->inuse;   /* The end of info */
-
-       if (s->offset)
-               /* Freepointer is placed after the object. */
-               off += sizeof(void *);
+       unsigned long off = get_info_end(s);    /* The end of info */
 
        if (s->flags & SLAB_STORE_USER)
                /* We also have user information there */
@@ -907,7 +917,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
                check_pad_bytes(s, page, p);
        }
 
-       if (!s->offset && val == SLUB_RED_ACTIVE)
+       if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE)
                /*
                 * Object and freepointer overlap. Cannot check
                 * freepointer while object is allocated.
@@ -3587,6 +3597,11 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
                 *
                 * This is the case if we do RCU, have a constructor or
                 * destructor or are poisoning the objects.
+                *
+                * The assumption that s->offset >= s->inuse means the free
+                * pointer is outside of the object is used in the
+                * freeptr_outside_object() function. If that assumption is
+                * no longer true, the function needs to be modified.
                 */
                s->offset = size;
                size += sizeof(void *);
index b06868fc492659a7038eed45a420d3dec391f5d0..a37c87b5aee2f3c038da7920ef8df6ee7de415d2 100644 (file)
@@ -1625,7 +1625,6 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
  * @dst:       The temp list to put pages on to.
  * @nr_scanned:        The number of pages that were scanned.
  * @sc:                The scan_control struct for this reclaim session
- * @mode:      One of the LRU isolation modes
  * @lru:       LRU list id for isolating
  *
  * returns how many pages were moved onto *@dst.
index 42f31c4b53ad43e4204874fb20d3d901d9ef921a..460b0feced26abe6d3b2cc16305fbf1c44635190 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/spinlock.h>
 #include <linux/zpool.h>
 #include <linux/magic.h>
+#include <linux/kmemleak.h>
 
 /*
  * NCHUNKS_ORDER determines the internal allocation granularity, effectively
@@ -215,6 +216,8 @@ static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
                                 (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));
 
        if (slots) {
+               /* It will be freed separately in free_handle(). */
+               kmemleak_not_leak(slots);
                memset(slots->slot, 0, sizeof(slots->slot));
                slots->pool = (unsigned long)pool;
                rwlock_init(&slots->lock);
@@ -318,16 +321,16 @@ static inline void free_handle(unsigned long handle)
        slots = handle_to_slots(handle);
        write_lock(&slots->lock);
        *(unsigned long *)handle = 0;
-       write_unlock(&slots->lock);
-       if (zhdr->slots == slots)
+       if (zhdr->slots == slots) {
+               write_unlock(&slots->lock);
                return; /* simple case, nothing else to do */
+       }
 
        /* we are freeing a foreign handle if we are here */
        zhdr->foreign_handles--;
        is_free = true;
-       read_lock(&slots->lock);
        if (!test_bit(HANDLES_ORPHANED, &slots->pool)) {
-               read_unlock(&slots->lock);
+               write_unlock(&slots->lock);
                return;
        }
        for (i = 0; i <= BUDDY_MASK; i++) {
@@ -336,7 +339,7 @@ static inline void free_handle(unsigned long handle)
                        break;
                }
        }
-       read_unlock(&slots->lock);
+       write_unlock(&slots->lock);
 
        if (is_free) {
                struct z3fold_pool *pool = slots_to_pool(slots);
@@ -422,6 +425,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
        zhdr->start_middle = 0;
        zhdr->cpu = -1;
        zhdr->foreign_handles = 0;
+       zhdr->mapped_count = 0;
        zhdr->slots = slots;
        zhdr->pool = pool;
        INIT_LIST_HEAD(&zhdr->buddy);
index 0ce530af534ddfed26a32b1b24f202ee7c582198..8575f5d52087d3f64f477e59edac2aa4e261ca9c 100644 (file)
@@ -177,18 +177,18 @@ static void vcc_destroy_socket(struct sock *sk)
 
        set_bit(ATM_VF_CLOSE, &vcc->flags);
        clear_bit(ATM_VF_READY, &vcc->flags);
-       if (vcc->dev) {
-               if (vcc->dev->ops->close)
-                       vcc->dev->ops->close(vcc);
-               if (vcc->push)
-                       vcc->push(vcc, NULL); /* atmarpd has no push */
-               module_put(vcc->owner);
-
-               while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
-                       atm_return(vcc, skb->truesize);
-                       kfree_skb(skb);
-               }
+       if (vcc->dev && vcc->dev->ops->close)
+               vcc->dev->ops->close(vcc);
+       if (vcc->push)
+               vcc->push(vcc, NULL); /* atmarpd has no push */
+       module_put(vcc->owner);
+
+       while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
+               atm_return(vcc, skb->truesize);
+               kfree_skb(skb);
+       }
 
+       if (vcc->dev && vcc->dev->ops->owner) {
                module_put(vcc->dev->ops->owner);
                atm_dev_put(vcc->dev);
        }
index 25fa3a7b72bda7f6610db69b421485a9b6db8e8c..ca37f5a71f5e9e44459cbbd6fa2825f7736b9cc0 100644 (file)
@@ -1264,6 +1264,12 @@ static void lec_arp_clear_vccs(struct lec_arp_table *entry)
                entry->vcc = NULL;
        }
        if (entry->recv_vcc) {
+               struct atm_vcc *vcc = entry->recv_vcc;
+               struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
+
+               kfree(vpriv);
+               vcc->user_back = NULL;
+
                entry->recv_vcc->push = entry->old_recv_push;
                vcc_release_async(entry->recv_vcc, -EPIPE);
                entry->recv_vcc = NULL;
index ff57ea89c27e652c1128f68e69e22d41be361dac..fd91cd34f25e03d0178cbe66568ed392a7013f4e 100644 (file)
@@ -635,8 +635,10 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
                break;
 
        case SO_BINDTODEVICE:
-               if (optlen > IFNAMSIZ)
-                       optlen = IFNAMSIZ;
+               if (optlen > IFNAMSIZ - 1)
+                       optlen = IFNAMSIZ - 1;
+
+               memset(devname, 0, sizeof(devname));
 
                if (copy_from_user(devname, optval, optlen)) {
                        res = -EFAULT;
index 9694662189992394fc07858ce61285e939ea8755..80b87b1f4e3a89ce52e97ed7117e5e4026f8dea3 100644 (file)
@@ -893,7 +893,7 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
 
        orig_node = batadv_v_ogm_orig_get(bat_priv, ogm_packet->orig);
        if (!orig_node)
-               return;
+               goto out;
 
        neigh_node = batadv_neigh_node_get_or_create(orig_node, if_incoming,
                                                     ethhdr->h_source);
index 8f0717c3f7b545d53efc60f3bbbf4847d478a54e..b0469d15da0e9081084959090d00f19256d24594 100644 (file)
@@ -1009,15 +1009,8 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
  */
 static u8 batadv_nc_random_weight_tq(u8 tq)
 {
-       u8 rand_val, rand_tq;
-
-       get_random_bytes(&rand_val, sizeof(rand_val));
-
        /* randomize the estimated packet loss (max TQ - estimated TQ) */
-       rand_tq = rand_val * (BATADV_TQ_MAX_VALUE - tq);
-
-       /* normalize the randomized packet loss */
-       rand_tq /= BATADV_TQ_MAX_VALUE;
+       u8 rand_tq = prandom_u32_max(BATADV_TQ_MAX_VALUE + 1 - tq);
 
        /* convert to (randomized) estimated tq again */
        return BATADV_TQ_MAX_VALUE - rand_tq;
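The old code computed rand_val * (BATADV_TQ_MAX_VALUE - tq) into a u8, truncating the product to 8 bits before the normalizing divide, so the result was nearly always zero and the "randomized" weight barely varied. prandom_u32_max(n) yields a uniform value in [0, n - 1] with no such truncation. A sketch contrasting the two; the constant is redefined here only for illustration:

#include <linux/random.h>
#include <linux/types.h>

#define BATADV_TQ_MAX_VALUE 255	/* as in the batadv packet header */

static u8 old_weight(u8 tq, u8 rand_val)
{
	u8 rand_tq = rand_val * (BATADV_TQ_MAX_VALUE - tq); /* truncated! */

	rand_tq /= BATADV_TQ_MAX_VALUE;	/* almost always 0 after truncation */
	return BATADV_TQ_MAX_VALUE - rand_tq;
}

static u8 new_weight(u8 tq)
{
	/* uniform in [0, BATADV_TQ_MAX_VALUE - tq], inclusive */
	u8 rand_tq = prandom_u32_max(BATADV_TQ_MAX_VALUE + 1 - tq);

	return BATADV_TQ_MAX_VALUE - rand_tq;
}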
index c45962d8527bebf80c1e4e709784d139f7fe0a5d..0f962dcd239e64139832850a4f749313f38c6ad0 100644 (file)
@@ -1150,7 +1150,7 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
        ret = batadv_parse_throughput(net_dev, buff, "throughput_override",
                                      &tp_override);
        if (!ret)
-               return count;
+               goto out;
 
        old_tp_override = atomic_read(&hard_iface->bat_v.throughput_override);
        if (old_tp_override == tp_override)
@@ -1190,6 +1190,7 @@ static ssize_t batadv_show_throughput_override(struct kobject *kobj,
 
        tp_override = atomic_read(&hard_iface->bat_v.throughput_override);
 
+       batadv_hardif_put(hard_iface);
        return sprintf(buff, "%u.%u MBit\n", tp_override / 10,
                       tp_override % 10);
 }
index ad12fe3fca8cfb4888e500b11cd6ade9fb67ed53..83490bf73a13b3f2e0f19d83eab6e780ee8b7204 100644 (file)
@@ -2413,7 +2413,8 @@ void br_multicast_uninit_stats(struct net_bridge *br)
        free_percpu(br->mcast_stats);
 }
 
-static void mcast_stats_add_dir(u64 *dst, u64 *src)
+/* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */
+static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
 {
        dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
        dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
index 43dab4066f915bddcdcdc3dafc0d07fa7d50e01f..a0f5dbee8f9cb99a60ee6291757eb0f4a967a7ff 100644 (file)
@@ -612,6 +612,7 @@ int br_process_vlan_info(struct net_bridge *br,
                                               v - 1, rtm_cmd);
                                v_change_start = 0;
                        }
+                       cond_resched();
                }
                /* v_change_start is set only if the last/whole range changed */
                if (v_change_start)
index b325b569e76155f70d2266fb1aba3b8074537b91..f48cf4cfb80f9e4e1981dbb28a97e80d6af50176 100644 (file)
@@ -31,6 +31,12 @@ static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
        ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
        eth->h_proto = eth_hdr(oldskb)->h_proto;
        skb_pull(nskb, ETH_HLEN);
+
+       if (skb_vlan_tag_present(oldskb)) {
+               u16 vid = skb_vlan_tag_get(oldskb);
+
+               __vlan_hwaccel_put_tag(nskb, oldskb->vlan_proto, vid);
+       }
 }
 
 static int nft_bridge_iphdr_validate(struct sk_buff *skb)
index 998e26b75a789bcdca78c1ce3af4c9f2e734a9a4..1d4973f8cd7a69efe0763ba18990f96a2108893a 100644 (file)
@@ -3649,7 +3649,9 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
                 * supported.
                 */
                req->r_t.target_oloc.pool = m.redirect.oloc.pool;
-               req->r_flags |= CEPH_OSD_FLAG_REDIRECTED;
+               req->r_flags |= CEPH_OSD_FLAG_REDIRECTED |
+                               CEPH_OSD_FLAG_IGNORE_OVERLAY |
+                               CEPH_OSD_FLAG_IGNORE_CACHE;
                req->r_tid = 0;
                __submit_request(req, false);
                goto out_unlock_osdc;
index 522288177bbd8ce00d2152c218d7eef6fbcd82ab..2d8aceee4284554282ab25bd27c1b8b81e2ef92e 100644 (file)
@@ -4988,11 +4988,12 @@ static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
        return 0;
 }
 
-static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc,
+static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
                                    struct packet_type **ppt_prev)
 {
        struct packet_type *ptype, *pt_prev;
        rx_handler_func_t *rx_handler;
+       struct sk_buff *skb = *pskb;
        struct net_device *orig_dev;
        bool deliver_exact = false;
        int ret = NET_RX_DROP;
@@ -5023,8 +5024,10 @@ another_round:
                ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
                preempt_enable();
 
-               if (ret2 != XDP_PASS)
-                       return NET_RX_DROP;
+               if (ret2 != XDP_PASS) {
+                       ret = NET_RX_DROP;
+                       goto out;
+               }
                skb_reset_mac_len(skb);
        }
 
@@ -5174,6 +5177,13 @@ drop:
        }
 
 out:
+       /* The invariant here is that if *ppt_prev is not NULL
+        * then skb should also be non-NULL.
+        *
+        * The *ppt_prev assignment above appears to uphold this invariant,
+        * since skb is dereferenced near that assignment.
+        */
+       *pskb = skb;
        return ret;
 }
 
@@ -5183,7 +5193,7 @@ static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
        struct packet_type *pt_prev = NULL;
        int ret;
 
-       ret = __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
+       ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
        if (pt_prev)
                ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
                                         skb->dev, pt_prev, orig_dev);
@@ -5261,7 +5271,7 @@ static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemallo
                struct packet_type *pt_prev = NULL;
 
                skb_list_del_init(skb);
-               __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
+               __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
                if (!pt_prev)
                        continue;
                if (pt_curr != pt_prev || od_curr != orig_dev) {
@@ -8907,11 +8917,13 @@ static void netdev_sync_lower_features(struct net_device *upper,
                        netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
                                   &feature, lower->name);
                        lower->wanted_features &= ~feature;
-                       netdev_update_features(lower);
+                       __netdev_update_features(lower);
 
                        if (unlikely(lower->features & feature))
                                netdev_WARN(upper, "failed to disable %pNF on %s!\n",
                                            &feature, lower->name);
+                       else
+                               netdev_features_change(lower);
                }
        }
 }
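__netif_receive_skb_core() may substitute the skb (vlan untagging, rx_handlers and generic XDP can all reallocate it), so it now takes struct sk_buff ** and publishes the possibly-new pointer on exit; both call sites pass &skb and keep using skb afterwards. The shape of the pattern, sketched generically:

#include <linux/skbuff.h>

/* Callee may substitute a new skb; the caller's pointer must track it. */
static int process_skb(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	/* ... processing that may replace skb (untag, share check, ...) ... */

	*pskb = skb;	/* publish whatever skb now points at */
	return 0;
}

static void caller(struct sk_buff *skb)
{
	if (process_skb(&skb))
		return;
	/* safe: skb is the (possibly replaced) buffer */
	consume_skb(skb);
}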
index 80f97722f31f0d4de8b00048e0c3b6574b0986b9..899edcee7dab02e1d20a2db971f566b88c547d67 100644 (file)
@@ -4283,6 +4283,11 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
                end_offset = nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]);
                end_offset += nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]);
                dump = false;
+
+               if (start_offset == end_offset) {
+                       err = 0;
+                       goto nla_put_failure;
+               }
        }
 
        err = devlink_nl_region_read_snapshot_fill(skb, devlink,
@@ -5363,6 +5368,7 @@ int devlink_health_report(struct devlink_health_reporter *reporter,
 {
        enum devlink_health_reporter_state prev_health_state;
        struct devlink *devlink = reporter->devlink;
+       unsigned long recover_ts_threshold;
 
        /* write a log message of the current error */
        WARN_ON(!msg);
@@ -5373,10 +5379,12 @@ int devlink_health_report(struct devlink_health_reporter *reporter,
        devlink_recover_notify(reporter, DEVLINK_CMD_HEALTH_REPORTER_RECOVER);
 
        /* abort if the previous error wasn't recovered */
+       recover_ts_threshold = reporter->last_recovery_ts +
+                              msecs_to_jiffies(reporter->graceful_period);
        if (reporter->auto_recover &&
            (prev_health_state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY ||
-            jiffies - reporter->last_recovery_ts <
-            msecs_to_jiffies(reporter->graceful_period))) {
+            (reporter->last_recovery_ts && reporter->recovery_count &&
+             time_is_after_jiffies(recover_ts_threshold)))) {
                trace_devlink_health_recover_aborted(devlink,
                                                     reporter->ops->name,
                                                     reporter->health_state,
index 8e33cec9fc4ef8e3803ddc3457998c05d77f211d..2ee7bc4c9e03fe220171ba6eb6866ced3dd5669e 100644 (file)
@@ -213,6 +213,7 @@ static void sched_send_work(struct timer_list *t)
 static void trace_drop_common(struct sk_buff *skb, void *location)
 {
        struct net_dm_alert_msg *msg;
+       struct net_dm_drop_point *point;
        struct nlmsghdr *nlh;
        struct nlattr *nla;
        int i;
@@ -231,11 +232,13 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
        nlh = (struct nlmsghdr *)dskb->data;
        nla = genlmsg_data(nlmsg_data(nlh));
        msg = nla_data(nla);
+       point = msg->points;
        for (i = 0; i < msg->entries; i++) {
-               if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
-                       msg->points[i].count++;
+               if (!memcmp(&location, &point->pc, sizeof(void *))) {
+                       point->count++;
                        goto out;
                }
+               point++;
        }
        if (msg->entries == dm_hit_limit)
                goto out;
@@ -244,8 +247,8 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
         */
        __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
        nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
-       memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
-       msg->points[msg->entries].count = 1;
+       memcpy(point->pc, &location, sizeof(void *));
+       point->count = 1;
        msg->entries++;
 
        if (!timer_pending(&data->send_timer)) {
index 7d6ceaa54d2147584e2033956c4740e4d0ef4af1..5cc9276f102383e1a10d0c88f2bd16e0638445ef 100644 (file)
@@ -2590,8 +2590,8 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
                        }
                        pop = 0;
                } else if (pop >= sge->length - a) {
-                       sge->length = a;
                        pop -= (sge->length - a);
+                       sge->length = a;
                }
        }
 
index 3eff84824c8b451144fab6be6c772bf29d1c5345..5dceed467f6413555d4cd62991132542bbcfd864 100644 (file)
@@ -160,12 +160,10 @@ out:
        return ret;
 }
 
-int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
+static int flow_dissector_bpf_prog_detach(struct net *net)
 {
        struct bpf_prog *attached;
-       struct net *net;
 
-       net = current->nsproxy->net_ns;
        mutex_lock(&flow_dissector_mutex);
        attached = rcu_dereference_protected(net->flow_dissector_prog,
                                             lockdep_is_held(&flow_dissector_mutex));
@@ -179,6 +177,24 @@ int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
        return 0;
 }
 
+int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
+{
+       return flow_dissector_bpf_prog_detach(current->nsproxy->net_ns);
+}
+
+static void __net_exit flow_dissector_pernet_pre_exit(struct net *net)
+{
+       /* We're not racing with attach/detach because there are no
+        * references to netns left when pre_exit gets called.
+        */
+       if (rcu_access_pointer(net->flow_dissector_prog))
+               flow_dissector_bpf_prog_detach(net);
+}
+
+static struct pernet_operations flow_dissector_pernet_ops __net_initdata = {
+       .pre_exit = flow_dissector_pernet_pre_exit,
+};
+
 /**
  * __skb_flow_get_ports - extract the upper layer ports and return them
  * @skb: sk_buff to extract the ports from
@@ -1836,7 +1852,7 @@ static int __init init_default_flow_dissectors(void)
        skb_flow_dissector_init(&flow_keys_basic_dissector,
                                flow_keys_basic_dissector_keys,
                                ARRAY_SIZE(flow_keys_basic_dissector_keys));
-       return 0;
-}
 
+       return register_pernet_subsys(&flow_dissector_pernet_ops);
+}
 core_initcall(init_default_flow_dissectors);
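register_pernet_subsys() with a .pre_exit hook is the standard way to tear per-netns state down before the namespace is freed, which is what the new flow_dissector_pernet_ops relies on. A minimal sketch of the registration pattern; the ops and hook names mirror the patch, while the body is illustrative:

#include <linux/module.h>
#include <net/net_namespace.h>

static void __net_exit example_pernet_pre_exit(struct net *net)
{
	/* Drop any per-netns references; no new ones can appear here. */
}

static struct pernet_operations example_pernet_ops = {
	.pre_exit = example_pernet_pre_exit,
};

static int __init example_init(void)
{
	return register_pernet_subsys(&example_pernet_ops);
}
core_initcall(example_init);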
index 39d37d0ef575bf38eb1219c476970ddab333a176..dbe0c6ead773f5909c55ce73281a19e5a99f94f1 100644 (file)
@@ -1082,8 +1082,8 @@ static void neigh_timer_handler(struct timer_list *t)
        }
 
        if (neigh->nud_state & NUD_IN_TIMER) {
-               if (time_before(next, jiffies + HZ/2))
-                       next = jiffies + HZ/2;
+               if (time_before(next, jiffies + HZ/100))
+                       next = jiffies + HZ/100;
                if (!mod_timer(&neigh->timer, next))
                        neigh_hold(neigh);
        }
@@ -1956,6 +1956,9 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
                                   NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
        }
 
+       if (protocol)
+               neigh->protocol = protocol;
+
        if (ndm->ndm_flags & NTF_EXT_LEARNED)
                flags |= NEIGH_UPDATE_F_EXT_LEARNED;
 
@@ -1969,9 +1972,6 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
                err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
                                     NETLINK_CB(skb).portid, extack);
 
-       if (protocol)
-               neigh->protocol = protocol;
-
        neigh_release(neigh);
 
 out:
index 8881dd943dd00a415ee8262a1ce3c6920d04b7c6..9bd4cab7d510f234eb83d5e88640474d5d4de072 100644 (file)
@@ -236,6 +236,8 @@ static void net_prio_attach(struct cgroup_taskset *tset)
        struct task_struct *p;
        struct cgroup_subsys_state *css;
 
+       cgroup_sk_alloc_disable();
+
        cgroup_taskset_for_each(p, css, tset) {
                void *v = (void *)(unsigned long)css->id;
 
index 90509c37d29116b14b3b1849222c3a6148a0cb38..b714162213aeae98bfee24d8b457547fe7abab4f 100644 (file)
@@ -2364,7 +2364,6 @@ static void sk_leave_memory_pressure(struct sock *sk)
        }
 }
 
-/* On 32bit arches, an skb frag is limited to 2^15 */
 #define SKB_FRAG_PAGE_ORDER    get_order(32768)
 DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);
 
index 9a271a58a41dcf8fa352284f2d23477038e269d2..d90665b465b8ab6ceb2f85750af9eef9dc87cac5 100644 (file)
@@ -459,7 +459,7 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
        list_for_each_entry(dp, &dst->ports, list) {
                err = dsa_port_setup(dp);
                if (err)
-                       goto teardown;
+                       continue;
        }
 
        return 0;
index b5c535af63a351182e55d46dd712826e97e9eab5..a621367c6e8c292997984534614b0caea1fa991a 100644 (file)
@@ -289,7 +289,8 @@ static void dsa_master_ndo_teardown(struct net_device *dev)
 {
        struct dsa_port *cpu_dp = dev->dsa_ptr;
 
-       dev->netdev_ops = cpu_dp->orig_ndo_ops;
+       if (cpu_dp->orig_ndo_ops)
+               dev->netdev_ops = cpu_dp->orig_ndo_ops;
        cpu_dp->orig_ndo_ops = NULL;
 }
 
index d1068803cd1116781e53052a6defe8b9ae9b9fe1..d3bcb9afa795ae09660bb59915eb7bcd8b5b9a60 100644 (file)
@@ -856,20 +856,18 @@ dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
        struct dsa_port *to_dp;
        int err;
 
-       act = &cls->rule->action.entries[0];
-
        if (!ds->ops->port_mirror_add)
                return -EOPNOTSUPP;
 
-       if (!act->dev)
-               return -EINVAL;
-
        if (!flow_action_basic_hw_stats_check(&cls->rule->action,
                                              cls->common.extack))
                return -EOPNOTSUPP;
 
        act = &cls->rule->action.entries[0];
 
+       if (!act->dev)
+               return -EINVAL;
+
        if (!dsa_slave_dev_check(act->dev))
                return -EOPNOTSUPP;
 
@@ -1738,6 +1736,7 @@ int dsa_slave_create(struct dsa_port *port)
        if (ds->ops->port_vlan_add && ds->ops->port_vlan_del)
                slave_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
        slave_dev->hw_features |= NETIF_F_HW_TC;
+       slave_dev->features |= NETIF_F_LLTX;
        slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
        if (!IS_ERR_OR_NULL(port->mac))
                ether_addr_copy(slave_dev->dev_addr, port->mac);
index b5705cba831848189c8981e1027e0ad62eba0efe..d6619edd53e5a25068811c4d457000c31b7dc6a4 100644 (file)
@@ -15,6 +15,7 @@
 #define MTK_HDR_XMIT_TAGGED_TPID_8100  1
 #define MTK_HDR_RECV_SOURCE_PORT_MASK  GENMASK(2, 0)
 #define MTK_HDR_XMIT_DP_BIT_MASK       GENMASK(5, 0)
+#define MTK_HDR_XMIT_SA_DIS            BIT(6)
 
 static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
                                    struct net_device *dev)
@@ -22,6 +23,9 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
        struct dsa_port *dp = dsa_slave_to_port(dev);
        u8 *mtk_tag;
        bool is_vlan_skb = true;
+       unsigned char *dest = eth_hdr(skb)->h_dest;
+       bool is_multicast_skb = is_multicast_ether_addr(dest) &&
+                               !is_broadcast_ether_addr(dest);
 
        /* Build the special tag after the MAC Source Address. If VLAN header
         * is present, it's required that VLAN header and special tag is
@@ -47,6 +51,10 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
                     MTK_HDR_XMIT_UNTAGGED;
        mtk_tag[1] = (1 << dp->index) & MTK_HDR_XMIT_DP_BIT_MASK;
 
+       /* Disable SA learning for multicast frames */
+       if (unlikely(is_multicast_skb))
+               mtk_tag[1] |= MTK_HDR_XMIT_SA_DIS;
+
        /* Tag control information is kept for 802.1Q */
        if (!is_vlan_skb) {
                mtk_tag[2] = 0;
@@ -61,6 +69,9 @@ static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev,
 {
        int port;
        __be16 *phdr, hdr;
+       unsigned char *dest = eth_hdr(skb)->h_dest;
+       bool is_multicast_skb = is_multicast_ether_addr(dest) &&
+                               !is_broadcast_ether_addr(dest);
 
        if (unlikely(!pskb_may_pull(skb, MTK_HDR_LEN)))
                return NULL;
@@ -86,6 +97,10 @@ static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev,
        if (!skb->dev)
                return NULL;
 
+       /* Only unicast or broadcast frames are offloaded */
+       if (likely(!is_multicast_skb))
+               skb->offload_fwd_mark = 1;
+
        return skb;
 }
 
index 0c772318c02321d8b6107f81c6d5fb48e21b61f6..ed53572101933e864ae10b45c66b989761598bf1 100644 (file)
@@ -342,7 +342,7 @@ static int ethnl_default_doit(struct sk_buff *skb, struct genl_info *info)
        ret = ops->reply_size(req_info, reply_data);
        if (ret < 0)
                goto err_cleanup;
-       reply_len = ret;
+       reply_len = ret + ethnl_reply_header_size();
        ret = -ENOMEM;
        rskb = ethnl_reply_init(reply_len, req_info->dev, ops->reply_cmd,
                                ops->hdr_attr, info, &reply_payload);
@@ -588,7 +588,7 @@ static void ethnl_default_notify(struct net_device *dev, unsigned int cmd,
        ret = ops->reply_size(req_info, reply_data);
        if (ret < 0)
                goto err_cleanup;
-       reply_len = ret;
+       reply_len = ret + ethnl_reply_header_size();
        ret = -ENOMEM;
        skb = genlmsg_new(reply_len, GFP_KERNEL);
        if (!skb)
index 95eae5c68a52420404d2e7cbcebb28b764056fc1..0eed4e4909ab86914be4948b2fe060816c49db45 100644 (file)
@@ -324,7 +324,6 @@ static int strset_reply_size(const struct ethnl_req_info *req_base,
        int len = 0;
        int ret;
 
-       len += ethnl_reply_header_size();
        for (i = 0; i < ETH_SS_COUNT; i++) {
                const struct strset_info *set_info = &data->sets[i];
 
index f4b9f7a3ce5196850db57f3e287eb3ca198d5d9b..25b6ffba26cde9eb80f79d4853a735c0d250ef6b 100644 (file)
@@ -18,7 +18,7 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
 {
        struct sk_buff *skb = *pskb;
        struct hsr_port *port;
-       u16 protocol;
+       __be16 protocol;
 
        if (!skb_mac_header_was_set(skb)) {
                WARN_ONCE(1, "%s: skb invalid", __func__);
index 0bd10a1f477fdfd6bdc8b6c4a14f132280faedcf..a23094b050f8b3a795041ffef0ec171ee9f503c0 100644 (file)
@@ -1258,7 +1258,8 @@ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def,
                        return ret_val;
                }
 
-               secattr->flags |= NETLBL_SECATTR_MLS_CAT;
+               if (secattr->attr.mls.cat)
+                       secattr->flags |= NETLBL_SECATTR_MLS_CAT;
        }
 
        return 0;
@@ -1439,7 +1440,8 @@ static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def,
                        return ret_val;
                }
 
-               secattr->flags |= NETLBL_SECATTR_MLS_CAT;
+               if (secattr->attr.mls.cat)
+                       secattr->flags |= NETLBL_SECATTR_MLS_CAT;
        }
 
        return 0;
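Both CIPSO hunks stop advertising NETLBL_SECATTR_MLS_CAT unless a category bitmap was actually allocated, since consumers treat the flag as a promise that secattr->attr.mls.cat is valid. A minimal sketch of that flag-guards-pointer contract, with illustrative names:

#include <stdio.h>

#define ATTR_HAS_CAT 0x1

struct secattr {
        unsigned int flags;
        int *cat;               /* stands in for the category bitmap */
};

/* consumer trusts the flag and dereferences without a NULL check */
static void consumer(const struct secattr *s)
{
        if (s->flags & ATTR_HAS_CAT)
                printf("first category: %d\n", *s->cat);
}

int main(void)
{
        struct secattr s = { .flags = 0, .cat = NULL };

        /* parser side: set the flag only once the bitmap exists */
        if (s.cat)
                s.flags |= ATTR_HAS_CAT;

        consumer(&s);           /* safe: flag unset, no NULL dereference */
        printf("done\n");
        return 0;
}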
index c0dd561aa19032f8b6637d9b387e4fa404b31828..5267b6b191ebaa540b0012d468fd995d3c79cd8f 100644 (file)
@@ -276,6 +276,7 @@ static struct in_device *inetdev_init(struct net_device *dev)
        err = devinet_sysctl_register(in_dev);
        if (err) {
                in_dev->dead = 1;
+               neigh_parms_release(&arp_tbl, in_dev->arp_parms);
                in_dev_put(in_dev);
                in_dev = NULL;
                goto out;
index 731022cff6006ef900b4e62bd5f0f27fc934d40a..d14133eac4763580b98b5e4d5575403036ced783 100644 (file)
@@ -63,10 +63,8 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
                sp->olen++;
 
                xo = xfrm_offload(skb);
-               if (!xo) {
-                       xfrm_state_put(x);
+               if (!xo)
                        goto out_reset;
-               }
        }
 
        xo->flags |= XFRM_GRO;
@@ -139,19 +137,27 @@ static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        const struct net_offload *ops;
-       int proto = xo->proto;
+       u8 proto = xo->proto;
 
        skb->transport_header += x->props.header_len;
 
-       if (proto == IPPROTO_BEETPH) {
-               struct ip_beet_phdr *ph = (struct ip_beet_phdr *)skb->data;
+       if (x->sel.family != AF_INET6) {
+               if (proto == IPPROTO_BEETPH) {
+                       struct ip_beet_phdr *ph =
+                               (struct ip_beet_phdr *)skb->data;
+
+                       skb->transport_header += ph->hdrlen * 8;
+                       proto = ph->nexthdr;
+               } else {
+                       skb->transport_header -= IPV4_BEET_PHMAXLEN;
+               }
+       } else {
+               __be16 frag;
 
-               skb->transport_header += ph->hdrlen * 8;
-               proto = ph->nexthdr;
-       } else if (x->sel.family != AF_INET6) {
-               skb->transport_header -= IPV4_BEET_PHMAXLEN;
-       } else if (proto == IPPROTO_TCP) {
-               skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
+               skb->transport_header +=
+                       ipv6_skip_exthdr(skb, 0, &proto, &frag);
+               if (proto == IPPROTO_TCP)
+                       skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
        }
 
        __skb_pull(skb, skb_transport_offset(skb));
index 213be9c050addf59205ed7b9e50c505aedc4524c..41079490a118162e992e44e272b72f3b688093c9 100644 (file)
@@ -309,17 +309,18 @@ bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev)
 {
        bool dev_match = false;
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-       int ret;
+       if (unlikely(fi->nh)) {
+               dev_match = nexthop_uses_dev(fi->nh, dev);
+       } else {
+               int ret;
 
-       for (ret = 0; ret < fib_info_num_path(fi); ret++) {
-               const struct fib_nh_common *nhc = fib_info_nhc(fi, ret);
+               for (ret = 0; ret < fib_info_num_path(fi); ret++) {
+                       const struct fib_nh_common *nhc = fib_info_nhc(fi, ret);
 
-               if (nhc->nhc_dev == dev) {
-                       dev_match = true;
-                       break;
-               } else if (l3mdev_master_ifindex_rcu(nhc->nhc_dev) == dev->ifindex) {
-                       dev_match = true;
-                       break;
+                       if (nhc_l3mdev_matches_dev(nhc, dev)) {
+                               dev_match = true;
+                               break;
+                       }
                }
        }
 #else
@@ -918,7 +919,6 @@ int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
        else
                filter->dump_exceptions = false;
 
-       filter->dump_all_families = (rtm->rtm_family == AF_UNSPEC);
        filter->flags    = rtm->rtm_flags;
        filter->protocol = rtm->rtm_protocol;
        filter->rt_type  = rtm->rtm_type;
@@ -990,7 +990,7 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
        if (filter.table_id) {
                tb = fib_get_table(net, filter.table_id);
                if (!tb) {
-                       if (filter.dump_all_families)
+                       if (rtnl_msg_family(cb->nlh) != PF_INET)
                                return skb->len;
 
                        NL_SET_ERR_MSG(cb->extack, "ipv4: FIB table does not exist");
index 4f334b42553853b2cc1de001bd99a53a71de5030..248f1c1959a63c4aa22331d2a937aa5aecaf3f0e 100644 (file)
@@ -1371,6 +1371,26 @@ static inline t_key prefix_mismatch(t_key key, struct key_vector *n)
        return (key ^ prefix) & (prefix | -prefix);
 }
 
+bool fib_lookup_good_nhc(const struct fib_nh_common *nhc, int fib_flags,
+                        const struct flowi4 *flp)
+{
+       if (nhc->nhc_flags & RTNH_F_DEAD)
+               return false;
+
+       if (ip_ignore_linkdown(nhc->nhc_dev) &&
+           nhc->nhc_flags & RTNH_F_LINKDOWN &&
+           !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE))
+               return false;
+
+       if (!(flp->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF)) {
+               if (flp->flowi4_oif &&
+                   flp->flowi4_oif != nhc->nhc_oif)
+                       return false;
+       }
+
+       return true;
+}
+
 /* should be called with rcu_read_lock */
 int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
                     struct fib_result *res, int fib_flags)
@@ -1503,6 +1523,7 @@ found:
        /* Step 3: Process the leaf, if that fails fall back to backtracing */
        hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) {
                struct fib_info *fi = fa->fa_info;
+               struct fib_nh_common *nhc;
                int nhsel, err;
 
                if ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen < KEYLENGTH)) {
@@ -1528,26 +1549,25 @@ out_reject:
                if (fi->fib_flags & RTNH_F_DEAD)
                        continue;
 
-               if (unlikely(fi->nh && nexthop_is_blackhole(fi->nh))) {
-                       err = fib_props[RTN_BLACKHOLE].error;
-                       goto out_reject;
+               if (unlikely(fi->nh)) {
+                       if (nexthop_is_blackhole(fi->nh)) {
+                               err = fib_props[RTN_BLACKHOLE].error;
+                               goto out_reject;
+                       }
+
+                       nhc = nexthop_get_nhc_lookup(fi->nh, fib_flags, flp,
+                                                    &nhsel);
+                       if (nhc)
+                               goto set_result;
+                       goto miss;
                }
 
                for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
-                       struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);
+                       nhc = fib_info_nhc(fi, nhsel);
 
-                       if (nhc->nhc_flags & RTNH_F_DEAD)
+                       if (!fib_lookup_good_nhc(nhc, fib_flags, flp))
                                continue;
-                       if (ip_ignore_linkdown(nhc->nhc_dev) &&
-                           nhc->nhc_flags & RTNH_F_LINKDOWN &&
-                           !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE))
-                               continue;
-                       if (!(flp->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF)) {
-                               if (flp->flowi4_oif &&
-                                   flp->flowi4_oif != nhc->nhc_oif)
-                                       continue;
-                       }
-
+set_result:
                        if (!(fib_flags & FIB_LOOKUP_NOREF))
                                refcount_inc(&fi->fib_clntref);
 
@@ -1568,6 +1588,7 @@ out_reject:
                        return err;
                }
        }
+miss:
 #ifdef CONFIG_IP_FIB_TRIE_STATS
        this_cpu_inc(stats->semantic_match_miss);
 #endif
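The fib_trie changes hoist the per-nexthop filtering (dead, link-down, wrong output interface) into fib_lookup_good_nhc() so the legacy fib_nh loop and the new nexthop-object path share one predicate. The userspace sketch below shows only the refactor shape; the struct and field names are invented:

#include <stdbool.h>
#include <stdio.h>

struct hop { bool dead; bool linkdown; int oif; };

/* one predicate instead of three inline continue conditions */
static bool hop_usable(const struct hop *h, int wanted_oif,
                       bool ignore_linkdown)
{
        if (h->dead)
                return false;
        if (h->linkdown && !ignore_linkdown)
                return false;
        if (wanted_oif && h->oif != wanted_oif)
                return false;
        return true;
}

int main(void)
{
        struct hop hops[] = { { .dead = true }, { .oif = 2 }, { .oif = 3 } };

        for (int i = 0; i < 3; i++)
                if (hop_usable(&hops[i], 3, false))
                        printf("selected hop %d\n", i); /* selected hop 2 */
        return 0;
}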
index 5f34eb9516277ca7b26f5829832d20c6703f8261..65c29f2bd89f49214ad65315a15639c07f3eb845 100644 (file)
 #include <net/addrconf.h>
 
 #if IS_ENABLED(CONFIG_IPV6)
-/* match_wildcard == true:  IPV6_ADDR_ANY equals to any IPv6 addresses if IPv6
- *                          only, and any IPv4 addresses if not IPv6 only
- * match_wildcard == false: addresses must be exactly the same, i.e.
- *                          IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY,
- *                          and 0.0.0.0 equals to 0.0.0.0 only
+/* match_sk*_wildcard == true:  IPV6_ADDR_ANY matches any IPv6 address
+ *                             if IPv6 only, and any IPv4 address
+ *                             if not IPv6 only
+ * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
+ *                             IPV6_ADDR_ANY only matches IPV6_ADDR_ANY,
+ *                             and 0.0.0.0 matches only 0.0.0.0
  */
 static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
                                 const struct in6_addr *sk2_rcv_saddr6,
                                 __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
                                 bool sk1_ipv6only, bool sk2_ipv6only,
-                                bool match_wildcard)
+                                bool match_sk1_wildcard,
+                                bool match_sk2_wildcard)
 {
        int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
        int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
@@ -44,8 +46,8 @@ static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
                if (!sk2_ipv6only) {
                        if (sk1_rcv_saddr == sk2_rcv_saddr)
                                return true;
-                       if (!sk1_rcv_saddr || !sk2_rcv_saddr)
-                               return match_wildcard;
+                       return (match_sk1_wildcard && !sk1_rcv_saddr) ||
+                               (match_sk2_wildcard && !sk2_rcv_saddr);
                }
                return false;
        }
@@ -53,11 +55,11 @@ static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
        if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
                return true;
 
-       if (addr_type2 == IPV6_ADDR_ANY && match_wildcard &&
+       if (addr_type2 == IPV6_ADDR_ANY && match_sk2_wildcard &&
            !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
                return true;
 
-       if (addr_type == IPV6_ADDR_ANY && match_wildcard &&
+       if (addr_type == IPV6_ADDR_ANY && match_sk1_wildcard &&
            !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
                return true;
 
@@ -69,18 +71,19 @@ static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
 }
 #endif
 
-/* match_wildcard == true:  0.0.0.0 equals to any IPv4 addresses
- * match_wildcard == false: addresses must be exactly the same, i.e.
- *                          0.0.0.0 only equals to 0.0.0.0
+/* match_sk*_wildcard == true:  0.0.0.0 matches any IPv4 address
+ * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
+ *                             0.0.0.0 matches only 0.0.0.0
  */
 static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
-                                bool sk2_ipv6only, bool match_wildcard)
+                                bool sk2_ipv6only, bool match_sk1_wildcard,
+                                bool match_sk2_wildcard)
 {
        if (!sk2_ipv6only) {
                if (sk1_rcv_saddr == sk2_rcv_saddr)
                        return true;
-               if (!sk1_rcv_saddr || !sk2_rcv_saddr)
-                       return match_wildcard;
+               return (match_sk1_wildcard && !sk1_rcv_saddr) ||
+                       (match_sk2_wildcard && !sk2_rcv_saddr);
        }
        return false;
 }
@@ -96,10 +99,12 @@ bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
                                            sk2->sk_rcv_saddr,
                                            ipv6_only_sock(sk),
                                            ipv6_only_sock(sk2),
+                                           match_wildcard,
                                            match_wildcard);
 #endif
        return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
-                                   ipv6_only_sock(sk2), match_wildcard);
+                                   ipv6_only_sock(sk2), match_wildcard,
+                                   match_wildcard);
 }
 EXPORT_SYMBOL(inet_rcv_saddr_equal);
 
@@ -285,10 +290,10 @@ static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
                                            tb->fast_rcv_saddr,
                                            sk->sk_rcv_saddr,
                                            tb->fast_ipv6_only,
-                                           ipv6_only_sock(sk), true);
+                                           ipv6_only_sock(sk), true, false);
 #endif
        return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
-                                   ipv6_only_sock(sk), true);
+                                   ipv6_only_sock(sk), true, false);
 }
 
 /* Obtain a reference to a local port for the given sock,
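The rewritten comparison makes wildcard matching directional: the bind bucket's cached address (sk1) may wildcard-match while the candidate socket (sk2) may not, as the true/false arguments in sk_reuseport_match() show. A compact model of the IPv4 rule:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool saddr_equal(uint32_t a1, uint32_t a2,
                        bool match_a1_wildcard, bool match_a2_wildcard)
{
        if (a1 == a2)
                return true;
        /* 0 stands for INADDR_ANY; only the permitted side may match it */
        return (match_a1_wildcard && !a1) || (match_a2_wildcard && !a2);
}

int main(void)
{
        uint32_t any = 0, addr = 0x7f000001;    /* 0.0.0.0 vs 127.0.0.1 */

        printf("%d\n", saddr_equal(any, addr, true, false));   /* 1 */
        printf("%d\n", saddr_equal(addr, any, true, false));   /* 0 */
        return 0;
}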
index 1b4e6f298648d9cdce5b482a7ba25ae4733bfea6..1dda7c155c484da6995b363a2a6fff441b202a31 100644 (file)
@@ -93,7 +93,28 @@ static int vti_rcv_proto(struct sk_buff *skb)
 
 static int vti_rcv_tunnel(struct sk_buff *skb)
 {
-       return vti_rcv(skb, ip_hdr(skb)->saddr, true);
+       struct ip_tunnel_net *itn = net_generic(dev_net(skb->dev), vti_net_id);
+       const struct iphdr *iph = ip_hdr(skb);
+       struct ip_tunnel *tunnel;
+
+       tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
+                                 iph->saddr, iph->daddr, 0);
+       if (tunnel) {
+               struct tnl_ptk_info tpi = {
+                       .proto = htons(ETH_P_IP),
+               };
+
+               if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+                       goto drop;
+               if (iptunnel_pull_header(skb, 0, tpi.proto, false))
+                       goto drop;
+               return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, false);
+       }
+
+       return -EINVAL;
+drop:
+       kfree_skb(skb);
+       return 0;
 }
 
 static int vti_rcv_cb(struct sk_buff *skb, int err)
index 2f01cf6fa0deffb6f86a4db89b0aa8b951d0f352..678575adaf3b74a154bffff439a1ec53f13358d5 100644 (file)
@@ -698,7 +698,7 @@ out:
 
 rtnl_link_failed:
 #if IS_ENABLED(CONFIG_MPLS)
-       xfrm4_tunnel_deregister(&mplsip_handler, AF_INET);
+       xfrm4_tunnel_deregister(&mplsip_handler, AF_MPLS);
 xfrm_tunnel_mplsip_failed:
 
 #endif
index 9cf83cc85e4ad5f79d949b1e46ee031e732b0008..b2363b82b48d5ea2c31c34581d151fc2265c743d 100644 (file)
@@ -109,8 +109,10 @@ static void mroute_clean_tables(struct mr_table *mrt, int flags);
 static void ipmr_expire_process(struct timer_list *t);
 
 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
-#define ipmr_for_each_table(mrt, net) \
-       list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)
+#define ipmr_for_each_table(mrt, net)                                  \
+       list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list,        \
+                               lockdep_rtnl_is_held() ||               \
+                               list_empty(&net->ipv4.mr_tables))
 
 static struct mr_table *ipmr_mr_table_iter(struct net *net,
                                           struct mr_table *mrt)
@@ -2611,7 +2613,7 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
 
                mrt = ipmr_get_table(sock_net(skb->sk), filter.table_id);
                if (!mrt) {
-                       if (filter.dump_all_families)
+                       if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IPMR)
                                return skb->len;
 
                        NL_SET_ERR_MSG(cb->extack, "ipv4: MR table does not exist");
index 3c25a467b3efc9c408ca6f51f622675c81ee4710..7afde8828b4c9c3f17f6ae3beea8d7f7d0b677eb 100644 (file)
@@ -166,8 +166,7 @@ pptp_outbound_pkt(struct sk_buff *skb,
                break;
        default:
                pr_debug("unknown outbound packet 0x%04x:%s\n", msg,
-                        msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] :
-                                              pptp_msg_name[0]);
+                        pptp_msg_name(msg));
                fallthrough;
        case PPTP_SET_LINK_INFO:
                /* only need to NAT in case PAC is behind NAT box */
@@ -268,9 +267,7 @@ pptp_inbound_pkt(struct sk_buff *skb,
                pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID);
                break;
        default:
-               pr_debug("unknown inbound packet %s\n",
-                        msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] :
-                                              pptp_msg_name[0]);
+               pr_debug("unknown inbound packet %s\n", pptp_msg_name(msg));
                fallthrough;
        case PPTP_START_SESSION_REQUEST:
        case PPTP_START_SESSION_REPLY:
index fdfca534d0942016a61174ab1e99fc056d6bc65c..563f71bcb2d74764fc2b1379bdc75d0c2c9141cf 100644 (file)
@@ -63,9 +63,16 @@ static void nexthop_free_mpath(struct nexthop *nh)
        int i;
 
        nhg = rcu_dereference_raw(nh->nh_grp);
-       for (i = 0; i < nhg->num_nh; ++i)
-               WARN_ON(nhg->nh_entries[i].nh);
+       for (i = 0; i < nhg->num_nh; ++i) {
+               struct nh_grp_entry *nhge = &nhg->nh_entries[i];
+
+               WARN_ON(!list_empty(&nhge->nh_list));
+               nexthop_put(nhge->nh);
+       }
+
+       WARN_ON(nhg->spare == nhg);
 
+       kfree(nhg->spare);
        kfree(nhg);
 }
 
@@ -276,6 +283,7 @@ out:
        return 0;
 
 nla_put_failure:
+       nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
 }
 
@@ -433,7 +441,7 @@ static int nh_check_attr_group(struct net *net, struct nlattr *tb[],
                if (!valid_group_nh(nh, len, extack))
                        return -EINVAL;
        }
-       for (i = NHA_GROUP + 1; i < __NHA_MAX; ++i) {
+       for (i = NHA_GROUP_TYPE + 1; i < __NHA_MAX; ++i) {
                if (!tb[i])
                        continue;
 
@@ -693,41 +701,56 @@ static void nh_group_rebalance(struct nh_group *nhg)
        }
 }
 
-static void remove_nh_grp_entry(struct nh_grp_entry *nhge,
-                               struct nh_group *nhg,
+static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
                                struct nl_info *nlinfo)
 {
+       struct nh_grp_entry *nhges, *new_nhges;
+       struct nexthop *nhp = nhge->nh_parent;
        struct nexthop *nh = nhge->nh;
-       struct nh_grp_entry *nhges;
-       bool found = false;
-       int i;
+       struct nh_group *nhg, *newg;
+       int i, j;
 
        WARN_ON(!nh);
 
-       nhges = nhg->nh_entries;
-       for (i = 0; i < nhg->num_nh; ++i) {
-               if (found) {
-                       nhges[i-1].nh = nhges[i].nh;
-                       nhges[i-1].weight = nhges[i].weight;
-                       list_del(&nhges[i].nh_list);
-                       list_add(&nhges[i-1].nh_list, &nhges[i-1].nh->grp_list);
-               } else if (nhg->nh_entries[i].nh == nh) {
-                       found = true;
-               }
-       }
+       nhg = rtnl_dereference(nhp->nh_grp);
+       newg = nhg->spare;
 
-       if (WARN_ON(!found))
+       /* last entry, keep it visible and remove the parent */
+       if (nhg->num_nh == 1) {
+               remove_nexthop(net, nhp, nlinfo);
                return;
+       }
+
+       newg->has_v4 = nhg->has_v4;
+       newg->mpath = nhg->mpath;
+       newg->num_nh = nhg->num_nh;
 
-       nhg->num_nh--;
-       nhg->nh_entries[nhg->num_nh].nh = NULL;
+       /* copy old entries to new except the one getting removed */
+       nhges = nhg->nh_entries;
+       new_nhges = newg->nh_entries;
+       for (i = 0, j = 0; i < nhg->num_nh; ++i) {
+               /* current nexthop getting removed */
+               if (nhg->nh_entries[i].nh == nh) {
+                       newg->num_nh--;
+                       continue;
+               }
 
-       nh_group_rebalance(nhg);
+               list_del(&nhges[i].nh_list);
+               new_nhges[j].nh_parent = nhges[i].nh_parent;
+               new_nhges[j].nh = nhges[i].nh;
+               new_nhges[j].weight = nhges[i].weight;
+               list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list);
+               j++;
+       }
 
-       nexthop_put(nh);
+       nh_group_rebalance(newg);
+       rcu_assign_pointer(nhp->nh_grp, newg);
+
+       list_del(&nhge->nh_list);
+       nexthop_put(nhge->nh);
 
        if (nlinfo)
-               nexthop_notify(RTM_NEWNEXTHOP, nhge->nh_parent, nlinfo);
+               nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo);
 }
 
 static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
@@ -735,17 +758,11 @@ static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
 {
        struct nh_grp_entry *nhge, *tmp;
 
-       list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list) {
-               struct nh_group *nhg;
-
-               list_del(&nhge->nh_list);
-               nhg = rtnl_dereference(nhge->nh_parent->nh_grp);
-               remove_nh_grp_entry(nhge, nhg, nlinfo);
+       list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list)
+               remove_nh_grp_entry(net, nhge, nlinfo);
 
-               /* if this group has no more entries then remove it */
-               if (!nhg->num_nh)
-                       remove_nexthop(net, nhge->nh_parent, nlinfo);
-       }
+       /* make sure all readers see the new array before releasing rtnl */
+       synchronize_rcu();
 }
 
 static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
@@ -759,10 +776,7 @@ static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
                if (WARN_ON(!nhge->nh))
                        continue;
 
-               list_del(&nhge->nh_list);
-               nexthop_put(nhge->nh);
-               nhge->nh = NULL;
-               nhg->num_nh--;
+               list_del_init(&nhge->nh_list);
        }
 }
 
@@ -1085,6 +1099,7 @@ static struct nexthop *nexthop_create_group(struct net *net,
 {
        struct nlattr *grps_attr = cfg->nh_grp;
        struct nexthop_grp *entry = nla_data(grps_attr);
+       u16 num_nh = nla_len(grps_attr) / sizeof(*entry);
        struct nh_group *nhg;
        struct nexthop *nh;
        int i;
@@ -1095,12 +1110,21 @@ static struct nexthop *nexthop_create_group(struct net *net,
 
        nh->is_group = 1;
 
-       nhg = nexthop_grp_alloc(nla_len(grps_attr) / sizeof(*entry));
+       nhg = nexthop_grp_alloc(num_nh);
        if (!nhg) {
                kfree(nh);
                return ERR_PTR(-ENOMEM);
        }
 
+       /* spare group used for removals */
+       nhg->spare = nexthop_grp_alloc(num_nh);
+       if (!nhg->spare) {
+               kfree(nhg);
+               kfree(nh);
+               return NULL;
+       }
+       nhg->spare->spare = nhg;
+
        for (i = 0; i < nhg->num_nh; ++i) {
                struct nexthop *nhe;
                struct nh_info *nhi;
@@ -1132,6 +1156,7 @@ out_no_nh:
        for (; i >= 0; --i)
                nexthop_put(nhg->nh_entries[i].nh);
 
+       kfree(nhg->spare);
        kfree(nhg);
        kfree(nh);
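The nexthop changes above replace in-place group mutation with a pre-allocated spare: removal builds the reduced entry array in the spare group and publishes it with rcu_assign_pointer(), so lockless readers only ever see a complete array. A userspace sketch of that double-buffer publish, with C11 atomics standing in for RCU and all names invented:

#include <stdatomic.h>
#include <stdio.h>

struct grp { int num; int entries[4]; struct grp *spare; };

static _Atomic(struct grp *) active;

static void remove_entry(int victim)
{
        struct grp *old = atomic_load(&active);
        struct grp *new = old->spare;
        int j = 0;

        /* build the reduced copy off to the side */
        for (int i = 0; i < old->num; i++)
                if (old->entries[i] != victim)
                        new->entries[j++] = old->entries[i];
        new->num = j;
        new->spare = old;               /* old buffer becomes the next spare */
        atomic_store(&active, new);     /* single atomic publish */
}

int main(void)
{
        static struct grp a = { .num = 3, .entries = { 10, 20, 30 } }, b;

        a.spare = &b;
        b.spare = &a;
        atomic_store(&active, &a);
        remove_entry(20);
        printf("num=%d first=%d\n", atomic_load(&active)->num,
               atomic_load(&active)->entries[0]);       /* num=2 first=10 */
        return 0;
}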
 
index 788c69d9bfe0d20dd1f0884e2d9939df31442bb5..b73f540fa19be2ba93ebcc7df30f14ecb0faaf05 100644 (file)
@@ -491,18 +491,16 @@ u32 ip_idents_reserve(u32 hash, int segs)
        atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
        u32 old = READ_ONCE(*p_tstamp);
        u32 now = (u32)jiffies;
-       u32 new, delta = 0;
+       u32 delta = 0;
 
        if (old != now && cmpxchg(p_tstamp, old, now) == old)
                delta = prandom_u32_max(now - old);
 
-       /* Do not use atomic_add_return() as it makes UBSAN unhappy */
-       do {
-               old = (u32)atomic_read(p_id);
-               new = old + delta + segs;
-       } while (atomic_cmpxchg(p_id, old, new) != old);
-
-       return new - segs;
+       /* If UBSAN reports an error here, first make sure your compiler
+        * supports -fno-strict-overflow before reporting it: that was a
+        * bug in UBSAN, and it has been fixed in GCC-8.
+        */
+       return atomic_add_return(segs + delta, p_id) - segs;
 }
 EXPORT_SYMBOL(ip_idents_reserve);
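With -fno-strict-overflow (which the kernel build sets), the open-coded cmpxchg loop buys nothing over a single atomic add; unsigned wraparound of the ident counter is intended. A userspace rendering of the reservation arithmetic:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint32_t ident;

static uint32_t idents_reserve(uint32_t delta, uint32_t segs)
{
        /* mirrors atomic_add_return(segs + delta, p_id) - segs */
        return atomic_fetch_add(&ident, segs + delta) + delta;
}

int main(void)
{
        atomic_store(&ident, UINT32_MAX - 1);           /* close to wrap */
        printf("first id %u\n", idents_reserve(0, 3));  /* 4294967294 */
        printf("next id  %u\n", idents_reserve(0, 1));  /* 1: wrapped safely */
        return 0;
}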
 
@@ -915,7 +913,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
        /* Check for load limit; set rate_last to the latest sent
         * redirect.
         */
-       if (peer->rate_tokens == 0 ||
+       if (peer->n_redirects == 0 ||
            time_after(jiffies,
                       (peer->rate_last +
                        (ip_rt_redirect_load << peer->n_redirects)))) {
index 6d87de434377e3741314772e5fd866de1c599108..dd401757eea1f0187b0e547828f794e62eb895b8 100644 (file)
@@ -476,9 +476,17 @@ static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
 static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
                                          int target, struct sock *sk)
 {
-       return (READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq) >= target) ||
-               (sk->sk_prot->stream_memory_read ?
-               sk->sk_prot->stream_memory_read(sk) : false);
+       int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);
+
+       if (avail > 0) {
+               if (avail >= target)
+                       return true;
+               if (tcp_rmem_pressure(sk))
+                       return true;
+       }
+       if (sk->sk_prot->stream_memory_read)
+               return sk->sk_prot->stream_memory_read(sk);
+       return false;
 }
 
 /*
@@ -1756,10 +1764,11 @@ static int tcp_zerocopy_receive(struct sock *sk,
 
        down_read(&current->mm->mmap_sem);
 
-       ret = -EINVAL;
        vma = find_vma(current->mm, address);
-       if (!vma || vma->vm_start > address || vma->vm_ops != &tcp_vm_ops)
-               goto out;
+       if (!vma || vma->vm_start > address || vma->vm_ops != &tcp_vm_ops) {
+               up_read(&current->mm->mmap_sem);
+               return -EINVAL;
+       }
        zc->length = min_t(unsigned long, zc->length, vma->vm_end - address);
 
        tp = tcp_sk(sk);
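The tcp_zerocopy_receive() hunk converts a goto into a direct return, and the key detail is that the early exit now drops mmap_sem itself. The sketch below shows only that lock discipline, with a pthread rwlock standing in for mmap_sem:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t mm_lock = PTHREAD_RWLOCK_INITIALIZER;

static int do_zerocopy(int have_vma)
{
        pthread_rwlock_rdlock(&mm_lock);
        if (!have_vma) {
                /* every return taken under the lock must release it */
                pthread_rwlock_unlock(&mm_lock);
                return -1;
        }
        /* ... page mapping work under the lock ... */
        pthread_rwlock_unlock(&mm_lock);
        return 0;
}

int main(void)
{
        printf("%d\n", do_zerocopy(0)); /* -1, lock released */
        printf("%d\n", do_zerocopy(1)); /* 0 */
        return 0;
}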
@@ -2154,13 +2163,15 @@ skip_copy:
                        tp->urg_data = 0;
                        tcp_fast_path_check(sk);
                }
-               if (used + offset < skb->len)
-                       continue;
 
                if (TCP_SKB_CB(skb)->has_rxtstamp) {
                        tcp_update_recv_tstamps(skb, &tss);
                        cmsg_flags |= 2;
                }
+
+               if (used + offset < skb->len)
+                       continue;
+
                if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
                        goto found_fin_ok;
                if (!(flags & MSG_PEEK))
index 5a05327f97c17c53ae5ba884255371c9611a29a9..629aaa9a1eb99537c02041b12e1c20714a3b93f9 100644 (file)
@@ -125,7 +125,6 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
 
        if (!ret) {
                msg->sg.start = i;
-               msg->sg.size -= apply_bytes;
                sk_psock_queue_msg(psock, tmp);
                sk_psock_data_ready(sk, psock);
        } else {
@@ -262,14 +261,17 @@ static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
        struct sk_psock *psock;
        int copied, ret;
 
+       if (unlikely(flags & MSG_ERRQUEUE))
+               return inet_recv_error(sk, msg, len, addr_len);
+
        psock = sk_psock_get(sk);
        if (unlikely(!psock))
                return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
-       if (unlikely(flags & MSG_ERRQUEUE))
-               return inet_recv_error(sk, msg, len, addr_len);
        if (!skb_queue_empty(&sk->sk_receive_queue) &&
-           sk_psock_queue_empty(psock))
+           sk_psock_queue_empty(psock)) {
+               sk_psock_put(sk, psock);
                return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
+       }
        lock_sock(sk);
 msg_bytes_ready:
        copied = __tcp_bpf_recvmsg(sk, psock, msg, len, flags);
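The tcp_bpf_recvmsg() fix is reference-count discipline: once sk_psock_get() succeeds, every return path, including the fallback to plain tcp_recvmsg(), must pair it with sk_psock_put(). A minimal model of that invariant, names illustrative:

#include <stdio.h>

struct psock { int refcnt; };

static struct psock *psock_get(struct psock *p) { p->refcnt++; return p; }
static void psock_put(struct psock *p) { p->refcnt--; }

static int recvmsg_model(struct psock *p, int fallback)
{
        struct psock *ref = psock_get(p);

        if (fallback) {
                psock_put(ref); /* the fix: drop the ref before bailing out */
                return 1;
        }
        /* ... normal psock receive path ... */
        psock_put(ref);
        return 0;
}

int main(void)
{
        struct psock p = { 0 };

        recvmsg_model(&p, 1);
        printf("refcnt=%d\n", p.refcnt);        /* 0: no leak */
        return 0;
}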
index bf4ced9273e8e84b95204db9bfe86867c0a3ee13..29c6fc8c7716881ec37ad08fbd3497747b9350fe 100644 (file)
@@ -3926,10 +3926,6 @@ void tcp_parse_options(const struct net *net,
                                 */
                                break;
 #endif
-                       case TCPOPT_MPTCP:
-                               mptcp_parse_option(skb, ptr, opsize, opt_rx);
-                               break;
-
                        case TCPOPT_FASTOPEN:
                                tcp_parse_fastopen_option(
                                        opsize - TCPOLEN_FASTOPEN_BASE,
@@ -4761,7 +4757,8 @@ void tcp_data_ready(struct sock *sk)
        const struct tcp_sock *tp = tcp_sk(sk);
        int avail = tp->rcv_nxt - tp->copied_seq;
 
-       if (avail < sk->sk_rcvlowat && !sock_flag(sk, SOCK_DONE))
+       if (avail < sk->sk_rcvlowat && !tcp_rmem_pressure(sk) &&
+           !sock_flag(sk, SOCK_DONE))
                return;
 
        sk->sk_data_ready(sk);
@@ -5990,9 +5987,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
                tcp_initialize_rcv_mss(sk);
 
-               if (sk_is_mptcp(sk))
-                       mptcp_rcv_synsent(sk);
-
                /* Remember, tcp_poll() does not lock socket!
                 * Change state from SYN-SENT only after copied_seq
                 * is initialized. */
index 221c81f85cbfa36172caa4b3e2df721d888d6400..8d3f66c310dbde7109734a0b6e6d1fdf08a178f0 100644 (file)
@@ -1047,7 +1047,8 @@ static int calipso_opt_getattr(const unsigned char *calipso,
                        goto getattr_return;
                }
 
-               secattr->flags |= NETLBL_SECATTR_MLS_CAT;
+               if (secattr->attr.mls.cat)
+                       secattr->flags |= NETLBL_SECATTR_MLS_CAT;
        }
 
        secattr->type = NETLBL_NLTYPE_CALIPSO;
index 8eab2c869d6154e212a2630976c4e6d468457ab2..ab0eea336c70d35f73faf05f457986a31da3fb61 100644 (file)
@@ -85,10 +85,8 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
                sp->olen++;
 
                xo = xfrm_offload(skb);
-               if (!xo) {
-                       xfrm_state_put(x);
+               if (!xo)
                        goto out_reset;
-               }
        }
 
        xo->flags |= XFRM_GRO;
@@ -123,9 +121,16 @@ static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
        struct ip_esp_hdr *esph;
        struct ipv6hdr *iph = ipv6_hdr(skb);
        struct xfrm_offload *xo = xfrm_offload(skb);
-       int proto = iph->nexthdr;
+       u8 proto = iph->nexthdr;
 
        skb_push(skb, -skb_network_offset(skb));
+
+       if (x->outer_mode.encap == XFRM_MODE_TRANSPORT) {
+               __be16 frag;
+
+               ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &proto, &frag);
+       }
+
        esph = ip_esp_hdr(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;
 
@@ -166,23 +171,31 @@ static struct sk_buff *xfrm6_beet_gso_segment(struct xfrm_state *x,
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        const struct net_offload *ops;
-       int proto = xo->proto;
+       u8 proto = xo->proto;
 
        skb->transport_header += x->props.header_len;
 
-       if (proto == IPPROTO_BEETPH) {
-               struct ip_beet_phdr *ph = (struct ip_beet_phdr *)skb->data;
-
-               skb->transport_header += ph->hdrlen * 8;
-               proto = ph->nexthdr;
-       }
-
        if (x->sel.family != AF_INET6) {
                skb->transport_header -=
                        (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
 
+               if (proto == IPPROTO_BEETPH) {
+                       struct ip_beet_phdr *ph =
+                               (struct ip_beet_phdr *)skb->data;
+
+                       skb->transport_header += ph->hdrlen * 8;
+                       proto = ph->nexthdr;
+               } else {
+                       skb->transport_header -= IPV4_BEET_PHMAXLEN;
+               }
+
                if (proto == IPPROTO_TCP)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
+       } else {
+               __be16 frag;
+
+               skb->transport_header +=
+                       ipv6_skip_exthdr(skb, 0, &proto, &frag);
        }
 
        __skb_pull(skb, skb_transport_offset(skb));
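For IPv6 selectors, both BEET GSO paths now advance the transport header with ipv6_skip_exthdr(), which walks the extension-header chain until it reaches the transport protocol. A simplified userspace walk, modeling only hop-by-hop and destination options (the real helper also handles routing and fragment headers):

#include <stdint.h>
#include <stdio.h>

#define NEXTHDR_HOP     0
#define NEXTHDR_DEST    60
#define NEXTHDR_TCP     6

static int skip_exthdr(const uint8_t *pkt, int off, uint8_t *proto)
{
        while (*proto == NEXTHDR_HOP || *proto == NEXTHDR_DEST) {
                *proto = pkt[off];              /* next-header field */
                off += (pkt[off + 1] + 1) * 8;  /* hdr-ext-len, 8-octet units */
        }
        return off;     /* offset of the transport header */
}

int main(void)
{
        /* one 8-byte hop-by-hop header, then TCP */
        uint8_t pkt[16] = { [0] = NEXTHDR_TCP, [1] = 0 };
        uint8_t proto = NEXTHDR_HOP;
        int off = skip_exthdr(pkt, 0, &proto);

        printf("transport at %d, proto %u\n", off, proto);      /* 8, 6 */
        return 0;
}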
index 46ed567194761d32ea533c688955a052d1e58de4..20314895509cc16f60d4b6953c25b8d01ee68f53 100644 (file)
@@ -664,7 +664,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
        if (arg.filter.table_id) {
                tb = fib6_get_table(net, arg.filter.table_id);
                if (!tb) {
-                       if (arg.filter.dump_all_families)
+                       if (rtnl_msg_family(cb->nlh) != PF_INET6)
                                goto out;
 
                        NL_SET_ERR_MSG_MOD(cb->extack, "FIB table does not exist");
index 65a54d74acc1f04156c4fec3683165ee09cb04eb..1f4d20e97c07f90ae19a9b5b14a6598b38340430 100644 (file)
@@ -98,7 +98,8 @@ static void ipmr_expire_process(struct timer_list *t);
 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
 #define ip6mr_for_each_table(mrt, net) \
        list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list, \
-                               lockdep_rtnl_is_held())
+                               lockdep_rtnl_is_held() || \
+                               list_empty(&net->ipv6.mr6_tables))
 
 static struct mr_table *ip6mr_mr_table_iter(struct net *net,
                                            struct mr_table *mrt)
@@ -2502,7 +2503,7 @@ static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
 
                mrt = ip6mr_get_table(sock_net(skb->sk), filter.table_id);
                if (!mrt) {
-                       if (filter.dump_all_families)
+                       if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IP6MR)
                                return skb->len;
 
                        NL_SET_ERR_MSG_MOD(cb->extack, "MR table does not exist");
index 310cbddaa533fcefc14c8fe3566afab494b64bc0..ff847a324220bc4cac8b103640f7e1a5db374a87 100644 (file)
@@ -1385,9 +1385,18 @@ static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
        }
        ip6_rt_copy_init(pcpu_rt, res);
        pcpu_rt->rt6i_flags |= RTF_PCPU;
+
+       if (f6i->nh)
+               pcpu_rt->sernum = rt_genid_ipv6(dev_net(dev));
+
        return pcpu_rt;
 }
 
+static bool rt6_is_valid(const struct rt6_info *rt6)
+{
+       return rt6->sernum == rt_genid_ipv6(dev_net(rt6->dst.dev));
+}
+
 /* It should be called with rcu_read_lock() acquired */
 static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
 {
@@ -1395,6 +1404,19 @@ static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
 
        pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu);
 
+       if (pcpu_rt && pcpu_rt->sernum && !rt6_is_valid(pcpu_rt)) {
+               struct rt6_info *prev, **p;
+
+               p = this_cpu_ptr(res->nh->rt6i_pcpu);
+               prev = xchg(p, NULL);
+               if (prev) {
+                       dst_dev_put(&prev->dst);
+                       dst_release(&prev->dst);
+               }
+
+               pcpu_rt = NULL;
+       }
+
        return pcpu_rt;
 }
 
@@ -2593,6 +2615,9 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
 
        rt = container_of(dst, struct rt6_info, dst);
 
+       if (rt->sernum)
+               return rt6_is_valid(rt) ? dst : NULL;
+
        rcu_read_lock();
 
        /* All IPV6 dsts are created with ->obsolete set to the value
@@ -2697,8 +2722,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
        const struct in6_addr *daddr, *saddr;
        struct rt6_info *rt6 = (struct rt6_info *)dst;
 
-       if (dst_metric_locked(dst, RTAX_MTU))
-               return;
+       /* Note: do *NOT* check dst_metric_locked(dst, RTAX_MTU):
+        * IPv6 pmtu discovery isn't optional, so 'mtu lock' cannot disable it.
+        * [see also comment in rt6_mtu_change_route()]
+        */
 
        if (iph) {
                daddr = &iph->daddr;
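The sernum additions give cached per-cpu routes a generation stamp: a pcpu rt6_info created from a nexthop object records the namespace genid, and later lookups discard it once the genid has moved on. A tiny model of that invalidation scheme, names illustrative:

#include <stdio.h>

static int net_genid = 1;       /* bumped when shared state changes */

struct cached_route { int sernum; int dest; };

static int route_is_valid(const struct cached_route *rt)
{
        return rt->sernum == net_genid;
}

int main(void)
{
        struct cached_route rt = { .sernum = net_genid, .dest = 42 };

        printf("valid=%d\n", route_is_valid(&rt));      /* 1 */
        net_genid++;            /* e.g. a nexthop object was replaced */
        printf("valid=%d\n", route_is_valid(&rt));      /* 0: recreate */
        return 0;
}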
index 4c7e0a27fa9cb588ee43fb81f5b2683271bf1754..37b434293bda3bb1986e4c0c09362473c11db179 100644 (file)
@@ -27,8 +27,9 @@
 
 bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len)
 {
-       int trailing;
        unsigned int tlv_offset;
+       int max_last_entry;
+       int trailing;
 
        if (srh->type != IPV6_SRCRT_TYPE_4)
                return false;
@@ -36,7 +37,12 @@ bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len)
        if (((srh->hdrlen + 1) << 3) != len)
                return false;
 
-       if (srh->segments_left > srh->first_segment)
+       max_last_entry = (srh->hdrlen / 2) - 1;
+
+       if (srh->first_segment > max_last_entry)
+               return false;
+
+       if (srh->segments_left > srh->first_segment + 1)
                return false;
 
        tlv_offset = sizeof(*srh) + ((srh->first_segment + 1) << 4);
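The new SRH bounds come straight from the layout in RFC 8754: hdrlen counts 8-octet units after the first 8 bytes and each 128-bit segment occupies two of them, so the largest valid Last Entry index is (hdrlen / 2) - 1, and Segments Left may be at most first_segment + 1. The arithmetic, worked for a two-segment SRH:

#include <stdio.h>

int main(void)
{
        int hdrlen = 4;                         /* two 16-byte segments */
        int max_last_entry = (hdrlen / 2) - 1;  /* = 1 */
        int first_segment = 1;                  /* Last Entry field */

        printf("max_last_entry=%d\n", max_last_entry);
        printf("first_segment ok: %d\n", first_segment <= max_last_entry);
        printf("segments_left limit: %d\n", first_segment + 1);
        return 0;
}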
index fcb53ed1c4fb98de3d60c52542e4c4260582bf3a..6d7ef78c88af059a4cbfb5d89f32ad6d1babfe74 100644 (file)
@@ -1458,6 +1458,9 @@ static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
        if (sk->sk_type != SOCK_DGRAM)
                return -EPROTONOSUPPORT;
 
+       if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
+               return -EPROTONOSUPPORT;
+
        if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) ||
            (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP))
                return -EPROTONOSUPPORT;
index 0d7c887a2b75db65afba7955a2bf9572a6a37786..955662a6dee754478da0f8ac95d41a787339242b 100644 (file)
@@ -20,7 +20,6 @@
 #include <net/icmp.h>
 #include <net/udp.h>
 #include <net/inet_common.h>
-#include <net/inet_hashtables.h>
 #include <net/tcp_states.h>
 #include <net/protocol.h>
 #include <net/xfrm.h>
@@ -209,15 +208,31 @@ discard:
        return 0;
 }
 
-static int l2tp_ip_open(struct sock *sk)
+static int l2tp_ip_hash(struct sock *sk)
 {
-       /* Prevent autobind. We don't have ports. */
-       inet_sk(sk)->inet_num = IPPROTO_L2TP;
+       if (sk_unhashed(sk)) {
+               write_lock_bh(&l2tp_ip_lock);
+               sk_add_node(sk, &l2tp_ip_table);
+               write_unlock_bh(&l2tp_ip_lock);
+       }
+       return 0;
+}
 
+static void l2tp_ip_unhash(struct sock *sk)
+{
+       if (sk_unhashed(sk))
+               return;
        write_lock_bh(&l2tp_ip_lock);
-       sk_add_node(sk, &l2tp_ip_table);
+       sk_del_node_init(sk);
        write_unlock_bh(&l2tp_ip_lock);
+}
+
+static int l2tp_ip_open(struct sock *sk)
+{
+       /* Prevent autobind. We don't have ports. */
+       inet_sk(sk)->inet_num = IPPROTO_L2TP;
 
+       l2tp_ip_hash(sk);
        return 0;
 }
 
@@ -594,8 +609,8 @@ static struct proto l2tp_ip_prot = {
        .sendmsg           = l2tp_ip_sendmsg,
        .recvmsg           = l2tp_ip_recvmsg,
        .backlog_rcv       = l2tp_ip_backlog_recv,
-       .hash              = inet_hash,
-       .unhash            = inet_unhash,
+       .hash              = l2tp_ip_hash,
+       .unhash            = l2tp_ip_unhash,
        .obj_size          = sizeof(struct l2tp_ip_sock),
 #ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ip_setsockopt,
index d148766f40d117c50fc28092173d3686428d1dfc..0fa694bd3f6a992518cab05feb8922fbf94d9829 100644 (file)
@@ -20,8 +20,6 @@
 #include <net/icmp.h>
 #include <net/udp.h>
 #include <net/inet_common.h>
-#include <net/inet_hashtables.h>
-#include <net/inet6_hashtables.h>
 #include <net/tcp_states.h>
 #include <net/protocol.h>
 #include <net/xfrm.h>
@@ -222,15 +220,31 @@ discard:
        return 0;
 }
 
-static int l2tp_ip6_open(struct sock *sk)
+static int l2tp_ip6_hash(struct sock *sk)
 {
-       /* Prevent autobind. We don't have ports. */
-       inet_sk(sk)->inet_num = IPPROTO_L2TP;
+       if (sk_unhashed(sk)) {
+               write_lock_bh(&l2tp_ip6_lock);
+               sk_add_node(sk, &l2tp_ip6_table);
+               write_unlock_bh(&l2tp_ip6_lock);
+       }
+       return 0;
+}
 
+static void l2tp_ip6_unhash(struct sock *sk)
+{
+       if (sk_unhashed(sk))
+               return;
        write_lock_bh(&l2tp_ip6_lock);
-       sk_add_node(sk, &l2tp_ip6_table);
+       sk_del_node_init(sk);
        write_unlock_bh(&l2tp_ip6_lock);
+}
+
+static int l2tp_ip6_open(struct sock *sk)
+{
+       /* Prevent autobind. We don't have ports. */
+       inet_sk(sk)->inet_num = IPPROTO_L2TP;
 
+       l2tp_ip6_hash(sk);
        return 0;
 }
 
@@ -728,8 +742,8 @@ static struct proto l2tp_ip6_prot = {
        .sendmsg           = l2tp_ip6_sendmsg,
        .recvmsg           = l2tp_ip6_recvmsg,
        .backlog_rcv       = l2tp_ip6_backlog_recv,
-       .hash              = inet6_hash,
-       .unhash            = inet_unhash,
+       .hash              = l2tp_ip6_hash,
+       .unhash            = l2tp_ip6_unhash,
        .obj_size          = sizeof(struct l2tp_ip6_sock),
 #ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ipv6_setsockopt,
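Both l2tp_ip variants get private hash/unhash callbacks; since these portless sockets stash the protocol number in inet_num, the port-oriented inet hash helpers are a poor fit, and the sockets live on a protocol-private list instead. A userspace stand-in for the guarded add-to-table pattern, illustrative only:

#include <pthread.h>
#include <stdio.h>

struct sock { struct sock *next; int hashed; };

static pthread_rwlock_t tbl_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct sock *table;

static void proto_hash(struct sock *sk)
{
        if (sk->hashed)         /* mirrors the sk_unhashed() guard */
                return;
        pthread_rwlock_wrlock(&tbl_lock);
        sk->next = table;
        table = sk;
        sk->hashed = 1;
        pthread_rwlock_unlock(&tbl_lock);
}

int main(void)
{
        struct sock a = { 0 };

        proto_hash(&a);
        proto_hash(&a);         /* second call is a no-op */
        printf("hashed=%d\n", a.hashed);        /* 1 */
        return 0;
}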
index 38a0383dfbcfaee991f9bbc05cc5d5dfabd76a90..aa5150929996d609ccf8075465e12c70dbd9fc18 100644 (file)
@@ -1103,7 +1103,14 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
        mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, ifmsh->sn,
                               target_flags, mpath->dst, mpath->sn, da, 0,
                               ttl, lifetime, 0, ifmsh->preq_id++, sdata);
+
+       spin_lock_bh(&mpath->state_lock);
+       if (mpath->flags & MESH_PATH_DELETED) {
+               spin_unlock_bh(&mpath->state_lock);
+               goto enddiscovery;
+       }
        mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
+       spin_unlock_bh(&mpath->state_lock);
 
 enddiscovery:
        rcu_read_unlock();
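The mesh hunk closes a race: the path could be deleted between sending the PREQ and re-arming its discovery timer, so the re-arm must re-check MESH_PATH_DELETED under state_lock. A compact model of check-under-lock-before-rearm, with a mutex standing in for the bh spinlock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct mpath {
        pthread_mutex_t state_lock;
        bool deleted;
        bool timer_armed;
};

static void rearm_discovery_timer(struct mpath *m)
{
        pthread_mutex_lock(&m->state_lock);
        if (!m->deleted)        /* the fix: decide under the lock */
                m->timer_armed = true;
        pthread_mutex_unlock(&m->state_lock);
}

int main(void)
{
        struct mpath m = { PTHREAD_MUTEX_INITIALIZER, .deleted = true };

        rearm_discovery_timer(&m);
        printf("armed=%d\n", m.timer_armed);    /* 0: no stale timer */
        return 0;
}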
index c151628bd4161ac937de0bcd29d038123d03bc9c..0f5a414a936632d16a4647df26472a735611e5ce 100644 (file)
@@ -47,8 +47,6 @@ void mptcp_crypto_key_sha(u64 key, u32 *token, u64 *idsn)
 void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac)
 {
        u8 input[SHA256_BLOCK_SIZE + SHA256_DIGEST_SIZE];
-       __be32 mptcp_hashed_key[SHA256_DIGEST_WORDS];
-       __be32 *hash_out = (__force __be32 *)hmac;
        struct sha256_state state;
        u8 key1be[8];
        u8 key2be[8];
@@ -86,11 +84,7 @@ void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac)
 
        sha256_init(&state);
        sha256_update(&state, input, SHA256_BLOCK_SIZE + SHA256_DIGEST_SIZE);
-       sha256_final(&state, (u8 *)mptcp_hashed_key);
-
-       /* takes only first 160 bits */
-       for (i = 0; i < 5; i++)
-               hash_out[i] = mptcp_hashed_key[i];
+       sha256_final(&state, (u8 *)hmac);
 }
 
 #ifdef CONFIG_MPTCP_HMAC_TEST
@@ -101,29 +95,29 @@ struct test_cast {
 };
 
 /* we can't reuse RFC 4231 test vectors, as we have constraints on the
- * input and key size, and we truncate the output.
+ * input and key size.
  */
 static struct test_cast tests[] = {
        {
                .key = "0b0b0b0b0b0b0b0b",
                .msg = "48692054",
-               .result = "8385e24fb4235ac37556b6b886db106284a1da67",
+               .result = "8385e24fb4235ac37556b6b886db106284a1da671699f46db1f235ec622dcafa",
        },
        {
                .key = "aaaaaaaaaaaaaaaa",
                .msg = "dddddddd",
-               .result = "2c5e219164ff1dca1c4a92318d847bb6b9d44492",
+               .result = "2c5e219164ff1dca1c4a92318d847bb6b9d44492984e1eb71aff9022f71046e9",
        },
        {
                .key = "0102030405060708",
                .msg = "cdcdcdcd",
-               .result = "e73b9ba9969969cefb04aa0d6df18ec2fcc075b6",
+               .result = "e73b9ba9969969cefb04aa0d6df18ec2fcc075b6f23b4d8c4da736a5dbbc6e7d",
        },
 };
 
 static int __init test_mptcp_crypto(void)
 {
-       char hmac[20], hmac_hex[41];
+       char hmac[32], hmac_hex[65];
        u32 nonce1, nonce2;
        u64 key1, key2;
        u8 msg[8];
@@ -140,11 +134,11 @@ static int __init test_mptcp_crypto(void)
                put_unaligned_be32(nonce2, &msg[4]);
 
                mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
-               for (j = 0; j < 20; ++j)
+               for (j = 0; j < 32; ++j)
                        sprintf(&hmac_hex[j << 1], "%02x", hmac[j] & 0xff);
-               hmac_hex[40] = 0;
+               hmac_hex[64] = 0;
 
-               if (memcmp(hmac_hex, tests[i].result, 40))
+               if (memcmp(hmac_hex, tests[i].result, 64))
                        pr_err("test %d failed, got %s expected %s", i,
                               hmac_hex, tests[i].result);
                else
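With the full 32-byte SHA-256 HMAC kept, callers that need a 64-bit value (the ADD_ADDR hunks further down) take the rightmost 64 bits, i.e. the last 8 digest bytes read big-endian. A standalone check of that truncation:

#include <stdint.h>
#include <stdio.h>

#define DIGEST_SIZE 32

static uint64_t rightmost64(const uint8_t *hmac)
{
        const uint8_t *p = hmac + DIGEST_SIZE - sizeof(uint64_t);
        uint64_t v = 0;

        for (int i = 0; i < 8; i++)     /* get_unaligned_be64() equivalent */
                v = (v << 8) | p[i];
        return v;
}

int main(void)
{
        uint8_t digest[DIGEST_SIZE];

        for (int i = 0; i < DIGEST_SIZE; i++)
                digest[i] = i;          /* stand-in for a real HMAC */
        printf("0x%016llx\n", (unsigned long long)rightmost64(digest));
        /* prints 0x18191a1b1c1d1e1f: bytes 24..31 */
        return 0;
}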
index 4a7c467b99db460387cb8f88e8a33a277229b49d..7793b6011fa7bb5e241074683e567de8b8896a5d 100644 (file)
@@ -7,6 +7,7 @@
 #define pr_fmt(fmt) "MPTCP: " fmt
 
 #include <linux/kernel.h>
+#include <crypto/sha.h>
 #include <net/tcp.h>
 #include <net/mptcp.h>
 #include "protocol.h"
@@ -16,10 +17,10 @@ static bool mptcp_cap_flag_sha256(u8 flags)
        return (flags & MPTCP_CAP_FLAG_MASK) == MPTCP_CAP_HMAC_SHA256;
 }
 
-void mptcp_parse_option(const struct sk_buff *skb, const unsigned char *ptr,
-                       int opsize, struct tcp_options_received *opt_rx)
+static void mptcp_parse_option(const struct sk_buff *skb,
+                              const unsigned char *ptr, int opsize,
+                              struct mptcp_options_received *mp_opt)
 {
-       struct mptcp_options_received *mp_opt = &opt_rx->mptcp;
        u8 subtype = *ptr >> 4;
        int expected_opsize;
        u8 version;
@@ -283,12 +284,20 @@ void mptcp_parse_option(const struct sk_buff *skb, const unsigned char *ptr,
 }
 
 void mptcp_get_options(const struct sk_buff *skb,
-                      struct tcp_options_received *opt_rx)
+                      struct mptcp_options_received *mp_opt)
 {
-       const unsigned char *ptr;
        const struct tcphdr *th = tcp_hdr(skb);
-       int length = (th->doff * 4) - sizeof(struct tcphdr);
+       const unsigned char *ptr;
+       int length;
+
+       /* initialize option status */
+       mp_opt->mp_capable = 0;
+       mp_opt->mp_join = 0;
+       mp_opt->add_addr = 0;
+       mp_opt->rm_addr = 0;
+       mp_opt->dss = 0;
 
+       length = (th->doff * 4) - sizeof(struct tcphdr);
        ptr = (const unsigned char *)(th + 1);
 
        while (length > 0) {
@@ -308,7 +317,7 @@ void mptcp_get_options(const struct sk_buff *skb,
                        if (opsize > length)
                                return; /* don't parse partial options */
                        if (opcode == TCPOPT_MPTCP)
-                               mptcp_parse_option(skb, ptr, opsize, opt_rx);
+                               mptcp_parse_option(skb, ptr, opsize, mp_opt);
                        ptr += opsize - 2;
                        length -= opsize;
                }
@@ -344,28 +353,6 @@ bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
        return false;
 }
 
-void mptcp_rcv_synsent(struct sock *sk)
-{
-       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
-       struct tcp_sock *tp = tcp_sk(sk);
-
-       if (subflow->request_mptcp && tp->rx_opt.mptcp.mp_capable) {
-               subflow->mp_capable = 1;
-               subflow->can_ack = 1;
-               subflow->remote_key = tp->rx_opt.mptcp.sndr_key;
-               pr_debug("subflow=%p, remote_key=%llu", subflow,
-                        subflow->remote_key);
-       } else if (subflow->request_join && tp->rx_opt.mptcp.mp_join) {
-               subflow->mp_join = 1;
-               subflow->thmac = tp->rx_opt.mptcp.thmac;
-               subflow->remote_nonce = tp->rx_opt.mptcp.nonce;
-               pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow,
-                        subflow->thmac, subflow->remote_nonce);
-       } else if (subflow->request_mptcp) {
-               tcp_sk(sk)->is_mptcp = 0;
-       }
-}
-
 /* MP_JOIN client subflow must wait for 4th ack before sending any data:
  * TCP can't schedule delack timer before the subflow is fully established.
  * MPTCP uses the delack timer to do 3rd ack retransmissions
@@ -549,7 +536,7 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
 static u64 add_addr_generate_hmac(u64 key1, u64 key2, u8 addr_id,
                                  struct in_addr *addr)
 {
-       u8 hmac[MPTCP_ADDR_HMAC_LEN];
+       u8 hmac[SHA256_DIGEST_SIZE];
        u8 msg[7];
 
        msg[0] = addr_id;
@@ -559,14 +546,14 @@ static u64 add_addr_generate_hmac(u64 key1, u64 key2, u8 addr_id,
 
        mptcp_crypto_hmac_sha(key1, key2, msg, 7, hmac);
 
-       return get_unaligned_be64(hmac);
+       return get_unaligned_be64(&hmac[SHA256_DIGEST_SIZE - sizeof(u64)]);
 }
 
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
 static u64 add_addr6_generate_hmac(u64 key1, u64 key2, u8 addr_id,
                                   struct in6_addr *addr)
 {
-       u8 hmac[MPTCP_ADDR_HMAC_LEN];
+       u8 hmac[SHA256_DIGEST_SIZE];
        u8 msg[19];
 
        msg[0] = addr_id;
@@ -576,7 +563,7 @@ static u64 add_addr6_generate_hmac(u64 key1, u64 key2, u8 addr_id,
 
        mptcp_crypto_hmac_sha(key1, key2, msg, 19, hmac);
 
-       return get_unaligned_be64(hmac);
+       return get_unaligned_be64(&hmac[SHA256_DIGEST_SIZE - sizeof(u64)]);
 }
 #endif
 
@@ -709,7 +696,7 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk,
        if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1)
                return subflow->mp_capable;
 
-       if (mp_opt->use_ack) {
+       if (mp_opt->dss && mp_opt->use_ack) {
                /* subflows are fully established as soon as we get any
                 * additional ack.
                 */
@@ -717,8 +704,6 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk,
                goto fully_established;
        }
 
-       WARN_ON_ONCE(subflow->can_ack);
-
        /* If the first established packet does not contain MP_CAPABLE + data
         * then fallback to TCP
         */
@@ -728,6 +713,8 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk,
                return false;
        }
 
+       if (unlikely(!READ_ONCE(msk->pm.server_side)))
+               pr_warn_once("bogus mpc option on established client sk");
        subflow->fully_established = 1;
        subflow->remote_key = mp_opt->sndr_key;
        subflow->can_ack = 1;
@@ -819,41 +806,41 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,
 {
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct mptcp_sock *msk = mptcp_sk(subflow->conn);
-       struct mptcp_options_received *mp_opt;
+       struct mptcp_options_received mp_opt;
        struct mptcp_ext *mpext;
 
-       mp_opt = &opt_rx->mptcp;
-       if (!check_fully_established(msk, sk, subflow, skb, mp_opt))
+       mptcp_get_options(skb, &mp_opt);
+       if (!check_fully_established(msk, sk, subflow, skb, &mp_opt))
                return;
 
-       if (mp_opt->add_addr && add_addr_hmac_valid(msk, mp_opt)) {
+       if (mp_opt.add_addr && add_addr_hmac_valid(msk, &mp_opt)) {
                struct mptcp_addr_info addr;
 
-               addr.port = htons(mp_opt->port);
-               addr.id = mp_opt->addr_id;
-               if (mp_opt->family == MPTCP_ADDR_IPVERSION_4) {
+               addr.port = htons(mp_opt.port);
+               addr.id = mp_opt.addr_id;
+               if (mp_opt.family == MPTCP_ADDR_IPVERSION_4) {
                        addr.family = AF_INET;
-                       addr.addr = mp_opt->addr;
+                       addr.addr = mp_opt.addr;
                }
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
-               else if (mp_opt->family == MPTCP_ADDR_IPVERSION_6) {
+               else if (mp_opt.family == MPTCP_ADDR_IPVERSION_6) {
                        addr.family = AF_INET6;
-                       addr.addr6 = mp_opt->addr6;
+                       addr.addr6 = mp_opt.addr6;
                }
 #endif
-               if (!mp_opt->echo)
+               if (!mp_opt.echo)
                        mptcp_pm_add_addr_received(msk, &addr);
-               mp_opt->add_addr = 0;
+               mp_opt.add_addr = 0;
        }
 
-       if (!mp_opt->dss)
+       if (!mp_opt.dss)
                return;
 
        /* we can't wait for recvmsg() to update the ack_seq, otherwise
         * monodirectional flows will get stuck
         */
-       if (mp_opt->use_ack)
-               update_una(msk, mp_opt);
+       if (mp_opt.use_ack)
+               update_una(msk, &mp_opt);
 
        mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
        if (!mpext)
@@ -861,8 +848,8 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,
 
        memset(mpext, 0, sizeof(*mpext));
 
-       if (mp_opt->use_map) {
-               if (mp_opt->mpc_map) {
+       if (mp_opt.use_map) {
+               if (mp_opt.mpc_map) {
                        /* this is an MP_CAPABLE carrying MPTCP data
                         * we know this map the first chunk of data
                         */
@@ -872,13 +859,14 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,
                        mpext->subflow_seq = 1;
                        mpext->dsn64 = 1;
                        mpext->mpc_map = 1;
+                       mpext->data_fin = 0;
                } else {
-                       mpext->data_seq = mp_opt->data_seq;
-                       mpext->subflow_seq = mp_opt->subflow_seq;
-                       mpext->dsn64 = mp_opt->dsn64;
-                       mpext->data_fin = mp_opt->data_fin;
+                       mpext->data_seq = mp_opt.data_seq;
+                       mpext->subflow_seq = mp_opt.subflow_seq;
+                       mpext->dsn64 = mp_opt.dsn64;
+                       mpext->data_fin = mp_opt.data_fin;
                }
-               mpext->data_len = mp_opt->data_len;
+               mpext->data_len = mp_opt.data_len;
                mpext->use_map = 1;
        }
 }
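The options rework parses each skb into a caller-owned, freshly initialized mptcp_options_received instead of scribbling into the shared tcp_sock option state. A toy parser showing just that ownership shape (it only recognizes option kind 30, TCPOPT_MPTCP, and ignores real TCP option framing):

#include <stdio.h>

struct opts_received {
        unsigned mp_capable:1, mp_join:1, add_addr:1, rm_addr:1, dss:1;
};

static void get_options(const unsigned char *opts, int len,
                        struct opts_received *out)
{
        *out = (struct opts_received){ 0 };     /* clean slate per parse */
        for (int i = 0; i < len; i++)
                if (opts[i] == 30)
                        out->dss = 1;   /* toy: real code decodes subtypes */
}

int main(void)
{
        struct opts_received mp_opt;
        unsigned char opts[] = { 1, 1, 30 };    /* NOP, NOP, MPTCP */

        get_options(opts, sizeof(opts), &mp_opt);
        printf("dss=%u add_addr=%u\n", mp_opt.dss, mp_opt.add_addr); /* 1 0 */
        return 0;
}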
index b22a63ba2348ae60e7f509ee7772faf19969660e..34dd0e278a82976eac7e7f9ba8ffe6cf8062c78b 100644 (file)
@@ -954,7 +954,8 @@ fallback:
 
                pr_debug("block timeout %ld", timeo);
                mptcp_wait_data(sk, &timeo);
-               if (unlikely(__mptcp_tcp_fallback(msk)))
+               ssock = __mptcp_tcp_fallback(msk);
+               if (unlikely(ssock))
                        goto fallback;
        }
 
@@ -1262,11 +1263,14 @@ static void mptcp_close(struct sock *sk, long timeout)
 
        lock_sock(sk);
 
-       mptcp_token_destroy(msk->token);
        inet_sk_state_store(sk, TCP_CLOSE);
 
-       __mptcp_flush_join_list(msk);
-
+       /* be sure to always acquire the join list lock, to sync vs
+        * mptcp_finish_join().
+        */
+       spin_lock_bh(&msk->join_list_lock);
+       list_splice_tail_init(&msk->join_list, &msk->conn_list);
+       spin_unlock_bh(&msk->join_list_lock);
        list_splice_init(&msk->conn_list, &conn_list);
 
        data_fin_tx_seq = msk->write_seq;
@@ -1316,11 +1320,12 @@ static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
 
 static int mptcp_disconnect(struct sock *sk, int flags)
 {
-       lock_sock(sk);
-       __mptcp_clear_xmit(sk);
-       release_sock(sk);
-       mptcp_cancel_work(sk);
-       return tcp_disconnect(sk, flags);
+       /* Should never be called.
+        * inet_stream_connect() calls ->disconnect, but that
+        * refers to the subflow socket, not the mptcp one.
+        */
+       WARN_ON_ONCE(1);
+       return 0;
 }
 
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
@@ -1333,7 +1338,7 @@ static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
 #endif
 
 struct sock *mptcp_sk_clone(const struct sock *sk,
-                           const struct tcp_options_received *opt_rx,
+                           const struct mptcp_options_received *mp_opt,
                            struct request_sock *req)
 {
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
@@ -1372,9 +1377,9 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
 
        msk->write_seq = subflow_req->idsn + 1;
        atomic64_set(&msk->snd_una, msk->write_seq);
-       if (opt_rx->mptcp.mp_capable) {
+       if (mp_opt->mp_capable) {
                msk->can_ack = true;
-               msk->remote_key = opt_rx->mptcp.sndr_key;
+               msk->remote_key = mp_opt->sndr_key;
                mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq);
                ack_seq++;
                msk->ack_seq = ack_seq;
@@ -1455,6 +1460,7 @@ static void mptcp_destroy(struct sock *sk)
 {
        struct mptcp_sock *msk = mptcp_sk(sk);
 
+       mptcp_token_destroy(msk->token);
        if (msk->cached_ext)
                __skb_ext_put(msk->cached_ext);
 
@@ -1621,20 +1627,30 @@ bool mptcp_finish_join(struct sock *sk)
        if (!msk->pm.server_side)
                return true;
 
-       /* passive connection, attach to msk socket */
+       if (!mptcp_pm_allow_new_subflow(msk))
+               return false;
+
+       /* active connections are already on conn_list, and we can't acquire
+        * the msk lock here.
+        * Use the join list lock as a synchronization point and double-check
+        * the msk status to avoid racing with mptcp_close().
+        */
+       spin_lock_bh(&msk->join_list_lock);
+       ret = inet_sk_state_load(parent) == TCP_ESTABLISHED;
+       if (ret && !WARN_ON_ONCE(!list_empty(&subflow->node)))
+               list_add_tail(&subflow->node, &msk->join_list);
+       spin_unlock_bh(&msk->join_list_lock);
+       if (!ret)
+               return false;
+
+       /* attach to the msk socket only after we are sure it will deal with
+        * us at close time
+        */
        parent_sock = READ_ONCE(parent->sk_socket);
        if (parent_sock && !sk->sk_socket)
                mptcp_sock_graft(sk, parent_sock);
-
-       ret = mptcp_pm_allow_new_subflow(msk);
-       if (ret) {
-               /* active connections are already on conn_list */
-               spin_lock_bh(&msk->join_list_lock);
-               if (!WARN_ON_ONCE(!list_empty(&subflow->node)))
-                       list_add_tail(&subflow->node, &msk->join_list);
-               spin_unlock_bh(&msk->join_list_lock);
-       }
-       return ret;
+       subflow->map_seq = msk->ack_seq;
+       return true;
 }
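
The rewritten join path above admits the subflow only after re-checking the parent state under the same lock the close path holds while splicing the join list away. A userspace sketch of this double-check-under-lock admission; the pthread mutex stands in for the bh spinlock and the types are illustrative:

#include <pthread.h>
#include <stdbool.h>

struct subflow { struct subflow *next; };

enum pstate { P_ESTABLISHED, P_CLOSED };

struct parent {
	pthread_mutex_t join_lock;   /* same lock the close path takes */
	enum pstate state;           /* set to P_CLOSED before the splice */
	struct subflow *join_list;
};

/* Admit a new subflow only while the parent is still established: the
 * state is re-checked under join_lock, so no subflow can slip onto the
 * list after the close path has drained it.
 */
static bool admit_subflow(struct parent *p, struct subflow *n)
{
	bool ok;

	pthread_mutex_lock(&p->join_lock);
	ok = (p->state == P_ESTABLISHED);
	if (ok) {
		n->next = p->join_list;
		p->join_list = n;
	}
	pthread_mutex_unlock(&p->join_lock);
	return ok;
}
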
 
 bool mptcp_sk_is_subflow(const struct sock *sk)
@@ -1708,6 +1724,14 @@ static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
        int err;
 
        lock_sock(sock->sk);
+       if (sock->state != SS_UNCONNECTED && msk->subflow) {
+               /* pending connection or invalid state: let the existing
+                * subflow cope with that
+                */
+               ssock = msk->subflow;
+               goto do_connect;
+       }
+
        ssock = __mptcp_socket_create(msk, TCP_SYN_SENT);
        if (IS_ERR(ssock)) {
                err = PTR_ERR(ssock);
@@ -1722,9 +1746,17 @@ static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
                mptcp_subflow_ctx(ssock->sk)->request_mptcp = 0;
 #endif
 
+do_connect:
        err = ssock->ops->connect(ssock, uaddr, addr_len, flags);
-       inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
-       mptcp_copy_inaddrs(sock->sk, ssock->sk);
+       sock->state = ssock->state;
+
+       /* on successful connect, the msk state will be moved to established by
+        * subflow_finish_connect()
+        */
+       if (!err || err == EINPROGRESS)
+               mptcp_copy_inaddrs(sock->sk, ssock->sk);
+       else
+               inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
 
 unlock:
        release_sock(sock->sk);
index a2b3048037d088cbf17d8cccb383e107d332cb48..d0803dfb81082352e71e61a50d5b9fd58af5d25d 100644 (file)
@@ -81,7 +81,6 @@
 
 /* MPTCP ADD_ADDR flags */
 #define MPTCP_ADDR_ECHO                BIT(0)
-#define MPTCP_ADDR_HMAC_LEN    20
 #define MPTCP_ADDR_IPVERSION_4 4
 #define MPTCP_ADDR_IPVERSION_6 6
 
 #define MPTCP_WORK_RTX         2
 #define MPTCP_WORK_EOF         3
 
+struct mptcp_options_received {
+       u64     sndr_key;
+       u64     rcvr_key;
+       u64     data_ack;
+       u64     data_seq;
+       u32     subflow_seq;
+       u16     data_len;
+       u16     mp_capable : 1,
+               mp_join : 1,
+               dss : 1,
+               add_addr : 1,
+               rm_addr : 1,
+               family : 4,
+               echo : 1,
+               backup : 1;
+       u32     token;
+       u32     nonce;
+       u64     thmac;
+       u8      hmac[20];
+       u8      join_id;
+       u8      use_map:1,
+               dsn64:1,
+               data_fin:1,
+               use_ack:1,
+               ack64:1,
+               mpc_map:1,
+               __unused:2;
+       u8      addr_id;
+       u8      rm_id;
+       union {
+               struct in_addr  addr;
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+               struct in6_addr addr6;
+#endif
+       };
+       u64     ahmac;
+       u16     port;
+};
+
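
With struct mptcp_options_received in place, option parsing no longer rides along inside tcp_options_received. A sketch of the resulting calling convention, leaning on the mptcp_get_options() prototype declared further down in this header; it is illustrative only, not a buildable unit:

/* Hypothetical caller: parse once into an on-stack struct, then read
 * plain fields instead of chasing a pointer into tcp_options_received.
 */
static void example_rx(const struct sk_buff *skb)
{
	struct mptcp_options_received mp_opt;

	mptcp_get_options(skb, &mp_opt);
	if (mp_opt.mp_capable)
		pr_debug("sndr_key=%llu", mp_opt.sndr_key);
}
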
 static inline __be32 mptcp_option(u8 subopt, u8 len, u8 nib, u8 field)
 {
        return htonl((TCPOPT_MPTCP << 24) | (len << 16) | (subopt << 12) |
@@ -331,10 +369,10 @@ int mptcp_proto_v6_init(void);
 #endif
 
 struct sock *mptcp_sk_clone(const struct sock *sk,
-                           const struct tcp_options_received *opt_rx,
+                           const struct mptcp_options_received *mp_opt,
                            struct request_sock *req);
 void mptcp_get_options(const struct sk_buff *skb,
-                      struct tcp_options_received *opt_rx);
+                      struct mptcp_options_received *mp_opt);
 
 void mptcp_finish_connect(struct sock *sk);
 void mptcp_data_ready(struct sock *sk, struct sock *ssk);
index fabd06f2ff455e76cf281c4e6fc2d41b144ac2d9..8968b2c065e7cf7983fb9cb31497a39353e325b4 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <crypto/algapi.h>
+#include <crypto/sha.h>
 #include <net/sock.h>
 #include <net/inet_common.h>
 #include <net/inet_hashtables.h>
@@ -89,7 +90,7 @@ static bool subflow_token_join_request(struct request_sock *req,
                                       const struct sk_buff *skb)
 {
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
-       u8 hmac[MPTCPOPT_HMAC_LEN];
+       u8 hmac[SHA256_DIGEST_SIZE];
        struct mptcp_sock *msk;
        int local_id;
 
@@ -124,12 +125,11 @@ static void subflow_init_req(struct request_sock *req,
 {
        struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
-       struct tcp_options_received rx_opt;
+       struct mptcp_options_received mp_opt;
 
        pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);
 
-       memset(&rx_opt.mptcp, 0, sizeof(rx_opt.mptcp));
-       mptcp_get_options(skb, &rx_opt);
+       mptcp_get_options(skb, &mp_opt);
 
        subflow_req->mp_capable = 0;
        subflow_req->mp_join = 0;
@@ -142,16 +142,16 @@ static void subflow_init_req(struct request_sock *req,
                return;
 #endif
 
-       if (rx_opt.mptcp.mp_capable) {
+       if (mp_opt.mp_capable) {
                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);
 
-               if (rx_opt.mptcp.mp_join)
+               if (mp_opt.mp_join)
                        return;
-       } else if (rx_opt.mptcp.mp_join) {
+       } else if (mp_opt.mp_join) {
                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
        }
 
-       if (rx_opt.mptcp.mp_capable && listener->request_mptcp) {
+       if (mp_opt.mp_capable && listener->request_mptcp) {
                int err;
 
                err = mptcp_token_new_request(req);
@@ -159,13 +159,13 @@ static void subflow_init_req(struct request_sock *req,
                        subflow_req->mp_capable = 1;
 
                subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
-       } else if (rx_opt.mptcp.mp_join && listener->request_mptcp) {
+       } else if (mp_opt.mp_join && listener->request_mptcp) {
                subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
                subflow_req->mp_join = 1;
-               subflow_req->backup = rx_opt.mptcp.backup;
-               subflow_req->remote_id = rx_opt.mptcp.join_id;
-               subflow_req->token = rx_opt.mptcp.token;
-               subflow_req->remote_nonce = rx_opt.mptcp.nonce;
+               subflow_req->backup = mp_opt.backup;
+               subflow_req->remote_id = mp_opt.join_id;
+               subflow_req->token = mp_opt.token;
+               subflow_req->remote_nonce = mp_opt.nonce;
                pr_debug("token=%u, remote_nonce=%u", subflow_req->token,
                         subflow_req->remote_nonce);
                if (!subflow_token_join_request(req, skb)) {
@@ -202,7 +202,7 @@ static void subflow_v6_init_req(struct request_sock *req,
 /* validate received truncated hmac and create hmac for third ACK */
 static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
 {
-       u8 hmac[MPTCPOPT_HMAC_LEN];
+       u8 hmac[SHA256_DIGEST_SIZE];
        u64 thmac;
 
        subflow_generate_hmac(subflow->remote_key, subflow->local_key,
@@ -221,29 +221,55 @@ static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
 static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
 {
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+       struct mptcp_options_received mp_opt;
        struct sock *parent = subflow->conn;
+       struct tcp_sock *tp = tcp_sk(sk);
 
        subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);
 
-       if (inet_sk_state_load(parent) != TCP_ESTABLISHED) {
+       if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
                inet_sk_state_store(parent, TCP_ESTABLISHED);
                parent->sk_state_change(parent);
        }
 
-       if (subflow->conn_finished || !tcp_sk(sk)->is_mptcp)
+       /* make sure we take no special action on any packet other than the syn-ack */
+       if (subflow->conn_finished)
+               return;
+
+       subflow->conn_finished = 1;
+
+       mptcp_get_options(skb, &mp_opt);
+       if (subflow->request_mptcp && mp_opt.mp_capable) {
+               subflow->mp_capable = 1;
+               subflow->can_ack = 1;
+               subflow->remote_key = mp_opt.sndr_key;
+               pr_debug("subflow=%p, remote_key=%llu", subflow,
+                        subflow->remote_key);
+       } else if (subflow->request_join && mp_opt.mp_join) {
+               subflow->mp_join = 1;
+               subflow->thmac = mp_opt.thmac;
+               subflow->remote_nonce = mp_opt.nonce;
+               pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow,
+                        subflow->thmac, subflow->remote_nonce);
+       } else if (subflow->request_mptcp) {
+               tp->is_mptcp = 0;
+       }
+
+       if (!tp->is_mptcp)
                return;
 
        if (subflow->mp_capable) {
                pr_debug("subflow=%p, remote_key=%llu", mptcp_subflow_ctx(sk),
                         subflow->remote_key);
                mptcp_finish_connect(sk);
-               subflow->conn_finished = 1;
 
                if (skb) {
                        pr_debug("synack seq=%u", TCP_SKB_CB(skb)->seq);
                        subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
                }
        } else if (subflow->mp_join) {
+               u8 hmac[SHA256_DIGEST_SIZE];
+
                pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u",
                         subflow, subflow->thmac,
                         subflow->remote_nonce);
@@ -256,7 +282,9 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
                subflow_generate_hmac(subflow->local_key, subflow->remote_key,
                                      subflow->local_nonce,
                                      subflow->remote_nonce,
-                                     subflow->hmac);
+                                     hmac);
+
+               memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);
 
                if (skb)
                        subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
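
The hunk above generates the HMAC into a stack buffer sized for the full SHA-256 digest and only then copies the truncated prefix into the option-sized subflow->hmac field, instead of writing the full digest straight into the smaller field. A userspace sketch of the same compute-then-truncate pattern; hmac_sha256() is a hypothetical stand-in for the real primitive, declared but not implemented here:

#include <stdint.h>
#include <string.h>

#define DIGEST_SIZE  32   /* full SHA-256 output */
#define OPT_HMAC_LEN 20   /* bytes that fit in the wire option */

/* Hypothetical primitive standing in for a real HMAC-SHA256 routine. */
void hmac_sha256(const uint8_t *key, size_t klen,
		 const uint8_t *msg, size_t mlen, uint8_t out[DIGEST_SIZE]);

/* Always generate the digest into a correctly sized buffer, then copy
 * only the truncated prefix into the option-sized field; writing the
 * digest straight into a 20-byte field would overflow it.
 */
static void set_option_hmac(uint8_t field[OPT_HMAC_LEN],
			    const uint8_t *key, size_t klen,
			    const uint8_t *msg, size_t mlen)
{
	uint8_t digest[DIGEST_SIZE];

	hmac_sha256(key, klen, msg, mlen, digest);
	memcpy(field, digest, OPT_HMAC_LEN);
}
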
@@ -264,7 +292,6 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
                if (!mptcp_finish_join(sk))
                        goto do_reset;
 
-               subflow->conn_finished = 1;
                MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
        } else {
 do_reset:
@@ -322,10 +349,10 @@ drop:
 
 /* validate hmac received in third ACK */
 static bool subflow_hmac_valid(const struct request_sock *req,
-                              const struct tcp_options_received *rx_opt)
+                              const struct mptcp_options_received *mp_opt)
 {
        const struct mptcp_subflow_request_sock *subflow_req;
-       u8 hmac[MPTCPOPT_HMAC_LEN];
+       u8 hmac[SHA256_DIGEST_SIZE];
        struct mptcp_sock *msk;
        bool ret;
 
@@ -339,7 +366,7 @@ static bool subflow_hmac_valid(const struct request_sock *req,
                              subflow_req->local_nonce, hmac);
 
        ret = true;
-       if (crypto_memneq(hmac, rx_opt->mptcp.hmac, sizeof(hmac)))
+       if (crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN))
                ret = false;
 
        sock_put((struct sock *)msk);
@@ -395,7 +422,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
 {
        struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
        struct mptcp_subflow_request_sock *subflow_req;
-       struct tcp_options_received opt_rx;
+       struct mptcp_options_received mp_opt;
        bool fallback_is_fatal = false;
        struct sock *new_msk = NULL;
        bool fallback = false;
@@ -403,7 +430,10 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
 
        pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
 
-       opt_rx.mptcp.mp_capable = 0;
+       /* we need a valid 'mp_capable' value later, even when the options
+        * are not parsed
+        */
+       mp_opt.mp_capable = 0;
        if (tcp_rsk(req)->is_mptcp == 0)
                goto create_child;
 
@@ -418,22 +448,21 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
                        goto create_msk;
                }
 
-               mptcp_get_options(skb, &opt_rx);
-               if (!opt_rx.mptcp.mp_capable) {
+               mptcp_get_options(skb, &mp_opt);
+               if (!mp_opt.mp_capable) {
                        fallback = true;
                        goto create_child;
                }
 
 create_msk:
-               new_msk = mptcp_sk_clone(listener->conn, &opt_rx, req);
+               new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
                if (!new_msk)
                        fallback = true;
        } else if (subflow_req->mp_join) {
                fallback_is_fatal = true;
-               opt_rx.mptcp.mp_join = 0;
-               mptcp_get_options(skb, &opt_rx);
-               if (!opt_rx.mptcp.mp_join ||
-                   !subflow_hmac_valid(req, &opt_rx)) {
+               mptcp_get_options(skb, &mp_opt);
+               if (!mp_opt.mp_join ||
+                   !subflow_hmac_valid(req, &mp_opt)) {
                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
                        return NULL;
                }
@@ -473,9 +502,9 @@ create_child:
                        /* with OoO packets we can reach here without ingress
                         * mpc option
                         */
-                       ctx->remote_key = opt_rx.mptcp.sndr_key;
-                       ctx->fully_established = opt_rx.mptcp.mp_capable;
-                       ctx->can_ack = opt_rx.mptcp.mp_capable;
+                       ctx->remote_key = mp_opt.sndr_key;
+                       ctx->fully_established = mp_opt.mp_capable;
+                       ctx->can_ack = mp_opt.mp_capable;
                } else if (ctx->mp_join) {
                        struct mptcp_sock *owner;
 
@@ -499,7 +528,7 @@ out:
        /* check for expected invariant - should never trigger, just helps
         * catch earlier subtle bugs
         */
-       WARN_ON_ONCE(*own_req && child && tcp_sk(child)->is_mptcp &&
+       WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
                     (!mptcp_subflow_ctx(child) ||
                      !mptcp_subflow_ctx(child)->conn));
        return child;
@@ -988,6 +1017,16 @@ int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
        if (err)
                return err;
 
+       /* the newly created socket really belongs to the owning MPTCP master
+        * socket, even if for additional subflows the allocation is performed
+        * by a kernel workqueue. Adjust inode references, so that the
+        * procfs/diag interfaces really show this one belonging to the correct
+        * user.
+        */
+       SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
+       SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
+       SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;
+
        subflow = mptcp_subflow_ctx(sf->sk);
        pr_debug("subflow=%p", subflow);
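
A userspace analogue of the inode adoption above: a daemon that creates a resource on behalf of a client re-labels it with the client's uid/gid so that introspection tools attribute it to the right owner. fchown() is the standard POSIX call for this:

#include <sys/stat.h>
#include <unistd.h>

/* Re-label an fd created on a client's behalf so that stat()/ls
 * attribute it to the client rather than the creating daemon.
 */
static int adopt_ownership(int fd, uid_t owner_uid, gid_t owner_gid)
{
	return fchown(fd, owner_uid, owner_gid);
}
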
 
index cd747c0962fd609c738d51f913a42ef92e1b375b..5a67f79665742c3a2907baa7acab75cdec6ea32f 100644 (file)
@@ -59,7 +59,7 @@ list_set_ktest(struct ip_set *set, const struct sk_buff *skb,
        /* Don't lookup sub-counters at all */
        opt->cmdflags &= ~IPSET_FLAG_MATCH_COUNTERS;
        if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE)
-               opt->cmdflags &= ~IPSET_FLAG_SKIP_COUNTER_UPDATE;
+               opt->cmdflags |= IPSET_FLAG_SKIP_COUNTER_UPDATE;
        list_for_each_entry_rcu(e, &map->members, list) {
                ret = ip_set_test(e->id, skb, par, opt);
                if (ret <= 0)
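
The one-line fix above flips the operator: when sub-counter updates are being skipped, the plain counter update must also be forced off (|=), not re-enabled (&= ~). A tiny sketch with illustrative flag names:

#include <stdint.h>

#define FLAG_SKIP_SUBCOUNTER (1u << 0)
#define FLAG_SKIP_COUNTER    (1u << 1)

/* Skipping sub-counter updates implies skipping the plain counter
 * update too, so the second flag is set, never cleared, here.
 */
static uint32_t normalize_cmdflags(uint32_t cmdflags)
{
	if (cmdflags & FLAG_SKIP_SUBCOUNTER)
		cmdflags |= FLAG_SKIP_COUNTER;
	return cmdflags;
}
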
index c4582eb71766a13a66b4a7d82fb18dc9a10195af..bb72ca5f3999aa49b39aea824b6db1cfdb2dd46b 100644 (file)
@@ -1519,9 +1519,9 @@ __nf_conntrack_alloc(struct net *net,
        ct->status = 0;
        ct->timeout = 0;
        write_pnet(&ct->ct_net, net);
-       memset(&ct->__nfct_init_offset[0], 0,
+       memset(&ct->__nfct_init_offset, 0,
               offsetof(struct nf_conn, proto) -
-              offsetof(struct nf_conn, __nfct_init_offset[0]));
+              offsetof(struct nf_conn, __nfct_init_offset));
 
        nf_ct_zone_add(ct, zone);
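
The memset above now takes the address of the zero-length marker member rather than indexing its element [0]; the likely motivation is that bounds-checking compilers warn about dereferencing element zero of a zero-sized array. A standalone sketch of the offsetof-delimited re-initialisation pattern (GNU C zero-length member, illustrative field names):

#include <stddef.h>
#include <string.h>

struct conn {
	int refcnt;                 /* preserved across re-init */
	char __init_marker[0];      /* GNU C marker: start of zeroed region */
	int status;
	int timeout;
	char proto[16];             /* end of the zeroed region (exclusive) */
};

/* Zero everything between the marker and 'proto'. Taking the member's
 * address (rather than &marker[0]) avoids zero-length-bounds warnings.
 */
static void reinit_conn(struct conn *ct)
{
	memset(&ct->__init_marker, 0,
	       offsetof(struct conn, proto) -
	       offsetof(struct conn, __init_marker));
}
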
 
@@ -2016,22 +2016,18 @@ static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
        nf_conntrack_get(skb_nfct(nskb));
 }
 
-static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
+static int __nf_conntrack_update(struct net *net, struct sk_buff *skb,
+                                struct nf_conn *ct,
+                                enum ip_conntrack_info ctinfo)
 {
        struct nf_conntrack_tuple_hash *h;
        struct nf_conntrack_tuple tuple;
-       enum ip_conntrack_info ctinfo;
        struct nf_nat_hook *nat_hook;
        unsigned int status;
-       struct nf_conn *ct;
        int dataoff;
        u16 l3num;
        u8 l4num;
 
-       ct = nf_ct_get(skb, &ctinfo);
-       if (!ct || nf_ct_is_confirmed(ct))
-               return 0;
-
        l3num = nf_ct_l3num(ct);
 
        dataoff = get_l4proto(skb, skb_network_offset(skb), l3num, &l4num);
@@ -2088,6 +2084,76 @@ static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
        return 0;
 }
 
+/* This packet is coming from userspace via nf_queue; complete the packet
+ * processing after the helper invocation in nf_confirm().
+ */
+static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct,
+                              enum ip_conntrack_info ctinfo)
+{
+       const struct nf_conntrack_helper *helper;
+       const struct nf_conn_help *help;
+       int protoff;
+
+       help = nfct_help(ct);
+       if (!help)
+               return 0;
+
+       helper = rcu_dereference(help->helper);
+       if (!(helper->flags & NF_CT_HELPER_F_USERSPACE))
+               return 0;
+
+       switch (nf_ct_l3num(ct)) {
+       case NFPROTO_IPV4:
+               protoff = skb_network_offset(skb) + ip_hdrlen(skb);
+               break;
+#if IS_ENABLED(CONFIG_IPV6)
+       case NFPROTO_IPV6: {
+               __be16 frag_off;
+               u8 pnum;
+
+               pnum = ipv6_hdr(skb)->nexthdr;
+               protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum,
+                                          &frag_off);
+               if (protoff < 0 || (frag_off & htons(~0x7)) != 0)
+                       return 0;
+               break;
+       }
+#endif
+       default:
+               return 0;
+       }
+
+       if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
+           !nf_is_loopback_packet(skb)) {
+               if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) {
+                       NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
+                       return -1;
+               }
+       }
+
+       /* We've seen it coming out the other side: confirm it */
+       return nf_conntrack_confirm(skb) == NF_DROP ? -1 : 0;
+}
+
+static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
+{
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct;
+       int err;
+
+       ct = nf_ct_get(skb, &ctinfo);
+       if (!ct)
+               return 0;
+
+       if (!nf_ct_is_confirmed(ct)) {
+               err = __nf_conntrack_update(net, skb, ct, ctinfo);
+               if (err < 0)
+                       return err;
+       }
+
+       return nf_confirm_cthelper(skb, ct, ctinfo);
+}
+
 static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
                                       const struct sk_buff *skb)
 {
@@ -2139,8 +2205,19 @@ get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
                nf_conntrack_lock(lockp);
                if (*bucket < nf_conntrack_htable_size) {
                        hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
-                               if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
+                               if (NF_CT_DIRECTION(h) != IP_CT_DIR_REPLY)
                                        continue;
+                               /* All nf_conn objects are added to the hash table twice:
+                                * once for the original direction tuple, once for the
+                                * reply tuple.
+                                *
+                                * Exception: In the IPS_NAT_CLASH case, only the reply
+                                * tuple is added (the original tuple already existed for
+                                * a different object).
+                                *
+                                * We only need to call the iterator once for each
+                                * conntrack, so we just use the 'reply' direction
+                                * tuple while iterating.
+                                */
                                ct = nf_ct_tuplehash_to_ctrack(h);
                                if (iter(ct, data))
                                        goto found;
index a971183f11af77ac1533de77a7778adab37d4758..1f44d523b5121c84057f9f66dc902203090dbd57 100644 (file)
@@ -72,24 +72,32 @@ EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_expectfn);
 
 #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
 /* PptpControlMessageType names */
-const char *const pptp_msg_name[] = {
-       "UNKNOWN_MESSAGE",
-       "START_SESSION_REQUEST",
-       "START_SESSION_REPLY",
-       "STOP_SESSION_REQUEST",
-       "STOP_SESSION_REPLY",
-       "ECHO_REQUEST",
-       "ECHO_REPLY",
-       "OUT_CALL_REQUEST",
-       "OUT_CALL_REPLY",
-       "IN_CALL_REQUEST",
-       "IN_CALL_REPLY",
-       "IN_CALL_CONNECT",
-       "CALL_CLEAR_REQUEST",
-       "CALL_DISCONNECT_NOTIFY",
-       "WAN_ERROR_NOTIFY",
-       "SET_LINK_INFO"
+static const char *const pptp_msg_name_array[PPTP_MSG_MAX + 1] = {
+       [0]                             = "UNKNOWN_MESSAGE",
+       [PPTP_START_SESSION_REQUEST]    = "START_SESSION_REQUEST",
+       [PPTP_START_SESSION_REPLY]      = "START_SESSION_REPLY",
+       [PPTP_STOP_SESSION_REQUEST]     = "STOP_SESSION_REQUEST",
+       [PPTP_STOP_SESSION_REPLY]       = "STOP_SESSION_REPLY",
+       [PPTP_ECHO_REQUEST]             = "ECHO_REQUEST",
+       [PPTP_ECHO_REPLY]               = "ECHO_REPLY",
+       [PPTP_OUT_CALL_REQUEST]         = "OUT_CALL_REQUEST",
+       [PPTP_OUT_CALL_REPLY]           = "OUT_CALL_REPLY",
+       [PPTP_IN_CALL_REQUEST]          = "IN_CALL_REQUEST",
+       [PPTP_IN_CALL_REPLY]            = "IN_CALL_REPLY",
+       [PPTP_IN_CALL_CONNECT]          = "IN_CALL_CONNECT",
+       [PPTP_CALL_CLEAR_REQUEST]       = "CALL_CLEAR_REQUEST",
+       [PPTP_CALL_DISCONNECT_NOTIFY]   = "CALL_DISCONNECT_NOTIFY",
+       [PPTP_WAN_ERROR_NOTIFY]         = "WAN_ERROR_NOTIFY",
+       [PPTP_SET_LINK_INFO]            = "SET_LINK_INFO"
 };
+
+const char *pptp_msg_name(u_int16_t msg)
+{
+       if (msg > PPTP_MSG_MAX)
+               return pptp_msg_name_array[0];
+
+       return pptp_msg_name_array[msg];
+}
 EXPORT_SYMBOL(pptp_msg_name);
 #endif
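
The table-plus-accessor rewrite above turns an unchecked array index into a bounds-checked lookup that falls back to slot 0 for unknown message types. A runnable miniature of the same pattern:

#include <stdio.h>

static const char *const name_tab[3 + 1] = {
	[0] = "UNKNOWN", [1] = "START", [2] = "STOP", [3] = "ECHO",
};

/* Out-of-range message types fall back to slot 0 instead of indexing
 * past the end of the array.
 */
static const char *msg_name(unsigned int msg)
{
	return name_tab[msg > 3 ? 0 : msg];
}

int main(void)
{
	printf("%s %s\n", msg_name(2), msg_name(42)); /* STOP UNKNOWN */
	return 0;
}
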
 
@@ -276,7 +284,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
        typeof(nf_nat_pptp_hook_inbound) nf_nat_pptp_inbound;
 
        msg = ntohs(ctlh->messageType);
-       pr_debug("inbound control message %s\n", pptp_msg_name[msg]);
+       pr_debug("inbound control message %s\n", pptp_msg_name(msg));
 
        switch (msg) {
        case PPTP_START_SESSION_REPLY:
@@ -311,7 +319,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
                pcid = pptpReq->ocack.peersCallID;
                if (info->pns_call_id != pcid)
                        goto invalid;
-               pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name[msg],
+               pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name(msg),
                         ntohs(cid), ntohs(pcid));
 
                if (pptpReq->ocack.resultCode == PPTP_OUTCALL_CONNECT) {
@@ -328,7 +336,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
                        goto invalid;
 
                cid = pptpReq->icreq.callID;
-               pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
+               pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid));
                info->cstate = PPTP_CALL_IN_REQ;
                info->pac_call_id = cid;
                break;
@@ -347,7 +355,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
                if (info->pns_call_id != pcid)
                        goto invalid;
 
-               pr_debug("%s, PCID=%X\n", pptp_msg_name[msg], ntohs(pcid));
+               pr_debug("%s, PCID=%X\n", pptp_msg_name(msg), ntohs(pcid));
                info->cstate = PPTP_CALL_IN_CONF;
 
                /* we expect a GRE connection from PAC to PNS */
@@ -357,7 +365,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
        case PPTP_CALL_DISCONNECT_NOTIFY:
                /* server confirms disconnect */
                cid = pptpReq->disc.callID;
-               pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
+               pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid));
                info->cstate = PPTP_CALL_NONE;
 
                /* untrack this call id, unexpect GRE packets */
@@ -384,7 +392,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
 invalid:
        pr_debug("invalid %s: type=%d cid=%u pcid=%u "
                 "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n",
-                msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0],
+                pptp_msg_name(msg),
                 msg, ntohs(cid), ntohs(pcid),  info->cstate, info->sstate,
                 ntohs(info->pns_call_id), ntohs(info->pac_call_id));
        return NF_ACCEPT;
@@ -404,7 +412,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff,
        typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound;
 
        msg = ntohs(ctlh->messageType);
-       pr_debug("outbound control message %s\n", pptp_msg_name[msg]);
+       pr_debug("outbound control message %s\n", pptp_msg_name(msg));
 
        switch (msg) {
        case PPTP_START_SESSION_REQUEST:
@@ -426,7 +434,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff,
                info->cstate = PPTP_CALL_OUT_REQ;
                /* track PNS call id */
                cid = pptpReq->ocreq.callID;
-               pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
+               pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid));
                info->pns_call_id = cid;
                break;
 
@@ -440,7 +448,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff,
                pcid = pptpReq->icack.peersCallID;
                if (info->pac_call_id != pcid)
                        goto invalid;
-               pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name[msg],
+               pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name(msg),
                         ntohs(cid), ntohs(pcid));
 
                if (pptpReq->icack.resultCode == PPTP_INCALL_ACCEPT) {
@@ -480,7 +488,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff,
 invalid:
        pr_debug("invalid %s: type=%d cid=%u pcid=%u "
                 "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n",
-                msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0],
+                pptp_msg_name(msg),
                 msg, ntohs(cid), ntohs(pcid),  info->cstate, info->sstate,
                 ntohs(info->pns_call_id), ntohs(info->pac_call_id));
        return NF_ACCEPT;
index 4344e572b7f96c65e7df179f3596be0ad5499c18..42da6e3372766841d69b506bef8885be2b9ddcc7 100644 (file)
@@ -284,7 +284,7 @@ static void flow_offload_del(struct nf_flowtable *flow_table,
 
        if (nf_flow_has_expired(flow))
                flow_offload_fixup_ct(flow->ct);
-       else if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
+       else
                flow_offload_fixup_ct_timeout(flow->ct);
 
        flow_offload_free(flow);
@@ -361,8 +361,10 @@ static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
 {
        struct nf_flowtable *flow_table = data;
 
-       if (nf_flow_has_expired(flow) || nf_ct_is_dying(flow->ct) ||
-           test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
+       if (nf_flow_has_expired(flow) || nf_ct_is_dying(flow->ct))
+               set_bit(NF_FLOW_TEARDOWN, &flow->flags);
+
+       if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
                if (test_bit(NF_FLOW_HW, &flow->flags)) {
                        if (!test_bit(NF_FLOW_HW_DYING, &flow->flags))
                                nf_flow_offload_del(flow_table, flow);
index e3b099c14eff6457c3a152eeee488fe556762f9d..2276a73ccba24b67df26d2536ca59e9c4808472d 100644 (file)
@@ -817,6 +817,7 @@ static void flow_offload_work_handler(struct work_struct *work)
                        WARN_ON_ONCE(1);
        }
 
+       clear_bit(NF_FLOW_HW_PENDING, &offload->flow->flags);
        kfree(offload);
 }
 
@@ -831,9 +832,14 @@ nf_flow_offload_work_alloc(struct nf_flowtable *flowtable,
 {
        struct flow_offload_work *offload;
 
+       if (test_and_set_bit(NF_FLOW_HW_PENDING, &flow->flags))
+               return NULL;
+
        offload = kmalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
-       if (!offload)
+       if (!offload) {
+               clear_bit(NF_FLOW_HW_PENDING, &flow->flags);
                return NULL;
+       }
 
        offload->cmd = cmd;
        offload->flow = flow;
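
The NF_FLOW_HW_PENDING bit above serialises offload work: test_and_set_bit() claims the slot before allocation, and the bit is released on allocation failure here or by the work handler on completion. A userspace sketch of the same guard using C11 atomic_flag:

#include <stdatomic.h>
#include <stdlib.h>

struct flow {
	atomic_flag hw_pending;   /* initialise with ATOMIC_FLAG_INIT */
};

/* Allow at most one queued work item per flow: the flag is claimed
 * before allocation and released on failure (or, in the real code,
 * when the work handler finishes).
 */
static void *alloc_offload_work(struct flow *f, size_t sz)
{
	void *w;

	if (atomic_flag_test_and_set(&f->hw_pending))
		return NULL;               /* work already pending */

	w = malloc(sz);
	if (!w)
		atomic_flag_clear(&f->hw_pending);
	return w;
}
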
@@ -1056,7 +1062,7 @@ static struct flow_indr_block_entry block_ing_entry = {
 int nf_flow_table_offload_init(void)
 {
        nf_flow_offload_wq  = alloc_workqueue("nf_flow_table_offload",
-                                             WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+                                             WQ_UNBOUND, 0);
        if (!nf_flow_offload_wq)
                return -ENOMEM;
 
index 3d816a1e5442e6b2678fddc0f18f3e0545298fb1..59151dc07fdc14fe6fa640cdd0448723d96bcb48 100644 (file)
@@ -68,15 +68,13 @@ static bool udp_manip_pkt(struct sk_buff *skb,
                          enum nf_nat_manip_type maniptype)
 {
        struct udphdr *hdr;
-       bool do_csum;
 
        if (skb_ensure_writable(skb, hdroff + sizeof(*hdr)))
                return false;
 
        hdr = (struct udphdr *)(skb->data + hdroff);
-       do_csum = hdr->check || skb->ip_summed == CHECKSUM_PARTIAL;
+       __udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, !!hdr->check);
 
-       __udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, do_csum);
        return true;
 }
 
index a5f294aa8e4cf9c3ef361d775de6f8707d2f1143..5b0d0a77379c64bce6c55ba185022b95d1554ec6 100644 (file)
@@ -103,7 +103,7 @@ nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
        if (help->helper->data_len == 0)
                return -EINVAL;
 
-       nla_memcpy(help->data, nla_data(attr), sizeof(help->data));
+       nla_memcpy(help->data, attr, sizeof(help->data));
        return 0;
 }
 
@@ -240,6 +240,7 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
                ret = -ENOMEM;
                goto err2;
        }
+       helper->data_len = size;
 
        helper->flags |= NF_CT_HELPER_F_USERSPACE;
        memcpy(&helper->tuple, tuple, sizeof(struct nf_conntrack_tuple));
index 9f5dea0064ea86b52dbca0c1e127564be91a7cca..916a3c7f9eafe78b6a5507ae6bb5ac39f42e6ba7 100644 (file)
@@ -165,12 +165,12 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
 static const struct tcphdr *nf_osf_hdr_ctx_init(struct nf_osf_hdr_ctx *ctx,
                                                const struct sk_buff *skb,
                                                const struct iphdr *ip,
-                                               unsigned char *opts)
+                                               unsigned char *opts,
+                                               struct tcphdr *_tcph)
 {
        const struct tcphdr *tcp;
-       struct tcphdr _tcph;
 
-       tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), &_tcph);
+       tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), _tcph);
        if (!tcp)
                return NULL;
 
@@ -205,10 +205,11 @@ nf_osf_match(const struct sk_buff *skb, u_int8_t family,
        int fmatch = FMATCH_WRONG;
        struct nf_osf_hdr_ctx ctx;
        const struct tcphdr *tcp;
+       struct tcphdr _tcph;
 
        memset(&ctx, 0, sizeof(ctx));
 
-       tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts);
+       tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph);
        if (!tcp)
                return false;
 
@@ -265,10 +266,11 @@ bool nf_osf_find(const struct sk_buff *skb,
        const struct nf_osf_finger *kf;
        struct nf_osf_hdr_ctx ctx;
        const struct tcphdr *tcp;
+       struct tcphdr _tcph;
 
        memset(&ctx, 0, sizeof(ctx));
 
-       tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts);
+       tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph);
        if (!tcp)
                return false;
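
The fix above moves struct tcphdr _tcph out of the helper and into its callers: skb_header_pointer() may return a pointer into the caller-supplied buffer, so that buffer has to outlive the helper's stack frame. A miniature of the lifetime rule, with plain buffers standing in for the skb accessors:

#include <string.h>

/* May return a pointer into the caller-supplied scratch buffer, so
 * scratch must live in the outermost caller, not in a helper whose
 * stack frame dies before the pointer is used.
 */
static const char *header_ptr(const char *pkt, size_t len,
			      size_t need, char *scratch)
{
	if (len < need)
		return NULL;
	memcpy(scratch, pkt, need);   /* e.g. reassembled from fragments */
	return scratch;
}

static int parse(const char *pkt, size_t len)
{
	char tcph[20];                /* caller-owned backing storage */
	const char *h = header_ptr(pkt, len, sizeof(tcph), tcph);

	return h ? h[13] : -1;        /* safe: tcph is still in scope */
}
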
 
index 3ffef454d4699f025fb390c7e60a5bc119e77887..62f416bc05796b230767e111d78eba5042ebfc7b 100644 (file)
@@ -79,6 +79,10 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
                                parent = rcu_dereference_raw(parent->rb_left);
                                continue;
                        }
+
+                       if (nft_set_elem_expired(&rbe->ext))
+                               return false;
+
                        if (nft_rbtree_interval_end(rbe)) {
                                if (nft_set_is_anonymous(set))
                                        return false;
@@ -94,6 +98,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
 
        if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
            nft_set_elem_active(&interval->ext, genmask) &&
+           !nft_set_elem_expired(&interval->ext) &&
            nft_rbtree_interval_start(interval)) {
                *ext = &interval->ext;
                return true;
@@ -154,6 +159,9 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
                                continue;
                        }
 
+                       if (nft_set_elem_expired(&rbe->ext))
+                               return false;
+
                        if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
                            (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
                            (flags & NFT_SET_ELEM_INTERVAL_END)) {
@@ -170,6 +178,7 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
 
        if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
            nft_set_elem_active(&interval->ext, genmask) &&
+           !nft_set_elem_expired(&interval->ext) &&
            ((!nft_rbtree_interval_end(interval) &&
              !(flags & NFT_SET_ELEM_INTERVAL_END)) ||
             (nft_rbtree_interval_end(interval) &&
@@ -418,6 +427,8 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
 
                if (iter->count < iter->skip)
                        goto cont;
+               if (nft_set_elem_expired(&rbe->ext))
+                       goto cont;
                if (!nft_set_elem_active(&rbe->ext, iter->genmask))
                        goto cont;
 
index 409a3ae47ce271fe924fa02e8fd14b7405e6b6c2..5e1239cef000588dff7963e2194ece26cadcf414 100644 (file)
@@ -734,6 +734,12 @@ int netlbl_catmap_getlong(struct netlbl_lsm_catmap *catmap,
        if ((off & (BITS_PER_LONG - 1)) != 0)
                return -EINVAL;
 
+       /* a null catmap is equivalent to an empty one */
+       if (!catmap) {
+               *offset = (u32)-1;
+               return 0;
+       }
+
        if (off < catmap->startbit) {
                off = catmap->startbit;
                *offset = off;
index e7d0fe3f43304433d315dddfafb9bf88e4396997..c5b3202a14cae757d39dba59e8fe42b16494aa13 100644 (file)
@@ -712,6 +712,10 @@ void qrtr_ns_init(void)
                goto err_sock;
        }
 
+       qrtr_ns.workqueue = alloc_workqueue("qrtr_ns_handler", WQ_UNBOUND, 1);
+       if (!qrtr_ns.workqueue)
+               goto err_sock;
+
        qrtr_ns.sock->sk->sk_data_ready = qrtr_ns_data_ready;
 
        sq.sq_port = QRTR_PORT_CTRL;
@@ -720,17 +724,13 @@ void qrtr_ns_init(void)
        ret = kernel_bind(qrtr_ns.sock, (struct sockaddr *)&sq, sizeof(sq));
        if (ret < 0) {
                pr_err("failed to bind to socket\n");
-               goto err_sock;
+               goto err_wq;
        }
 
        qrtr_ns.bcast_sq.sq_family = AF_QIPCRTR;
        qrtr_ns.bcast_sq.sq_node = QRTR_NODE_BCAST;
        qrtr_ns.bcast_sq.sq_port = QRTR_PORT_CTRL;
 
-       qrtr_ns.workqueue = alloc_workqueue("qrtr_ns_handler", WQ_UNBOUND, 1);
-       if (!qrtr_ns.workqueue)
-               goto err_sock;
-
        ret = say_hello(&qrtr_ns.bcast_sq);
        if (ret < 0)
                goto err_wq;
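
The reordering above creates the workqueue before binding the socket, so every failure after the bind can unwind through err_wq instead of leaking the queue. A miniature of allocate-before-announce with goto-style unwinding; malloc() stands in for the kernel allocations:

#include <stdlib.h>

static void *g_sock, *g_wq;   /* held for the service's lifetime */

/* Create everything fallible before the externally visible step (the
 * bind), so each failure unwinds only what already exists and nothing
 * is announced half-initialized.
 */
static int service_init(void)
{
	g_sock = malloc(64);          /* stand-in for socket creation */
	if (!g_sock)
		return -1;

	g_wq = malloc(64);            /* stand-in for alloc_workqueue() */
	if (!g_wq)
		goto err_sock;

	/* ... only now bind the socket and say hello ... */
	return 0;

err_sock:
	free(g_sock);
	g_sock = NULL;
	return -1;
}
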
index 7ed31b5e77e46060b9138e6c8e11cc18febffc24..2d8d6131bc5f7e23b168af2dcd377d55c278cd8e 100644 (file)
@@ -854,7 +854,7 @@ static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
        }
        mutex_unlock(&qrtr_node_lock);
 
-       qrtr_local_enqueue(node, skb, type, from, to);
+       qrtr_local_enqueue(NULL, skb, type, from, to);
 
        return 0;
 }
index 6ffb7e9887ce116af440b495012e35a9ddc71779..ddd0f95713a945063589d1312a95a59eeeabda1b 100644 (file)
@@ -25,6 +25,7 @@ rxrpc-y := \
        peer_event.o \
        peer_object.o \
        recvmsg.o \
+       rtt.o \
        security.o \
        sendmsg.o \
        skbuff.o \
index 3eb1ab40ca5cb5933c68f0e87b9551d13f6eda5a..9fe264bec70ce74d1fab32dacd3cd84e90948aa0 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/atomic.h>
 #include <linux/seqlock.h>
+#include <linux/win_minmax.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 #include <net/sock.h>
@@ -311,11 +312,14 @@ struct rxrpc_peer {
 #define RXRPC_RTT_CACHE_SIZE 32
        spinlock_t              rtt_input_lock; /* RTT lock for input routine */
        ktime_t                 rtt_last_req;   /* Time of last RTT request */
-       u64                     rtt;            /* Current RTT estimate (in nS) */
-       u64                     rtt_sum;        /* Sum of cache contents */
-       u64                     rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* Determined RTT cache */
-       u8                      rtt_cursor;     /* next entry at which to insert */
-       u8                      rtt_usage;      /* amount of cache actually used */
+       unsigned int            rtt_count;      /* Number of samples we've got */
+
+       u32                     srtt_us;        /* smoothed round trip time << 3 in usecs */
+       u32                     mdev_us;        /* medium deviation                     */
+       u32                     mdev_max_us;    /* maximal mdev for the last rtt period */
+       u32                     rttvar_us;      /* smoothed mdev_max                    */
+       u32                     rto_j;          /* Retransmission timeout in jiffies */
+       u8                      backoff;        /* Backoff timeout */
 
        u8                      cong_cwnd;      /* Congestion window size */
 };
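
As in TCP, srtt_us above is stored left-shifted by 3, so readers shift it back down, and the RTO combines it with the variance term, matching the __rxrpc_set_rto() helper added in net/rxrpc/rtt.c later in this section. A small sketch of the conversions:

#include <stdint.h>

/* The smoothed RTT is kept <<3 (eighths of a microsecond unit);
 * readers shift it back down before use.
 */
static uint32_t srtt_in_usecs(uint32_t srtt_us_shifted)
{
	return srtt_us_shifted >> 3;
}

/* RFC 6298-style combination, as in __rxrpc_set_rto() below:
 * RTO = SRTT + rttvar (rttvar already tracks the smoothed deviation).
 */
static uint32_t rto_usecs(uint32_t srtt_us_shifted, uint32_t rttvar_us)
{
	return (srtt_us_shifted >> 3) + rttvar_us;
}
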
@@ -1041,7 +1045,6 @@ extern unsigned long rxrpc_idle_ack_delay;
 extern unsigned int rxrpc_rx_window_size;
 extern unsigned int rxrpc_rx_mtu;
 extern unsigned int rxrpc_rx_jumbo_max;
-extern unsigned long rxrpc_resend_timeout;
 
 extern const s8 rxrpc_ack_priority[];
 
@@ -1069,8 +1072,6 @@ void rxrpc_send_keepalive(struct rxrpc_peer *);
  * peer_event.c
  */
 void rxrpc_error_report(struct sock *);
-void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
-                       rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
 void rxrpc_peer_keepalive_worker(struct work_struct *);
 
 /*
@@ -1102,6 +1103,14 @@ extern const struct seq_operations rxrpc_peer_seq_ops;
 void rxrpc_notify_socket(struct rxrpc_call *);
 int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);
 
+/*
+ * rtt.c
+ */
+void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
+                       rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
+unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *, bool);
+void rxrpc_peer_init_rtt(struct rxrpc_peer *);
+
 /*
  * rxkad.c
  */
index 70e44abf106c86c731162a7a40be9963ba9b25f4..b7611cc159e51da5366875962956525291e56c52 100644 (file)
@@ -248,7 +248,7 @@ static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        ktime_t now = skb->tstamp;
 
-       if (call->peer->rtt_usage < 3 ||
+       if (call->peer->rtt_count < 3 ||
            ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
                rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
                                  true, true,
index cedbbb3a7c2ea27ddef918ca0e3bcc690ddd8f3d..2a65ac41055f5f99e2cd15b7842c799118b42989 100644 (file)
@@ -111,8 +111,8 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
        } else {
                unsigned long now = jiffies, ack_at;
 
-               if (call->peer->rtt_usage > 0)
-                       ack_at = nsecs_to_jiffies(call->peer->rtt);
+               if (call->peer->srtt_us != 0)
+                       ack_at = usecs_to_jiffies(call->peer->srtt_us >> 3);
                else
                        ack_at = expiry;
 
@@ -157,24 +157,18 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call)
 static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
 {
        struct sk_buff *skb;
-       unsigned long resend_at;
+       unsigned long resend_at, rto_j;
        rxrpc_seq_t cursor, seq, top;
-       ktime_t now, max_age, oldest, ack_ts, timeout, min_timeo;
+       ktime_t now, max_age, oldest, ack_ts;
        int ix;
        u8 annotation, anno_type, retrans = 0, unacked = 0;
 
        _enter("{%d,%d}", call->tx_hard_ack, call->tx_top);
 
-       if (call->peer->rtt_usage > 1)
-               timeout = ns_to_ktime(call->peer->rtt * 3 / 2);
-       else
-               timeout = ms_to_ktime(rxrpc_resend_timeout);
-       min_timeo = ns_to_ktime((1000000000 / HZ) * 4);
-       if (ktime_before(timeout, min_timeo))
-               timeout = min_timeo;
+       rto_j = call->peer->rto_j;
 
        now = ktime_get_real();
-       max_age = ktime_sub(now, timeout);
+       max_age = ktime_sub(now, jiffies_to_usecs(rto_j));
 
        spin_lock_bh(&call->lock);
 
@@ -219,7 +213,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
        }
 
        resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
-       resend_at += jiffies + rxrpc_resend_timeout;
+       resend_at += jiffies + rto_j;
        WRITE_ONCE(call->resend_at, resend_at);
 
        if (unacked)
@@ -234,7 +228,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
                                        rxrpc_timer_set_for_resend);
                spin_unlock_bh(&call->lock);
                ack_ts = ktime_sub(now, call->acks_latest_ts);
-               if (ktime_to_ns(ack_ts) < call->peer->rtt)
+               if (ktime_to_us(ack_ts) < (call->peer->srtt_us >> 3))
                        goto out;
                rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
                                  rxrpc_propose_ack_ping_for_lost_ack);
index 69e09d69c8964b43182321b14282ffa392cc3379..3be4177baf70761863ab9f8350f230da1d904d7b 100644 (file)
@@ -91,11 +91,11 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
                /* We analyse the number of packets that get ACK'd per RTT
                 * period and increase the window if we managed to fill it.
                 */
-               if (call->peer->rtt_usage == 0)
+               if (call->peer->rtt_count == 0)
                        goto out;
                if (ktime_before(skb->tstamp,
-                                ktime_add_ns(call->cong_tstamp,
-                                             call->peer->rtt)))
+                                ktime_add_us(call->cong_tstamp,
+                                             call->peer->srtt_us >> 3)))
                        goto out_no_clear_ca;
                change = rxrpc_cong_rtt_window_end;
                call->cong_tstamp = skb->tstamp;
@@ -802,6 +802,30 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
        }
 }
 
+/*
+ * Return true if the ACK is valid - i.e. it doesn't appear to have regressed
+ * with respect to the ack state conveyed by preceding ACKs.
+ */
+static bool rxrpc_is_ack_valid(struct rxrpc_call *call,
+                              rxrpc_seq_t first_pkt, rxrpc_seq_t prev_pkt)
+{
+       rxrpc_seq_t base = READ_ONCE(call->ackr_first_seq);
+
+       if (after(first_pkt, base))
+               return true; /* The window advanced */
+
+       if (before(first_pkt, base))
+               return false; /* firstPacket regressed */
+
+       if (after_eq(prev_pkt, call->ackr_prev_seq))
+               return true; /* previousPacket hasn't regressed. */
+
+       /* Some rx implementations put a serial number in previousPacket. */
+       if (after_eq(prev_pkt, base + call->tx_winsize))
+               return false;
+       return true;
+}
+
 /*
  * Process an ACK packet.
  *
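
The validity check above leans on the wrapping sequence comparisons before()/after(). A one-function sketch of that comparison, which stays correct across 32-bit wraparound:

#include <stdbool.h>
#include <stdint.h>

/* a is "before" b iff the signed difference is negative; this holds
 * even when the sequence space has wrapped, e.g. 0xfffffffe is before 1.
 */
static bool seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}
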
@@ -865,9 +889,12 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
        }
 
        /* Discard any out-of-order or duplicate ACKs (outside lock). */
-       if (before(first_soft_ack, call->ackr_first_seq) ||
-           before(prev_pkt, call->ackr_prev_seq))
+       if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
+               trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial,
+                                          first_soft_ack, call->ackr_first_seq,
+                                          prev_pkt, call->ackr_prev_seq);
                return;
+       }
 
        buf.info.rxMTU = 0;
        ioffset = offset + nr_acks + 3;
@@ -878,9 +905,12 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
        spin_lock(&call->input_lock);
 
        /* Discard any out-of-order or duplicate ACKs (inside lock). */
-       if (before(first_soft_ack, call->ackr_first_seq) ||
-           before(prev_pkt, call->ackr_prev_seq))
+       if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
+               trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial,
+                                          first_soft_ack, call->ackr_first_seq,
+                                          prev_pkt, call->ackr_prev_seq);
                goto out;
+       }
        call->acks_latest_ts = skb->tstamp;
 
        call->ackr_first_seq = first_soft_ack;
index 214405f75346a3297704f28965272dc42d36656c..d4144fd86f847cfa855a511f948a41bae932e38d 100644 (file)
@@ -63,11 +63,6 @@ unsigned int rxrpc_rx_mtu = 5692;
  */
 unsigned int rxrpc_rx_jumbo_max = 4;
 
-/*
- * Time till packet resend (in milliseconds).
- */
-unsigned long rxrpc_resend_timeout = 4 * HZ;
-
 const s8 rxrpc_ack_priority[] = {
        [0]                             = 0,
        [RXRPC_ACK_DELAY]               = 1,
index 90e263c6aa69e441c5c9e1232bab6acc375a663b..f8b632a5c61979fbd02828e2e9b6945f8d274e8a 100644 (file)
@@ -369,7 +369,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
            (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) ||
             retrans ||
             call->cong_mode == RXRPC_CALL_SLOW_START ||
-            (call->peer->rtt_usage < 3 && sp->hdr.seq & 1) ||
+            (call->peer->rtt_count < 3 && sp->hdr.seq & 1) ||
             ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
                          ktime_get_real())))
                whdr.flags |= RXRPC_REQUEST_ACK;
@@ -423,13 +423,10 @@ done:
                if (whdr.flags & RXRPC_REQUEST_ACK) {
                        call->peer->rtt_last_req = skb->tstamp;
                        trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
-                       if (call->peer->rtt_usage > 1) {
+                       if (call->peer->rtt_count > 1) {
                                unsigned long nowj = jiffies, ack_lost_at;
 
-                               ack_lost_at = nsecs_to_jiffies(2 * call->peer->rtt);
-                               if (ack_lost_at < 1)
-                                       ack_lost_at = 1;
-
+                               ack_lost_at = rxrpc_get_rto_backoff(call->peer, retrans);
                                ack_lost_at += nowj;
                                WRITE_ONCE(call->ack_lost_at, ack_lost_at);
                                rxrpc_reduce_call_timer(call, ack_lost_at, nowj,
index 923b263c401b34ff8bd3ff46c24c3847fbe4ffec..b1449d971883613b026df3d65912a7b3dd0b4885 100644 (file)
@@ -295,52 +295,6 @@ static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
        }
 }
 
-/*
- * Add RTT information to cache.  This is called in softirq mode and has
- * exclusive access to the peer RTT data.
- */
-void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
-                       rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
-                       ktime_t send_time, ktime_t resp_time)
-{
-       struct rxrpc_peer *peer = call->peer;
-       s64 rtt;
-       u64 sum = peer->rtt_sum, avg;
-       u8 cursor = peer->rtt_cursor, usage = peer->rtt_usage;
-
-       rtt = ktime_to_ns(ktime_sub(resp_time, send_time));
-       if (rtt < 0)
-               return;
-
-       spin_lock(&peer->rtt_input_lock);
-
-       /* Replace the oldest datum in the RTT buffer */
-       sum -= peer->rtt_cache[cursor];
-       sum += rtt;
-       peer->rtt_cache[cursor] = rtt;
-       peer->rtt_cursor = (cursor + 1) & (RXRPC_RTT_CACHE_SIZE - 1);
-       peer->rtt_sum = sum;
-       if (usage < RXRPC_RTT_CACHE_SIZE) {
-               usage++;
-               peer->rtt_usage = usage;
-       }
-
-       spin_unlock(&peer->rtt_input_lock);
-
-       /* Now recalculate the average */
-       if (usage == RXRPC_RTT_CACHE_SIZE) {
-               avg = sum / RXRPC_RTT_CACHE_SIZE;
-       } else {
-               avg = sum;
-               do_div(avg, usage);
-       }
-
-       /* Don't need to update this under lock */
-       peer->rtt = avg;
-       trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt,
-                          usage, avg);
-}
-
 /*
  * Perform keep-alive pings.
  */
index 452163eadb98db828466f6210d6b6c167a5b71da..ca29976bb193efd37fb9bc7f83cbdd4aa51f1737 100644 (file)
@@ -225,6 +225,8 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
                spin_lock_init(&peer->rtt_input_lock);
                peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
 
+               rxrpc_peer_init_rtt(peer);
+
                if (RXRPC_TX_SMSS > 2190)
                        peer->cong_cwnd = 2;
                else if (RXRPC_TX_SMSS > 1095)
@@ -497,14 +499,14 @@ void rxrpc_kernel_get_peer(struct socket *sock, struct rxrpc_call *call,
 EXPORT_SYMBOL(rxrpc_kernel_get_peer);
 
 /**
- * rxrpc_kernel_get_rtt - Get a call's peer RTT
+ * rxrpc_kernel_get_srtt - Get a call's peer smoothed RTT
  * @sock: The socket on which the call is in progress.
  * @call: The call to query
  *
- * Get the call's peer RTT.
+ * Get the call's peer smoothed RTT.
  */
-u64 rxrpc_kernel_get_rtt(struct socket *sock, struct rxrpc_call *call)
+u32 rxrpc_kernel_get_srtt(struct socket *sock, struct rxrpc_call *call)
 {
-       return call->peer->rtt;
+       return call->peer->srtt_us >> 3;
 }
-EXPORT_SYMBOL(rxrpc_kernel_get_rtt);
+EXPORT_SYMBOL(rxrpc_kernel_get_srtt);
index b9d053e42821b7a8591000a992b27683e83ed2f8..8b179e3c802a15d771750db86dc74ea2c544b7ed 100644 (file)
@@ -222,7 +222,7 @@ static int rxrpc_peer_seq_show(struct seq_file *seq, void *v)
                seq_puts(seq,
                         "Proto Local                                          "
                         " Remote                                         "
-                        " Use CW  MTU   LastUse          RTT Rc\n"
+                        " Use  CW   MTU LastUse      RTT      RTO\n"
                         );
                return 0;
        }
@@ -236,15 +236,15 @@ static int rxrpc_peer_seq_show(struct seq_file *seq, void *v)
        now = ktime_get_seconds();
        seq_printf(seq,
                   "UDP   %-47.47s %-47.47s %3u"
-                  " %3u %5u %6llus %12llu %2u\n",
+                  " %3u %5u %6llus %8u %8u\n",
                   lbuff,
                   rbuff,
                   atomic_read(&peer->usage),
                   peer->cong_cwnd,
                   peer->mtu,
                   now - peer->last_tx_at,
-                  peer->rtt,
-                  peer->rtt_cursor);
+                  peer->srtt_us >> 3,
+                  jiffies_to_usecs(peer->rto_j));
 
        return 0;
 }
diff --git a/net/rxrpc/rtt.c b/net/rxrpc/rtt.c
new file mode 100644 (file)
index 0000000..928d8b3
--- /dev/null
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0
+/* RTT/RTO calculation.
+ *
+ * Adapted from TCP for AF_RXRPC by David Howells (dhowells@redhat.com)
+ *
+ * https://tools.ietf.org/html/rfc6298
+ * https://tools.ietf.org/html/rfc1122#section-4.2.3.1
+ * http://ccr.sigcomm.org/archive/1995/jan95/ccr-9501-partridge87.pdf
+ */
+
+#include <linux/net.h>
+#include "ar-internal.h"
+
+#define RXRPC_RTO_MAX  ((unsigned)(120 * HZ))
+#define RXRPC_TIMEOUT_INIT ((unsigned)(1*HZ))  /* RFC6298 2.1 initial RTO value        */
+#define rxrpc_jiffies32 ((u32)jiffies)         /* As tcp_jiffies32 */
+#define rxrpc_min_rtt_wlen 300                 /* As sysctl_tcp_min_rtt_wlen */
+
+static u32 rxrpc_rto_min_us(struct rxrpc_peer *peer)
+{
+       return 200;
+}
+
+static u32 __rxrpc_set_rto(const struct rxrpc_peer *peer)
+{
+       return _usecs_to_jiffies((peer->srtt_us >> 3) + peer->rttvar_us);
+}
+
+static u32 rxrpc_bound_rto(u32 rto)
+{
+       return min(rto, RXRPC_RTO_MAX);
+}
+
+/*
+ * Called to compute a smoothed rtt estimate. The data fed to this
+ * routine either comes from timestamps, or from segments that were
+ * known _not_ to have been retransmitted [see Karn/Partridge
+ * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
+ * piece by Van Jacobson.
+ * NOTE: the next three routines used to be one big routine.
+ * To save cycles in the RFC 1323 implementation it was better to break
+ * it up into three procedures. -- erics
+ */
+static void rxrpc_rtt_estimator(struct rxrpc_peer *peer, long sample_rtt_us)
+{
+       long m = sample_rtt_us; /* RTT */
+       u32 srtt = peer->srtt_us;
+
+       /*      The following amusing code comes from Jacobson's article
+        *      in SIGCOMM '88.  Note that rtt and mdev are scaled versions
+        *      of rtt and mean deviation.  This is designed to be as fast
+        *      as possible; m stands for "measurement".
+        *
+        *      In a 1990 paper the rto value was changed to:
+        *      RTO = rtt + 4 * mdev
+        *
+        * Funny. This algorithm seems to be very broken.
+        * These formulae increase RTO when it should be decreased, increase it
+        * too slowly when it should be increased quickly, decrease it too
+        * quickly, etc. I guess in BSD RTO takes ONE value, so it absolutely
+        * does not matter how you _calculate_ it. It seems this was a trap
+        * that VJ failed to avoid. 8)
+        */
+       if (srtt != 0) {
+               m -= (srtt >> 3);       /* m is now error in rtt est */
+               srtt += m;              /* rtt = 7/8 rtt + 1/8 new */
+               if (m < 0) {
+                       m = -m;         /* m is now abs(error) */
+                       m -= (peer->mdev_us >> 2);   /* similar update on mdev */
+                       /* This is similar to one of Eifel findings.
+                        * Eifel blocks mdev updates when rtt decreases.
+                        * This solution is a bit different: we use finer gain
+                        * for mdev in this case (alpha*beta).
+                        * Like Eifel it also prevents growth of rto,
+                        * but also it limits too fast rto decreases,
+                        * happening in pure Eifel.
+                        */
+                       if (m > 0)
+                               m >>= 3;
+               } else {
+                       m -= (peer->mdev_us >> 2);   /* similar update on mdev */
+               }
+
+               peer->mdev_us += m;             /* mdev = 3/4 mdev + 1/4 new */
+               if (peer->mdev_us > peer->mdev_max_us) {
+                       peer->mdev_max_us = peer->mdev_us;
+                       if (peer->mdev_max_us > peer->rttvar_us)
+                               peer->rttvar_us = peer->mdev_max_us;
+               }
+       } else {
+               /* no previous measure. */
+               srtt = m << 3;          /* take the measured time to be rtt */
+               peer->mdev_us = m << 1; /* make sure rto = 3*rtt */
+               peer->rttvar_us = max(peer->mdev_us, rxrpc_rto_min_us(peer));
+               peer->mdev_max_us = peer->rttvar_us;
+       }
+
+       peer->srtt_us = max(1U, srtt);
+}
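+
+/* Worked example (illustrative numbers, not from a captured trace): with
+ * srtt_us == 800 (srtt == 100us) and a new 120us sample, m = 120 - 100 = 20,
+ * so srtt_us becomes 820 (srtt == 102.5us): the estimate moves one eighth of
+ * the way towards the sample.  mdev_us is likewise nudged a quarter of the
+ * way towards |m|.
+ */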
+
+/*
+ * Calculate rto without backoff.  This is the second half of Van Jacobson's
+ * routine referred to above.
+ */
+static void rxrpc_set_rto(struct rxrpc_peer *peer)
+{
+       u32 rto;
+
+       /* 1. If the rtt variance happened to be less than 50 msec, it is a
+        *    hallucination.  It cannot be less, due to the utterly erratic ACK
+        *    generation made at least by Solaris and FreeBSD.  "Erratic ACKs"
+        *    have _nothing_ to do with delayed acks, because at cwnd>2 the true
+        *    delack timeout is invisible.  Actually, Linux-2.4 also generates
+        *    erratic ACKs in some circumstances.
+        */
+       rto = __rxrpc_set_rto(peer);
+
+       /* 2. Fixups made earlier cannot be right.
+        *    If we do not estimate RTO correctly without them,
+        *    the whole algorithm is broken and should be replaced
+        *    with a correct one, which is exactly what we aim to do.
+        */
+
+       /* NOTE: clamping at RXRPC_RTO_MIN is not required; the current
+        * algorithm guarantees that rto is higher.
+        */
+       peer->rto_j = rxrpc_bound_rto(rto);
+}
+
+static void rxrpc_ack_update_rtt(struct rxrpc_peer *peer, long rtt_us)
+{
+       if (rtt_us < 0)
+               return;
+
+       //rxrpc_update_rtt_min(peer, rtt_us);
+       rxrpc_rtt_estimator(peer, rtt_us);
+       rxrpc_set_rto(peer);
+
+       /* RFC6298: only reset backoff on valid RTT measurement. */
+       peer->backoff = 0;
+}
+
+/*
+ * Add RTT information to cache.  This is called in softirq mode and has
+ * exclusive access to the peer RTT data.
+ */
+void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
+                       rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
+                       ktime_t send_time, ktime_t resp_time)
+{
+       struct rxrpc_peer *peer = call->peer;
+       s64 rtt_us;
+
+       rtt_us = ktime_to_us(ktime_sub(resp_time, send_time));
+       if (rtt_us < 0)
+               return;
+
+       spin_lock(&peer->rtt_input_lock);
+       rxrpc_ack_update_rtt(peer, rtt_us);
+       if (peer->rtt_count < 3)
+               peer->rtt_count++;
+       spin_unlock(&peer->rtt_input_lock);
+
+       trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial,
+                          peer->srtt_us >> 3, peer->rto_j);
+}
+
+/*
+ * Get the retransmission timeout to set in jiffies, backing it off each time
+ * we retransmit.
+ */
+unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *peer, bool retrans)
+{
+       u64 timo_j;
+       u8 backoff = READ_ONCE(peer->backoff);
+
+       timo_j = peer->rto_j;
+       timo_j <<= backoff;
+       if (retrans && timo_j * 2 <= RXRPC_RTO_MAX)
+               WRITE_ONCE(peer->backoff, backoff + 1);
+
+       if (timo_j < 1)
+               timo_j = 1;
+
+       return timo_j;
+}
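+
+/* Example (hypothetical rto_j of 250 jiffies): successive retransmissions see
+ * timeouts of 250, 500, 1000, ... jiffies, until doubling again would exceed
+ * RXRPC_RTO_MAX, at which point the backoff shift stops growing.
+ */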
+
+void rxrpc_peer_init_rtt(struct rxrpc_peer *peer)
+{
+       peer->rto_j     = RXRPC_TIMEOUT_INIT;
+       peer->mdev_us   = jiffies_to_usecs(RXRPC_TIMEOUT_INIT);
+       peer->backoff   = 0;
+       //minmax_reset(&peer->rtt_min, rxrpc_jiffies32, ~0U);
+}
index 098f1f9ec53ba10642dc0c3d2c608a44de688e5b..52a24d4ef5d8a841a6e0f1ad58c14873ae03e666 100644 (file)
@@ -1148,7 +1148,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
        ret = rxkad_decrypt_ticket(conn, skb, ticket, ticket_len, &session_key,
                                   &expiry, _abort_code);
        if (ret < 0)
-               goto temporary_error_free_resp;
+               goto temporary_error_free_ticket;
 
        /* use the session key from inside the ticket to decrypt the
         * response */
@@ -1230,7 +1230,6 @@ protocol_error:
 
 temporary_error_free_ticket:
        kfree(ticket);
-temporary_error_free_resp:
        kfree(response);
 temporary_error:
        /* Ignore the response packet if we got a temporary error such as
index 0fcf157aa09f8350b156234693b700ac3a89dbcf..5e9c43d4a314ba854acbc3d9f01f70100f0b0b3f 100644 (file)
@@ -66,15 +66,14 @@ static int rxrpc_wait_for_tx_window_waitall(struct rxrpc_sock *rx,
                                            struct rxrpc_call *call)
 {
        rxrpc_seq_t tx_start, tx_win;
-       signed long rtt2, timeout;
-       u64 rtt;
+       signed long rtt, timeout;
 
-       rtt = READ_ONCE(call->peer->rtt);
-       rtt2 = nsecs_to_jiffies64(rtt) * 2;
-       if (rtt2 < 2)
-               rtt2 = 2;
+       rtt = READ_ONCE(call->peer->srtt_us) >> 3;
+       rtt = usecs_to_jiffies(rtt) * 2;
+       if (rtt < 2)
+               rtt = 2;
 
-       timeout = rtt2;
+       timeout = rtt;
        tx_start = READ_ONCE(call->tx_hard_ack);
 
        for (;;) {
@@ -92,7 +91,7 @@ static int rxrpc_wait_for_tx_window_waitall(struct rxrpc_sock *rx,
                        return -EINTR;
 
                if (tx_win != tx_start) {
-                       timeout = rtt2;
+                       timeout = rtt;
                        tx_start = tx_win;
                }
 
@@ -271,16 +270,9 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
                _debug("need instant resend %d", ret);
                rxrpc_instant_resend(call, ix);
        } else {
-               unsigned long now = jiffies, resend_at;
+               unsigned long now = jiffies;
+               unsigned long resend_at = now + call->peer->rto_j;
 
-               if (call->peer->rtt_usage > 1)
-                       resend_at = nsecs_to_jiffies(call->peer->rtt * 3 / 2);
-               else
-                       resend_at = rxrpc_resend_timeout;
-               if (resend_at < 1)
-                       resend_at = 1;
-
-               resend_at += now;
                WRITE_ONCE(call->resend_at, resend_at);
                rxrpc_reduce_call_timer(call, resend_at, now,
                                        rxrpc_timer_set_for_send);
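
With the per-peer RTO now kept in jiffies, the resend deadline above collapses to a single addition, and the backoff helper added in rtt.c supplies the doubling where a true retransmission is being scheduled. A sketch of that call pattern (hedged; locking and timer plumbing are omitted, and whether a given site passes retrans=true follows the rtt.c semantics above):

    unsigned long now = jiffies;
    unsigned long resend_at = now + rxrpc_get_rto_backoff(call->peer, true);

    WRITE_ONCE(call->resend_at, resend_at);
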
index 2bbb38161851a48965272ef2984e23f76b4cb308..18dade4e6f9a0caa4143fcb44b7b697c9e6b2745 100644 (file)
@@ -71,15 +71,6 @@ static struct ctl_table rxrpc_sysctl_table[] = {
                .extra1         = (void *)&one_jiffy,
                .extra2         = (void *)&max_jiffies,
        },
-       {
-               .procname       = "resend_timeout",
-               .data           = &rxrpc_resend_timeout,
-               .maxlen         = sizeof(unsigned long),
-               .mode           = 0644,
-               .proc_handler   = proc_doulongvec_ms_jiffies_minmax,
-               .extra1         = (void *)&one_jiffy,
-               .extra2         = (void *)&max_jiffies,
-       },
 
        /* Non-time values */
        {
index 1a766393be625a694a85d40d10e2fe990c4fdbb7..20577355235a65769f71555ba357c4a027e5c5bc 100644 (file)
@@ -199,6 +199,9 @@ static int tcf_ct_flow_table_add_action_nat(struct net *net,
        const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
        struct nf_conntrack_tuple target;
 
+       if (!(ct->status & IPS_NAT_MASK))
+               return 0;
+
        nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);
 
        switch (tuple->src.l3num) {
index 55bd1429678f996a328f2be6fdb0b7e0de771168..0a7ecc292bd31b6dbbc98c2c8ef29d86745f7656 100644 (file)
@@ -2070,6 +2070,7 @@ replay:
                err = PTR_ERR(block);
                goto errout;
        }
+       block->classid = parent;
 
        chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
        if (chain_index > TC_ACT_EXT_VAL_MASK) {
@@ -2612,12 +2613,10 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
                        return skb->len;
 
                parent = tcm->tcm_parent;
-               if (!parent) {
+               if (!parent)
                        q = dev->qdisc;
-                       parent = q->handle;
-               } else {
+               else
                        q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
-               }
                if (!q)
                        goto out;
                cops = q->ops->cl_ops;
@@ -2633,6 +2632,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
                block = cops->tcf_block(q, cl, NULL);
                if (!block)
                        goto out;
+               parent = block->classid;
                if (tcf_block_shared(block))
                        q = NULL;
        }
@@ -3523,6 +3523,16 @@ static void tcf_sample_get_group(struct flow_action_entry *entry,
 #endif
 }
 
+static enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats)
+{
+       if (WARN_ON_ONCE(hw_stats > TCA_ACT_HW_STATS_ANY))
+               return FLOW_ACTION_HW_STATS_DONT_CARE;
+       else if (!hw_stats)
+               return FLOW_ACTION_HW_STATS_DISABLED;
+
+       return hw_stats;
+}
+
 int tc_setup_flow_action(struct flow_action *flow_action,
                         const struct tcf_exts *exts)
 {
@@ -3546,7 +3556,7 @@ int tc_setup_flow_action(struct flow_action *flow_action,
                if (err)
                        goto err_out_locked;
 
-               entry->hw_stats = act->hw_stats;
+               entry->hw_stats = tc_act_hw_stats(act->hw_stats);
 
                if (is_tcf_gact_ok(act)) {
                        entry->id = FLOW_ACTION_ACCEPT;
@@ -3614,7 +3624,7 @@ int tc_setup_flow_action(struct flow_action *flow_action,
                                entry->mangle.mask = tcf_pedit_mask(act, k);
                                entry->mangle.val = tcf_pedit_val(act, k);
                                entry->mangle.offset = tcf_pedit_offset(act, k);
-                               entry->hw_stats = act->hw_stats;
+                               entry->hw_stats = tc_act_hw_stats(act->hw_stats);
                                entry = &flow_action->entries[++j];
                        }
                } else if (is_tcf_csum(act)) {
index a36974e9c601eca97ea98501f0c05f75d0a9fe3e..1bcf8fbfd40e4c56baddd8b8f0fe1bb804cc5b4e 100644 (file)
@@ -323,7 +323,8 @@ static void choke_reset(struct Qdisc *sch)
 
        sch->q.qlen = 0;
        sch->qstats.backlog = 0;
-       memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
+       if (q->tab)
+               memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
        q->head = q->tail = 0;
        red_restart(&q->vars);
 }
index 968519ff36e97734e495d90331d3e3197660b8f6..436160be9c180ceedaeb74126e40c7e24bf7ccba 100644 (file)
@@ -416,7 +416,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
                q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
 
        if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
-               q->drop_batch_size = min(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
+               q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
 
        if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
                q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));
index a9da8776bf5b59ca1195006fc30c94b877071801..fb760cee824e4cb1807651346d333e2d4590a663 100644 (file)
@@ -297,9 +297,9 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
                        goto flow_error;
                }
                q->flows_cnt = nla_get_u32(tb[TCA_FQ_PIE_FLOWS]);
-               if (!q->flows_cnt || q->flows_cnt > 65536) {
+               if (!q->flows_cnt || q->flows_cnt >= 65536) {
                        NL_SET_ERR_MSG_MOD(extack,
-                                          "Number of flows must be < 65536");
+                                          "Number of flows must range in [1..65535]");
                        goto flow_error;
                }
        }
index c787d4d46017b4b41b8eb6d41f2b0a44560ff5bf..5a6def5e4e6df2e7b66c88aa877c7318270d48be 100644 (file)
@@ -637,6 +637,15 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
        if (ctl->divisor &&
            (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
                return -EINVAL;
+
+       /* slot->allot is a short, make sure quantum is not too big. */
+       if (ctl->quantum) {
+               unsigned int scaled = SFQ_ALLOT_SIZE(ctl->quantum);
+
+               if (scaled <= 0 || scaled > SHRT_MAX)
+                       return -EINVAL;
+       }
+
        if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
                                        ctl_v1->Wlog))
                return -EINVAL;
index 0fb10abf757962c1e3e6feb8c68dedd9a1808f1b..7a5e4c454715617cb57c6db7de7fdaa9e6886d40 100644 (file)
@@ -169,6 +169,9 @@ static int skbprio_change(struct Qdisc *sch, struct nlattr *opt,
 {
        struct tc_skbprio_qopt *ctl = nla_data(opt);
 
+       if (opt->nla_len != nla_attr_size(sizeof(*ctl)))
+               return -EINVAL;
+
        sch->limit = ctl->limit;
        return 0;
 }
index 6e2eb1dd64ed0fef50c581d6a334b0f599cf6b99..68934438ee19c62e80eda2eb9ab222d6af4e8b80 100644 (file)
@@ -31,7 +31,7 @@ menuconfig IP_SCTP
          homing at either or both ends of an association."
 
          To compile this protocol support as a module, choose M here: the
-         module will be called sctp. Debug messages are handeled by the
+         module will be called sctp. Debug messages are handled by the
          kernel's dynamic debugging framework.
 
          If in doubt, say N.
index 2bc29463e1dcf15b99a81ffa24c9a2d727e670dc..9f36fe911d082e9fa3e3de7096d56323a93d4ba6 100644 (file)
@@ -1523,9 +1523,17 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type,
                        timeout = asoc->timeouts[cmd->obj.to];
                        BUG_ON(!timeout);
 
-                       timer->expires = jiffies + timeout;
-                       sctp_association_hold(asoc);
-                       add_timer(timer);
+                       /*
+                        * SCTP has a hard time with timer starts.  Because we process
+                        * timer starts as side effects, it can be hard to tell if we
+                        * have already started a timer or not, which leads to BUG halts
+                        * when we call add_timer.  So instead of just starting a timer
+                        * here, use timer_reduce(), which starts a non-pending timer and
+                        * otherwise shortens it to the earlier expiration time.
+                        */
+                       if (!timer_pending(timer))
+                               sctp_association_hold(asoc);
+                       timer_reduce(timer, jiffies + timeout);
                        break;
 
                case SCTP_CMD_TIMER_RESTART:
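
The timer_reduce() call is what makes the above safe: it starts a non-pending timer, and otherwise only pulls an already-pending expiry earlier, never later. A condensed sketch of the pattern (demo_hold() is a hypothetical stand-in for sctp_association_hold()):

    static void demo_hold(void) { /* stands in for sctp_association_hold() */ }

    static void demo_start_timer(struct timer_list *t, unsigned long timeout)
    {
            if (!timer_pending(t))  /* take the ref only on a fresh start */
                    demo_hold();
            timer_reduce(t, jiffies + timeout);
    }
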
index 26788f4a3b9eb66091e03dec907a8b4e4ddc8554..e86620fbd90fd0ecc80b499c6ae058aa4476cc99 100644 (file)
@@ -1856,12 +1856,13 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
        /* Update the content of current association. */
        sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
        sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
-       if (sctp_state(asoc, SHUTDOWN_PENDING) &&
+       if ((sctp_state(asoc, SHUTDOWN_PENDING) ||
+            sctp_state(asoc, SHUTDOWN_SENT)) &&
            (sctp_sstate(asoc->base.sk, CLOSING) ||
             sock_flag(asoc->base.sk, SOCK_DEAD))) {
-               /* if were currently in SHUTDOWN_PENDING, but the socket
-                * has been closed by user, don't transition to ESTABLISHED.
-                * Instead trigger SHUTDOWN bundled with COOKIE_ACK.
+               /* If the socket has been closed by user, don't
+                * transition to ESTABLISHED. Instead trigger SHUTDOWN
+                * bundled with COOKIE_ACK.
                 */
                sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
                return sctp_sf_do_9_2_start_shutdown(net, ep, asoc,
index c82dbdcf13f2ffe13d10a2acac67ed26b1f6c0ea..77d5c36a8991c853bcd05a35bcd8ba48385cb165 100644 (file)
@@ -343,6 +343,9 @@ void sctp_ulpevent_nofity_peer_addr_change(struct sctp_transport *transport,
        struct sockaddr_storage addr;
        struct sctp_ulpevent *event;
 
+       if (asoc->state < SCTP_STATE_ESTABLISHED)
+               return;
+
        memset(&addr, 0, sizeof(struct sockaddr_storage));
        memcpy(&addr, &transport->ipaddr, transport->af_specific->sockaddr_len);
 
index 25fbd8d9de74c710c215f29043277e21e18d57cf..ac5cac0dd24b98edcc6cff89ab174329131de7c9 100644 (file)
@@ -2032,7 +2032,6 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
        struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
        struct kvec *head = rqstp->rq_rcv_buf.head;
        struct rpc_auth *auth = cred->cr_auth;
-       unsigned int savedlen = rcv_buf->len;
        u32 offset, opaque_len, maj_stat;
        __be32 *p;
 
@@ -2043,9 +2042,9 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
        offset = (u8 *)(p) - (u8 *)head->iov_base;
        if (offset + opaque_len > rcv_buf->len)
                goto unwrap_failed;
-       rcv_buf->len = offset + opaque_len;
 
-       maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
+       maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset,
+                             offset + opaque_len, rcv_buf);
        if (maj_stat == GSS_S_CONTEXT_EXPIRED)
                clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
        if (maj_stat != GSS_S_COMPLETE)
@@ -2059,10 +2058,9 @@ gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
         */
        xdr_init_decode(xdr, rcv_buf, p, rqstp);
 
-       auth->au_rslack = auth->au_verfsize + 2 +
-                         XDR_QUADLEN(savedlen - rcv_buf->len);
-       auth->au_ralign = auth->au_verfsize + 2 +
-                         XDR_QUADLEN(savedlen - rcv_buf->len);
+       auth->au_rslack = auth->au_verfsize + 2 + ctx->gc_gss_ctx->slack;
+       auth->au_ralign = auth->au_verfsize + 2 + ctx->gc_gss_ctx->align;
+
        return 0;
 unwrap_failed:
        trace_rpcgss_unwrap_failed(task);
index 6f2d30d7b766d510cdbe1c2a932791338088ac28..e7180da1fc6a144742eb2f48f2ad2750773b72b1 100644 (file)
@@ -851,8 +851,8 @@ out_err:
 }
 
 u32
-gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
-                    u32 *headskip, u32 *tailskip)
+gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
+                    struct xdr_buf *buf, u32 *headskip, u32 *tailskip)
 {
        struct xdr_buf subbuf;
        u32 ret = 0;
@@ -881,7 +881,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
 
        /* create a segment skipping the header and leaving out the checksum */
        xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
-                                   (buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
+                                   (len - offset - GSS_KRB5_TOK_HDR_LEN -
                                     kctx->gk5e->cksumlength));
 
        nblocks = (subbuf.len + blocksize - 1) / blocksize;
@@ -926,7 +926,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
                goto out_err;
 
        /* Get the packet's hmac value */
-       ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
+       ret = read_bytes_from_xdr_buf(buf, len - kctx->gk5e->cksumlength,
                                      pkt_hmac, kctx->gk5e->cksumlength);
        if (ret)
                goto out_err;
index 6c1920eed7717968d5651b68d132d6fe9531a7c8..cf0fd170ac1893193f79bdf78204343ae02f4ad6 100644 (file)
@@ -261,7 +261,9 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
 }
 
 static u32
-gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
+gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, int len,
+                      struct xdr_buf *buf, unsigned int *slack,
+                      unsigned int *align)
 {
        int                     signalg;
        int                     sealalg;
@@ -279,12 +281,13 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
        u32                     conflen = kctx->gk5e->conflen;
        int                     crypt_offset;
        u8                      *cksumkey;
+       unsigned int            saved_len = buf->len;
 
        dprintk("RPC:       gss_unwrap_kerberos\n");
 
        ptr = (u8 *)buf->head[0].iov_base + offset;
        if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
-                                       buf->len - offset))
+                                       len - offset))
                return GSS_S_DEFECTIVE_TOKEN;
 
        if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
@@ -324,6 +327,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
            (!kctx->initiate && direction != 0))
                return GSS_S_BAD_SIG;
 
+       buf->len = len;
        if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
                struct crypto_sync_skcipher *cipher;
                int err;
@@ -376,11 +380,15 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
        data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
        memmove(orig_start, data_start, data_len);
        buf->head[0].iov_len -= (data_start - orig_start);
-       buf->len -= (data_start - orig_start);
+       buf->len = len - (data_start - orig_start);
 
        if (gss_krb5_remove_padding(buf, blocksize))
                return GSS_S_DEFECTIVE_TOKEN;
 
+       /* slack must include room for krb5 padding */
+       *slack = XDR_QUADLEN(saved_len - buf->len);
+       /* The GSS blob always precedes the RPC message payload */
+       *align = *slack;
        return GSS_S_COMPLETE;
 }
 
@@ -486,7 +494,9 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
 }
 
 static u32
-gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
+gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, int len,
+                      struct xdr_buf *buf, unsigned int *slack,
+                      unsigned int *align)
 {
        time64_t        now;
        u8              *ptr;
@@ -532,7 +542,7 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
        if (rrc != 0)
                rotate_left(offset + 16, buf, rrc);
 
-       err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf,
+       err = (*kctx->gk5e->decrypt_v2)(kctx, offset, len, buf,
                                        &headskip, &tailskip);
        if (err)
                return GSS_S_FAILURE;
@@ -542,7 +552,7 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
         * it against the original
         */
        err = read_bytes_from_xdr_buf(buf,
-                               buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip,
+                               len - GSS_KRB5_TOK_HDR_LEN - tailskip,
                                decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
        if (err) {
                dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
@@ -568,18 +578,19 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
         * Note that buf->head[0].iov_len may indicate the available
         * head buffer space rather than that actually occupied.
         */
-       movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len);
+       movelen = min_t(unsigned int, buf->head[0].iov_len, len);
        movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
-       if (offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
-           buf->head[0].iov_len)
-               return GSS_S_FAILURE;
+       BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
+                                                       buf->head[0].iov_len);
        memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
        buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
-       buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;
+       buf->len = len - (GSS_KRB5_TOK_HDR_LEN + headskip);
 
        /* Trim off the trailing "extra count" and checksum blob */
-       buf->len -= ec + GSS_KRB5_TOK_HDR_LEN + tailskip;
+       xdr_buf_trim(buf, ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
 
+       *align = XDR_QUADLEN(GSS_KRB5_TOK_HDR_LEN + headskip);
+       *slack = *align + XDR_QUADLEN(ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
        return GSS_S_COMPLETE;
 }
 
@@ -603,7 +614,8 @@ gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
 }
 
 u32
-gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
+gss_unwrap_kerberos(struct gss_ctx *gctx, int offset,
+                   int len, struct xdr_buf *buf)
 {
        struct krb5_ctx *kctx = gctx->internal_ctx_id;
 
@@ -613,9 +625,11 @@ gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
        case ENCTYPE_DES_CBC_RAW:
        case ENCTYPE_DES3_CBC_RAW:
        case ENCTYPE_ARCFOUR_HMAC:
-               return gss_unwrap_kerberos_v1(kctx, offset, buf);
+               return gss_unwrap_kerberos_v1(kctx, offset, len, buf,
+                                             &gctx->slack, &gctx->align);
        case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
        case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
-               return gss_unwrap_kerberos_v2(kctx, offset, buf);
+               return gss_unwrap_kerberos_v2(kctx, offset, len, buf,
+                                             &gctx->slack, &gctx->align);
        }
 }
index db550bfc2642eea9e12bc845239009996a73c599..69316ab1b9fac526b4d6d1e23336906d7141d298 100644 (file)
@@ -411,10 +411,11 @@ gss_wrap(struct gss_ctx   *ctx_id,
 u32
 gss_unwrap(struct gss_ctx      *ctx_id,
           int                  offset,
+          int                  len,
           struct xdr_buf       *buf)
 {
        return ctx_id->mech_type->gm_ops
-               ->gss_unwrap(ctx_id, offset, buf);
+               ->gss_unwrap(ctx_id, offset, len, buf);
 }
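
Callers of gss_unwrap() now pass the end of the wrapped region explicitly instead of temporarily clamping buf->len around the call. The before/after calling convention, condensed from the svcauth_gss hunk further down:

    /* old: shrink the buffer, unwrap, then restore */
    saved_len = buf->len;
    buf->len = priv_len;
    maj_stat = gss_unwrap(ctx, 0, buf);
    buf->len = saved_len;

    /* new: the length travels as an argument */
    maj_stat = gss_unwrap(ctx, 0, priv_len, buf);
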
 
 
index 54ae5be62f6a5d878878ffb396870a76ebff2d7a..50d93c49ef1af670a0cd5bd82a4d69abaa926740 100644 (file)
@@ -906,7 +906,7 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g
        if (svc_getnl(&buf->head[0]) != seq)
                goto out;
        /* trim off the mic and padding at the end before returning */
-       buf->len -= 4 + round_up_to_quad(mic.len);
+       xdr_buf_trim(buf, round_up_to_quad(mic.len) + 4);
        stat = 0;
 out:
        kfree(mic.data);
@@ -934,7 +934,7 @@ static int
 unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
 {
        u32 priv_len, maj_stat;
-       int pad, saved_len, remaining_len, offset;
+       int pad, remaining_len, offset;
 
        clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
 
@@ -954,12 +954,8 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
        buf->len -= pad;
        fix_priv_head(buf, pad);
 
-       /* Maybe it would be better to give gss_unwrap a length parameter: */
-       saved_len = buf->len;
-       buf->len = priv_len;
-       maj_stat = gss_unwrap(ctx, 0, buf);
+       maj_stat = gss_unwrap(ctx, 0, priv_len, buf);
        pad = priv_len - buf->len;
-       buf->len = saved_len;
        buf->len -= pad;
        /* The upper layers assume the buffer is aligned on 4-byte boundaries.
         * In the krb5p case, at least, the data ends up offset, so we need to
index 325a0858700f6c4eba5d9a0c0fe1c7800d42d541..61b21dafd7c0da1538ec8705d78372d67090dbb2 100644 (file)
@@ -880,6 +880,22 @@ EXPORT_SYMBOL_GPL(rpc_shutdown_client);
 /*
  * Free an RPC client
  */
+static void rpc_free_client_work(struct work_struct *work)
+{
+       struct rpc_clnt *clnt = container_of(work, struct rpc_clnt, cl_work);
+
+       /* These might block on processes that might allocate memory,
+        * so they cannot be called in rpciod and are handled
+        * separately here.
+        */
+       rpc_clnt_debugfs_unregister(clnt);
+       rpc_free_clid(clnt);
+       rpc_clnt_remove_pipedir(clnt);
+       xprt_put(rcu_dereference_raw(clnt->cl_xprt));
+
+       kfree(clnt);
+       rpciod_down();
+}
 static struct rpc_clnt *
 rpc_free_client(struct rpc_clnt *clnt)
 {
@@ -890,17 +906,14 @@ rpc_free_client(struct rpc_clnt *clnt)
                        rcu_dereference(clnt->cl_xprt)->servername);
        if (clnt->cl_parent != clnt)
                parent = clnt->cl_parent;
-       rpc_clnt_debugfs_unregister(clnt);
-       rpc_clnt_remove_pipedir(clnt);
        rpc_unregister_client(clnt);
        rpc_free_iostats(clnt->cl_metrics);
        clnt->cl_metrics = NULL;
-       xprt_put(rcu_dereference_raw(clnt->cl_xprt));
        xprt_iter_destroy(&clnt->cl_xpi);
-       rpciod_down();
        put_cred(clnt->cl_cred);
-       rpc_free_clid(clnt);
-       kfree(clnt);
+
+       INIT_WORK(&clnt->cl_work, rpc_free_client_work);
+       schedule_work(&clnt->cl_work);
        return parent;
 }
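
Deferring the blocking parts of teardown to a workqueue is the usual remedy when the releasing context (rpciod here) must not sleep. A generic sketch of the shape (demo_client is illustrative, not rpc_clnt):

    struct demo_client {
            struct work_struct work;
            /* ... resources whose release may block ... */
    };

    static void demo_free_work(struct work_struct *w)
    {
            struct demo_client *c = container_of(w, struct demo_client, work);

            /* blocking cleanup runs here, in process context */
            kfree(c);
    }

    static void demo_release(struct demo_client *c)
    {
            INIT_WORK(&c->work, demo_free_work);
            schedule_work(&c->work);        /* returns without blocking */
    }
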
 
@@ -2420,6 +2433,11 @@ rpc_check_timeout(struct rpc_task *task)
 {
        struct rpc_clnt *clnt = task->tk_client;
 
+       if (RPC_SIGNALLED(task)) {
+               rpc_call_rpcerror(task, -ERESTARTSYS);
+               return;
+       }
+
        if (xprt_adjust_timeout(task->tk_rqstp) == 0)
                return;
 
@@ -2808,8 +2826,7 @@ int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
        task = rpc_call_null_helper(clnt, xprt, NULL,
                        RPC_TASK_SOFT|RPC_TASK_SOFTCONN|RPC_TASK_ASYNC|RPC_TASK_NULLCREDS,
                        &rpc_cb_add_xprt_call_ops, data);
-       if (IS_ERR(task))
-               return PTR_ERR(task);
+
        rpc_put_task(task);
 success:
        return 1;
index 15b58c5144f9eeaccb34a0525eaedac41f520f37..6f7d82fb1eb0a6748e37ab4e57d50650af7caa28 100644 (file)
@@ -1150,6 +1150,47 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
 }
 EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
 
+/**
+ * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
+ * @buf: buf to be trimmed
+ * @len: number of bytes to reduce "buf" by
+ *
+ * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
+ * that it's possible that we'll trim less than that amount if the xdr_buf is
+ * too small, or if (for instance) it's all in the head and the parser has
+ * already read too far into it.
+ */
+void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
+{
+       size_t cur;
+       unsigned int trim = len;
+
+       if (buf->tail[0].iov_len) {
+               cur = min_t(size_t, buf->tail[0].iov_len, trim);
+               buf->tail[0].iov_len -= cur;
+               trim -= cur;
+               if (!trim)
+                       goto fix_len;
+       }
+
+       if (buf->page_len) {
+               cur = min_t(unsigned int, buf->page_len, trim);
+               buf->page_len -= cur;
+               trim -= cur;
+               if (!trim)
+                       goto fix_len;
+       }
+
+       if (buf->head[0].iov_len) {
+               cur = min_t(size_t, buf->head[0].iov_len, trim);
+               buf->head[0].iov_len -= cur;
+               trim -= cur;
+       }
+fix_len:
+       buf->len -= (len - trim);
+}
+EXPORT_SYMBOL_GPL(xdr_buf_trim);
+
 static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
 {
        unsigned int this_len;
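
xdr_buf_trim() consumes from the tail first, then the page data, then the head, so trailing MICs and padding vanish without disturbing earlier payload. A worked example under assumed segment sizes:

    /* assume: head 20 bytes, page_len 100, tail 5; buf.len == 125 */
    xdr_buf_trim(&buf, 8);
    /* after: tail[0].iov_len == 0, page_len == 97, buf.len == 117 */
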
index 4a81e6995d3e46442e68d97fbdd0cc1344255f11..3c627dc685cc85e5f415e2248d2ac95d416b84c4 100644 (file)
@@ -388,7 +388,9 @@ static int rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
        } while (nsegs);
 
 done:
-       return xdr_stream_encode_item_absent(xdr);
+       if (xdr_stream_encode_item_absent(xdr) < 0)
+               return -EMSGSIZE;
+       return 0;
 }
 
 /* Register and XDR encode the Write list. Supports encoding a list
@@ -454,7 +456,9 @@ static int rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt,
        *segcount = cpu_to_be32(nchunks);
 
 done:
-       return xdr_stream_encode_item_absent(xdr);
+       if (xdr_stream_encode_item_absent(xdr) < 0)
+               return -EMSGSIZE;
+       return 0;
 }
 
 /* Register and XDR encode the Reply chunk. Supports encoding an array
@@ -480,8 +484,11 @@ static int rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
        int nsegs, nchunks;
        __be32 *segcount;
 
-       if (wtype != rpcrdma_replych)
-               return xdr_stream_encode_item_absent(xdr);
+       if (wtype != rpcrdma_replych) {
+               if (xdr_stream_encode_item_absent(xdr) < 0)
+                       return -EMSGSIZE;
+               return 0;
+       }
 
        seg = req->rl_segments;
        nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
index cdd84c09df10b4a218953c0f07287ca7cec95cb9..05c4d3a9cda27fc6971bb40d0f088e76acd93df4 100644 (file)
@@ -289,6 +289,7 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
        case RDMA_CM_EVENT_DISCONNECTED:
                ep->re_connect_status = -ECONNABORTED;
 disconnected:
+               xprt_force_disconnect(xprt);
                return rpcrdma_ep_destroy(ep);
        default:
                break;
@@ -1355,8 +1356,8 @@ int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
                --ep->re_send_count;
        }
 
+       trace_xprtrdma_post_send(req);
        rc = frwr_send(r_xprt, req);
-       trace_xprtrdma_post_send(req, rc);
        if (rc)
                return -ENOTCONN;
        return 0;
index 87466607097f18f5d6ea658e7123cf7ae949d775..e370ad0edd768c6826be94d20cd37b1a9e2f66e0 100644 (file)
@@ -1739,22 +1739,21 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
        return 0;
 }
 
-static void tipc_sk_send_ack(struct tipc_sock *tsk)
+static struct sk_buff *tipc_sk_build_ack(struct tipc_sock *tsk)
 {
        struct sock *sk = &tsk->sk;
-       struct net *net = sock_net(sk);
        struct sk_buff *skb = NULL;
        struct tipc_msg *msg;
        u32 peer_port = tsk_peer_port(tsk);
        u32 dnode = tsk_peer_node(tsk);
 
        if (!tipc_sk_connected(sk))
-               return;
+               return NULL;
        skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
                              dnode, tsk_own_node(tsk), peer_port,
                              tsk->portid, TIPC_OK);
        if (!skb)
-               return;
+               return NULL;
        msg = buf_msg(skb);
        msg_set_conn_ack(msg, tsk->rcv_unacked);
        tsk->rcv_unacked = 0;
@@ -1764,7 +1763,19 @@ static void tipc_sk_send_ack(struct tipc_sock *tsk)
                tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
                msg_set_adv_win(msg, tsk->rcv_win);
        }
-       tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
+       return skb;
+}
+
+static void tipc_sk_send_ack(struct tipc_sock *tsk)
+{
+       struct sk_buff *skb;
+
+       skb = tipc_sk_build_ack(tsk);
+       if (!skb)
+               return;
+
+       tipc_node_xmit_skb(sock_net(&tsk->sk), skb, tsk_peer_node(tsk),
+                          msg_link_selector(buf_msg(skb)));
 }
 
 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
@@ -1938,7 +1949,6 @@ static int tipc_recvstream(struct socket *sock, struct msghdr *m,
        bool peek = flags & MSG_PEEK;
        int offset, required, copy, copied = 0;
        int hlen, dlen, err, rc;
-       bool ack = false;
        long timeout;
 
        /* Catch invalid receive attempts */
@@ -1983,7 +1993,6 @@ static int tipc_recvstream(struct socket *sock, struct msghdr *m,
 
                /* Copy data if msg ok, otherwise return error/partial data */
                if (likely(!err)) {
-                       ack = msg_ack_required(hdr);
                        offset = skb_cb->bytes_read;
                        copy = min_t(int, dlen - offset, buflen - copied);
                        rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
@@ -2011,7 +2020,7 @@ static int tipc_recvstream(struct socket *sock, struct msghdr *m,
 
                /* Send connection flow control advertisement when applicable */
                tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
-               if (ack || tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
+               if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
                        tipc_sk_send_ack(tsk);
 
                /* Exit if all requested data or FIN/error received */
@@ -2105,9 +2114,11 @@ static void tipc_sk_proto_rcv(struct sock *sk,
  * tipc_sk_filter_connect - check incoming message for a connection-based socket
  * @tsk: TIPC socket
  * @skb: pointer to message buffer.
+ * @xmitq: for Nagle ACK if any
  * Returns true if message should be added to receive queue, false otherwise
  */
-static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
+static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb,
+                                  struct sk_buff_head *xmitq)
 {
        struct sock *sk = &tsk->sk;
        struct net *net = sock_net(sk);
@@ -2171,8 +2182,17 @@ static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
                if (!skb_queue_empty(&sk->sk_write_queue))
                        tipc_sk_push_backlog(tsk);
                /* Accept only connection-based messages sent by peer */
-               if (likely(con_msg && !err && pport == oport && pnode == onode))
+               if (likely(con_msg && !err && pport == oport &&
+                          pnode == onode)) {
+                       if (msg_ack_required(hdr)) {
+                               struct sk_buff *skb;
+
+                               skb = tipc_sk_build_ack(tsk);
+                               if (skb)
+                                       __skb_queue_tail(xmitq, skb);
+                       }
                        return true;
+               }
                if (!tsk_peer_msg(tsk, hdr))
                        return false;
                if (!err)
@@ -2267,7 +2287,7 @@ static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
        while ((skb = __skb_dequeue(&inputq))) {
                hdr = buf_msg(skb);
                limit = rcvbuf_limit(sk, skb);
-               if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) ||
+               if ((sk_conn && !tipc_sk_filter_connect(tsk, skb, xmitq)) ||
                    (!sk_conn && msg_connected(hdr)) ||
                    (!grp && msg_in_group(hdr)))
                        err = TIPC_ERR_NO_PORT;
index aa015c233898c512aa07ae305d200ce0c89258c8..6ebbec1bedd1af7a4120942e9c7689d4138da8d2 100644 (file)
@@ -96,6 +96,16 @@ void tipc_sub_get(struct tipc_subscription *subscription);
                (swap_ ? swab32(val__) : val__);                        \
        })
 
+/* tipc_sub_write - write val_ to field_ of struct sub_ in user endian format
+ */
+#define tipc_sub_write(sub_, field_, val_)                             \
+       ({                                                              \
+               struct tipc_subscr *sub__ = sub_;                       \
+               u32 val__ = val_;                                       \
+               int swap_ = !((sub__)->filter & TIPC_FILTER_MASK);      \
+               (sub__)->field_ = swap_ ? swab32(val__) : val__;        \
+       })
+
 /* tipc_evt_write - write val_ to field_ of struct evt_ in user endian format
  */
 #define tipc_evt_write(evt_, field_, val_)                             \
index 3a12fc18239b8184526b027ee9b3afce5893f717..446af7bbd13e68ccc225fca69fbd40dc75c30d1c 100644 (file)
@@ -237,8 +237,8 @@ static void tipc_conn_delete_sub(struct tipc_conn *con, struct tipc_subscr *s)
                if (!s || !memcmp(s, &sub->evt.s, sizeof(*s))) {
                        tipc_sub_unsubscribe(sub);
                        atomic_dec(&tn->subscription_count);
-               } else if (s) {
-                       break;
+                       if (s)
+                               break;
                }
        }
        spin_unlock_bh(&con->sub_lock);
@@ -362,9 +362,10 @@ static int tipc_conn_rcv_sub(struct tipc_topsrv *srv,
 {
        struct tipc_net *tn = tipc_net(srv->net);
        struct tipc_subscription *sub;
+       u32 s_filter = tipc_sub_read(s, filter);
 
-       if (tipc_sub_read(s, filter) & TIPC_SUB_CANCEL) {
-               s->filter &= __constant_ntohl(~TIPC_SUB_CANCEL);
+       if (s_filter & TIPC_SUB_CANCEL) {
+               tipc_sub_write(s, filter, s_filter & ~TIPC_SUB_CANCEL);
                tipc_conn_delete_sub(con, s);
                return 0;
        }
@@ -400,12 +401,15 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
                return -EWOULDBLOCK;
        if (ret == sizeof(s)) {
                read_lock_bh(&sk->sk_callback_lock);
-               ret = tipc_conn_rcv_sub(srv, con, &s);
+               /* RACE: the connection can be closed in the meantime */
+               if (likely(connected(con)))
+                       ret = tipc_conn_rcv_sub(srv, con, &s);
                read_unlock_bh(&sk->sk_callback_lock);
+               if (!ret)
+                       return 0;
        }
-       if (ret < 0)
-               tipc_conn_close(con);
 
+       tipc_conn_close(con);
        return ret;
 }
 
index d6620ad535461a4d04ed5ba90569ce8b7df9f994..28a283f26a8dff24d613e6ed57e5e69d894dae66 100644 (file)
@@ -161,9 +161,11 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb,
                         struct udp_bearer *ub, struct udp_media_addr *src,
                         struct udp_media_addr *dst, struct dst_cache *cache)
 {
-       struct dst_entry *ndst = dst_cache_get(cache);
+       struct dst_entry *ndst;
        int ttl, err = 0;
 
+       local_bh_disable();
+       ndst = dst_cache_get(cache);
        if (dst->proto == htons(ETH_P_IP)) {
                struct rtable *rt = (struct rtable *)ndst;
 
@@ -210,9 +212,11 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb,
                                           src->port, dst->port, false);
 #endif
        }
+       local_bh_enable();
        return err;
 
 tx_error:
+       local_bh_enable();
        kfree_skb(skb);
        return err;
 }
index c98e602a1a2ded6f90743723445dd4b4a6a3a4c2..8c2763eb6aae29589dbdfef3c7cd3f4e01f09c56 100644 (file)
@@ -206,10 +206,12 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
 
        kfree(aead_req);
 
+       spin_lock_bh(&ctx->decrypt_compl_lock);
        pending = atomic_dec_return(&ctx->decrypt_pending);
 
-       if (!pending && READ_ONCE(ctx->async_notify))
+       if (!pending && ctx->async_notify)
                complete(&ctx->async_wait.completion);
+       spin_unlock_bh(&ctx->decrypt_compl_lock);
 }
 
 static int tls_do_decryption(struct sock *sk,
@@ -467,10 +469,12 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
                        ready = true;
        }
 
+       spin_lock_bh(&ctx->encrypt_compl_lock);
        pending = atomic_dec_return(&ctx->encrypt_pending);
 
-       if (!pending && READ_ONCE(ctx->async_notify))
+       if (!pending && ctx->async_notify)
                complete(&ctx->async_wait.completion);
+       spin_unlock_bh(&ctx->encrypt_compl_lock);
 
        if (!ready)
                return;
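
The new spinlock pairs the counter decrement with the async_notify test so the completion handler and the waiter agree on who fires the completion. The two sides of the pattern, condensed (field names abbreviated from the tls_sw context):

    /* completion handler (softirq) */
    spin_lock_bh(&ctx->compl_lock);
    pending = atomic_dec_return(&ctx->pending);
    if (!pending && ctx->async_notify)
            complete(&ctx->async_wait.completion);
    spin_unlock_bh(&ctx->compl_lock);

    /* waiter */
    spin_lock_bh(&ctx->compl_lock);
    ctx->async_notify = true;
    pending = atomic_read(&ctx->pending);
    spin_unlock_bh(&ctx->compl_lock);
    if (pending)
            crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
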
@@ -780,7 +784,7 @@ static int tls_push_record(struct sock *sk, int flags,
 
 static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
                               bool full_record, u8 record_type,
-                              size_t *copied, int flags)
+                              ssize_t *copied, int flags)
 {
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
@@ -796,10 +800,13 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
        psock = sk_psock_get(sk);
        if (!psock || !policy) {
                err = tls_push_record(sk, flags, record_type);
-               if (err && err != -EINPROGRESS) {
+               if (err && sk->sk_err == EBADMSG) {
                        *copied -= sk_msg_free(sk, msg);
                        tls_free_open_rec(sk);
+                       err = -sk->sk_err;
                }
+               if (psock)
+                       sk_psock_put(sk, psock);
                return err;
        }
 more_data:
@@ -822,9 +829,10 @@ more_data:
        switch (psock->eval) {
        case __SK_PASS:
                err = tls_push_record(sk, flags, record_type);
-               if (err && err != -EINPROGRESS) {
+               if (err && sk->sk_err == EBADMSG) {
                        *copied -= sk_msg_free(sk, msg);
                        tls_free_open_rec(sk);
+                       err = -sk->sk_err;
                        goto out_err;
                }
                break;
@@ -914,7 +922,8 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
        unsigned char record_type = TLS_RECORD_TYPE_DATA;
        bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
        bool eor = !(msg->msg_flags & MSG_MORE);
-       size_t try_to_copy, copied = 0;
+       size_t try_to_copy;
+       ssize_t copied = 0;
        struct sk_msg *msg_pl, *msg_en;
        struct tls_rec *rec;
        int required_size;
@@ -924,6 +933,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
        int num_zc = 0;
        int orig_size;
        int ret = 0;
+       int pending;
 
        if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
                return -EOPNOTSUPP;
@@ -1090,13 +1100,19 @@ trim_sgl:
                goto send_end;
        } else if (num_zc) {
                /* Wait for pending encryptions to get completed */
-               smp_store_mb(ctx->async_notify, true);
+               spin_lock_bh(&ctx->encrypt_compl_lock);
+               ctx->async_notify = true;
 
-               if (atomic_read(&ctx->encrypt_pending))
+               pending = atomic_read(&ctx->encrypt_pending);
+               spin_unlock_bh(&ctx->encrypt_compl_lock);
+               if (pending)
                        crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
                else
                        reinit_completion(&ctx->async_wait.completion);
 
+               /* There can be no concurrent accesses, since we have no
+                * pending encrypt operations
+                */
                WRITE_ONCE(ctx->async_notify, false);
 
                if (ctx->async_wait.err) {
@@ -1116,7 +1132,7 @@ send_end:
 
        release_sock(sk);
        mutex_unlock(&tls_ctx->tx_lock);
-       return copied ? copied : ret;
+       return copied > 0 ? copied : ret;
 }
 
 static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
@@ -1130,7 +1146,7 @@ static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
        struct sk_msg *msg_pl;
        struct tls_rec *rec;
        int num_async = 0;
-       size_t copied = 0;
+       ssize_t copied = 0;
        bool full_record;
        int record_room;
        int ret = 0;
@@ -1232,7 +1248,7 @@ wait_for_memory:
        }
 sendpage_end:
        ret = sk_stream_error(sk, flags, ret);
-       return copied ? copied : ret;
+       return copied > 0 ? copied : ret;
 }
 
 int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
@@ -1727,6 +1743,7 @@ int tls_sw_recvmsg(struct sock *sk,
        bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
        bool is_peek = flags & MSG_PEEK;
        int num_async = 0;
+       int pending;
 
        flags |= nonblock;
 
@@ -1889,8 +1906,11 @@ pick_next_record:
 recv_end:
        if (num_async) {
                /* Wait for all previously submitted records to be decrypted */
-               smp_store_mb(ctx->async_notify, true);
-               if (atomic_read(&ctx->decrypt_pending)) {
+               spin_lock_bh(&ctx->decrypt_compl_lock);
+               ctx->async_notify = true;
+               pending = atomic_read(&ctx->decrypt_pending);
+               spin_unlock_bh(&ctx->decrypt_compl_lock);
+               if (pending) {
                        err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
                        if (err) {
                                /* one of async decrypt failed */
@@ -1902,6 +1922,10 @@ recv_end:
                } else {
                        reinit_completion(&ctx->async_wait.completion);
                }
+
+               /* There can be no concurrent accesses, since we have no
+                * pending decrypt operations
+                */
                WRITE_ONCE(ctx->async_notify, false);
 
                /* Drain records from the rx_list & copy if required */
@@ -2081,8 +2105,9 @@ static void tls_data_ready(struct sock *sk)
        strp_data_ready(&ctx->strp);
 
        psock = sk_psock_get(sk);
-       if (psock && !list_empty(&psock->ingress_msg)) {
-               ctx->saved_data_ready(sk);
+       if (psock) {
+               if (!list_empty(&psock->ingress_msg))
+                       ctx->saved_data_ready(sk);
                sk_psock_put(sk, psock);
        }
 }
@@ -2287,6 +2312,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 
        if (tx) {
                crypto_init_wait(&sw_ctx_tx->async_wait);
+               spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
                crypto_info = &ctx->crypto_send.info;
                cctx = &ctx->tx;
                aead = &sw_ctx_tx->aead_send;
@@ -2295,6 +2321,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
                sw_ctx_tx->tx_work.sk = sk;
        } else {
                crypto_init_wait(&sw_ctx_rx->async_wait);
+               spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
                crypto_info = &ctx->crypto_recv.info;
                cctx = &ctx->rx;
                skb_queue_head_init(&sw_ctx_rx->rx_list);
index a5f28708e0e75402e595a38ee91c57e9637e0289..626bf9044418cc78eef4da904334f5dfc87b458c 100644 (file)
@@ -1408,7 +1408,7 @@ static int vsock_accept(struct socket *sock, struct socket *newsock, int flags,
        /* Wait for children sockets to appear; these are the new sockets
         * created upon connection establishment.
         */
-       timeout = sock_sndtimeo(listener, flags & O_NONBLOCK);
+       timeout = sock_rcvtimeo(listener, flags & O_NONBLOCK);
        prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
 
        while ((connected = vsock_dequeue_accept(listener)) == NULL &&
index 709038a4783e526d5c108c87b69c65202d2aff86..0edda1edf9882f702b9073bf82481f01eca630f0 100644 (file)
@@ -157,7 +157,11 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
 
 void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt)
 {
+       if (pkt->tap_delivered)
+               return;
+
        vsock_deliver_tap(virtio_transport_build_skb, pkt);
+       pkt->tap_delivered = true;
 }
 EXPORT_SYMBOL_GPL(virtio_transport_deliver_tap_pkt);
 
@@ -1128,6 +1132,14 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
 
        lock_sock(sk);
 
+       /* Check if sk has been released before lock_sock */
+       if (sk->sk_shutdown == SHUTDOWN_MASK) {
+               (void)virtio_transport_reset_no_sock(t, pkt);
+               release_sock(sk);
+               sock_put(sk);
+               goto free_pkt;
+       }
+
        /* Update CID in case it has changed after a transport reset event */
        vsk->local_addr.svm_cid = dst.svm_cid;
 
index 341402b4f178808191d5fe1892747d9d92cbd3d1..ce024440fa51f0d0d87f88d831bcbcd98f62ba2f 100644 (file)
@@ -142,7 +142,7 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
        if (result)
                return result;
 
-       if (rdev->wiphy.debugfsdir)
+       if (!IS_ERR_OR_NULL(rdev->wiphy.debugfsdir))
                debugfs_rename(rdev->wiphy.debugfsdir->d_parent,
                               rdev->wiphy.debugfsdir,
                               rdev->wiphy.debugfsdir->d_parent, newname);
index 8aa415a38814b1545e73b89ef128aa4e4276885f..0285aaa1e93c17233748d38eef6d8b5c6059b67a 100644 (file)
@@ -357,6 +357,12 @@ void x25_disconnect(struct sock *sk, int reason, unsigned char cause,
                sk->sk_state_change(sk);
                sock_set_flag(sk, SOCK_DEAD);
        }
+       if (x25->neighbour) {
+               read_lock_bh(&x25_list_lock);
+               x25_neigh_put(x25->neighbour);
+               x25->neighbour = NULL;
+               read_unlock_bh(&x25_list_lock);
+       }
 }
 
 /*
index ed7a6060f73cadc9c0b812898be9132387a70846..3889bd9aec466d3532509954ad788714537b4477 100644 (file)
@@ -341,8 +341,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 {
        bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
        u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
+       u64 npgs, addr = mr->addr, size = mr->len;
        unsigned int chunks, chunks_per_page;
-       u64 addr = mr->addr, size = mr->len;
        int err;
 
        if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
@@ -372,6 +372,10 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
        if ((addr + size) < addr)
                return -EINVAL;
 
+       npgs = div_u64(size, PAGE_SIZE);
+       if (npgs > U32_MAX)
+               return -EINVAL;
+
        chunks = (unsigned int)div_u64(size, chunk_size);
        if (chunks == 0)
                return -EINVAL;
@@ -391,7 +395,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
        umem->size = size;
        umem->headroom = headroom;
        umem->chunk_size_nohr = chunk_size - headroom;
-       umem->npgs = size / PAGE_SIZE;
+       umem->npgs = (u32)npgs;
        umem->pgs = NULL;
        umem->user = NULL;
        umem->flags = mr->flags;
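
Computing the page count in a u64 before the range check matters because size / PAGE_SIZE can exceed 32 bits on a 64-bit host while umem->npgs is a u32; checking before the narrowing cast rejects oversized registrations instead of truncating them silently. The guard in isolation:

    u64 npgs = div_u64(size, PAGE_SIZE);

    if (npgs > U32_MAX)
            return -EINVAL;         /* (u32)npgs would otherwise truncate */

    umem->npgs = (u32)npgs;
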
index 037ea156d2f93b5e0f5611f884f766cacfaefff9..5a0ff665b71a8dd4125827b13fcd8c3263e74142 100644 (file)
@@ -379,6 +379,7 @@ static void espintcp_destruct(struct sock *sk)
 {
        struct espintcp_ctx *ctx = espintcp_getctx(sk);
 
+       ctx->saved_destruct(sk);
        kfree(ctx);
 }
 
@@ -419,6 +420,7 @@ static int espintcp_init_sk(struct sock *sk)
        sk->sk_socket->ops = &espintcp_ops;
        ctx->saved_data_ready = sk->sk_data_ready;
        ctx->saved_write_space = sk->sk_write_space;
+       ctx->saved_destruct = sk->sk_destruct;
        sk->sk_data_ready = espintcp_data_ready;
        sk->sk_write_space = espintcp_write_space;
        sk->sk_destruct = espintcp_destruct;
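
espintcp now records the socket's original sk_destruct and calls it from its own destructor, so the underlying protocol still releases its resources before the espintcp context is freed. The save-and-chain callback pattern, sketched in plain C with invented names:

#include <stdio.h>
#include <stdlib.h>

struct sock { void (*destruct)(struct sock *); };
struct ctx  { void (*saved_destruct)(struct sock *); };

static struct ctx *the_ctx;     /* stand-in for a per-socket context lookup */

static void wrapped_destruct(struct sock *sk)
{
        the_ctx->saved_destruct(sk);    /* chain to the original first */
        free(the_ctx);                  /* then release our own state */
}

static void orig_destruct(struct sock *sk)
{
        (void)sk;
        printf("protocol cleanup\n");
}

int main(void)
{
        struct sock sk = { orig_destruct };
        struct ctx *c = malloc(sizeof(*c));

        if (!c)
                return 1;
        c->saved_destruct = sk.destruct;        /* save before overwriting */
        sk.destruct = wrapped_destruct;
        the_ctx = c;

        sk.destruct(&sk);       /* runs protocol cleanup, then frees ctx */
        return 0;
}
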
index 6cc7f7f1dd68cdad76734bb05e11dd90d714c86c..f50d1f97cf8ec9e1c1464321e07e3fabfb1fbce6 100644 (file)
@@ -25,12 +25,10 @@ static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb,
        struct xfrm_offload *xo = xfrm_offload(skb);
 
        skb_reset_mac_len(skb);
-       pskb_pull(skb, skb->mac_len + hsize + x->props.header_len);
-
-       if (xo->flags & XFRM_GSO_SEGMENT) {
-               skb_reset_transport_header(skb);
+       if (xo->flags & XFRM_GSO_SEGMENT)
                skb->transport_header -= x->props.header_len;
-       }
+
+       pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len);
 }
 
 static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
index aa35f23c491292f362858660e14d9a9e786c7445..8a202c44f89aefeed1a2036350b2dfc06d0962cc 100644 (file)
@@ -644,7 +644,7 @@ resume:
                dev_put(skb->dev);
 
                spin_lock(&x->lock);
-               if (nexthdr <= 0) {
+               if (nexthdr < 0) {
                        if (nexthdr == -EBADMSG) {
                                xfrm_audit_state_icvfail(x, skb,
                                                         x->type->proto);
index 3361e3ac5714cc6c751afe3eed996f8956180696..1e115cbf21d3b18bbda35754b9f544fa76a5daa4 100644 (file)
@@ -750,7 +750,28 @@ static struct rtnl_link_ops xfrmi_link_ops __read_mostly = {
        .get_link_net   = xfrmi_get_link_net,
 };
 
+static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list)
+{
+       struct net *net;
+       LIST_HEAD(list);
+
+       rtnl_lock();
+       list_for_each_entry(net, net_exit_list, exit_list) {
+               struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
+               struct xfrm_if __rcu **xip;
+               struct xfrm_if *xi;
+
+               for (xip = &xfrmn->xfrmi[0];
+                    (xi = rtnl_dereference(*xip)) != NULL;
+                    xip = &xi->next)
+                       unregister_netdevice_queue(xi->dev, &list);
+       }
+       unregister_netdevice_many(&list);
+       rtnl_unlock();
+}
+
 static struct pernet_operations xfrmi_net_ops = {
+       .exit_batch = xfrmi_exit_batch_net,
        .id   = &xfrmi_net_id,
        .size = sizeof(struct xfrmi_net),
 };
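
The new exit_batch handler collects every interface from every exiting namespace onto one list and unregisters them with a single unregister_netdevice_many() call inside one rtnl_lock section, rather than paying the lock and the unregister machinery per namespace. The gather-then-commit shape, sketched generically in C:

#include <stdio.h>
#include <stdlib.h>

struct dev { const char *name; struct dev *next; };

/* Detach every device from every group onto one list, then tear them
 * down in a single pass: the expensive step (in the kernel, the
 * rtnl_lock section around unregister_netdevice_many()) is paid once
 * per batch instead of once per namespace. */
static void teardown_batch(struct dev *groups[], int ngroups)
{
        struct dev *todo = NULL;

        for (int g = 0; g < ngroups; g++) {
                while (groups[g]) {
                        struct dev *d = groups[g];

                        groups[g] = d->next;
                        d->next = todo;         /* queue for batched teardown */
                        todo = d;
                }
        }
        while (todo) {                          /* single commit pass */
                struct dev *d = todo;

                todo = d->next;
                printf("unregister %s\n", d->name);
                free(d);
        }
}

int main(void)
{
        struct dev *a = calloc(1, sizeof(*a)), *b = calloc(1, sizeof(*b));

        if (!a || !b)
                return 1;
        a->name = "xfrm0";
        b->name = "xfrm1";
        teardown_batch((struct dev *[]){ a, b }, 2);
        return 0;
}
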
index 2fd3d990d992fe26ddfa3ea870e0940ce58dec0d..69c4900db8172a486f53859665c7ebe373d313f1 100644 (file)
@@ -583,18 +583,20 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
                xfrm_state_hold(x);
 
                if (skb_is_gso(skb)) {
-                       skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;
+                       if (skb->inner_protocol)
+                               return xfrm_output_gso(net, sk, skb);
 
-                       return xfrm_output2(net, sk, skb);
+                       skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;
+                       goto out;
                }
 
                if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)
                        goto out;
+       } else {
+               if (skb_is_gso(skb))
+                       return xfrm_output_gso(net, sk, skb);
        }
 
-       if (skb_is_gso(skb))
-               return xfrm_output_gso(net, sk, skb);
-
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                err = skb_checksum_help(skb);
                if (err) {
@@ -640,7 +642,8 @@ void xfrm_local_error(struct sk_buff *skb, int mtu)
 
        if (skb->protocol == htons(ETH_P_IP))
                proto = AF_INET;
-       else if (skb->protocol == htons(ETH_P_IPV6))
+       else if (skb->protocol == htons(ETH_P_IPV6) &&
+                skb->sk->sk_family == AF_INET6)
                proto = AF_INET6;
        else
                return;
index 297b2fdb3c2977fd57c3a640550d50b5b661902c..564aa6492e7c397312f51e350a84b2063c5ed616 100644 (file)
@@ -1436,12 +1436,7 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
 static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
                                   struct xfrm_policy *pol)
 {
-       u32 mark = policy->mark.v & policy->mark.m;
-
-       if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
-               return true;
-
-       if ((mark & pol->mark.m) == pol->mark.v &&
+       if (policy->mark.v == pol->mark.v &&
            policy->priority == pol->priority)
                return true;
 
index 587b68b1f8dde6f4758dd16f8bb68a88f1816c84..430a4b7e353e662b8cbe4b896f9ad292aac8eeed 100644 (file)
@@ -15,8 +15,6 @@
 #define MAX_INDEX 64
 #define MAX_STARS 38
 
-char bpf_log_buf[BPF_LOG_BUF_SIZE];
-
 static void stars(char *str, long val, long max, int width)
 {
        int i;
index 80b4a70315b68f2758a2e126236280a2fa799fa3..13a35f7cbe661cfb3aa5334346417e20a2fe92f0 100644 (file)
@@ -416,7 +416,7 @@ TRACE_EVENT_FN(foo_bar_with_fn,
  * Note, TRACE_EVENT() itself is simply defined as:
  *
  * #define TRACE_EVENT(name, proto, args, tstruct, assign, printk)  \
- *  DEFINE_EVENT_CLASS(name, proto, args, tstruct, assign, printk); \
+ *  DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, printk); \
  *  DEFINE_EVENT(name, name, proto, args)
  *
  * The DEFINE_EVENT() also can be declared with conditions and reg functions:
index eac40f0abd56a9f4700423664824da8a537bfd61..b83be177edf0ac70760171665700847257bfbe5b 100755 (executable)
@@ -51,7 +51,7 @@ my %ignore_type = ();
 my @ignore = ();
 my $help = 0;
 my $configuration_file = ".checkpatch.conf";
-my $max_line_length = 80;
+my $max_line_length = 100;
 my $ignore_perl_version = 0;
 my $minimum_perl_version = 5.10.0;
 my $min_conf_desc_length = 4;
@@ -97,9 +97,11 @@ Options:
   --types TYPE(,TYPE2...)    show only these comma separated message types
   --ignore TYPE(,TYPE2...)   ignore various comma separated message types
   --show-types               show the specific message type in the output
-  --max-line-length=n        set the maximum line length, if exceeded, warn
+  --max-line-length=n        set the maximum line length (default $max_line_length);
+                             if exceeded, warn on patches;
+                             requires --strict for use with --file
   --min-conf-desc-length=n   set the min description length, if shorter, warn
-  --tab-size=n               set the number of spaces for tab (default 8)
+  --tab-size=n               set the number of spaces for tab (default $tabsize)
   --root=PATH                PATH to the kernel tree root
   --no-summary               suppress the per-file summary
   --mailback                 only produce a report in case of warnings/errors
@@ -3240,8 +3242,10 @@ sub process {
 
                        if ($msg_type ne "" &&
                            (show_type("LONG_LINE") || show_type($msg_type))) {
-                               WARN($msg_type,
-                                    "line over $max_line_length characters\n" . $herecurr);
+                               my $msg_level = \&WARN;
+                               $msg_level = \&CHK if ($file);
+                               &{$msg_level}($msg_type,
+                                             "line length of $length exceeds $max_line_length columns\n" . $herecurr);
                        }
                }
 
index ba8b8d5834e67922e0028dbec04b9e7cebc2766d..fbdb325cdf4f09e8fe34c76de9e2d1c26a82d41b 100755 (executable)
@@ -126,7 +126,7 @@ faultlinenum=$(( $(wc -l $T.oo  | cut -d" " -f1) - \
 faultline=`cat $T.dis | head -1 | cut -d":" -f2-`
 faultline=`echo "$faultline" | sed -e 's/\[/\\\[/g; s/\]/\\\]/g'`
 
-cat $T.oo | sed -e "${faultlinenum}s/^\(.*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/"
+cat $T.oo | sed -e "${faultlinenum}s/^\([^:]*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/"
 echo
 cat $T.aa
 cleanup
index f22858b2c3d695b28f9e9ad15f48c07ed8dfdeb9..80f354289eeb9423e5b6f0cb40845682e371668d 100644 (file)
@@ -4,6 +4,7 @@ GCC_PLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
 HOST_EXTRACXXFLAGS += -I$(GCC_PLUGINS_DIR)/include -I$(src) -std=gnu++98 -fno-rtti
 HOST_EXTRACXXFLAGS += -fno-exceptions -fasynchronous-unwind-tables -ggdb
 HOST_EXTRACXXFLAGS += -Wno-narrowing -Wno-unused-variable -Wno-c++11-compat
+HOST_EXTRACXXFLAGS += -Wno-format-diag
 
 $(obj)/randomize_layout_plugin.o: $(objtree)/$(obj)/randomize_layout_seed.h
 quiet_cmd_create_randomize_layout_seed = GENSEED $@
index 17f06079a71231180cb0fd5a4d096c15841a8e01..9ad76b7f3f10e71fc04c4a64196388ffe4e40481 100644 (file)
@@ -35,7 +35,9 @@
 #include "ggc.h"
 #include "timevar.h"
 
+#if BUILDING_GCC_VERSION < 10000
 #include "params.h"
+#endif
 
 #if BUILDING_GCC_VERSION <= 4009
 #include "pointer-set.h"
@@ -847,6 +849,7 @@ static inline gimple gimple_build_assign_with_ops(enum tree_code subcode, tree l
        return gimple_build_assign(lhs, subcode, op1, op2 PASS_MEM_STAT);
 }
 
+#if BUILDING_GCC_VERSION < 10000
 template <>
 template <>
 inline bool is_a_helper<const ggoto *>::test(const_gimple gs)
@@ -860,6 +863,7 @@ inline bool is_a_helper<const greturn *>::test(const_gimple gs)
 {
        return gs->code == GIMPLE_RETURN;
 }
+#endif
 
 static inline gasm *as_a_gasm(gimple stmt)
 {
index dbd37460c573ec549fe33c7015f77206f4485064..cc75eeba0be1015fde86c981dfc1b3b02e99ff8e 100644 (file)
@@ -51,7 +51,6 @@ static void stackleak_add_track_stack(gimple_stmt_iterator *gsi, bool after)
        gimple stmt;
        gcall *stackleak_track_stack;
        cgraph_node_ptr node;
-       int frequency;
        basic_block bb;
 
        /* Insert call to void stackleak_track_stack(void) */
@@ -68,9 +67,9 @@ static void stackleak_add_track_stack(gimple_stmt_iterator *gsi, bool after)
        bb = gimple_bb(stackleak_track_stack);
        node = cgraph_get_create_node(track_function_decl);
        gcc_assert(node);
-       frequency = compute_call_stmt_bb_frequency(current_function_decl, bb);
        cgraph_create_edge(cgraph_get_node(current_function_decl), node,
-                       stackleak_track_stack, bb->count, frequency);
+                       stackleak_track_stack, bb->count,
+                       compute_call_stmt_bb_frequency(current_function_decl, bb));
 }
 
 static bool is_alloca(gimple stmt)
index 39db889b874c9e59136f626519514dec6c246962..c4b99160791785e5b346b0a86e8cb4e28f182b2d 100644 (file)
@@ -12,7 +12,7 @@ rb_node_type = utils.CachedType("struct rb_node")
 
 def rb_first(root):
     if root.type == rb_root_type.get_type():
-        node = node.address.cast(rb_root_type.get_type().pointer())
+        node = root.address.cast(rb_root_type.get_type().pointer())
     elif root.type != rb_root_type.get_type().pointer():
         raise gdb.GdbError("Must be struct rb_root not {}".format(root.type))
 
@@ -28,7 +28,7 @@ def rb_first(root):
 
 def rb_last(root):
     if root.type == rb_root_type.get_type():
-        node = node.address.cast(rb_root_type.get_type().pointer())
+        node = root.address.cast(rb_root_type.get_type().pointer())
     elif root.type != rb_root_type.get_type().pointer():
         raise gdb.GdbError("Must be struct rb_root not {}".format(root.type))
 
index 3e8dea6e0a9572c21b719b8ac111ed8097bab77e..6dc3078649fa0c41c79a8d4d26db82b11b15d9df 100644 (file)
@@ -34,7 +34,7 @@ struct sym_entry {
        unsigned int len;
        unsigned int start_pos;
        unsigned int percpu_absolute;
-       unsigned char sym[0];
+       unsigned char sym[];
 };
 
 struct addr_range {
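
sym[0] is the pre-C99 zero-length-array idiom; sym[] is the standard C99 flexible array member, which compilers and bounds checkers understand properly. Allocation is unchanged, since sizeof ignores the flexible member either way:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sym_entry {
        unsigned int len;
        unsigned char sym[];            /* C99 flexible array member */
};

static struct sym_entry *sym_new(const char *name, unsigned int len)
{
        /* sizeof(*e) does not count the flexible member; add it. */
        struct sym_entry *e = malloc(sizeof(*e) + len);

        if (!e)
                return NULL;
        e->len = len;
        memcpy(e->sym, name, len);
        return e;
}

int main(void)
{
        struct sym_entry *e = sym_new("start_kernel", 12);

        if (e)
                printf("%.*s (%u bytes)\n", (int)e->len, (char *)e->sym, e->len);
        free(e);
        return 0;
}
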
index 22e73a3482bd6c393a8a3f712bebf263692d16ca..3baf435de5411b2d2f5965a75faf327b4b79355b 100644 (file)
@@ -30,7 +30,7 @@ obj-$(CONFIG_SECURITY_YAMA)           += yama/
 obj-$(CONFIG_SECURITY_LOADPIN)         += loadpin/
 obj-$(CONFIG_SECURITY_SAFESETID)       += safesetid/
 obj-$(CONFIG_SECURITY_LOCKDOWN_LSM)    += lockdown/
-obj-$(CONFIG_CGROUP_DEVICE)            += device_cgroup.o
+obj-$(CONFIG_CGROUPS)                  += device_cgroup.o
 obj-$(CONFIG_BPF_LSM)                  += bpf/
 
 # Object integrity file lists
index 280741fc0f5f4e3bbd680929999bcc42996ef04e..f6a3ecfadf8054d5f3a1142c205040714a0e58c2 100644 (file)
@@ -454,7 +454,7 @@ static ssize_t policy_update(u32 mask, const char __user *buf, size_t size,
         */
        error = aa_may_manage_policy(label, ns, mask);
        if (error)
-               return error;
+               goto end_section;
 
        data = aa_simple_write_to_buffer(buf, size, size, pos);
        error = PTR_ERR(data);
@@ -462,6 +462,7 @@ static ssize_t policy_update(u32 mask, const char __user *buf, size_t size,
                error = aa_replace_profiles(ns, label, mask, data);
                aa_put_loaddata(data);
        }
+end_section:
        end_current_label_crit_section(label);
 
        return error;
index 5a98661a8b46ea2daf47a3386d2fe181622a977b..5977325038157dc5a12a2d4ed663d7e09f8dbace 100644 (file)
@@ -197,8 +197,9 @@ int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
        rule->label = aa_label_parse(&root_ns->unconfined->label, rulestr,
                                     GFP_KERNEL, true, false);
        if (IS_ERR(rule->label)) {
+               int err = PTR_ERR(rule->label);
                aa_audit_rule_free(rule);
-               return PTR_ERR(rule->label);
+               return err;
        }
 
        *vrule = rule;
index 6ceb74e0f7895548c5faaa6f95b7eb8c11367e87..a84ef030fbd7d18c0d6825ba8f07e89b241a98a9 100644 (file)
@@ -1328,6 +1328,7 @@ int aa_change_profile(const char *fqname, int flags)
                ctx->nnp = aa_get_label(label);
 
        if (!fqname || !*fqname) {
+               aa_put_label(label);
                AA_DEBUG("no profile name");
                return -EINVAL;
        }
@@ -1346,8 +1347,6 @@ int aa_change_profile(const char *fqname, int flags)
                        op = OP_CHANGE_PROFILE;
        }
 
-       label = aa_get_current_label();
-
        if (*fqname == '&') {
                stack = true;
                /* don't have label_parse() do stacking */
index f4ee0ae106b282a12adb338f8caa6ce219cfbceb..0ca31c8bc0b13b8dcb70ceabbc466962df162748 100644 (file)
@@ -812,6 +812,7 @@ int cap_bprm_set_creds(struct linux_binprm *bprm)
        int ret;
        kuid_t root_uid;
 
+       new->cap_ambient = old->cap_ambient;
        if (WARN_ON(!cap_ambient_invariant_ok(old)))
                return -EPERM;
 
index 7d0f8f7431ff5466af18e6105b4ed5c2b9136047..43ab0ad45c1b62b8cdd16b956e120dabccc78fee 100644 (file)
@@ -15,6 +15,8 @@
 #include <linux/rcupdate.h>
 #include <linux/mutex.h>
 
+#ifdef CONFIG_CGROUP_DEVICE
+
 static DEFINE_MUTEX(devcgroup_mutex);
 
 enum devcg_behavior {
@@ -792,7 +794,7 @@ struct cgroup_subsys devices_cgrp_subsys = {
 };
 
 /**
- * __devcgroup_check_permission - checks if an inode operation is permitted
+ * devcgroup_legacy_check_permission - checks if an inode operation is permitted
  * @dev_cgroup: the dev cgroup to be tested against
  * @type: device type
  * @major: device major number
@@ -801,7 +803,7 @@ struct cgroup_subsys devices_cgrp_subsys = {
  *
  * returns 0 on success, -EPERM in case the operation is not permitted
  */
-static int __devcgroup_check_permission(short type, u32 major, u32 minor,
+static int devcgroup_legacy_check_permission(short type, u32 major, u32 minor,
                                        short access)
 {
        struct dev_cgroup *dev_cgroup;
@@ -825,6 +827,10 @@ static int __devcgroup_check_permission(short type, u32 major, u32 minor,
        return 0;
 }
 
+#endif /* CONFIG_CGROUP_DEVICE */
+
+#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
+
 int devcgroup_check_permission(short type, u32 major, u32 minor, short access)
 {
        int rc = BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access);
@@ -832,6 +838,13 @@ int devcgroup_check_permission(short type, u32 major, u32 minor, short access)
        if (rc)
                return -EPERM;
 
-       return __devcgroup_check_permission(type, major, minor, access);
+       #ifdef CONFIG_CGROUP_DEVICE
+       return devcgroup_legacy_check_permission(type, major, minor, access);
+
+       #else /* CONFIG_CGROUP_DEVICE */
+       return 0;
+
+       #endif /* CONFIG_CGROUP_DEVICE */
 }
 EXPORT_SYMBOL(devcgroup_check_permission);
+#endif /* defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF) */
index 35682852ddea94d5a09bbe19020b2460950665b6..764b896cd628339ba1efc0047f1d069fd67406bd 100644 (file)
@@ -73,7 +73,7 @@ static struct shash_desc *init_desc(char type, uint8_t hash_algo)
 {
        long rc;
        const char *algo;
-       struct crypto_shash **tfm;
+       struct crypto_shash **tfm, *tmp_tfm;
        struct shash_desc *desc;
 
        if (type == EVM_XATTR_HMAC) {
@@ -91,31 +91,31 @@ static struct shash_desc *init_desc(char type, uint8_t hash_algo)
                algo = hash_algo_name[hash_algo];
        }
 
-       if (*tfm == NULL) {
-               mutex_lock(&mutex);
-               if (*tfm)
-                       goto out;
-               *tfm = crypto_alloc_shash(algo, 0, CRYPTO_NOLOAD);
-               if (IS_ERR(*tfm)) {
-                       rc = PTR_ERR(*tfm);
-                       pr_err("Can not allocate %s (reason: %ld)\n", algo, rc);
-                       *tfm = NULL;
+       if (*tfm)
+               goto alloc;
+       mutex_lock(&mutex);
+       if (*tfm)
+               goto unlock;
+
+       tmp_tfm = crypto_alloc_shash(algo, 0, CRYPTO_NOLOAD);
+       if (IS_ERR(tmp_tfm)) {
+               pr_err("Can not allocate %s (reason: %ld)\n", algo,
+                      PTR_ERR(tmp_tfm));
+               mutex_unlock(&mutex);
+               return ERR_CAST(tmp_tfm);
+       }
+       if (type == EVM_XATTR_HMAC) {
+               rc = crypto_shash_setkey(tmp_tfm, evmkey, evmkey_len);
+               if (rc) {
+                       crypto_free_shash(tmp_tfm);
                        mutex_unlock(&mutex);
                        return ERR_PTR(rc);
                }
-               if (type == EVM_XATTR_HMAC) {
-                       rc = crypto_shash_setkey(*tfm, evmkey, evmkey_len);
-                       if (rc) {
-                               crypto_free_shash(*tfm);
-                               *tfm = NULL;
-                               mutex_unlock(&mutex);
-                               return ERR_PTR(rc);
-                       }
-               }
-out:
-               mutex_unlock(&mutex);
        }
-
+       *tfm = tmp_tfm;
+unlock:
+       mutex_unlock(&mutex);
+alloc:
        desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(*tfm),
                        GFP_KERNEL);
        if (!desc)
@@ -207,7 +207,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
        data->hdr.length = crypto_shash_digestsize(desc->tfm);
 
        error = -ENODATA;
-       list_for_each_entry_rcu(xattr, &evm_config_xattrnames, list) {
+       list_for_each_entry_lockless(xattr, &evm_config_xattrnames, list) {
                bool is_ima = false;
 
                if (strcmp(xattr->name, XATTR_NAME_IMA) == 0)
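
The restructured init_desc() builds the transform in a local tmp_tfm and assigns it to the shared *tfm slot only after allocation and setkey have both succeeded, so the lockless fast-path test of *tfm can never observe a half-initialized transform (the kernel additionally leans on its memory model here; portable C would want atomics for the unlocked read). The publish-after-init shape with a mutex, sketched in userspace C with invented names:

#include <pthread.h>
#include <stdlib.h>

struct tfm { int key_ok; };

static struct tfm *shared;      /* tested locklessly by the fast path */
static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;

static struct tfm *get_tfm(void)
{
        struct tfm *tmp;

        if (shared)                     /* fast path: already published */
                return shared;

        pthread_mutex_lock(&init_lock);
        if (shared) {                   /* lost the race: another thread won */
                pthread_mutex_unlock(&init_lock);
                return shared;
        }
        tmp = calloc(1, sizeof(*tmp));  /* build fully in private storage */
        if (tmp)
                tmp->key_ok = 1;        /* stand-in for crypto_shash_setkey() */
        shared = tmp;                   /* publish only the finished object */
        pthread_mutex_unlock(&init_lock);
        return shared;
}
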
index d361d7fdafc49f11215e52e7f27fc9d6549671fb..0d36259b690df34d9ae7ea9b47fdd2f02bf99a73 100644 (file)
@@ -97,7 +97,7 @@ static int evm_find_protected_xattrs(struct dentry *dentry)
        if (!(inode->i_opflags & IOP_XATTR))
                return -EOPNOTSUPP;
 
-       list_for_each_entry_rcu(xattr, &evm_config_xattrnames, list) {
+       list_for_each_entry_lockless(xattr, &evm_config_xattrnames, list) {
                error = __vfs_getxattr(dentry, inode, xattr->name, NULL, 0);
                if (error < 0) {
                        if (error == -ENODATA)
@@ -228,7 +228,7 @@ static int evm_protected_xattr(const char *req_xattr_name)
        struct xattr_list *xattr;
 
        namelen = strlen(req_xattr_name);
-       list_for_each_entry_rcu(xattr, &evm_config_xattrnames, list) {
+       list_for_each_entry_lockless(xattr, &evm_config_xattrnames, list) {
                if ((strlen(xattr->name) == namelen)
                    && (strncmp(req_xattr_name, xattr->name, namelen) == 0)) {
                        found = 1;
index 39ad1038d45dad4f06d84c16fdb169384400b007..cfc3075769bb0ddcf543128a2fb2eb0854342fbc 100644 (file)
@@ -232,7 +232,14 @@ static ssize_t evm_write_xattrs(struct file *file, const char __user *buf,
                goto out;
        }
 
-       /* Guard against races in evm_read_xattrs */
+       /*
+        * xattr_list_mutex guards against races in evm_read_xattrs().
+        * Entries are only added to the evm_config_xattrnames list
+        * and never deleted. Therefore, the list is traversed
+        * using list_for_each_entry_lockless() without holding
+        * the mutex in evm_calc_hmac_or_hash(), evm_find_protected_xattrs()
+        * and evm_protected_xattr().
+        */
        mutex_lock(&xattr_list_mutex);
        list_for_each_entry(tmp, &evm_config_xattrnames, list) {
                if (strcmp(xattr->name, tmp->name) == 0) {
index 423c84f95a141bc1f88decff38c7cddea9465f99..88b5e288f2419dd2ff1ca807bfa62a5a69dd3347 100644 (file)
@@ -411,7 +411,7 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
        loff_t i_size;
        int rc;
        struct file *f = file;
-       bool new_file_instance = false, modified_flags = false;
+       bool new_file_instance = false, modified_mode = false;
 
        /*
         * For consistency, fail file's opened with the O_DIRECT flag on
@@ -431,13 +431,13 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
                f = dentry_open(&file->f_path, flags, file->f_cred);
                if (IS_ERR(f)) {
                        /*
-                        * Cannot open the file again, lets modify f_flags
+                        * Cannot open the file again, let's modify f_mode
                         * of original and continue
                         */
                        pr_info_ratelimited("Unable to reopen file for reading.\n");
                        f = file;
-                       f->f_flags |= FMODE_READ;
-                       modified_flags = true;
+                       f->f_mode |= FMODE_READ;
+                       modified_mode = true;
                } else {
                        new_file_instance = true;
                }
@@ -455,8 +455,8 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
 out:
        if (new_file_instance)
                fput(f);
-       else if (modified_flags)
-               f->f_flags &= ~FMODE_READ;
+       else if (modified_mode)
+               f->f_mode &= ~FMODE_READ;
        return rc;
 }
 
index a71e822a6e92d4d8feeb3957cbc48f1f1fe82660..3efc8308ad266c3199c69bd2ee1f6f43c4677e78 100644 (file)
@@ -338,8 +338,7 @@ static ssize_t ima_write_policy(struct file *file, const char __user *buf,
                integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL, NULL,
                                    "policy_update", "signed policy required",
                                    1, 0);
-               if (ima_appraise & IMA_APPRAISE_ENFORCE)
-                       result = -EACCES;
+               result = -EACCES;
        } else {
                result = ima_parse_add_rule(data);
        }
index 7fed24b9d57e62d7a103db0e924dec8491e738f0..51de970fbb1edf5b7af588a13bc0f4a3932c93bf 100644 (file)
@@ -1965,8 +1965,20 @@ EXPORT_SYMBOL(security_ismaclabel);
 
 int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
 {
-       return call_int_hook(secid_to_secctx, -EOPNOTSUPP, secid, secdata,
-                               seclen);
+       struct security_hook_list *hp;
+       int rc;
+
+       /*
+        * Currently, only one LSM can implement secid_to_secctx (i.e. this
+        * LSM hook is not "stackable").
+        */
+       hlist_for_each_entry(hp, &security_hook_heads.secid_to_secctx, list) {
+               rc = hp->hook.secid_to_secctx(secid, secdata, seclen);
+               if (rc != LSM_RET_DEFAULT(secid_to_secctx))
+                       return rc;
+       }
+
+       return LSM_RET_DEFAULT(secid_to_secctx);
 }
 EXPORT_SYMBOL(security_secid_to_secctx);
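
Instead of the call_int_hook() macro, the function now walks the hook list by hand and returns the first result that differs from the hook's default, since secid_to_secctx cannot be stacked across LSMs. The first-non-default-answer-wins loop, reduced to a generic sketch (DEFAULT_RC and the handler table are invented stand-ins):

#include <stdio.h>

#define DEFAULT_RC (-95)        /* stand-in for LSM_RET_DEFAULT(): -EOPNOTSUPP */

typedef int (*hook_fn)(int arg);

static int declines(int arg) { (void)arg; return DEFAULT_RC; }
static int answers(int arg)  { return arg * 2; }

static int call_first_non_default(const hook_fn *hooks, int n, int arg)
{
        for (int i = 0; i < n; i++) {
                int rc = hooks[i](arg);

                if (rc != DEFAULT_RC)   /* first real answer wins */
                        return rc;
        }
        return DEFAULT_RC;              /* nobody implemented the hook */
}

int main(void)
{
        const hook_fn hooks[] = { declines, answers };

        printf("%d\n", call_first_non_default(hooks, 2, 21));
        return 0;
}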
 
index 0b4e32161b7775df189e59624dd43923d8df1019..4c037c2545c1613b2b517d583f8d696ae00452bf 100644 (file)
@@ -5842,40 +5842,60 @@ static unsigned int selinux_ipv6_postroute(void *priv,
 
 static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb)
 {
-       int err = 0;
-       u32 perm;
+       int rc = 0;
+       unsigned int msg_len;
+       unsigned int data_len = skb->len;
+       unsigned char *data = skb->data;
        struct nlmsghdr *nlh;
        struct sk_security_struct *sksec = sk->sk_security;
+       u16 sclass = sksec->sclass;
+       u32 perm;
 
-       if (skb->len < NLMSG_HDRLEN) {
-               err = -EINVAL;
-               goto out;
-       }
-       nlh = nlmsg_hdr(skb);
+       while (data_len >= nlmsg_total_size(0)) {
+               nlh = (struct nlmsghdr *)data;
+
+               /* NOTE: the nlmsg_len field isn't reliably set by some netlink
+                *       users which means we can't reject skb's with bogus
+                *       length fields; our solution is to follow what
+                *       netlink_rcv_skb() does and simply skip processing of
+                *       messages with length fields that are clearly junk
+                */
+               if (nlh->nlmsg_len < NLMSG_HDRLEN || nlh->nlmsg_len > data_len)
+                       return 0;
 
-       err = selinux_nlmsg_lookup(sksec->sclass, nlh->nlmsg_type, &perm);
-       if (err) {
-               if (err == -EINVAL) {
+               rc = selinux_nlmsg_lookup(sclass, nlh->nlmsg_type, &perm);
+               if (rc == 0) {
+                       rc = sock_has_perm(sk, perm);
+                       if (rc)
+                               return rc;
+               } else if (rc == -EINVAL) {
+                       /* -EINVAL is a missing msg/perm mapping */
                        pr_warn_ratelimited("SELinux: unrecognized netlink"
-                              " message: protocol=%hu nlmsg_type=%hu sclass=%s"
-                              " pid=%d comm=%s\n",
-                              sk->sk_protocol, nlh->nlmsg_type,
-                              secclass_map[sksec->sclass - 1].name,
-                              task_pid_nr(current), current->comm);
-                       if (!enforcing_enabled(&selinux_state) ||
-                           security_get_allow_unknown(&selinux_state))
-                               err = 0;
+                               " message: protocol=%hu nlmsg_type=%hu sclass=%s"
+                               " pid=%d comm=%s\n",
+                               sk->sk_protocol, nlh->nlmsg_type,
+                               secclass_map[sclass - 1].name,
+                               task_pid_nr(current), current->comm);
+                       if (enforcing_enabled(&selinux_state) &&
+                           !security_get_allow_unknown(&selinux_state))
+                               return rc;
+                       rc = 0;
+               } else if (rc == -ENOENT) {
+                       /* -ENOENT is a missing socket/class mapping, ignore */
+                       rc = 0;
+               } else {
+                       return rc;
                }
 
-               /* Ignore */
-               if (err == -ENOENT)
-                       err = 0;
-               goto out;
+               /* move to the next message after applying netlink padding */
+               msg_len = NLMSG_ALIGN(nlh->nlmsg_len);
+               if (msg_len >= data_len)
+                       return 0;
+               data_len -= msg_len;
+               data += msg_len;
        }
 
-       err = sock_has_perm(sk, perm);
-out:
-       return err;
+       return rc;
 }
 
 static void ipc_init_security(struct ipc_security_struct *isec, u16 sclass)
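
The rewritten selinux_netlink_send() no longer assumes a single message per skb: it strides through the buffer with NLMSG_ALIGN(nlmsg_len), checks each permission in turn, and, like netlink_rcv_skb(), quietly stops at a header whose length field is junk. A standalone sketch of that walk over a byte buffer (simplified header; the real code uses struct nlmsghdr and the NLMSG_* macros):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct msghdr16 {               /* simplified stand-in for struct nlmsghdr */
        uint32_t len;           /* total length, header included */
        uint16_t type;
};

#define ALIGNTO      4u
#define MSG_ALIGN(n) (((n) + ALIGNTO - 1) & ~(ALIGNTO - 1))

static void walk(const unsigned char *data, size_t data_len)
{
        while (data_len >= sizeof(struct msghdr16)) {
                struct msghdr16 h;
                size_t stride;

                memcpy(&h, data, sizeof(h));    /* no alignment assumptions */
                if (h.len < sizeof(h) || h.len > data_len)
                        return;                 /* junk length: stop quietly */
                printf("msg type=%u len=%u\n", h.type, h.len);

                stride = MSG_ALIGN(h.len);      /* padded stride to next msg */
                if (stride >= data_len)
                        return;                 /* that was the last message */
                data     += stride;
                data_len -= stride;
        }
}

int main(void)
{
        unsigned char buf[32] = { 0 };
        struct msghdr16 h = { .len = 10, .type = 1 };

        memcpy(buf, &h, sizeof(h));
        h.len = 8; h.type = 2;
        memcpy(buf + MSG_ALIGN(10), &h, sizeof(h));
        walk(buf, MSG_ALIGN(10) + 8);           /* prints both messages */
        return 0;
}
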
index 939a74fd8fb470e68ef6fd9172a11fd2e42679d4..da94a1b4bfda07dccee4dfeaf04b86c3440f82ce 100644 (file)
@@ -429,7 +429,7 @@ int cond_read_list(struct policydb *p, void *fp)
 
        p->cond_list = kcalloc(len, sizeof(*p->cond_list), GFP_KERNEL);
        if (!p->cond_list)
-               return rc;
+               return -ENOMEM;
 
        rc = avtab_alloc(&(p->te_cond_avtab), p->te_avtab.nel);
        if (rc)
index b412d3b3d5ffcbf95ba11edc5ab7154548e4b1d9..21edb8ac95eb354d8ec95c2b262cf61eb4db7982 100644 (file)
@@ -216,12 +216,12 @@ static int snd_hwdep_dsp_load(struct snd_hwdep *hw,
        if (info.index >= 32)
                return -EINVAL;
        /* check whether the dsp was already loaded */
-       if (hw->dsp_loaded & (1 << info.index))
+       if (hw->dsp_loaded & (1u << info.index))
                return -EBUSY;
        err = hw->ops.dsp_load(hw, &info);
        if (err < 0)
                return err;
-       hw->dsp_loaded |= (1 << info.index);
+       hw->dsp_loaded |= (1u << info.index);
        return 0;
 }
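
The u suffix matters once bit 31 is in play: dsp_loaded is a 32-bit field, and 1 << 31 shifts into the sign bit of a signed int, which is undefined behavior; 1u << info.index keeps the whole computation unsigned. A short demonstration:

#include <stdio.h>

int main(void)
{
        unsigned int index = 31;

        /* (1 << 31) is undefined behavior: UBSan reports "left shift
         * of 1 by 31 places cannot be represented in type 'int'".
         * The unsigned form below is well-defined. */
        unsigned int mask = 1u << index;

        printf("0x%08x\n", mask);       /* 0x80000000 */
        return 0;
}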
 
index 59d62f05658f9e2626c62885a32bacbb9b9a387d..1545f8fdb4db6c4c7420151b7014ad8830132622 100644 (file)
@@ -205,13 +205,14 @@ static snd_pcm_sframes_t calc_dst_frames(struct snd_pcm_substream *plug,
        plugin = snd_pcm_plug_first(plug);
        while (plugin && frames > 0) {
                plugin_next = plugin->next;
+               if (check_size && plugin->buf_frames &&
+                   frames > plugin->buf_frames)
+                       frames = plugin->buf_frames;
                if (plugin->dst_frames) {
                        frames = plugin->dst_frames(plugin, frames);
                        if (frames < 0)
                                return frames;
                }
-               if (check_size && frames > plugin->buf_frames)
-                       frames = plugin->buf_frames;
                plugin = plugin_next;
        }
        return frames;
@@ -225,14 +226,15 @@ static snd_pcm_sframes_t calc_src_frames(struct snd_pcm_substream *plug,
 
        plugin = snd_pcm_plug_last(plug);
        while (plugin && frames > 0) {
-               if (check_size && frames > plugin->buf_frames)
-                       frames = plugin->buf_frames;
                plugin_prev = plugin->prev;
                if (plugin->src_frames) {
                        frames = plugin->src_frames(plugin, frames);
                        if (frames < 0)
                                return frames;
                }
+               if (check_size && plugin->buf_frames &&
+                   frames > plugin->buf_frames)
+                       frames = plugin->buf_frames;
                plugin = plugin_prev;
        }
        return frames;
index 872a852de75c0189d070c57cfad3664787b883a9..d531e1bc2b8138267e547331e1be8d9e7e55a3e8 100644 (file)
@@ -433,6 +433,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
 
  no_delta_check:
        if (runtime->status->hw_ptr == new_hw_ptr) {
+               runtime->hw_ptr_jiffies = curr_jiffies;
                update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
                return 0;
        }
index 20dd08e1f675699b98695ef5e398829fa2945c0f..2a688b711a9ac698ab19aeacb41f6bef747c59da 100644 (file)
@@ -120,6 +120,17 @@ static void snd_rawmidi_input_event_work(struct work_struct *work)
                runtime->event(runtime->substream);
 }
 
+/* buffer refcount management: call with runtime->lock held */
+static inline void snd_rawmidi_buffer_ref(struct snd_rawmidi_runtime *runtime)
+{
+       runtime->buffer_ref++;
+}
+
+static inline void snd_rawmidi_buffer_unref(struct snd_rawmidi_runtime *runtime)
+{
+       runtime->buffer_ref--;
+}
+
 static int snd_rawmidi_runtime_create(struct snd_rawmidi_substream *substream)
 {
        struct snd_rawmidi_runtime *runtime;
@@ -669,6 +680,11 @@ static int resize_runtime_buffer(struct snd_rawmidi_runtime *runtime,
                if (!newbuf)
                        return -ENOMEM;
                spin_lock_irq(&runtime->lock);
+               if (runtime->buffer_ref) {
+                       spin_unlock_irq(&runtime->lock);
+                       kvfree(newbuf);
+                       return -EBUSY;
+               }
                oldbuf = runtime->buffer;
                runtime->buffer = newbuf;
                runtime->buffer_size = params->buffer_size;
@@ -1019,8 +1035,10 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
        long result = 0, count1;
        struct snd_rawmidi_runtime *runtime = substream->runtime;
        unsigned long appl_ptr;
+       int err = 0;
 
        spin_lock_irqsave(&runtime->lock, flags);
+       snd_rawmidi_buffer_ref(runtime);
        while (count > 0 && runtime->avail) {
                count1 = runtime->buffer_size - runtime->appl_ptr;
                if (count1 > count)
@@ -1039,16 +1057,19 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
                if (userbuf) {
                        spin_unlock_irqrestore(&runtime->lock, flags);
                        if (copy_to_user(userbuf + result,
-                                        runtime->buffer + appl_ptr, count1)) {
-                               return result > 0 ? result : -EFAULT;
-                       }
+                                        runtime->buffer + appl_ptr, count1))
+                               err = -EFAULT;
                        spin_lock_irqsave(&runtime->lock, flags);
+                       if (err)
+                               goto out;
                }
                result += count1;
                count -= count1;
        }
+ out:
+       snd_rawmidi_buffer_unref(runtime);
        spin_unlock_irqrestore(&runtime->lock, flags);
-       return result;
+       return result > 0 ? result : err;
 }
 
 long snd_rawmidi_kernel_read(struct snd_rawmidi_substream *substream,
@@ -1342,6 +1363,7 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
                        return -EAGAIN;
                }
        }
+       snd_rawmidi_buffer_ref(runtime);
        while (count > 0 && runtime->avail > 0) {
                count1 = runtime->buffer_size - runtime->appl_ptr;
                if (count1 > count)
@@ -1373,6 +1395,7 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
        }
       __end:
        count1 = runtime->avail < runtime->buffer_size;
+       snd_rawmidi_buffer_unref(runtime);
        spin_unlock_irqrestore(&runtime->lock, flags);
        if (count1)
                snd_rawmidi_output_trigger(substream, 1);
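
The new buffer_ref counter, manipulated only under runtime->lock, pins the buffer while a reader or writer has dropped the lock to copy to or from userspace; resize_runtime_buffer() now returns -EBUSY instead of swapping and freeing a buffer that is still being copied, closing a use-after-free window. The pin/unpin-versus-replace interplay, sketched with an ordinary mutex:

#include <pthread.h>
#include <stdlib.h>

struct rt {
        pthread_mutex_t lock;
        char *buffer;
        int buffer_ref;         /* users currently copying from buffer */
};

/* Fails (EBUSY-like) while the buffer is pinned. */
static int resize(struct rt *rt, size_t new_size)
{
        char *newbuf = malloc(new_size);

        if (!newbuf)
                return -1;
        pthread_mutex_lock(&rt->lock);
        if (rt->buffer_ref) {           /* someone is mid-copy: refuse */
                pthread_mutex_unlock(&rt->lock);
                free(newbuf);
                return -1;
        }
        free(rt->buffer);
        rt->buffer = newbuf;            /* safe: nobody holds a reference */
        pthread_mutex_unlock(&rt->lock);
        return 0;
}

static void reader(struct rt *rt)
{
        pthread_mutex_lock(&rt->lock);
        rt->buffer_ref++;               /* pin: buffer may not be replaced */
        pthread_mutex_unlock(&rt->lock);
        /* ... long copy to userspace without holding the lock ... */
        pthread_mutex_lock(&rt->lock);
        rt->buffer_ref--;               /* unpin */
        pthread_mutex_unlock(&rt->lock);
}
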
index 16c7f6605511ee848ec78e6f719926cf66d945b3..26e7cb555d3c593d81c2653267f95469190e6cbe 100644 (file)
@@ -66,8 +66,7 @@ TRACE_EVENT(amdtp_packet,
                __entry->irq,
                __entry->index,
                __print_array(__get_dynamic_array(cip_header),
-                             __get_dynamic_array_len(cip_header),
-                             sizeof(u8)))
+                             __get_dynamic_array_len(cip_header), 1))
 );
 
 #endif
index e764816a8f7a1f89f045cedaa149d8ee21180e9f..b039429e687171c5cf40fe942383f53acd3f82fa 100644 (file)
@@ -867,10 +867,13 @@ static void snd_miro_write(struct snd_miro *chip, unsigned char reg,
        spin_unlock_irqrestore(&chip->lock, flags);
 }
 
+static inline void snd_miro_write_mask(struct snd_miro *chip,
+               unsigned char reg, unsigned char value, unsigned char mask)
+{
+       unsigned char oldval = snd_miro_read(chip, reg);
 
-#define snd_miro_write_mask(chip, reg, value, mask)    \
-       snd_miro_write(chip, reg,                       \
-               (snd_miro_read(chip, reg) & ~(mask)) | ((value) & (mask)))
+       snd_miro_write(chip, reg, (oldval & ~mask) | (value & mask));
+}
 
 /*
  *  Proc Interface
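
Turning snd_miro_write_mask() from a macro into a static inline removes double evaluation: in the macro form both reg and mask expand twice, so an argument with side effects would execute twice. A minimal demonstration of the hazard the conversion avoids:

#include <stdio.h>

static int calls;
static int next(void) { return ++calls; }

#define READ_TWICE_MACRO(v) ((v) + (v))   /* evaluates its argument twice */

static inline int read_once_inline(int v) /* argument evaluated exactly once */
{
        return v + v;
}

int main(void)
{
        int m, f;

        calls = 0;
        m = READ_TWICE_MACRO(next());   /* next() runs twice: 1 + 2 == 3 */
        printf("macro:  %d, next() ran %d times\n", m, calls);

        calls = 0;
        f = read_once_inline(next());   /* next() runs once: 1 + 1 == 2 */
        printf("inline: %d, next() ran %d times\n", f, calls);
        return 0;
}
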
index d06b29693c85f0e4f02e9b81c88d0b00897f83c2..0e6d20e4915854f99517c7cc47dbe5a5dafe0773 100644 (file)
@@ -317,10 +317,13 @@ static void snd_opti9xx_write(struct snd_opti9xx *chip, unsigned char reg,
 }
 
 
-#define snd_opti9xx_write_mask(chip, reg, value, mask) \
-       snd_opti9xx_write(chip, reg,                    \
-               (snd_opti9xx_read(chip, reg) & ~(mask)) | ((value) & (mask)))
+static inline void snd_opti9xx_write_mask(struct snd_opti9xx *chip,
+               unsigned char reg, unsigned char value, unsigned char mask)
+{
+       unsigned char oldval = snd_opti9xx_read(chip, reg);
 
+       snd_opti9xx_write(chip, reg, (oldval & ~mask) | (value & mask));
+}
 
 static int snd_opti9xx_configure(struct snd_opti9xx *chip,
                                           long port,
index 457a2c0654856ca932da2b58a16b4eac2b88ec42..0310193ea1bd4dd59beda03b250109aea56497dd 100644 (file)
@@ -2078,9 +2078,10 @@ static void pcm_mmap_prepare(struct snd_pcm_substream *substream,
  * some HD-audio PCI entries are exposed without any codecs, and such devices
  * should be ignored from the beginning.
  */
-static const struct snd_pci_quirk driver_blacklist[] = {
-       SND_PCI_QUIRK(0x1462, 0xcb59, "MSI TRX40 Creator", 0),
-       SND_PCI_QUIRK(0x1462, 0xcb60, "MSI TRX40", 0),
+static const struct pci_device_id driver_blacklist[] = {
+       { PCI_DEVICE_SUB(0x1022, 0x1487, 0x1043, 0x874f) }, /* ASUS ROG Zenith II / Strix */
+       { PCI_DEVICE_SUB(0x1022, 0x1487, 0x1462, 0xcb59) }, /* MSI TRX40 Creator */
+       { PCI_DEVICE_SUB(0x1022, 0x1487, 0x1462, 0xcb60) }, /* MSI TRX40 */
        {}
 };
 
@@ -2100,7 +2101,7 @@ static int azx_probe(struct pci_dev *pci,
        bool schedule_probe;
        int err;
 
-       if (snd_pci_quirk_lookup(pci, driver_blacklist)) {
+       if (pci_match_id(driver_blacklist, pci)) {
                dev_info(&pci->dev, "Skipping the blacklisted device\n");
                return -ENODEV;
        }
index 4eff16053bd5556177cd2bc5b3aefda6ec07659e..93760a3564cfa5261e2f00f10863d971730b4d05 100644 (file)
@@ -1848,8 +1848,10 @@ static bool check_non_pcm_per_cvt(struct hda_codec *codec, hda_nid_t cvt_nid)
        /* Add a sanity check to pass the Klocwork check.
         * This should never happen.
         */
-       if (WARN_ON(spdif == NULL))
+       if (WARN_ON(spdif == NULL)) {
+               mutex_unlock(&codec->spdif_mutex);
                return true;
+       }
        non_pcm = !!(spdif->status & IEC958_AES0_NONAUDIO);
        mutex_unlock(&codec->spdif_mutex);
        return non_pcm;
@@ -2198,7 +2200,9 @@ static int generic_hdmi_build_controls(struct hda_codec *codec)
 
        for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
                struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
+               struct hdmi_eld *pin_eld = &per_pin->sink_eld;
 
+               pin_eld->eld_valid = false;
                hdmi_present_sense(per_pin, 0);
        }
 
index c1a85c8f7b69cb8d4d807ff30ea8eba542d0639e..e62d58872b6e600c9f12788a8d03a3bde6b9ed0e 100644 (file)
@@ -384,6 +384,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
        case 0x10ec0282:
        case 0x10ec0283:
        case 0x10ec0286:
+       case 0x10ec0287:
        case 0x10ec0288:
        case 0x10ec0285:
        case 0x10ec0298:
@@ -2457,6 +2458,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
        SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
        SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950),
+       SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1462, 0x1275, "MSI-GL63", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
@@ -2472,6 +2474,9 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x97e1, "Clevo P970[ER][CDFN]", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+       SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+       SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+       SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
        SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
        SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
@@ -5480,18 +5485,9 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
                { 0x19, 0x21a11010 }, /* dock mic */
                { }
        };
-       /* Assure the speaker pin to be coupled with DAC NID 0x03; otherwise
-        * the speaker output becomes too low by some reason on Thinkpads with
-        * ALC298 codec
-        */
-       static const hda_nid_t preferred_pairs[] = {
-               0x14, 0x03, 0x17, 0x02, 0x21, 0x02,
-               0
-       };
        struct alc_spec *spec = codec->spec;
 
        if (action == HDA_FIXUP_ACT_PRE_PROBE) {
-               spec->gen.preferred_dacs = preferred_pairs;
                spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
                snd_hda_apply_pincfgs(codec, pincfgs);
        } else if (action == HDA_FIXUP_ACT_INIT) {
@@ -5504,6 +5500,23 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
        }
 }
 
+static void alc_fixup_tpt470_dacs(struct hda_codec *codec,
+                                 const struct hda_fixup *fix, int action)
+{
+       /* Ensure the speaker pin is coupled with DAC NID 0x03; otherwise
+        * the speaker output becomes too low for some reason on Thinkpads
+        * with the ALC298 codec
+        */
+       static const hda_nid_t preferred_pairs[] = {
+               0x14, 0x03, 0x17, 0x02, 0x21, 0x02,
+               0
+       };
+       struct alc_spec *spec = codec->spec;
+
+       if (action == HDA_FIXUP_ACT_PRE_PROBE)
+               spec->gen.preferred_dacs = preferred_pairs;
+}
+
 static void alc_shutup_dell_xps13(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
@@ -5856,6 +5869,15 @@ static void alc233_alc662_fixup_lenovo_dual_codecs(struct hda_codec *codec,
        }
 }
 
+static void alc225_fixup_s3_pop_noise(struct hda_codec *codec,
+                                     const struct hda_fixup *fix, int action)
+{
+       if (action != HDA_FIXUP_ACT_PRE_PROBE)
+               return;
+
+       codec->power_save_node = 1;
+}
+
 /* Forcibly assign NID 0x03 to HP/LO while NID 0x02 to SPK for EQ */
 static void alc274_fixup_bind_dacs(struct hda_codec *codec,
                                    const struct hda_fixup *fix, int action)
@@ -5960,6 +5982,7 @@ enum {
        ALC269_FIXUP_HP_LINE1_MIC1_LED,
        ALC269_FIXUP_INV_DMIC,
        ALC269_FIXUP_LENOVO_DOCK,
+       ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST,
        ALC269_FIXUP_NO_SHUTUP,
        ALC286_FIXUP_SONY_MIC_NO_PRESENCE,
        ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
@@ -6045,9 +6068,11 @@ enum {
        ALC233_FIXUP_ACER_HEADSET_MIC,
        ALC294_FIXUP_LENOVO_MIC_LOCATION,
        ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE,
+       ALC225_FIXUP_S3_POP_NOISE,
        ALC700_FIXUP_INTEL_REFERENCE,
        ALC274_FIXUP_DELL_BIND_DACS,
        ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
+       ALC298_FIXUP_TPT470_DOCK_FIX,
        ALC298_FIXUP_TPT470_DOCK,
        ALC255_FIXUP_DUMMY_LINEOUT_VERB,
        ALC255_FIXUP_DELL_HEADSET_MIC,
@@ -6080,9 +6105,12 @@ enum {
        ALC294_FIXUP_ASUS_DUAL_SPK,
        ALC285_FIXUP_THINKPAD_HEADSET_JACK,
        ALC294_FIXUP_ASUS_HPE,
+       ALC294_FIXUP_ASUS_COEF_1B,
        ALC285_FIXUP_HP_GPIO_LED,
        ALC285_FIXUP_HP_MUTE_LED,
        ALC236_FIXUP_HP_MUTE_LED,
+       ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
+       ALC295_FIXUP_ASUS_MIC_NO_PRESENCE,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -6280,6 +6308,12 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT
        },
+       [ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc269_fixup_limit_int_mic_boost,
+               .chained = true,
+               .chain_id = ALC269_FIXUP_LENOVO_DOCK,
+       },
        [ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc269_fixup_pincfg_no_hp_to_lineout,
@@ -6932,6 +6966,12 @@ static const struct hda_fixup alc269_fixups[] = {
                        { }
                },
                .chained = true,
+               .chain_id = ALC225_FIXUP_S3_POP_NOISE
+       },
+       [ALC225_FIXUP_S3_POP_NOISE] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc225_fixup_s3_pop_noise,
+               .chained = true,
                .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
        },
        [ALC700_FIXUP_INTEL_REFERENCE] = {
@@ -6964,12 +7004,18 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC274_FIXUP_DELL_BIND_DACS
        },
-       [ALC298_FIXUP_TPT470_DOCK] = {
+       [ALC298_FIXUP_TPT470_DOCK_FIX] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc_fixup_tpt470_dock,
                .chained = true,
                .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE
        },
+       [ALC298_FIXUP_TPT470_DOCK] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc_fixup_tpt470_dacs,
+               .chained = true,
+               .chain_id = ALC298_FIXUP_TPT470_DOCK_FIX
+       },
        [ALC255_FIXUP_DUMMY_LINEOUT_VERB] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
@@ -7204,6 +7250,17 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
        },
+       [ALC294_FIXUP_ASUS_COEF_1B] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       /* Set bit 10 to correct noisy output after reboot from
+                        * Windows 10 (due to pop noise reduction?)
+                        */
+                       { 0x20, AC_VERB_SET_COEF_INDEX, 0x1b },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x4e4b },
+                       { }
+               },
+       },
        [ALC285_FIXUP_HP_GPIO_LED] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc285_fixup_hp_gpio_led,
@@ -7216,6 +7273,22 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc236_fixup_hp_mute_led,
        },
+       [ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc5 },
+                       { }
+               },
+       },
+       [ALC295_FIXUP_ASUS_MIC_NO_PRESENCE] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MODE
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -7383,8 +7456,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
        SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
        SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE),
+       SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
        SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
+       SND_PCI_QUIRK(0x1043, 0x1b11, "ASUS UX431DA", ALC294_FIXUP_ASUS_COEF_1B),
        SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -7410,6 +7485,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
        SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
        SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
+       SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+       SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
        SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
        SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
@@ -7420,12 +7497,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x8560, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x1558, 0x8561, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
+       SND_PCI_QUIRK(0x17aa, 0x1048, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
-       SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
@@ -7564,6 +7642,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC269_FIXUP_HEADSET_MODE, .name = "headset-mode"},
        {.id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, .name = "headset-mode-no-hp-mic"},
        {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
+       {.id = ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST, .name = "lenovo-dock-limit-boost"},
        {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
        {.id = ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, .name = "hp-dock-gpio-mic1-led"},
        {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
@@ -7575,6 +7654,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"},
        {.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
        {.id = ALC292_FIXUP_TPT460, .name = "tpt460"},
+       {.id = ALC298_FIXUP_TPT470_DOCK_FIX, .name = "tpt470-dock-fix"},
        {.id = ALC298_FIXUP_TPT470_DOCK, .name = "tpt470-dock"},
        {.id = ALC233_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"},
        {.id = ALC700_FIXUP_INTEL_REFERENCE, .name = "alc700-ref"},
@@ -7992,6 +8072,18 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x12, 0x90a60130},
                {0x17, 0x90170110},
                {0x21, 0x03211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0295, 0x1043, "ASUS", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE,
+               {0x12, 0x90a60120},
+               {0x17, 0x90170110},
+               {0x21, 0x04211030}),
+       SND_HDA_PIN_QUIRK(0x10ec0295, 0x1043, "ASUS", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE,
+               {0x12, 0x90a60130},
+               {0x17, 0x90170110},
+               {0x21, 0x03211020}),
        SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
                {0x14, 0x90170110},
                {0x21, 0x04211020}),
@@ -8201,6 +8293,7 @@ static int patch_alc269(struct hda_codec *codec)
        case 0x10ec0215:
        case 0x10ec0245:
        case 0x10ec0285:
+       case 0x10ec0287:
        case 0x10ec0289:
                spec->codec_variant = ALC269_TYPE_ALC215;
                spec->shutup = alc225_shutup;
@@ -8208,8 +8301,6 @@ static int patch_alc269(struct hda_codec *codec)
                spec->gen.mixer_nid = 0;
                break;
        case 0x10ec0225:
-               codec->power_save_node = 1;
-               /* fall through */
        case 0x10ec0295:
        case 0x10ec0299:
                spec->codec_variant = ALC269_TYPE_ALC225;
@@ -9481,6 +9572,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
        HDA_CODEC_ENTRY(0x10ec0284, "ALC284", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0285, "ALC285", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0286, "ALC286", patch_alc269),
+       HDA_CODEC_ENTRY(0x10ec0287, "ALC287", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0288, "ALC288", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0289, "ALC289", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0290, "ALC290", patch_alc269),
index 884d0cdec08cdd911816137991938f3627a29f7f..73e1e5400506a82b5faf29fd2b29c9e8eb1b90b1 100644 (file)
@@ -2332,7 +2332,8 @@ static int snd_ice1712_chip_init(struct snd_ice1712 *ice)
        pci_write_config_byte(ice->pci, 0x61, ice->eeprom.data[ICE_EEP1_ACLINK]);
        pci_write_config_byte(ice->pci, 0x62, ice->eeprom.data[ICE_EEP1_I2SID]);
        pci_write_config_byte(ice->pci, 0x63, ice->eeprom.data[ICE_EEP1_SPDIF]);
-       if (ice->eeprom.subvendor != ICE1712_SUBDEVICE_STDSP24) {
+       if (ice->eeprom.subvendor != ICE1712_SUBDEVICE_STDSP24 &&
+           ice->eeprom.subvendor != ICE1712_SUBDEVICE_STAUDIO_ADCIII) {
                ice->gpio.write_mask = ice->eeprom.gpiomask;
                ice->gpio.direction = ice->eeprom.gpiodir;
                snd_ice1712_write(ice, ICE1712_IREG_GPIO_WRITE_MASK,
index d37db32ecd3b736fe737a94ef25ddedeaa9a826d..e39dc85c355a0c8828100854634a0ee85683f127 100644 (file)
@@ -21,8 +21,7 @@
 enum {
        LINE6_PODHD300,
        LINE6_PODHD400,
-       LINE6_PODHD500_0,
-       LINE6_PODHD500_1,
+       LINE6_PODHD500,
        LINE6_PODX3,
        LINE6_PODX3LIVE,
        LINE6_PODHD500X,
@@ -318,8 +317,7 @@ static const struct usb_device_id podhd_id_table[] = {
        /* TODO: no need to alloc data interfaces when only audio is used */
        { LINE6_DEVICE(0x5057),    .driver_info = LINE6_PODHD300 },
        { LINE6_DEVICE(0x5058),    .driver_info = LINE6_PODHD400 },
-       { LINE6_IF_NUM(0x414D, 0), .driver_info = LINE6_PODHD500_0 },
-       { LINE6_IF_NUM(0x414D, 1), .driver_info = LINE6_PODHD500_1 },
+       { LINE6_IF_NUM(0x414D, 0), .driver_info = LINE6_PODHD500 },
        { LINE6_IF_NUM(0x414A, 0), .driver_info = LINE6_PODX3 },
        { LINE6_IF_NUM(0x414B, 0), .driver_info = LINE6_PODX3LIVE },
        { LINE6_IF_NUM(0x4159, 0), .driver_info = LINE6_PODHD500X },
@@ -352,23 +350,13 @@ static const struct line6_properties podhd_properties_table[] = {
                .ep_audio_r = 0x82,
                .ep_audio_w = 0x01,
        },
-       [LINE6_PODHD500_0] = {
+       [LINE6_PODHD500] = {
                .id = "PODHD500",
                .name = "POD HD500",
-               .capabilities   = LINE6_CAP_PCM
+               .capabilities   = LINE6_CAP_PCM | LINE6_CAP_CONTROL
                                | LINE6_CAP_HWMON,
                .altsetting = 1,
-               .ep_ctrl_r = 0x81,
-               .ep_ctrl_w = 0x01,
-               .ep_audio_r = 0x86,
-               .ep_audio_w = 0x02,
-       },
-       [LINE6_PODHD500_1] = {
-               .id = "PODHD500",
-               .name = "POD HD500",
-               .capabilities   = LINE6_CAP_PCM
-                               | LINE6_CAP_HWMON,
-               .altsetting = 0,
+               .ctrl_if = 1,
                .ep_ctrl_r = 0x81,
                .ep_ctrl_w = 0x01,
                .ep_audio_r = 0x86,
index a88d7854513b96ff7dc3f44ccaebfeea2b12ee35..15769f266790e359bb9c962ebda8464bb52f334d 100644 (file)
@@ -1182,6 +1182,14 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
                        cval->res = 384;
                }
                break;
+       case USB_ID(0x0495, 0x3042): /* ESS Technology Asus USB DAC */
+               if (strstr(kctl->id.name, "Playback Volume") != NULL ||
+                   strstr(kctl->id.name, "Capture Volume") != NULL) {
+                       cval->min >>= 8;
+                       cval->max = 0;
+                       cval->res = 1;
+               }
+               break;
        }
 }
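The new ESS/Asus case follows the usual shape of these quirks: the device reports an unusable volume range, so the driver rewrites cval before the range is exposed. A worked example under an assumed reported minimum of -25600 raw units (1/256-dB steps):

	int min = -25600;

	min >>= 8;	/* -25600 >> 8 == -100: drop the 1/256-dB scaling */
	/* cval->max = 0 and cval->res = 1 then leave the driver to derive
	 * a sane maximum at 1-unit resolution (assumed from how the other
	 * cases in this function behave) */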
 
index 0260c750e15693267b1b16095e124f6fb60cbeb6..9af7aa93f6fabc2d826ae510e97e1f680e8c1719 100644 (file)
@@ -397,6 +397,21 @@ static const struct usbmix_connector_map trx40_mobo_connector_map[] = {
        {}
 };
 
+/* Rear panel + front mic on Gigabyte TRX40 Aorus Master with ALC1220-VB */
+static const struct usbmix_name_map aorus_master_alc1220vb_map[] = {
+       { 17, NULL },                   /* OT, IEC958?, disabled */
+       { 19, NULL, 12 }, /* FU, Input Gain Pad - broken response, disabled */
+       { 16, "Line Out" },             /* OT */
+       { 22, "Line Out Playback" },    /* FU */
+       { 7, "Line" },                  /* IT */
+       { 19, "Line Capture" },         /* FU */
+       { 8, "Mic" },                   /* IT */
+       { 20, "Mic Capture" },          /* FU */
+       { 9, "Front Mic" },             /* IT */
+       { 21, "Front Mic Capture" },    /* FU */
+       {}
+};
+
 /*
  * Control map entries
  */
@@ -526,6 +541,10 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
                .id = USB_ID(0x1b1c, 0x0a42),
                .map = corsair_virtuoso_map,
        },
+       {       /* Gigabyte TRX40 Aorus Master (rear panel + front mic) */
+               .id = USB_ID(0x0414, 0xa001),
+               .map = aorus_master_alc1220vb_map,
+       },
        {       /* Gigabyte TRX40 Aorus Pro WiFi */
                .id = USB_ID(0x0414, 0xa002),
                .map = trx40_mobo_map,
@@ -549,6 +568,11 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
                .map = trx40_mobo_map,
                .connector_map = trx40_mobo_connector_map,
        },
+       {       /* Asrock TRX40 Creator */
+               .id = USB_ID(0x26ce, 0x0a01),
+               .map = trx40_mobo_map,
+               .connector_map = trx40_mobo_connector_map,
+       },
        { 0 } /* terminator */
 };
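For readers new to these tables: a usbmix_name_map entry with a NULL name suppresses the control for that unit ID, a non-NULL name renames it, and an optional third initializer restricts the entry to a single control selector (field roles inferred from the other maps in this file). Reading the new Aorus Master map with that key:

	{ 17, NULL },		/* output terminal 17: hidden entirely */
	{ 19, NULL, 12 },	/* unit 19, selector 12 only: hidden */
	{ 16, "Line Out" },	/* renamed so UCM profiles get stable names */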
 
index a1df4c5b4f8cbf9ae6c63f95fe759897a41dda43..eb89902a83bebbf1d442d33714fc325093cf79e3 100644 (file)
@@ -3563,6 +3563,32 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
 ALC1220_VB_DESKTOP(0x0414, 0xa002), /* Gigabyte TRX40 Aorus Pro WiFi */
 ALC1220_VB_DESKTOP(0x0db0, 0x0d64), /* MSI TRX40 Creator */
 ALC1220_VB_DESKTOP(0x0db0, 0x543d), /* MSI TRX40 */
+ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */
 #undef ALC1220_VB_DESKTOP
 
+/* Two entries for the Gigabyte TRX40 Aorus Master:
+ * the TRX40 Aorus Master has two USB-audio devices, one for the front
+ * headphone with an ESS SABRE9218 DAC chip, and another for the remaining
+ * I/O (the rear panel and the front mic) with a Realtek ALC1220-VB.
+ * Two distinct names are provided here to make writing UCM profiles easier.
+ */
+{
+       USB_DEVICE(0x0414, 0xa000),
+       .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+               .vendor_name = "Gigabyte",
+               .product_name = "Aorus Master Front Headphone",
+               .profile_name = "Gigabyte-Aorus-Master-Front-Headphone",
+               .ifnum = QUIRK_NO_INTERFACE
+       }
+},
+{
+       USB_DEVICE(0x0414, 0xa001),
+       .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+               .vendor_name = "Gigabyte",
+               .product_name = "Aorus Master Main Audio",
+               .profile_name = "Gigabyte-Aorus-Master-Main-Audio",
+               .ifnum = QUIRK_NO_INTERFACE
+       }
+},
+
 #undef USB_DEVICE_VENDOR_SPEC
index 351ba214a9d3fe1ba80129fb38a68bce570189df..d8a765be5dfe8e2f62d240e4f1a83da13febab5c 100644 (file)
@@ -1636,13 +1636,14 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
            && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
                msleep(20);
 
-       /* Zoom R16/24, Logitech H650e, Jabra 550a needs a tiny delay here,
-        * otherwise requests like get/set frequency return as failed despite
-        * actually succeeding.
+       /* Zoom R16/24, Logitech H650e, Jabra 550a and Kingston HyperX need a
+        * tiny delay here, otherwise requests like get/set frequency return
+        * as failed despite actually succeeding.
         */
        if ((chip->usb_id == USB_ID(0x1686, 0x00dd) ||
             chip->usb_id == USB_ID(0x046d, 0x0a46) ||
-            chip->usb_id == USB_ID(0x0b0e, 0x0349)) &&
+            chip->usb_id == USB_ID(0x0b0e, 0x0349) ||
+            chip->usb_id == USB_ID(0x0951, 0x16ad)) &&
            (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
                usleep_range(1000, 2000);
 }
@@ -1687,7 +1688,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
 
        case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */
        case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */
-       case USB_ID(0x16b0, 0x06b2): /* NuPrime DAC-10 */
+       case USB_ID(0x16d0, 0x06b2): /* NuPrime DAC-10 */
        case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
        case USB_ID(0x16d0, 0x0733): /* Furutech ADL Stratos */
        case USB_ID(0x16d0, 0x09db): /* NuPrime Audio DAC-9 */
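The DAC-10 line is a one-nibble vendor-ID typo fix: 0x16b0 is not the NuPrime vendor ID, so the entry could never match, while 0x16d0 lines up with the neighboring NuPrime, Encore and Furutech entries. USB_ID packs vendor and product into a single word (as defined in the driver's usbaudio.h), so a wrong vendor silently yields a different lookup key rather than any error:

	#define USB_ID(vendor, product) (((vendor) << 16) | (product))
	/* 0x16b0:0x06b2 -> 0x16b006b2, 0x16d0:0x06b2 -> 0x16d006b2 */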
index 196fdd02b8b1b3d71ac994715f7dee8fa8b5fa52..30d7d04d72d6bad6dfb419f7606e8d5c8fd9b622 100644 (file)
@@ -3,7 +3,7 @@
 #define _UAPI_ASM_X86_UNISTD_H
 
 /* x32 syscall flag bit */
-#define __X32_SYSCALL_BIT      0x40000000UL
+#define __X32_SYSCALL_BIT      0x40000000
 
 #ifndef __KERNEL__
 # ifdef __i386__
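Dropping the UL suffix restores the constant's historical type of int. That matters to userspace code that masks syscall numbers held in an int: with an unsigned long constant, the int operand is sign-extended to 64 bits before the bitwise operation, which changes the result for negative syscall numbers. A minimal illustration (values hypothetical):

	int nr = -1;	/* e.g. a tracer's "skip this syscall" marker */

	/* UL constant: nr sign-extends to 0xffffffffffffffff first */
	unsigned long a = nr & ~0x40000000UL;	/* 0xffffffffbfffffff */

	/* int constant: the expression stays 32-bit */
	int b = nr & ~0x40000000;		/* 0xbfffffff as an int */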
index 16b9a420e6fd325bee88ed3ff53c2eb245f98746..0efaf45f7367726b788fb0d93a8742dc9eeb3f5e 100644 (file)
@@ -314,6 +314,7 @@ int apply_xbc(const char *path, const char *xbc_path)
        ret = delete_xbc(path);
        if (ret < 0) {
                pr_err("Failed to delete previous boot config: %d\n", ret);
+               free(data);
                return ret;
        }
 
@@ -321,24 +322,27 @@ int apply_xbc(const char *path, const char *xbc_path)
        fd = open(path, O_RDWR | O_APPEND);
        if (fd < 0) {
                pr_err("Failed to open %s: %d\n", path, fd);
+               free(data);
                return fd;
        }
        /* TODO: Ensure the @path is initramfs/initrd image */
        ret = write(fd, data, size + 8);
        if (ret < 0) {
                pr_err("Failed to apply a boot config: %d\n", ret);
-               return ret;
+               goto out;
        }
        /* Write a magic word of the bootconfig */
        ret = write(fd, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN);
        if (ret < 0) {
                pr_err("Failed to apply a boot config magic: %d\n", ret);
-               return ret;
+               goto out;
        }
+       ret = 0;
+out:
        close(fd);
        free(data);
 
-       return 0;
+       return ret;
 }
 
 int usage(void)
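The apply_xbc() fix converts the function to the single-exit cleanup shape so that neither the data buffer nor the descriptor can leak on any error path, and so a failed write is reported instead of being swallowed by the old unconditional `return 0`. The same idiom in miniature, with load() and use() as hypothetical stand-ins:

	int do_apply(const char *path)
	{
		char *data = load(path);	/* hypothetical helper */
		int ret, fd;

		fd = open(path, O_RDWR | O_APPEND);
		if (fd < 0) {
			free(data);
			return fd;
		}
		ret = use(fd, data);		/* hypothetical helper */
		if (ret < 0)
			goto out;
		ret = 0;
	out:
		close(fd);
		free(data);
		return ret;
	}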
index 7427a5ee761b3757feaf04ff76bf3afd7a91a90c..9d8e9613008a99c4998db7d9bfd8ac4c88cf0575 100644 (file)
@@ -159,7 +159,12 @@ class IocgStat:
         else:
             self.inflight_pct = 0
 
-        self.debt_ms = iocg.abs_vdebt.counter.value_() / VTIME_PER_USEC / 1000
+        # vdebt used to be an atomic64_t and is now u64, support both
+        try:
+            self.debt_ms = iocg.abs_vdebt.counter.value_() / VTIME_PER_USEC / 1000
+        except AttributeError:
+            self.debt_ms = iocg.abs_vdebt.value_() / VTIME_PER_USEC / 1000
+
         self.use_delay = blkg.use_delay.counter.value_()
         self.delay_ms = blkg.delay_nsec.counter.value_() / 1_000_000
 
index f3f3c3fb98cbf0fe24a7fa24aab1b050e51b4d8c..48a9c7c69ef1f78fadd64d0128307c0d67c60e1c 100644 (file)
@@ -148,11 +148,11 @@ struct pt_regs;
 #define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[4])
 #define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[5])
 #define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[6])
-#define PT_REGS_RET_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), grps[14])
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[14])
 #define PT_REGS_FP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[11])
 #define PT_REGS_RC_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[2])
 #define PT_REGS_SP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[15])
-#define PT_REGS_IP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), pdw.addr)
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), psw.addr)
 
 #elif defined(bpf_target_arm)
 
index 4b170fd08a28b194a6e3c006c9bcb1237dd6a246..3c6da70e6084144505fd7016b3568d2d951f615d 100644 (file)
@@ -72,6 +72,17 @@ static struct instruction *next_insn_same_func(struct objtool_file *file,
        return find_insn(file, func->cfunc->sec, func->cfunc->offset);
 }
 
+static struct instruction *prev_insn_same_sym(struct objtool_file *file,
+                                              struct instruction *insn)
+{
+       struct instruction *prev = list_prev_entry(insn, list);
+
+       if (&prev->list != &file->insn_list && prev->func == insn->func)
+               return prev;
+
+       return NULL;
+}
+
 #define func_for_each_insn(file, func, insn)                           \
        for (insn = find_insn(file, func->sec, func->offset);           \
             insn;                                                      \
@@ -1050,8 +1061,8 @@ static struct rela *find_jump_table(struct objtool_file *file,
         * it.
         */
        for (;
-            &insn->list != &file->insn_list && insn->func && insn->func->pfunc == func;
-            insn = insn->first_jump_src ?: list_prev_entry(insn, list)) {
+            insn && insn->func && insn->func->pfunc == func;
+            insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {
 
                if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
                        break;
@@ -1449,7 +1460,7 @@ static int update_insn_state_regs(struct instruction *insn, struct insn_state *s
        struct cfi_reg *cfa = &state->cfa;
        struct stack_op *op = &insn->stack_op;
 
-       if (cfa->base != CFI_SP)
+       if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
                return 0;
 
        /* push */
index 0b79c2353a210ed8a60cd1908beb671e435ef38f..12e01ac190ec1179b81216edc5e72df9c9c17edc 100644 (file)
@@ -87,9 +87,10 @@ struct elf {
 #define OFFSET_STRIDE          (1UL << OFFSET_STRIDE_BITS)
 #define OFFSET_STRIDE_MASK     (~(OFFSET_STRIDE - 1))
 
-#define for_offset_range(_offset, _start, _end)                \
-       for (_offset = ((_start) & OFFSET_STRIDE_MASK); \
-            _offset <= ((_end) & OFFSET_STRIDE_MASK);  \
+#define for_offset_range(_offset, _start, _end)                        \
+       for (_offset = ((_start) & OFFSET_STRIDE_MASK);         \
+            _offset >= ((_start) & OFFSET_STRIDE_MASK) &&      \
+            _offset <= ((_end) & OFFSET_STRIDE_MASK);          \
             _offset += OFFSET_STRIDE)
 
 static inline u32 sec_offset_hash(struct section *sec, unsigned long offset)
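The added `_offset >= ((_start) & OFFSET_STRIDE_MASK)` term is an unsigned-wraparound guard: when the masked end value sits at the top of the address space, `_offset += OFFSET_STRIDE` eventually wraps to 0, and `0 <= _end` alone would keep the loop alive forever. A standalone demonstration of the failure mode (ULONG_MAX from <limits.h>, stride value illustrative):

	unsigned long start = ULONG_MAX & ~0xfffUL;	/* e.g. offset -1, masked */
	unsigned long end = ULONG_MAX;
	unsigned long off;

	for (off = start; off <= end; off += 0x1000)
		;	/* never exits: off wraps past ULONG_MAX to 0, and
			 * 0 <= ULONG_MAX still holds; the new off >= start
			 * test breaks the loop exactly at the wrap */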
index 56d80adcf4bdec8d074d1036a87333901e2bfa59..43d0b5578f461fa1664c2f9160cb401d7a279f42 100644 (file)
@@ -19,7 +19,7 @@ void test_mmap(void)
        const size_t map_sz = roundup_page(sizeof(struct map_data));
        const int zero = 0, one = 1, two = 2, far = 1500;
        const long page_size = sysconf(_SC_PAGE_SIZE);
-       int err, duration = 0, i, data_map_fd, data_map_id, tmp_fd;
+       int err, duration = 0, i, data_map_fd, data_map_id, tmp_fd, rdmap_fd;
        struct bpf_map *data_map, *bss_map;
        void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2;
        struct test_mmap__bss *bss_data;
@@ -37,6 +37,17 @@ void test_mmap(void)
        data_map = skel->maps.data_map;
        data_map_fd = bpf_map__fd(data_map);
 
+       rdmap_fd = bpf_map__fd(skel->maps.rdonly_map);
+       tmp1 = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, rdmap_fd, 0);
+       if (CHECK(tmp1 != MAP_FAILED, "rdonly_write_mmap", "unexpected success\n")) {
+               munmap(tmp1, 4096);
+               goto cleanup;
+       }
+       /* now double-check if it's mmap()'able at all */
+       tmp1 = mmap(NULL, 4096, PROT_READ, MAP_SHARED, rdmap_fd, 0);
+       if (CHECK(tmp1 == MAP_FAILED, "rdonly_read_mmap", "failed: %d\n", errno))
+               goto cleanup;
+
        /* get map's ID */
        memset(&map_info, 0, map_info_sz);
        err = bpf_obj_get_info_by_fd(data_map_fd, &map_info, &map_info_sz);
@@ -217,6 +228,14 @@ void test_mmap(void)
 
        munmap(tmp2, 4 * page_size);
 
+       /* map all 4 pages, but with pg_off=1 page, should fail */
+       tmp1 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
+                   data_map_fd, page_size /* initial page shift */);
+       if (CHECK(tmp1 != MAP_FAILED, "adv_mmap7", "unexpected success")) {
+               munmap(tmp1, 4 * page_size);
+               goto cleanup;
+       }
+
        tmp1 = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0);
        if (CHECK(tmp1 == MAP_FAILED, "last_mmap", "failed %d\n", errno))
                goto cleanup;
index 6239596cd14e635866829ff7f9668191b3b3f714..4eb42cff5fe966e8405770672daa6aaaf82457f4 100644 (file)
@@ -7,6 +7,14 @@
 
 char _license[] SEC("license") = "GPL";
 
+struct {
+       __uint(type, BPF_MAP_TYPE_ARRAY);
+       __uint(max_entries, 4096);
+       __uint(map_flags, BPF_F_MMAPABLE | BPF_F_RDONLY_PROG);
+       __type(key, __u32);
+       __type(value, char);
+} rdonly_map SEC(".maps");
+
 struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 512 * 4); /* at least 4 pages of data */
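The kernel change this test exercises denies writable mmap() of maps that are read-only on the program side; letting userspace rewrite a BPF_F_RDONLY_PROG array behind the verifier's back would defeat the flag (that motivation is inferred from the test, not stated in it). From the BPF program the new map is plain read-only storage; a minimal sketch of a consumer, assuming the usual bpf_helpers.h environment of this file:

	SEC("raw_tp/sys_enter")
	int read_rdonly(void *ctx)
	{
		__u32 key = 0;
		char *p = bpf_map_lookup_elem(&rdonly_map, &key);

		/* loads verify fine; a store through p would be rejected
		 * by the verifier because of BPF_F_RDONLY_PROG */
		return p ? *p : 0;
	}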
index 56a50b25cd33bd13668881bc2969fcfb1a35a3bb..abb7344b531f4536fd000ba716c8d953188f0307 100644 (file)
@@ -30,13 +30,13 @@ int prog3(struct bpf_raw_tracepoint_args *ctx)
 SEC("fentry/__set_task_comm")
 int BPF_PROG(prog4, struct task_struct *tsk, const char *buf, bool exec)
 {
-       return !tsk;
+       return 0;
 }
 
 SEC("fexit/__set_task_comm")
 int BPF_PROG(prog5, struct task_struct *tsk, const char *buf, bool exec)
 {
-       return !tsk;
+       return 0;
 }
 
 char _license[] SEC("license") = "GPL";
index a253a064e6e059056d6389b87c5540e5cc37ebfe..58f4aa593b1b527f49f3b042c16920cf4e29a082 100644 (file)
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
        BPF_LD_MAP_FD(BPF_REG_1, 0),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
        /* r1 = [0x00, 0xff] */
        BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
         *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
         */
        BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
-       /* r1 = 0 or
-        *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
-        */
-       BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
        /* error on OOB pointer computation */
        BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
        /* exit */
        },
        .fixup_map_hash_8b = { 3 },
        /* not actually fully unbounded, but the bound is very high */
-       .errstr = "value 72057594021150720 makes map_value pointer be out of bounds",
-       .result = REJECT
+       .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root",
+       .result_unpriv = REJECT,
+       .errstr = "value -4294967168 makes map_value pointer be out of bounds",
+       .result = REJECT,
 },
 {
        "bounds check after truncation of boundary-crossing range (2)",
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
        BPF_LD_MAP_FD(BPF_REG_1, 0),
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
        /* r1 = [0x00, 0xff] */
        BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
         *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
         */
        BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
-       /* r1 = 0 or
-        *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
-        */
-       BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
        /* error on OOB pointer computation */
        BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
        /* exit */
        },
        .fixup_map_hash_8b = { 3 },
        /* not actually fully unbounded, but the bound is very high */
-       .errstr = "value 72057594021150720 makes map_value pointer be out of bounds",
-       .result = REJECT
+       .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root",
+       .result_unpriv = REJECT,
+       .errstr = "value -4294967168 makes map_value pointer be out of bounds",
+       .result = REJECT,
 },
 {
        "bounds check after wrapping 32-bit addition",
        },
        .result = ACCEPT
 },
+{
+       "assigning 32bit bounds to 64bit for wA = 0, wB = wA",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+                   offsetof(struct __sk_buff, data_end)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+                   offsetof(struct __sk_buff, data)),
+       BPF_MOV32_IMM(BPF_REG_9, 0),
+       BPF_MOV32_REG(BPF_REG_2, BPF_REG_9),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_2),
+       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
+       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_8, 1),
+       BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_6, 0),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .result = ACCEPT,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
index cd5e1f602ac9db1421d2b79adc2597ec20d8abca..909da9cdda97f5e66497ea86a3992f8d2bd12fbe 100644 (file)
@@ -351,6 +351,7 @@ static int test_alloc_errors(char *heap_name)
        }
 
        printf("Expected error checking passed\n");
+       ret = 0;
 out:
        if (dmabuf_fd >= 0)
                close(dmabuf_fd);
index 24dd8ed485802423be0279166d6735b33f261d71..b025daea062d876e782ae28a3a54dcc9998814eb 100755 (executable)
@@ -300,7 +300,7 @@ test_uc_aware()
        local i
 
        for ((i = 0; i < attempts; ++i)); do
-               if $ARPING -c 1 -I $h1 -b 192.0.2.66 -q -w 0.1; then
+               if $ARPING -c 1 -I $h1 -b 192.0.2.66 -q -w 1; then
                        ((passes++))
                fi
 
index dbd1e014ba17febfe59bc33c2c508538dd782c63..da49ad2761b5510f13f81f9289d139072eb4d5c9 100755 (executable)
@@ -264,6 +264,8 @@ trap_policer_test()
        local packets_t0
        local packets_t1
 
+       RET=0
+
        if [ $(devlink_trap_policers_num_get) -eq 0 ]; then
                check_err 1 "Failed to dump policers"
        fi
@@ -328,6 +330,8 @@ trap_group_check_policer()
 
 trap_policer_bind_test()
 {
+       RET=0
+
        devlink trap group set $DEVLINK_DEV group l2_drops policer 1
        check_err $? "Failed to bind a valid policer"
        if [ $(devlink_trap_group_policer_get "l2_drops") -ne 1 ]; then
index 11eee0b600407e2f83a036ae8d422acf0c744a56..d979ff14775ad87a3a3077c487c6ca08906261c6 100644 (file)
@@ -3,6 +3,7 @@
 #define _GNU_SOURCE
 #include <poll.h>
 #include <unistd.h>
+#include <assert.h>
 #include <signal.h>
 #include <pthread.h>
 #include <sys/epoll.h>
@@ -3136,4 +3137,149 @@ TEST(epoll59)
        close(ctx.sfd[0]);
 }
 
+enum {
+       EPOLL60_EVENTS_NR = 10,
+};
+
+struct epoll60_ctx {
+       volatile int stopped;
+       int ready;
+       int waiters;
+       int epfd;
+       int evfd[EPOLL60_EVENTS_NR];
+};
+
+static void *epoll60_wait_thread(void *ctx_)
+{
+       struct epoll60_ctx *ctx = ctx_;
+       struct epoll_event e;
+       sigset_t sigmask;
+       uint64_t v;
+       int ret;
+
+       /* Block SIGUSR1 */
+       sigemptyset(&sigmask);
+       sigaddset(&sigmask, SIGUSR1);
+       sigprocmask(SIG_SETMASK, &sigmask, NULL);
+
+       /* Prepare empty mask for epoll_pwait() */
+       sigemptyset(&sigmask);
+
+       while (!ctx->stopped) {
+               /* Mark we are ready */
+               __atomic_fetch_add(&ctx->ready, 1, __ATOMIC_ACQUIRE);
+
+               /* Start when all are ready */
+               while (__atomic_load_n(&ctx->ready, __ATOMIC_ACQUIRE) &&
+                      !ctx->stopped);
+
+               /* Account this waiter */
+               __atomic_fetch_add(&ctx->waiters, 1, __ATOMIC_ACQUIRE);
+
+               ret = epoll_pwait(ctx->epfd, &e, 1, 2000, &sigmask);
+               if (ret != 1) {
+                       /* We expect only signal delivery on stop */
+                       assert(ret < 0 && errno == EINTR && "Lost wakeup!\n");
+                       assert(ctx->stopped);
+                       break;
+               }
+
+               ret = read(e.data.fd, &v, sizeof(v));
+               /* Since we are in ET mode, each thread gets its own fd. */
+               assert(ret == sizeof(v));
+
+               __atomic_fetch_sub(&ctx->waiters, 1, __ATOMIC_RELEASE);
+       }
+
+       return NULL;
+}
+
+static inline unsigned long long msecs(void)
+{
+       struct timespec ts;
+       unsigned long long msecs;
+
+       clock_gettime(CLOCK_REALTIME, &ts);
+       msecs = ts.tv_sec * 1000ull;
+       msecs += ts.tv_nsec / 1000000ull;
+
+       return msecs;
+}
+
+static inline int count_waiters(struct epoll60_ctx *ctx)
+{
+       return __atomic_load_n(&ctx->waiters, __ATOMIC_ACQUIRE);
+}
+
+TEST(epoll60)
+{
+       struct epoll60_ctx ctx = { 0 };
+       pthread_t waiters[ARRAY_SIZE(ctx.evfd)];
+       struct epoll_event e;
+       int i, n, ret;
+
+       signal(SIGUSR1, signal_handler);
+
+       ctx.epfd = epoll_create1(0);
+       ASSERT_GE(ctx.epfd, 0);
+
+       /* Create event fds */
+       for (i = 0; i < ARRAY_SIZE(ctx.evfd); i++) {
+               ctx.evfd[i] = eventfd(0, EFD_NONBLOCK);
+               ASSERT_GE(ctx.evfd[i], 0);
+
+               e.events = EPOLLIN | EPOLLET;
+               e.data.fd = ctx.evfd[i];
+               ASSERT_EQ(epoll_ctl(ctx.epfd, EPOLL_CTL_ADD, ctx.evfd[i], &e), 0);
+       }
+
+       /* Create waiter threads */
+       for (i = 0; i < ARRAY_SIZE(waiters); i++)
+               ASSERT_EQ(pthread_create(&waiters[i], NULL,
+                                        epoll60_wait_thread, &ctx), 0);
+
+       for (i = 0; i < 300; i++) {
+               uint64_t v = 1, ms;
+
+               /* Wait for all to be ready */
+               while (__atomic_load_n(&ctx.ready, __ATOMIC_ACQUIRE) !=
+                      ARRAY_SIZE(ctx.evfd))
+                       ;
+
+               /* Steady, go */
+               __atomic_fetch_sub(&ctx.ready, ARRAY_SIZE(ctx.evfd),
+                                  __ATOMIC_ACQUIRE);
+
+               /* Wait all have gone to kernel */
+               while (count_waiters(&ctx) != ARRAY_SIZE(ctx.evfd))
+                       ;
+
+               /* 1ms should be enough to schedule away */
+               usleep(1000);
+
+               /* Quickly signal all handles at once */
+               for (n = 0; n < ARRAY_SIZE(ctx.evfd); n++) {
+                       ret = write(ctx.evfd[n], &v, sizeof(v));
+                       ASSERT_EQ(ret, sizeof(v));
+               }
+
+               /* Busy loop for 1s and wait for all waiters to wake up */
+               ms = msecs();
+               while (count_waiters(&ctx) && msecs() < ms + 1000)
+                       ;
+
+               ASSERT_EQ(count_waiters(&ctx), 0);
+       }
+       ctx.stopped = 1;
+       /* Stop waiters */
+       for (i = 0; i < ARRAY_SIZE(waiters); i++)
+               ret = pthread_kill(waiters[i], SIGUSR1);
+       for (i = 0; i < ARRAY_SIZE(waiters); i++)
+               pthread_join(waiters[i], NULL);
+
+       for (i = 0; i < ARRAY_SIZE(waiters); i++)
+               close(ctx.evfd[i]);
+       close(ctx.epfd);
+}
+
 TEST_HARNESS_MAIN
index 063ecb290a5a3442bb116639dbc7ac866c0f0639..a4605b5ee66d97dc12d92fb80bbbba2cf89ea106 100755 (executable)
@@ -17,6 +17,7 @@ echo "                -v|--verbose Increase verbosity of test messages"
 echo "         -vv        Alias of -v -v (Show all results in stdout)"
 echo "         -vvv       Alias of -v -v -v (Show all commands immediately)"
 echo "         --fail-unsupported Treat UNSUPPORTED as a failure"
+echo "         --fail-unresolved Treat UNRESOLVED as a failure"
 echo "         -d|--debug Debug mode (trace all shell commands)"
 echo "         -l|--logdir <dir> Save logs on the <dir>"
 echo "                     If <dir> is -, all logs output in console only"
@@ -29,8 +30,25 @@ err_ret=1
 # kselftest skip code is 4
 err_skip=4
 
+# cgroup RT scheduling prevents chrt commands from succeeding, which
+# induces failures in the wakeup tracer tests.  Disable for the duration of
+# the tests.
+
+readonly sched_rt_runtime=/proc/sys/kernel/sched_rt_runtime_us
+
+sched_rt_runtime_orig=$(cat $sched_rt_runtime)
+
+setup() {
+  echo -1 > $sched_rt_runtime
+}
+
+cleanup() {
+  echo $sched_rt_runtime_orig > $sched_rt_runtime
+}
+
 errexit() { # message
   echo "Error: $1" 1>&2
+  cleanup
   exit $err_ret
 }
 
@@ -39,6 +57,8 @@ if [ `id -u` -ne 0 ]; then
   errexit "this must be run by root user"
 fi
 
+setup
+
 # Utilities
 absdir() { # file_path
   (cd `dirname $1`; pwd)
@@ -93,6 +113,10 @@ parse_opts() { # opts
       UNSUPPORTED_RESULT=1
       shift 1
     ;;
+    --fail-unresolved)
+      UNRESOLVED_RESULT=1
+      shift 1
+    ;;
     --logdir|-l)
       LOG_DIR=$2
       shift 2
@@ -157,6 +181,7 @@ KEEP_LOG=0
 DEBUG=0
 VERBOSE=0
 UNSUPPORTED_RESULT=0
+UNRESOLVED_RESULT=0
 STOP_FAILURE=0
 # Parse command-line options
 parse_opts $*
@@ -235,6 +260,7 @@ TOTAL_RESULT=0
 
 INSTANCE=
 CASENO=0
+
 testcase() { # testfile
   CASENO=$((CASENO+1))
   desc=`grep "^#[ \t]*description:" $1 | cut -f2 -d:`
@@ -260,7 +286,7 @@ eval_result() { # sigval
     $UNRESOLVED)
       prlog "  [${color_blue}UNRESOLVED${color_reset}]"
       UNRESOLVED_CASES="$UNRESOLVED_CASES $CASENO"
-      return 1 # this is a kind of bug.. something happened.
+      return $UNRESOLVED_RESULT # depends on use case
     ;;
     $UNTESTED)
       prlog "  [${color_blue}UNTESTED${color_reset}]"
@@ -273,7 +299,7 @@ eval_result() { # sigval
       return $UNSUPPORTED_RESULT # depends on use case
     ;;
     $XFAIL)
-      prlog "  [${color_red}XFAIL${color_reset}]"
+      prlog "  [${color_green}XFAIL${color_reset}]"
       XFAILED_CASES="$XFAILED_CASES $CASENO"
       return 0
     ;;
@@ -406,5 +432,7 @@ prlog "# of unsupported: " `echo $UNSUPPORTED_CASES | wc -w`
 prlog "# of xfailed: " `echo $XFAILED_CASES | wc -w`
 prlog "# of undefined(test bug): " `echo $UNDEFINED_CASES | wc -w`
 
+cleanup
+
 # if no error, return 0
 exit $TOTAL_RESULT
index aefab0c66d5482e1c56160cfd13d44d68ed820f2..f59853857ad227f87eadc6ce1ab7545d9472e9d4 100644 (file)
@@ -10,10 +10,7 @@ if ! grep -q function_graph available_tracers; then
     exit_unsupported
 fi
 
-if [ ! -f set_ftrace_filter ]; then
-    echo "set_ftrace_filter not found? Is dynamic ftrace not set?"
-    exit_unsupported
-fi
+check_filter_file set_ftrace_filter
 
 do_reset() {
     if [ -e /proc/sys/kernel/stack_tracer_enabled ]; then
index c8a5209f21191ef2cae389717044a70dc627908a..d610f47edd90b361018c7cff3c8fd1c333b80a22 100644 (file)
@@ -9,6 +9,8 @@ if ! grep -q function_graph available_tracers; then
     exit_unsupported
 fi
 
+check_filter_file set_ftrace_filter
+
 fail() { # msg
     echo $1
     exit_fail
index f4e92afab14b2485d66076fd2b002341eb6a0c7a..28936f434ee5e7e986ebba4890fe3d879f8ce16f 100644 (file)
@@ -9,6 +9,8 @@ if ! grep -q function available_tracers; then
     exit_unsupported
 fi
 
+check_filter_file set_ftrace_filter
+
 disable_tracing
 clear_trace
 
index 8aa46a2ea133819c7ad448db44ea98f17fc1a5f0..71db68a7975f2532c97a817df17c9147e019d428 100644 (file)
@@ -15,10 +15,7 @@ if [ ! -f set_ftrace_notrace_pid ]; then
     exit_unsupported
 fi
 
-if [ ! -f set_ftrace_filter ]; then
-    echo "set_ftrace_filter not found? Is function tracer not set?"
-    exit_unsupported
-fi
+check_filter_file set_ftrace_filter
 
 do_function_fork=1
 
index f2ee1e889e1350e2c77c0c33b713de96afabc86d..d58403c4b7cdad155aa99143e4972572645e6c3b 100644 (file)
@@ -16,10 +16,7 @@ if [ ! -f set_ftrace_pid ]; then
     exit_unsupported
 fi
 
-if [ ! -f set_ftrace_filter ]; then
-    echo "set_ftrace_filter not found? Is function tracer not set?"
-    exit_unsupported
-fi
+check_filter_file set_ftrace_filter
 
 do_function_fork=1
 
index 1a52f2883fe02cd5718903be31e7d6a03cc31639..b2aff786c1a204680caf8d8ff8f1fd98a1625092 100644 (file)
@@ -3,7 +3,7 @@
 # description: ftrace - stacktrace filter command
 # flags: instance
 
-[ ! -f set_ftrace_filter ] && exit_unsupported
+check_filter_file set_ftrace_filter
 
 echo _do_fork:stacktrace >> set_ftrace_filter
 
index ca2ffd7957f9e7e8feaa2d32f18a8f13376a7fd6..e9b1fd534e9661ca10bac0b288715168cd3ba895 100644 (file)
 #
 
 # The triggers are set within the set_ftrace_filter file
-if [ ! -f set_ftrace_filter ]; then
-    echo "set_ftrace_filter not found? Is dynamic ftrace not set?"
-    exit_unsupported
-fi
+check_filter_file set_ftrace_filter
 
 do_reset() {
     reset_ftrace_filter
index 9330c873f9fe93e22341ba62b19989d59c63d01b..1a4b4a442d33ebd52ff97f1405de91b0a4ab17e8 100644 (file)
@@ -2,7 +2,7 @@
 # SPDX-License-Identifier: GPL-2.0
 # description: ftrace - function trace on module
 
-[ ! -f set_ftrace_filter ] && exit_unsupported
+check_filter_file set_ftrace_filter
 
 : "mod: allows to filter a non exist function"
 echo 'non_exist_func:mod:non_exist_module' > set_ftrace_filter
index dfbae637c60c7618efd87df6ae349d2d0093dd3b..a3dadb6b93b4c81bda80ec762e8f01c162e83f24 100644 (file)
@@ -18,10 +18,7 @@ if ! grep -q function_graph available_tracers; then
     exit_unsupported;
 fi
 
-if [ ! -f set_ftrace_filter ]; then
-    echo "set_ftrace_filter not found? Is dynamic ftrace not set?"
-    exit_unsupported
-fi
+check_filter_file set_ftrace_filter
 
 if [ ! -f function_profile_enabled ]; then
     echo "function_profile_enabled not found, function profiling enabled?"
index 51f6e6146bd93a8c422d9c79cc30d02fdc010cc1..70bad441fa7d3428658b0003f07d4b3a2ea9bfe1 100644 (file)
 #
 
 # The triggers are set within the set_ftrace_filter file
-if [ ! -f set_ftrace_filter ]; then
-    echo "set_ftrace_filter not found? Is dynamic ftrace not set?"
-    exit_unsupported
-fi
+check_filter_file set_ftrace_filter
 
 fail() { # mesg
     echo $1
index b414f0e3c64629ed3e0b118a18c5fa066c9fcdd0..51e9e80bc0e677487480b732b82f1fc342a2d59c 100644 (file)
@@ -8,6 +8,8 @@ if [ ! -f stack_trace ]; then
   exit_unsupported
 fi
 
+check_filter_file stack_trace_filter
+
 echo > stack_trace_filter
 echo 0 > stack_max_size
 echo 1 > /proc/sys/kernel/stack_tracer_enabled
index 1947387fe97607f27f225546dffb7152f458198a..3ed173f2944ac8e202086417342e2b02aa864653 100644 (file)
 #
 
 # The triggers are set within the set_ftrace_filter file
-if [ ! -f set_ftrace_filter ]; then
-    echo "set_ftrace_filter not found? Is dynamic ftrace not set?"
-    exit_unsupported
-fi
+check_filter_file set_ftrace_filter
 
 fail() { # mesg
     echo $1
index 5d4550591ff9f789d0f783c891b3cd766ceee27d..61a3c7e2634dfed97a6b95f85497a8dcd893acfe 100644 (file)
@@ -1,3 +1,9 @@
+check_filter_file() { # check filter file introduced by dynamic ftrace
+    if [ ! -f "$1" ]; then
+        echo "$1 not found? Is dynamic ftrace not set?"
+        exit_unsupported
+    fi
+}
 
 clear_trace() { # reset trace output
     echo > trace
index 1bcb67dcae26797ffea12e57ff65d12ec4b92e55..81490ecaaa9276de971f250e46f0400177d92b08 100644 (file)
@@ -38,7 +38,7 @@ for width in 64 32 16 8; do
   echo 0 > events/kprobes/testprobe/enable
 
   : "Confirm the arguments is recorded in given types correctly"
-  ARGS=`grep "testprobe" trace | sed -e 's/.* arg1=\(.*\) arg2=\(.*\) arg3=\(.*\) arg4=\(.*\)/\1 \2 \3 \4/'`
+  ARGS=`grep "testprobe" trace | head -n 1 | sed -e 's/.* arg1=\(.*\) arg2=\(.*\) arg3=\(.*\) arg4=\(.*\)/\1 \2 \3 \4/'`
   check_types $ARGS $width
 
   : "Clear event for next loop"
index 7650a82db3f571d1ae9355e7edd6deb2812688cc..df5072815b87e450fbf0b8a32b3d3578f97a8d78 100644 (file)
@@ -5,6 +5,8 @@
 [ -f kprobe_events ] || exit_unsupported # this is configurable
 grep "function" available_tracers || exit_unsupported # this is configurable
 
+check_filter_file set_ftrace_filter
+
 # prepare
 echo nop > current_tracer
 echo _do_fork > set_ftrace_filter
index cbd174334a480f914a64ed6c693797ddeb7c5250..2b82c80edf69e5ee426999a3a37bf36f2e669f89 100644 (file)
@@ -17,7 +17,14 @@ unsup() { #msg
     exit_unsupported
 }
 
-modprobe $MOD || unsup "$MOD module not available"
+unres() { #msg
+    reset_tracer
+    rmmod $MOD || true
+    echo $1
+    exit_unresolved
+}
+
+modprobe $MOD || unres "$MOD module not available"
 rmmod $MOD
 
 grep -q "preemptoff" available_tracers || unsup "preemptoff tracer not enabled"
index 0bb80619db580af1dcb23ff8fde3235d20a2c825..32bdc978a711c1a309bdd2702f779f5e8e60ac82 100644 (file)
@@ -1,13 +1,13 @@
 # SPDX-License-Identifier: GPL-2.0
 
-MOUNT_CFLAGS := $(shell pkg-config --cflags mount 2>/dev/null)
-MOUNT_LDLIBS := $(shell pkg-config --libs mount 2>/dev/null)
-ifeq ($(MOUNT_LDLIBS),)
-MOUNT_LDLIBS := -lmount -I/usr/include/libmount
+VAR_CFLAGS := $(shell pkg-config --cflags mount 2>/dev/null)
+VAR_LDLIBS := $(shell pkg-config --libs mount 2>/dev/null)
+ifeq ($(VAR_LDLIBS),)
+VAR_LDLIBS := -lmount -I/usr/include/libmount
 endif
 
-CFLAGS += -O2 -g -std=gnu99 -Wall -I../../../../usr/include/ $(MOUNT_CFLAGS)
-LDLIBS += $(MOUNT_LDLIBS)
+CFLAGS += -O2 -g -std=gnu99 -Wall -I../../../../usr/include/ $(VAR_CFLAGS)
+LDLIBS += $(VAR_LDLIBS)
 
 TEST_PROGS := gpio-mockup.sh
 TEST_FILES := gpio-mockup-sysfs.sh
index 7340fd6a9a9f2cbd0d0f16ad7388d674b0ae0fb5..39f0fa2a8fd63ff51bfeb062831d6e95aaea4b0e 100644 (file)
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 CFLAGS := $(CFLAGS) -Wall -D_GNU_SOURCE
-LDLIBS := $(LDLIBS) -lm
+LDLIBS += -lm
 
 uname_M := $(shell uname -m 2>/dev/null || echo not)
 ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
diff --git a/tools/testing/selftests/kselftest_deps.sh b/tools/testing/selftests/kselftest_deps.sh
new file mode 100755 (executable)
index 0000000..bbc0464
--- /dev/null
@@ -0,0 +1,272 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# kselftest_deps.sh
+#
+# Checks for kselftest build dependencies on the build system.
+# Copyright (c) 2020 Shuah Khan <skhan@linuxfoundation.org>
+#
+#
+
+usage()
+{
+
+echo -e "Usage: $0 -[p] <compiler> [test_name]\n"
+echo -e "\tkselftest_deps.sh [-p] gcc"
+echo -e "\tkselftest_deps.sh [-p] gcc vm"
+echo -e "\tkselftest_deps.sh [-p] aarch64-linux-gnu-gcc"
+echo -e "\tkselftest_deps.sh [-p] aarch64-linux-gnu-gcc vm\n"
+echo "- Should be run in selftests directory in the kernel repo."
+echo "- Checks if Kselftests can be built/cross-built on a system."
+echo "- Parses all test/sub-test Makefile to find library dependencies."
+echo "- Runs compile test on a trivial C file with LDLIBS specified"
+echo "  in the test Makefiles to identify missing library dependencies."
+echo "- Prints suggested target list for a system filtering out tests"
+echo "  failed the build dependency check from the TARGETS in Selftests"
+echo "  main Makefile when optional -p is specified."
+echo "- Prints pass/fail dependency check for each tests/sub-test."
+echo "- Prints pass/fail targets and libraries."
+echo "- Default: runs dependency checks on all tests."
+echo "- Optional test name can be specified to check dependencies for it."
+exit 1
+
+}
+
+# Start main()
+main()
+{
+
+base_dir=`pwd`
+# Make sure we're in the selftests top-level directory.
+if [ $(basename "$base_dir") !=  "selftests" ]; then
+       echo -e "\tPlease run $0 in"
+       echo -e "\ttools/testing/selftests directory ..."
+       exit 1
+fi
+
+print_targets=0
+
+while getopts "p" arg; do
+    case $arg in
+        p)
+            print_targets=1
+            shift;;
+    esac
+done
+
+if [ $# -eq 0 ]
+then
+       usage
+fi
+
+# Compiler
+CC=$1
+
+tmp_file=$(mktemp).c
+trap "rm -f $tmp_file.o $tmp_file $tmp_file.bin" EXIT
+#echo $tmp_file
+
+pass=$(mktemp).out
+trap "rm -f $pass" EXIT
+#echo $pass
+
+fail=$(mktemp).out
+trap "rm -f $fail" EXIT
+#echo $fail
+
+# Generate a tmp source file for the compile test
+cat << "EOF" > $tmp_file
+int main()
+{
+}
+EOF
+
+# Save results
+total_cnt=0
+fail_trgts=()
+fail_libs=()
+fail_cnt=0
+pass_trgts=()
+pass_libs=()
+pass_cnt=0
+
+# Get all TARGETS from selftests Makefile
+targets=$(egrep "^TARGETS +|^TARGETS =" Makefile | cut -d "=" -f2)
+
+# Single test case
+if [ $# -eq 2 ]
+then
+       test=$2/Makefile
+
+       l1_test $test
+       l2_test $test
+       l3_test $test
+
+       print_results $1 $2
+       exit $?
+fi
+
+# Level 1: LDLIBS set static.
+#
+# Find all LDLIBS set statically for all executables built by a Makefile
+# and filter out VAR_LDLIBS to discard the following:
+#      gpio/Makefile:LDLIBS += $(VAR_LDLIBS)
+# Append space at the end of the list to append more tests.
+
+l1_tests=$(grep -r --include=Makefile "^LDLIBS" | \
+               grep -v "VAR_LDLIBS" | awk -F: '{print $1}')
+
+# Level 2: LDLIBS set dynamically.
+#
+# Level 2
+# Some tests have multiple valid LDLIBS lines for individual sub-tests
+# that need dependency checks. Find them and append them to the tests
+# e.g: vm/Makefile:$(OUTPUT)/userfaultfd: LDLIBS += -lpthread
+# Filter out VAR_LDLIBS to discard the following:
+#      memfd/Makefile:$(OUTPUT)/fuse_mnt: LDLIBS += $(VAR_LDLIBS)
+# Append space at the end of the list to append more tests.
+
+l2_tests=$(grep -r --include=Makefile ": LDLIBS" | \
+               grep -v "VAR_LDLIBS" | awk -F: '{print $1}')
+
+# Level 3
+# gpio, memfd and others use pkg-config to find the mount and fuse libs
+# respectively and save them in VAR_LDLIBS. If pkg-config doesn't find
+# any, VAR_LDLIBS is set to a default value.
+# Use the default value and filter out pkg-config for the dependency check.
+# e.g:
+# gpio/Makefile
+#      VAR_LDLIBS := $(shell pkg-config --libs mount 2>/dev/null)
+# memfd/Makefile
+#      VAR_LDLIBS := $(shell pkg-config fuse --libs 2>/dev/null)
+
+l3_tests=$(grep -r --include=Makefile "^VAR_LDLIBS" | \
+               grep -v "pkg-config" | awk -F: '{print $1}')
+
+#echo $l1_tests
+#echo $l2_tests
+#echo $l3_tests
+
+all_tests
+print_results $1 $2
+
+exit $?
+}
+# end main()
+
+all_tests()
+{
+       for test in $l1_tests; do
+               l1_test $test
+       done
+
+       for test in $l2_tests; do
+               l2_test $test
+       done
+
+       for test in $l3_tests; do
+               l3_test $test
+       done
+}
+
+# Use same parsing used for l1_tests and pick libraries this time.
+l1_test()
+{
+       test_libs=$(grep --include=Makefile "^LDLIBS" $test | \
+                       grep -v "VAR_LDLIBS" | \
+                       sed -e 's/\:/ /' | \
+                       sed -e 's/+/ /' | cut -d "=" -f 2)
+
+       check_libs $test $test_libs
+}
+
+# Use same parsing used for l2_tests and pick libraries this time.
+l2_test()
+{
+       test_libs=$(grep --include=Makefile ": LDLIBS" $test | \
+                       grep -v "VAR_LDLIBS" | \
+                       sed -e 's/\:/ /' | sed -e 's/+/ /' | \
+                       cut -d "=" -f 2)
+
+       check_libs $test $test_libs
+}
+
+l3_test()
+{
+       test_libs=$(grep --include=Makefile "^VAR_LDLIBS" $test | \
+                       grep -v "pkg-config" | sed -e 's/\:/ /' |
+                       sed -e 's/+/ /' | cut -d "=" -f 2)
+
+       check_libs $test $test_libs
+}
+
+check_libs()
+{
+
+if [[ ! -z "${test_libs// }" ]]
+then
+
+       #echo $test_libs
+
+       for lib in $test_libs; do
+               let total_cnt+=1
+               $CC -o $tmp_file.bin $lib $tmp_file > /dev/null 2>&1
+               if [ $? -ne 0 ]; then
+                       echo "FAIL: $test dependency check: $lib" >> $fail
+                       let fail_cnt+=1
+                       fail_libs+="$lib "
+                       fail_target=$(echo "$test" | cut -d "/" -f1)
+                       fail_trgts+="$fail_target "
+                       targets=$(echo "$targets" | grep -v "$fail_target")
+               else
+                       echo "PASS: $test dependency check passed $lib" >> $pass
+                       let pass_cnt+=1
+                       pass_libs+="$lib "
+                       pass_trgts+="$(echo "$test" | cut -d "/" -f1) "
+               fi
+       done
+fi
+}
+
+print_results()
+{
+       echo -e "========================================================";
+       echo -e "Kselftest Dependency Check for [$0 $1 $2] results..."
+
+       if [ $print_targets -ne 0 ]; then
+               echo -e "Suggested Selftest Targets for your configuration:"
+               echo -e "$targets";
+       fi
+
+       echo -e "========================================================";
+       echo -e "Checked tests defining LDLIBS dependencies"
+       echo -e "--------------------------------------------------------";
+       echo -e "Total tests with Dependencies:"
+       echo -e "$total_cnt Pass: $pass_cnt Fail: $fail_cnt";
+
+       if [ $pass_cnt -ne 0 ]; then
+               echo -e "--------------------------------------------------------";
+               cat $pass
+               echo -e "--------------------------------------------------------";
+               echo -e "Targets passed build dependency check on system:"
+               echo -e "$(echo "$pass_trgts" | xargs -n1 | sort -u | xargs)"
+       fi
+
+       if [ $fail_cnt -ne 0 ]; then
+               echo -e "--------------------------------------------------------";
+               cat $fail
+               echo -e "--------------------------------------------------------";
+               echo -e "Targets failed build dependency check on system:"
+               echo -e "$(echo "$fail_trgts" | xargs -n1 | sort -u | xargs)"
+               echo -e "--------------------------------------------------------";
+               echo -e "Missing libraries on the system:"
+               echo -e "$(echo "$fail_libs" | xargs -n1 | sort -u | xargs)"
+       fi
+
+       echo -e "--------------------------------------------------------";
+       echo -e "========================================================";
+}
+
+main "$@"
index 712a2ddd2a2711a8271706fd2d37501ae3a3386d..42f4f49f2a488b9c90cc0f030cbf0dc12d9bf649 100644 (file)
@@ -5,8 +5,34 @@ all:
 
 top_srcdir = ../../../..
 KSFT_KHDR_INSTALL := 1
+
+# For cross-builds to work, UNAME_M has to map to ARCH and arch specific
+# directories and targets in this Makefile. "uname -m" doesn't map to
+# arch specific sub-directory names.
+#
+# The UNAME_M variable is used to run the compiles pointing to the right arch
+# directories and build the right targets for these supported architectures.
+#
+# TEST_GEN_PROGS and LIBKVM are set using UNAME_M variable.
+# LINUX_TOOL_ARCH_INCLUDE is set using ARCH variable.
+#
+# x86_64 targets are named to include x86_64 as a suffix and directories
+# for includes are in x86_64 sub-directory. s390x and aarch64 follow the
+# same convention. "uname -m" doesn't result in the correct mapping for
+# s390x and aarch64.
+#
+# No change necessary for x86_64
 UNAME_M := $(shell uname -m)
 
+# Set UNAME_M for arm64 compile/install to work
+ifeq ($(ARCH),arm64)
+       UNAME_M := aarch64
+endif
+# Set UNAME_M s390x compile/install to work
+ifeq ($(ARCH),s390)
+       UNAME_M := s390x
+endif
+
 LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c lib/test_util.c
 LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c
 LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c
@@ -28,6 +54,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
 TEST_GEN_PROGS_x86_64 += x86_64/xss_msr_test
+TEST_GEN_PROGS_x86_64 += x86_64/debug_regs
 TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
 TEST_GEN_PROGS_x86_64 += demand_paging_test
 TEST_GEN_PROGS_x86_64 += dirty_log_test
@@ -53,7 +80,7 @@ LIBKVM += $(LIBKVM_$(UNAME_M))
 INSTALL_HDR_PATH = $(top_srcdir)/usr
 LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
 LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include
-LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/x86/include
+LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include
 CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
        -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
        -I$(LINUX_TOOL_ARCH_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude \
@@ -84,6 +111,7 @@ $(LIBKVM_OBJ): $(OUTPUT)/%.o: %.c
 $(OUTPUT)/libkvm.a: $(LIBKVM_OBJ)
        $(AR) crs $@ $^
 
+x := $(shell mkdir -p $(sort $(dir $(TEST_GEN_PROGS))))
 all: $(STATIC_LIBS)
 $(TEST_GEN_PROGS): $(STATIC_LIBS)
 
index d8f4d6bfe05d731ca7c2fc2c0b45977f90a479da..a034438b62662b4c103d46b93f854bd9999a9490 100644 (file)
@@ -219,8 +219,8 @@ struct hv_enlightened_vmcs {
 #define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK \
                (~((1ull << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT) - 1))
 
-struct hv_enlightened_vmcs *current_evmcs;
-struct hv_vp_assist_page *current_vp_assist;
+extern struct hv_enlightened_vmcs *current_evmcs;
+extern struct hv_vp_assist_page *current_vp_assist;
 
 int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id);
 
index a99b875f50d24c49cb06c98244b78ce376682577..92e184a422eedf97115ad3ab96460d307a16fa58 100644 (file)
@@ -143,6 +143,8 @@ struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid);
 void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
 int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
 void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid);
+void vcpu_set_guest_debug(struct kvm_vm *vm, uint32_t vcpuid,
+                         struct kvm_guest_debug *debug);
 void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
                       struct kvm_mp_state *mp_state);
 void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
index 8a3523d4434fb3fa5887239bc9e70ee7c9992ff4..9622431069bc4597de3e4ab55e90baa4ec7e7b65 100644 (file)
@@ -1201,6 +1201,15 @@ void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
                    ret, errno);
 }
 
+void vcpu_set_guest_debug(struct kvm_vm *vm, uint32_t vcpuid,
+                         struct kvm_guest_debug *debug)
+{
+       struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+       int ret = ioctl(vcpu->fd, KVM_SET_GUEST_DEBUG, debug);
+
+       TEST_ASSERT(ret == 0, "KVM_SET_GUEST_DEBUG failed: %d", ret);
+}
+
 /*
  * VM VCPU Set MP State
  *
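The helper is a thin wrapper around the KVM_SET_GUEST_DEBUG ioctl; the x86_64/debug_regs test added later in this series is its first user. The typical call shape, with vm and VCPU_ID as produced by vm_create_default():

	struct kvm_guest_debug debug;

	memset(&debug, 0, sizeof(debug));
	debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
	vcpu_set_guest_debug(vm, VCPU_ID, &debug);

	vcpu_run(vm, VCPU_ID);	/* expect run->exit_reason == KVM_EXIT_DEBUG */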
index 6f17f69394be53eb40466cab71b975f59296bfe3..4ae104f6ce69f3c9285b81042ff82f9f024d4b16 100644 (file)
@@ -17,6 +17,9 @@
 
 bool enable_evmcs;
 
+struct hv_enlightened_vmcs *current_evmcs;
+struct hv_vp_assist_page *current_vp_assist;
+
 struct eptPageTableEntry {
        uint64_t readable:1;
        uint64_t writable:1;
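This is the standard fix for tentative definitions living in a header: GCC 10 builds with -fno-common by default, so every translation unit including evmcs.h used to emit its own definition and the final link failed with multiple-definition errors. The general pattern:

	/* header.h: declaration only, safe to include everywhere */
	extern struct hv_enlightened_vmcs *current_evmcs;

	/* exactly one .c file: the single definition */
	struct hv_enlightened_vmcs *current_evmcs;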
diff --git a/tools/testing/selftests/kvm/x86_64/debug_regs.c b/tools/testing/selftests/kvm/x86_64/debug_regs.c
new file mode 100644 (file)
index 0000000..8162c58
--- /dev/null
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KVM guest debug register tests
+ *
+ * Copyright (C) 2020, Red Hat, Inc.
+ */
+#include <stdio.h>
+#include <string.h>
+#include "kvm_util.h"
+#include "processor.h"
+
+#define VCPU_ID 0
+
+#define DR6_BD         (1 << 13)
+#define DR7_GD         (1 << 13)
+
+/* For testing data access debug BP */
+uint32_t guest_value;
+
+extern unsigned char sw_bp, hw_bp, write_data, ss_start, bd_start;
+
+static void guest_code(void)
+{
+       /*
+        * Software BP tests.
+        *
+        * NOTE: sw_bp needs to be before the cmd here, because int3 is an
+        * exception rather than a normal trap for KVM_SET_GUEST_DEBUG (we
+        * capture it using the vcpu exception bitmap).
+        */
+       asm volatile("sw_bp: int3");
+
+       /* Hardware instruction BP test */
+       asm volatile("hw_bp: nop");
+
+       /* Hardware data BP test */
+       asm volatile("mov $1234,%%rax;\n\t"
+                    "mov %%rax,%0;\n\t write_data:"
+                    : "=m" (guest_value) : : "rax");
+
+       /* Single step test, covers 2 basic instructions and 2 emulated */
+       asm volatile("ss_start: "
+                    "xor %%rax,%%rax\n\t"
+                    "cpuid\n\t"
+                    "movl $0x1a0,%%ecx\n\t"
+                    "rdmsr\n\t"
+                    : : : "rax", "ecx");
+
+       /* DR6.BD test */
+       asm volatile("bd_start: mov %%dr0, %%rax" : : : "rax");
+       GUEST_DONE();
+}
+
+#define  CLEAR_DEBUG()  memset(&debug, 0, sizeof(debug))
+#define  APPLY_DEBUG()  vcpu_set_guest_debug(vm, VCPU_ID, &debug)
+#define  CAST_TO_RIP(v)  ((unsigned long long)&(v))
+#define  SET_RIP(v)  do {                              \
+               vcpu_regs_get(vm, VCPU_ID, &regs);      \
+               regs.rip = (v);                         \
+               vcpu_regs_set(vm, VCPU_ID, &regs);      \
+       } while (0)
+#define  MOVE_RIP(v)  SET_RIP(regs.rip + (v));
+
+int main(void)
+{
+       struct kvm_guest_debug debug;
+       unsigned long long target_dr6, target_rip;
+       struct kvm_regs regs;
+       struct kvm_run *run;
+       struct kvm_vm *vm;
+       struct ucall uc;
+       uint64_t cmd;
+       int i;
+       /* Instruction lengths starting at ss_start */
+       int ss_size[4] = {
+               3,              /* xor */
+               2,              /* cpuid */
+               5,              /* mov */
+               2,              /* rdmsr */
+       };
+
+       if (!kvm_check_cap(KVM_CAP_SET_GUEST_DEBUG)) {
+               print_skip("KVM_CAP_SET_GUEST_DEBUG not supported");
+               return 0;
+       }
+
+       vm = vm_create_default(VCPU_ID, 0, guest_code);
+       vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+       run = vcpu_state(vm, VCPU_ID);
+
+       /* Test software BPs - int3 */
+       CLEAR_DEBUG();
+       debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
+       APPLY_DEBUG();
+       vcpu_run(vm, VCPU_ID);
+       TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
+                   run->debug.arch.exception == BP_VECTOR &&
+                   run->debug.arch.pc == CAST_TO_RIP(sw_bp),
+                   "INT3: exit %d exception %d rip 0x%llx (should be 0x%llx)",
+                   run->exit_reason, run->debug.arch.exception,
+                   run->debug.arch.pc, CAST_TO_RIP(sw_bp));
+       MOVE_RIP(1);
+
+       /* Test instruction HW BP over DR[0-3] */
+       for (i = 0; i < 4; i++) {
+               CLEAR_DEBUG();
+               debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
+               debug.arch.debugreg[i] = CAST_TO_RIP(hw_bp);
+               debug.arch.debugreg[7] = 0x400 | (1UL << (2*i+1));
+               APPLY_DEBUG();
+               vcpu_run(vm, VCPU_ID);
+               target_dr6 = 0xffff0ff0 | (1UL << i);
+               TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
+                           run->debug.arch.exception == DB_VECTOR &&
+                           run->debug.arch.pc == CAST_TO_RIP(hw_bp) &&
+                           run->debug.arch.dr6 == target_dr6,
+                           "INS_HW_BP (DR%d): exit %d exception %d rip 0x%llx "
+                           "(should be 0x%llx) dr6 0x%llx (should be 0x%llx)",
+                           i, run->exit_reason, run->debug.arch.exception,
+                           run->debug.arch.pc, CAST_TO_RIP(hw_bp),
+                           run->debug.arch.dr6, target_dr6);
+       }
+       /* Skip "nop" */
+       MOVE_RIP(1);
+
+       /* Test data access HW BP over DR[0-3] */
+       for (i = 0; i < 4; i++) {
+               CLEAR_DEBUG();
+               debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
+               debug.arch.debugreg[i] = CAST_TO_RIP(guest_value);
+               debug.arch.debugreg[7] = 0x00000400 | (1UL << (2*i+1)) |
+                   (0x000d0000UL << (4*i));
+               APPLY_DEBUG();
+               vcpu_run(vm, VCPU_ID);
+               target_dr6 = 0xffff0ff0 | (1UL << i);
+               TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
+                           run->debug.arch.exception == DB_VECTOR &&
+                           run->debug.arch.pc == CAST_TO_RIP(write_data) &&
+                           run->debug.arch.dr6 == target_dr6,
+                           "DATA_HW_BP (DR%d): exit %d exception %d rip 0x%llx "
+                           "(should be 0x%llx) dr6 0x%llx (should be 0x%llx)",
+                           i, run->exit_reason, run->debug.arch.exception,
+                           run->debug.arch.pc, CAST_TO_RIP(write_data),
+                           run->debug.arch.dr6, target_dr6);
+               /* Rollback the 7-byte "mov" */
+               MOVE_RIP(-7);
+       }
+       /* Skip the 7-byte "mov" */
+       MOVE_RIP(7);
+
+       /* Test single step */
+       target_rip = CAST_TO_RIP(ss_start);
+       target_dr6 = 0xffff4ff0ULL;
+       vcpu_regs_get(vm, VCPU_ID, &regs);
+       for (i = 0; i < (sizeof(ss_size) / sizeof(ss_size[0])); i++) {
+               target_rip += ss_size[i];
+               CLEAR_DEBUG();
+               debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
+               debug.arch.debugreg[7] = 0x00000400;
+               APPLY_DEBUG();
+               vcpu_run(vm, VCPU_ID);
+               TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
+                           run->debug.arch.exception == DB_VECTOR &&
+                           run->debug.arch.pc == target_rip &&
+                           run->debug.arch.dr6 == target_dr6,
+                           "SINGLE_STEP[%d]: exit %d exception %d rip 0x%llx "
+                           "(should be 0x%llx) dr6 0x%llx (should be 0x%llx)",
+                           i, run->exit_reason, run->debug.arch.exception,
+                           run->debug.arch.pc, target_rip, run->debug.arch.dr6,
+                           target_dr6);
+       }
+
+       /* Finally test global disable */
+       CLEAR_DEBUG();
+       debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
+       debug.arch.debugreg[7] = 0x400 | DR7_GD;
+       APPLY_DEBUG();
+       vcpu_run(vm, VCPU_ID);
+       target_dr6 = 0xffff0ff0 | DR6_BD;
+       TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
+                   run->debug.arch.exception == DB_VECTOR &&
+                   run->debug.arch.pc == CAST_TO_RIP(bd_start) &&
+                   run->debug.arch.dr6 == target_dr6,
+                           "DR7.GD: exit %d exception %d rip 0x%llx "
+                           "(should be 0x%llx) dr6 0x%llx (should be 0x%llx)",
+                           run->exit_reason, run->debug.arch.exception,
+                           run->debug.arch.pc, target_rip, run->debug.arch.dr6,
+                           target_dr6);
+
+       /* Disable all debug controls, run to the end */
+       CLEAR_DEBUG();
+       APPLY_DEBUG();
+
+       vcpu_run(vm, VCPU_ID);
+       TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, "KVM_EXIT_IO");
+       cmd = get_ucall(vm, VCPU_ID, &uc);
+       TEST_ASSERT(cmd == UCALL_DONE, "UCALL_DONE");
+
+       kvm_vm_free(vm);
+
+       return 0;
+}
index dadf819148a431bf9a89a08a49bd592a73157192..ee64ff8df8f4db84adbf17ca3ba8380c5d293b2f 100755 (executable)
@@ -25,13 +25,13 @@ fi
 # Figure out which test to run from our script name.
 test=$(basename $0 .sh)
 # Look up details about the test from master list of LKDTM tests.
-line=$(egrep '^#?'"$test"'\b' tests.txt)
+line=$(grep -E '^#?'"$test"'\b' tests.txt)
 if [ -z "$line" ]; then
        echo "Skipped: missing test '$test' in tests.txt"
        exit $KSELFTEST_SKIP_TEST
 fi
 # Check that the test is known to LKDTM.
-if ! egrep -q '^'"$test"'$' "$TRIGGER" ; then
+if ! grep -E -q '^'"$test"'$' "$TRIGGER" ; then
        echo "Skipped: test '$test' missing in $TRIGGER!"
        exit $KSELFTEST_SKIP_TEST
 fi
@@ -59,30 +59,32 @@ if [ -z "$expect" ]; then
        expect="call trace:"
 fi
 
-# Clear out dmesg for output reporting
-dmesg -c >/dev/null
-
 # Prepare log for report checking
-LOG=$(mktemp --tmpdir -t lkdtm-XXXXXX)
+LOG=$(mktemp --tmpdir -t lkdtm-log-XXXXXX)
+DMESG=$(mktemp --tmpdir -t lkdtm-dmesg-XXXXXX)
 cleanup() {
-       rm -f "$LOG"
+       rm -f "$LOG" "$DMESG"
 }
 trap cleanup EXIT
 
+# Save existing dmesg so we can detect new content below
+dmesg > "$DMESG"
+
 # Most shells yell about signals and we're expecting the "cat" process
 # to usually be killed by the kernel. So we have to run it in a sub-shell
 # and silence errors.
 ($SHELL -c 'cat <(echo '"$test"') >'"$TRIGGER" 2>/dev/null) || true
 
 # Record and dump the results
-dmesg -c >"$LOG"
+dmesg | diff --changed-group-format='%>' --unchanged-group-format='' "$DMESG" - > "$LOG" || true
+
 cat "$LOG"
 # Check for expected output
-if egrep -qi "$expect" "$LOG" ; then
+if grep -E -qi "$expect" "$LOG" ; then
        echo "$test: saw '$expect': ok"
        exit 0
 else
-       if egrep -qi XFAIL: "$LOG" ; then
+       if grep -E -qi XFAIL: "$LOG" ; then
                echo "$test: saw 'XFAIL': [SKIP]"
                exit $KSELFTEST_SKIP_TEST
        else
index 187b14cad00cb37abfc7a24799de89d54fb7d8a7..4da8b565fa326e54720eb6b8041b997ef98f3b91 100644 (file)
@@ -8,11 +8,21 @@ TEST_GEN_PROGS := memfd_test
 TEST_PROGS := run_fuse_test.sh run_hugetlbfs_test.sh
 TEST_GEN_FILES := fuse_test fuse_mnt
 
-fuse_mnt.o: CFLAGS += $(shell pkg-config fuse --cflags)
+VAR_CFLAGS := $(shell pkg-config fuse --cflags 2>/dev/null)
+ifeq ($(VAR_CFLAGS),)
+VAR_CFLAGS := -D_FILE_OFFSET_BITS=64 -I/usr/include/fuse
+endif
+
+VAR_LDLIBS := $(shell pkg-config fuse --libs 2>/dev/null)
+ifeq ($(VAR_LDLIBS),)
+VAR_LDLIBS := -lfuse -pthread
+endif
+
+fuse_mnt.o: CFLAGS += $(VAR_CFLAGS)
 
 include ../lib.mk
 
-$(OUTPUT)/fuse_mnt: LDLIBS += $(shell pkg-config fuse --libs)
+$(OUTPUT)/fuse_mnt: LDLIBS += $(VAR_LDLIBS)
 
 $(OUTPUT)/memfd_test: memfd_test.c common.c
 $(OUTPUT)/fuse_test: fuse_test.c common.c
index 9172746b6cf026faeb87663673d8ea3cc462f0a9..15f4f46ca3a90f04bcf464d6e9618d909b5a7cc7 100755 (executable)
@@ -30,7 +30,7 @@ ret=0
 
 cleanup()
 {
-       rm -f $out
+       rm -f $err
        ip netns del $ns1
 }
 
index 35505b31e5cc092453ea7b72d9dba45bed2d6549..4555f88252bafd31d6c225590316f03b08d3b132 100644 (file)
@@ -165,9 +165,10 @@ void *child_thread(void *arg)
                        socklen_t zc_len = sizeof(zc);
                        int res;
 
+                       memset(&zc, 0, sizeof(zc));
                        zc.address = (__u64)((unsigned long)addr);
                        zc.length = chunk_size;
-                       zc.recv_skip_hint = 0;
+
                        res = getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE,
                                         &zc, &zc_len);
                        if (res == -1)
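The memset is the substantive fix: struct tcp_zerocopy_receive has gained
fields over time, and uninitialized stack bytes in a newer field would be
interpreted as input by newer kernels. A minimal sketch of the full call
sequence this test exercises, assuming a connected TCP socket fd and a
page-multiple map_len (error checks mostly omitted):

    /* Sketch only: map a receive window, then ask TCP to fill it. */
    void *addr = mmap(NULL, map_len, PROT_READ, MAP_SHARED, fd, 0);
    struct tcp_zerocopy_receive zc;
    socklen_t zc_len = sizeof(zc);

    memset(&zc, 0, sizeof(zc));              /* zero every field, old and new */
    zc.address = (__u64)(unsigned long)addr;
    zc.length  = map_len;
    if (getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, &zc, &zc_len) == 0) {
            /* zc.length bytes are now mapped; zc.recv_skip_hint bytes,
             * if any, must still be read with a normal recv() */
    }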
@@ -281,12 +282,14 @@ static void setup_sockaddr(int domain, const char *str_addr,
 static void do_accept(int fdlisten)
 {
        pthread_attr_t attr;
+       int rcvlowat;
 
        pthread_attr_init(&attr);
        pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
 
+       rcvlowat = chunk_size;
        if (setsockopt(fdlisten, SOL_SOCKET, SO_RCVLOWAT,
-                      &chunk_size, sizeof(chunk_size)) == -1) {
+                      &rcvlowat, sizeof(rcvlowat)) == -1) {
                perror("setsockopt SO_RCVLOWAT");
        }
 
index e0d86e1668c01882e542de42cacd3bdc82364432..e3c772c6a7c7ce15eeab358fa767f096eb6af445 100644 (file)
@@ -27,7 +27,7 @@
 #define __stack_aligned__      __attribute__((aligned(16)))
 struct cr_clone_arg {
        char stack[128] __stack_aligned__;
-       char stack_ptr[0];
+       char stack_ptr[];
 };
 
 static int child(void *args)
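The [0]-to-[] change is part of the tree-wide move to C99 flexible array
members; here the member carries no data at all and only labels the end of
the clone() stack, which grows downward. A self-contained sketch of the
pattern (hypothetical names):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <signal.h>

    struct clone_arg {
            char stack[4096] __attribute__((aligned(16)));
            char stack_ptr[];  /* zero storage; &stack_ptr is one past 'stack' */
    };

    static int child_fn(void *arg) { return 0; }

    /* Usage: the flexible member's address is the initial stack top.   */
    /*   struct clone_arg ca;                                           */
    /*   pid_t pid = clone(child_fn, ca.stack_ptr, SIGCHLD, NULL);      */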
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json
new file mode 100644 (file)
index 0000000..1cda2e1
--- /dev/null
@@ -0,0 +1,21 @@
+[
+    {
+        "id": "83be",
+        "name": "Create FQ-PIE with invalid number of flows",
+        "category": [
+            "qdisc",
+            "fq_pie"
+        ],
+        "setup": [
+            "$IP link add dev $DUMMY type dummy || /bin/true"
+        ],
+        "cmdUnderTest": "$TC qdisc add dev $DUMMY root fq_pie flows 65536",
+        "expExitCode": "2",
+        "verifyCmd": "$TC qdisc show dev $DUMMY",
+        "matchPattern": "qdisc",
+        "matchCount": "0",
+        "teardown": [
+            "$IP link del dev $DUMMY"
+        ]
+    }
+]
index 0edb6d900e8d0b3023d64dc13199bac30b5e22d4..ca17fe0c32807837ee0b554b5a21a8c3dcf710af 100644 (file)
@@ -6,6 +6,7 @@ map_populate
 thuge-gen
 compaction_test
 mlock2-tests
+mremap_dontunmap
 on-fault-limit
 transhuge-stress
 userfaultfd
index 110bc4e4015d675e1a4f2204b1bdfad9fa099b51..6a2caba19ee1d9d8924f3df1d2d5e112f36d3be4 100644 (file)
@@ -74,8 +74,6 @@ int main(int argc, char **argv)
        int write = 0;
        int reserve = 1;
 
-       unsigned long i;
-
        if (signal(SIGINT, sig_handler) == SIG_ERR)
                err(1, "\ncan't catch SIGINT\n");
 
index 936e1ca9410ec16d6935e0b4dd15cfae0d373bb7..17a1f53ceba01aa7536fd6f57029bbb6b2918ea6 100755 (executable)
@@ -48,8 +48,11 @@ cleanup() {
        exec 2>/dev/null
        printf "$orig_message_cost" > /proc/sys/net/core/message_cost
        ip0 link del dev wg0
+       ip0 link del dev wg1
        ip1 link del dev wg0
+       ip1 link del dev wg1
        ip2 link del dev wg0
+       ip2 link del dev wg1
        local to_kill="$(ip netns pids $netns0) $(ip netns pids $netns1) $(ip netns pids $netns2)"
        [[ -n $to_kill ]] && kill $to_kill
        pp ip netns del $netns1
@@ -77,18 +80,20 @@ ip0 link set wg0 netns $netns2
 key1="$(pp wg genkey)"
 key2="$(pp wg genkey)"
 key3="$(pp wg genkey)"
+key4="$(pp wg genkey)"
 pub1="$(pp wg pubkey <<<"$key1")"
 pub2="$(pp wg pubkey <<<"$key2")"
 pub3="$(pp wg pubkey <<<"$key3")"
+pub4="$(pp wg pubkey <<<"$key4")"
 psk="$(pp wg genpsk)"
 [[ -n $key1 && -n $key2 && -n $psk ]]
 
 configure_peers() {
        ip1 addr add 192.168.241.1/24 dev wg0
-       ip1 addr add fd00::1/24 dev wg0
+       ip1 addr add fd00::1/112 dev wg0
 
        ip2 addr add 192.168.241.2/24 dev wg0
-       ip2 addr add fd00::2/24 dev wg0
+       ip2 addr add fd00::2/112 dev wg0
 
        n1 wg set wg0 \
                private-key <(echo "$key1") \
@@ -230,9 +235,38 @@ n1 ping -W 1 -c 1 192.168.241.2
 n1 wg set wg0 private-key <(echo "$key3")
 n2 wg set wg0 peer "$pub3" preshared-key <(echo "$psk") allowed-ips 192.168.241.1/32 peer "$pub1" remove
 n1 ping -W 1 -c 1 192.168.241.2
+n2 wg set wg0 peer "$pub3" remove
+
+# Test that we can route wg through wg
+ip1 addr flush dev wg0
+ip2 addr flush dev wg0
+ip1 addr add fd00::5:1/112 dev wg0
+ip2 addr add fd00::5:2/112 dev wg0
+n1 wg set wg0 private-key <(echo "$key1") peer "$pub2" preshared-key <(echo "$psk") allowed-ips fd00::5:2/128 endpoint 127.0.0.1:2
+n2 wg set wg0 private-key <(echo "$key2") listen-port 2 peer "$pub1" preshared-key <(echo "$psk") allowed-ips fd00::5:1/128 endpoint 127.212.121.99:9998
+ip1 link add wg1 type wireguard
+ip2 link add wg1 type wireguard
+ip1 addr add 192.168.241.1/24 dev wg1
+ip1 addr add fd00::1/112 dev wg1
+ip2 addr add 192.168.241.2/24 dev wg1
+ip2 addr add fd00::2/112 dev wg1
+ip1 link set mtu 1340 up dev wg1
+ip2 link set mtu 1340 up dev wg1
+n1 wg set wg1 listen-port 5 private-key <(echo "$key3") peer "$pub4" allowed-ips 192.168.241.2/32,fd00::2/128 endpoint [fd00::5:2]:5
+n2 wg set wg1 listen-port 5 private-key <(echo "$key4") peer "$pub3" allowed-ips 192.168.241.1/32,fd00::1/128 endpoint [fd00::5:1]:5
+tests
+# Try to set up a routing loop between the two namespaces
+ip1 link set netns $netns0 dev wg1
+ip0 addr add 192.168.241.1/24 dev wg1
+ip0 link set up dev wg1
+n0 ping -W 1 -c 1 192.168.241.2
+n1 wg set wg0 peer "$pub2" endpoint 192.168.241.2:7
+ip2 link del wg0
+ip2 link del wg1
+! n0 ping -W 1 -c 10 -f 192.168.241.2 || false # Should not crash kernel
 
+ip0 link del wg1
 ip1 link del wg0
-ip2 link del wg0
 
 # Test using NAT. We now change the topology to this:
 # ┌────────────────────────────────────────┐    ┌────────────────────────────────────────────────┐     ┌────────────────────────────────────────┐
@@ -282,6 +316,20 @@ pp sleep 3
 n2 ping -W 1 -c 1 192.168.241.1
 n1 wg set wg0 peer "$pub2" persistent-keepalive 0
 
+# Test that onion routing works, even when it loops
+n1 wg set wg0 peer "$pub3" allowed-ips 192.168.242.2/32 endpoint 192.168.241.2:5
+ip1 addr add 192.168.242.1/24 dev wg0
+ip2 link add wg1 type wireguard
+ip2 addr add 192.168.242.2/24 dev wg1
+n2 wg set wg1 private-key <(echo "$key3") listen-port 5 peer "$pub1" allowed-ips 192.168.242.1/32
+ip2 link set wg1 up
+n1 ping -W 1 -c 1 192.168.242.2
+ip2 link del wg1
+n1 wg set wg0 peer "$pub3" endpoint 192.168.242.2:5
+! n1 ping -W 1 -c 1 192.168.242.2 || false # Should not crash kernel
+n1 wg set wg0 peer "$pub3" remove
+ip1 addr del 192.168.242.1/24 dev wg0
+
 # Do a wg-quick(8)-style policy routing for the default route, making sure vethc has a v6 address to tease out bugs.
 ip1 -6 addr add fc00::9/96 dev vethc
 ip1 -6 route add default via fc00::1
index 90598a425c18b05079e86a6d64d4aa6fd5d8e4e9..4bdd6c1a19d355b7d41c0a6e12282eafa4f955c5 100644 (file)
@@ -44,7 +44,7 @@ endef
 $(eval $(call tar_download,MUSL,musl,1.2.0,.tar.gz,https://musl.libc.org/releases/,c6de7b191139142d3f9a7b5b702c9cae1b5ee6e7f57e582da9328629408fd4e8))
 $(eval $(call tar_download,IPERF,iperf,3.7,.tar.gz,https://downloads.es.net/pub/iperf/,d846040224317caf2f75c843d309a950a7db23f9b44b94688ccbe557d6d1710c))
 $(eval $(call tar_download,BASH,bash,5.0,.tar.gz,https://ftp.gnu.org/gnu/bash/,b4a80f2ac66170b2913efbfb9f2594f1f76c7b1afd11f799e22035d63077fb4d))
-$(eval $(call tar_download,IPROUTE2,iproute2,5.4.0,.tar.xz,https://www.kernel.org/pub/linux/utils/net/iproute2/,fe97aa60a0d4c5ac830be18937e18dc3400ca713a33a89ad896ff1e3d46086ae))
+$(eval $(call tar_download,IPROUTE2,iproute2,5.6.0,.tar.xz,https://www.kernel.org/pub/linux/utils/net/iproute2/,1b5b0e25ce6e23da7526ea1da044e814ad85ba761b10dd29c2b027c056b04692))
 $(eval $(call tar_download,IPTABLES,iptables,1.8.4,.tar.bz2,https://www.netfilter.org/projects/iptables/files/,993a3a5490a544c2cbf2ef15cf7e7ed21af1845baf228318d5c36ef8827e157c))
 $(eval $(call tar_download,NMAP,nmap,7.80,.tar.bz2,https://nmap.org/dist/,fcfa5a0e42099e12e4bf7a68ebe6fde05553383a682e816a7ec9256ab4773faa))
 $(eval $(call tar_download,IPUTILS,iputils,s20190709,.tar.gz,https://github.com/iputils/iputils/archive/s20190709.tar.gz/#,a15720dd741d7538dd2645f9f516d193636ae4300ff7dbc8bfca757bf166490a))
index 990c510a9cfa5f4e5ac279dd78cc79725c1a8d9e..f52f1e2bc7f64dba3c4d24d276fde663fed03909 100644 (file)
@@ -10,3 +10,4 @@ CONFIG_CMDLINE_BOOL=y
 CONFIG_CMDLINE="console=hvc0 wg.success=hvc1"
 CONFIG_SECTION_MISMATCH_WARN_ONLY=y
 CONFIG_FRAME_WARN=1280
+CONFIG_THREAD_SHIFT=14
index 5909e7ef2a5c5c1a30b40e4c7d58ff585335e756..9803dbb54181ce8cac6111d83b8c36c58331d959 100644 (file)
@@ -25,7 +25,6 @@ CONFIG_KASAN=y
 CONFIG_KASAN_INLINE=y
 CONFIG_UBSAN=y
 CONFIG_UBSAN_SANITIZE_ALL=y
-CONFIG_UBSAN_NO_ALIGNMENT=y
 CONFIG_UBSAN_NULL=y
 CONFIG_DEBUG_KMEMLEAK=y
 CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=8192
index d31f267961e75a956174964e445338a74abc757b..25c0e47d57cbe314ff9b993c4456c0d8fd65a0d1 100644 (file)
@@ -125,12 +125,16 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
  */
 void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
 {
+       u32 pc = *vcpu_pc(vcpu);
        bool is_thumb;
 
        is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
        if (is_thumb && !is_wide_instr)
-               *vcpu_pc(vcpu) += 2;
+               pc += 2;
        else
-               *vcpu_pc(vcpu) += 4;
+               pc += 4;
+
+       *vcpu_pc(vcpu) = pc;
+
        kvm_adjust_itstate(vcpu);
 }
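Routing the increment through a local u32 is what fixes the bug: a 32-bit
guest's PC must wrap at 4 GiB, whereas incrementing the 64-bit PC register
in place would carry into the upper bits. A one-line illustration:

    u32 pc = 0xfffffffe;  /* near the top of the 32-bit address space  */
    pc += 4;              /* wraps to 0x00000002, as AArch32 requires  */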
index 14a162e295a947f70cf5b6bd8e573af7b2d628c8..ae364716ee40c1140ea4e6e9d12e964728a94859 100644 (file)
@@ -186,6 +186,33 @@ static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
        kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
 }
 
+static void kvm_psci_narrow_to_32bit(struct kvm_vcpu *vcpu)
+{
+       int i;
+
+       /*
+        * Zero the input registers' upper 32 bits. They will be fully
+        * zeroed on exit, so we're fine changing them in place.
+        */
+       for (i = 1; i < 4; i++)
+               vcpu_set_reg(vcpu, i, lower_32_bits(vcpu_get_reg(vcpu, i)));
+}
+
+static unsigned long kvm_psci_check_allowed_function(struct kvm_vcpu *vcpu, u32 fn)
+{
+       switch (fn) {
+       case PSCI_0_2_FN64_CPU_SUSPEND:
+       case PSCI_0_2_FN64_CPU_ON:
+       case PSCI_0_2_FN64_AFFINITY_INFO:
+               /* Disallow these functions for 32bit guests */
+               if (vcpu_mode_is_32bit(vcpu))
+                       return PSCI_RET_NOT_SUPPORTED;
+               break;
+       }
+
+       return 0;
+}
+
 static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 {
        struct kvm *kvm = vcpu->kvm;
@@ -193,6 +220,10 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
        unsigned long val;
        int ret = 1;
 
+       val = kvm_psci_check_allowed_function(vcpu, psci_fn);
+       if (val)
+               goto out;
+
        switch (psci_fn) {
        case PSCI_0_2_FN_PSCI_VERSION:
                /*
@@ -210,12 +241,16 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
                val = PSCI_RET_SUCCESS;
                break;
        case PSCI_0_2_FN_CPU_ON:
+               kvm_psci_narrow_to_32bit(vcpu);
+               fallthrough;
        case PSCI_0_2_FN64_CPU_ON:
                mutex_lock(&kvm->lock);
                val = kvm_psci_vcpu_on(vcpu);
                mutex_unlock(&kvm->lock);
                break;
        case PSCI_0_2_FN_AFFINITY_INFO:
+               kvm_psci_narrow_to_32bit(vcpu);
+               fallthrough;
        case PSCI_0_2_FN64_AFFINITY_INFO:
                val = kvm_psci_vcpu_affinity_info(vcpu);
                break;
@@ -256,6 +291,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
                break;
        }
 
+out:
        smccc_set_retval(vcpu, val, 0, 0, 0);
        return ret;
 }
@@ -273,6 +309,10 @@ static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu)
                break;
        case PSCI_1_0_FN_PSCI_FEATURES:
                feature = smccc_get_arg1(vcpu);
+               val = kvm_psci_check_allowed_function(vcpu, feature);
+               if (val)
+                       break;
+
                switch(feature) {
                case PSCI_0_2_FN_PSCI_VERSION:
                case PSCI_0_2_FN_CPU_SUSPEND:
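The narrowing helper encodes the SMCCC rule that SMC32/HVC32 calls pass
32-bit arguments in r1-r3, so whatever a 32-bit guest left in the upper
halves of the 64-bit register file must be ignored. A sketch of the mask
that lower_32_bits() applies to each argument register:

    /* Equivalent of the kernel's lower_32_bits(), for illustration. */
    static inline unsigned long narrow_smc32_arg(unsigned long reg)
    {
            return reg & 0xffffffffUL;  /* keep bits [31:0], drop the rest */
    }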
index a963b9d766b73a75d93a500cdaf0c2cc49ba8fdc..32e32d67a127f0e7a3b59a4e5b36d1c9c124d7b9 100644 (file)
@@ -294,8 +294,15 @@ int vgic_init(struct kvm *kvm)
                }
        }
 
-       if (vgic_has_its(kvm)) {
+       if (vgic_has_its(kvm))
                vgic_lpi_translation_cache_init(kvm);
+
+       /*
+        * If we have GICv4.1 enabled, unconditionally request to enable
+        * v4 support so that we get HW-accelerated vSGIs. Otherwise, only
+        * enable it if we present a virtual ITS to the guest.
+        */
+       if (vgic_supports_direct_msis(kvm)) {
                ret = vgic_v4_init(kvm);
                if (ret)
                        goto out;
@@ -348,6 +355,12 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 
+       /*
+        * Retire all pending LPIs on this vcpu anyway as we're
+        * going to destroy it.
+        */
+       vgic_flush_pending_lpis(vcpu);
+
        INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
 }
 
@@ -359,10 +372,10 @@ static void __kvm_vgic_destroy(struct kvm *kvm)
 
        vgic_debug_destroy(kvm);
 
-       kvm_vgic_dist_destroy(kvm);
-
        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_vgic_vcpu_destroy(vcpu);
+
+       kvm_vgic_dist_destroy(kvm);
 }
 
 void kvm_vgic_destroy(struct kvm *kvm)
index d53d34a33e35d7496519c4fc234eed6cec7bcb2a..c012a52b19f5757cef32fa84af25b78c6ca744c9 100644 (file)
@@ -96,14 +96,21 @@ out_unlock:
         * We "cache" the configuration table entries in our struct vgic_irq's.
         * However we only have those structs for mapped IRQs, so we read in
         * the respective config data from memory here upon mapping the LPI.
+        *
+        * Should any of these fail, behave as if we couldn't create the LPI
+        * by dropping the refcount and returning the error.
         */
        ret = update_lpi_config(kvm, irq, NULL, false);
-       if (ret)
+       if (ret) {
+               vgic_put_irq(kvm, irq);
                return ERR_PTR(ret);
+       }
 
        ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
-       if (ret)
+       if (ret) {
+               vgic_put_irq(kvm, irq);
                return ERR_PTR(ret);
+       }
 
        return irq;
 }
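Both fixes apply the standard rule for constructor-style functions: once a
reference has been taken, every subsequent failure path must drop it before
returning, otherwise the half-built object leaks. The general shape, with
hypothetical names:

    /* Sketch of the acquire/verify/put-on-error pattern used above. */
    static struct obj *obj_create(void)
    {
            struct obj *o = obj_get();  /* takes a reference */
            int ret = obj_setup(o);

            if (ret) {
                    obj_put(o);         /* undo the reference on failure */
                    return ERR_PTR(ret);
            }
            return o;
    }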
index 5945f062d74973d5fced21356f79fb24dc3255a0..a016f07adc2811295f33c85da86cee52f90f8c8c 100644 (file)
@@ -409,24 +409,28 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = {
                NULL, vgic_mmio_uaccess_write_v2_group, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
-               vgic_mmio_read_enable, vgic_mmio_write_senable, NULL, NULL, 1,
+               vgic_mmio_read_enable, vgic_mmio_write_senable,
+               NULL, vgic_uaccess_write_senable, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
-               vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
+               vgic_mmio_read_enable, vgic_mmio_write_cenable,
+               NULL, vgic_uaccess_write_cenable, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
-               vgic_mmio_read_pending, vgic_mmio_write_spending, NULL, NULL, 1,
+               vgic_mmio_read_pending, vgic_mmio_write_spending,
+               NULL, vgic_uaccess_write_spending, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
-               vgic_mmio_read_pending, vgic_mmio_write_cpending, NULL, NULL, 1,
+               vgic_mmio_read_pending, vgic_mmio_write_cpending,
+               NULL, vgic_uaccess_write_cpending, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
                vgic_mmio_read_active, vgic_mmio_write_sactive,
-               NULL, vgic_mmio_uaccess_write_sactive, 1,
+               vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
                vgic_mmio_read_active, vgic_mmio_write_cactive,
-               NULL, vgic_mmio_uaccess_write_cactive, 1,
+               vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
                vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
index e72dcc4542475a74fa1f890532d26cd6945fdad5..89a14ec8b33bb2009786d921759868943a152e24 100644 (file)
@@ -50,7 +50,8 @@ bool vgic_has_its(struct kvm *kvm)
 
 bool vgic_supports_direct_msis(struct kvm *kvm)
 {
-       return kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm);
+       return (kvm_vgic_global_state.has_gicv4_1 ||
+               (kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm)));
 }
 
 /*
@@ -538,10 +539,12 @@ static const struct vgic_register_region vgic_v3_dist_registers[] = {
                vgic_mmio_read_group, vgic_mmio_write_group, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISENABLER,
-               vgic_mmio_read_enable, vgic_mmio_write_senable, NULL, NULL, 1,
+               vgic_mmio_read_enable, vgic_mmio_write_senable,
+               NULL, vgic_uaccess_write_senable, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICENABLER,
-               vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
+               vgic_mmio_read_enable, vgic_mmio_write_cenable,
+               NULL, vgic_uaccess_write_cenable, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISPENDR,
                vgic_mmio_read_pending, vgic_mmio_write_spending,
@@ -553,11 +556,11 @@ static const struct vgic_register_region vgic_v3_dist_registers[] = {
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER,
                vgic_mmio_read_active, vgic_mmio_write_sactive,
-               NULL, vgic_mmio_uaccess_write_sactive, 1,
+               vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER,
                vgic_mmio_read_active, vgic_mmio_write_cactive,
-               NULL, vgic_mmio_uaccess_write_cactive,
+               vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive,
                1, VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR,
                vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
@@ -609,11 +612,13 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
        REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IGROUPR0,
                vgic_mmio_read_group, vgic_mmio_write_group, 4,
                VGIC_ACCESS_32bit),
-       REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_ISENABLER0,
-               vgic_mmio_read_enable, vgic_mmio_write_senable, 4,
+       REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISENABLER0,
+               vgic_mmio_read_enable, vgic_mmio_write_senable,
+               NULL, vgic_uaccess_write_senable, 4,
                VGIC_ACCESS_32bit),
-       REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_ICENABLER0,
-               vgic_mmio_read_enable, vgic_mmio_write_cenable, 4,
+       REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICENABLER0,
+               vgic_mmio_read_enable, vgic_mmio_write_cenable,
+               NULL, vgic_uaccess_write_cenable, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISPENDR0,
                vgic_mmio_read_pending, vgic_mmio_write_spending,
@@ -625,12 +630,12 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISACTIVER0,
                vgic_mmio_read_active, vgic_mmio_write_sactive,
-               NULL, vgic_mmio_uaccess_write_sactive,
-               4, VGIC_ACCESS_32bit),
+               vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 4,
+               VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICACTIVER0,
                vgic_mmio_read_active, vgic_mmio_write_cactive,
-               NULL, vgic_mmio_uaccess_write_cactive,
-               4, VGIC_ACCESS_32bit),
+               vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 4,
+               VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IPRIORITYR0,
                vgic_mmio_read_priority, vgic_mmio_write_priority, 32,
                VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
index 2199302597fafa32edced522d4df282bb87008dc..b2d73fc0d1ef48091ee428f915cad6054b3c3c21 100644 (file)
@@ -184,6 +184,48 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
        }
 }
 
+int vgic_uaccess_write_senable(struct kvm_vcpu *vcpu,
+                              gpa_t addr, unsigned int len,
+                              unsigned long val)
+{
+       u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+       int i;
+       unsigned long flags;
+
+       for_each_set_bit(i, &val, len * 8) {
+               struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+               raw_spin_lock_irqsave(&irq->irq_lock, flags);
+               irq->enabled = true;
+               vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+
+               vgic_put_irq(vcpu->kvm, irq);
+       }
+
+       return 0;
+}
+
+int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu,
+                              gpa_t addr, unsigned int len,
+                              unsigned long val)
+{
+       u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+       int i;
+       unsigned long flags;
+
+       for_each_set_bit(i, &val, len * 8) {
+               struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+               raw_spin_lock_irqsave(&irq->irq_lock, flags);
+               irq->enabled = false;
+               raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+
+               vgic_put_irq(vcpu->kvm, irq);
+       }
+
+       return 0;
+}
+
 unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len)
 {
@@ -219,17 +261,6 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
        return value;
 }
 
-/* Must be called with irq->irq_lock held */
-static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
-                                bool is_uaccess)
-{
-       if (is_uaccess)
-               return;
-
-       irq->pending_latch = true;
-       vgic_irq_set_phys_active(irq, true);
-}
-
 static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
 {
        return (vgic_irq_is_sgi(irq->intid) &&
@@ -240,7 +271,6 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val)
 {
-       bool is_uaccess = !kvm_get_running_vcpu();
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;
@@ -270,22 +300,48 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
                        continue;
                }
 
+               irq->pending_latch = true;
                if (irq->hw)
-                       vgic_hw_irq_spending(vcpu, irq, is_uaccess);
-               else
-                       irq->pending_latch = true;
+                       vgic_irq_set_phys_active(irq, true);
+
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
 }
 
-/* Must be called with irq->irq_lock held */
-static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
-                                bool is_uaccess)
+int vgic_uaccess_write_spending(struct kvm_vcpu *vcpu,
+                               gpa_t addr, unsigned int len,
+                               unsigned long val)
 {
-       if (is_uaccess)
-               return;
+       u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+       int i;
+       unsigned long flags;
+
+       for_each_set_bit(i, &val, len * 8) {
+               struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+               raw_spin_lock_irqsave(&irq->irq_lock, flags);
+               irq->pending_latch = true;
 
+               /*
+                * GICv2 SGIs are terribly broken. We can't restore
+                * the source of the interrupt, so just pick the vcpu
+                * itself as the source...
+                */
+               if (is_vgic_v2_sgi(vcpu, irq))
+                       irq->source |= BIT(vcpu->vcpu_id);
+
+               vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+
+               vgic_put_irq(vcpu->kvm, irq);
+       }
+
+       return 0;
+}
+
+/* Must be called with irq->irq_lock held */
+static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
+{
        irq->pending_latch = false;
 
        /*
@@ -308,7 +364,6 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val)
 {
-       bool is_uaccess = !kvm_get_running_vcpu();
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;
@@ -339,7 +394,7 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
                }
 
                if (irq->hw)
-                       vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
+                       vgic_hw_irq_cpending(vcpu, irq);
                else
                        irq->pending_latch = false;
 
@@ -348,8 +403,68 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
        }
 }
 
-unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
-                                   gpa_t addr, unsigned int len)
+int vgic_uaccess_write_cpending(struct kvm_vcpu *vcpu,
+                               gpa_t addr, unsigned int len,
+                               unsigned long val)
+{
+       u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+       int i;
+       unsigned long flags;
+
+       for_each_set_bit(i, &val, len * 8) {
+               struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+               raw_spin_lock_irqsave(&irq->irq_lock, flags);
+               /*
+                * More fun with GICv2 SGIs! If we're clearing one of them
+                * from userspace, which source vcpu to clear? Let's not
+                * even think of it, and blow the whole set.
+                */
+               if (is_vgic_v2_sgi(vcpu, irq))
+                       irq->source = 0;
+
+               irq->pending_latch = false;
+
+               raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+
+               vgic_put_irq(vcpu->kvm, irq);
+       }
+
+       return 0;
+}
+
+/*
+ * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
+ * is not queued on some running VCPU's LRs, because then the change to the
+ * active state can be overwritten when the VCPU's state is synced coming back
+ * from the guest.
+ *
+ * For shared interrupts as well as GICv3 private interrupts, we have to
+ * stop all the VCPUs because interrupts can be migrated while we don't hold
+ * the IRQ locks and we don't want to be chasing moving targets.
+ *
+ * For GICv2 private interrupts we don't have to do anything because
+ * userspace accesses to the VGIC state already require all VCPUs to be
+ * stopped, and only the VCPU itself can modify its private interrupts
+ * active state, which guarantees that the VCPU is not running.
+ */
+static void vgic_access_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
+{
+       if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
+           intid >= VGIC_NR_PRIVATE_IRQS)
+               kvm_arm_halt_guest(vcpu->kvm);
+}
+
+/* See vgic_access_active_prepare */
+static void vgic_access_active_finish(struct kvm_vcpu *vcpu, u32 intid)
+{
+       if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
+           intid >= VGIC_NR_PRIVATE_IRQS)
+               kvm_arm_resume_guest(vcpu->kvm);
+}
+
+static unsigned long __vgic_mmio_read_active(struct kvm_vcpu *vcpu,
+                                            gpa_t addr, unsigned int len)
 {
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
@@ -359,6 +474,10 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
+               /*
+                * Even for HW interrupts, don't evaluate the HW state;
+                * the guest only cares about the virtual state.
+                */
                if (irq->active)
                        value |= (1U << i);
 
@@ -368,6 +487,29 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
        return value;
 }
 
+unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
+                                   gpa_t addr, unsigned int len)
+{
+       u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+       u32 val;
+
+       mutex_lock(&vcpu->kvm->lock);
+       vgic_access_active_prepare(vcpu, intid);
+
+       val = __vgic_mmio_read_active(vcpu, addr, len);
+
+       vgic_access_active_finish(vcpu, intid);
+       mutex_unlock(&vcpu->kvm->lock);
+
+       return val;
+}
+
+unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
+                                       gpa_t addr, unsigned int len)
+{
+       return __vgic_mmio_read_active(vcpu, addr, len);
+}
+
 /* Must be called with irq->irq_lock held */
 static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                                      bool active, bool is_uaccess)
@@ -426,36 +568,6 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 }
 
-/*
- * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
- * is not queued on some running VCPU's LRs, because then the change to the
- * active state can be overwritten when the VCPU's state is synced coming back
- * from the guest.
- *
- * For shared interrupts, we have to stop all the VCPUs because interrupts can
- * be migrated while we don't hold the IRQ locks and we don't want to be
- * chasing moving targets.
- *
- * For private interrupts we don't have to do anything because userspace
- * accesses to the VGIC state already require all VCPUs to be stopped, and
- * only the VCPU itself can modify its private interrupts active state, which
- * guarantees that the VCPU is not running.
- */
-static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
-{
-       if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
-           intid > VGIC_NR_PRIVATE_IRQS)
-               kvm_arm_halt_guest(vcpu->kvm);
-}
-
-/* See vgic_change_active_prepare */
-static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
-{
-       if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
-           intid > VGIC_NR_PRIVATE_IRQS)
-               kvm_arm_resume_guest(vcpu->kvm);
-}
-
 static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
                                      gpa_t addr, unsigned int len,
                                      unsigned long val)
@@ -477,11 +589,11 @@ void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 
        mutex_lock(&vcpu->kvm->lock);
-       vgic_change_active_prepare(vcpu, intid);
+       vgic_access_active_prepare(vcpu, intid);
 
        __vgic_mmio_write_cactive(vcpu, addr, len, val);
 
-       vgic_change_active_finish(vcpu, intid);
+       vgic_access_active_finish(vcpu, intid);
        mutex_unlock(&vcpu->kvm->lock);
 }
 
@@ -514,11 +626,11 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 
        mutex_lock(&vcpu->kvm->lock);
-       vgic_change_active_prepare(vcpu, intid);
+       vgic_access_active_prepare(vcpu, intid);
 
        __vgic_mmio_write_sactive(vcpu, addr, len, val);
 
-       vgic_change_active_finish(vcpu, intid);
+       vgic_access_active_finish(vcpu, intid);
        mutex_unlock(&vcpu->kvm->lock);
 }
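For context, the *_uaccess_* callbacks added above are reached through KVM's
device-attribute interface rather than from guest MMIO. Roughly, userspace
touches a distributor register like this (a sketch from memory of the
KVM_DEV_ARM_VGIC_GRP_DIST_REGS encoding; the MPIDR routing bits are omitted,
and Documentation/virt/kvm/devices/arm-vgic-v3.rst is authoritative):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Read GICD_ISACTIVER0 state through the vgic device fd (vgic_fd). */
    __u32 val = 0;
    struct kvm_device_attr attr = {
            .group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
            .attr  = 0x300,  /* GICD_ISACTIVER0 offset in the distributor */
            .addr  = (__u64)(unsigned long)&val,
    };
    ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr); /* lands in vgic_uaccess_read_active() */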
 
index 5af2aefad4359a26b1c9db8e0dc4d9edcfe99825..fefcca2b14dc7297db682d523b0326de249622ad 100644 (file)
@@ -138,6 +138,14 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val);
 
+int vgic_uaccess_write_senable(struct kvm_vcpu *vcpu,
+                              gpa_t addr, unsigned int len,
+                              unsigned long val);
+
+int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu,
+                              gpa_t addr, unsigned int len,
+                              unsigned long val);
+
 unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len);
 
@@ -149,9 +157,20 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val);
 
+int vgic_uaccess_write_spending(struct kvm_vcpu *vcpu,
+                               gpa_t addr, unsigned int len,
+                               unsigned long val);
+
+int vgic_uaccess_write_cpending(struct kvm_vcpu *vcpu,
+                               gpa_t addr, unsigned int len,
+                               unsigned long val);
+
 unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len);
 
+unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
+                                       gpa_t addr, unsigned int len);
+
 void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val);
index 74bdb7bf32952ef3e632ff45af3cfedffde2d12e..731c1e517716f8f26c736bcdb3527fdb1a7743ff 100644 (file)
@@ -259,6 +259,7 @@ static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
 }
 
 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
+                                struct kvm_vcpu *except,
                                 unsigned long *vcpu_bitmap, cpumask_var_t tmp)
 {
        int i, cpu, me;
@@ -268,7 +269,8 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
        me = get_cpu();
 
        kvm_for_each_vcpu(i, vcpu, kvm) {
-               if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))
+               if ((vcpu_bitmap && !test_bit(i, vcpu_bitmap)) ||
+                   vcpu == except)
                        continue;
 
                kvm_make_request(req, vcpu);
@@ -288,19 +290,25 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
        return called;
 }
 
-bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
+bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
+                                     struct kvm_vcpu *except)
 {
        cpumask_var_t cpus;
        bool called;
 
        zalloc_cpumask_var(&cpus, GFP_ATOMIC);
 
-       called = kvm_make_vcpus_request_mask(kvm, req, NULL, cpus);
+       called = kvm_make_vcpus_request_mask(kvm, req, except, NULL, cpus);
 
        free_cpumask_var(cpus);
        return called;
 }
 
+bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
+{
+       return kvm_make_all_cpus_request_except(kvm, req, NULL);
+}
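The new wrapper exists for callers that want to kick every vCPU except one,
typically the vCPU that initiated the change and handles it inline. A hedged
sketch of the intended call shape (the request name here is illustrative):

    /* From a vCPU thread that has already applied the update to itself. */
    kvm_make_all_cpus_request_except(kvm, KVM_REQ_APICV_UPDATE, vcpu);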
+
 #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {