Merge branches 'acpi-ec' and 'acpi-processor'
author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Mon, 10 Jan 2022 15:57:59 +0000 (16:57 +0100)
committer: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Mon, 10 Jan 2022 15:57:59 +0000 (16:57 +0100)
Merge ACPI EC driver updates and ACPI processor driver updates for
5.17-rc1:

 - Rework flushing of EC work while suspended to idle and clean up
   the handling of events in the ACPI EC driver (Rafael Wysocki).

 - Prohibit ec_sys module parameter write_support from being used
   when the system is locked down (Hans de Goede).

 - Make the ACPI processor thermal driver use cpufreq_cpu_get() to
   check for presence of cpufreq policy (Manfred Spraul).

 - Avoid unnecessary CPU cache flushing in the ACPI processor idle
   driver (Kirill A. Shutemov).

 - Replace kernel.h with the necessary inclusions in the ACPI
   processor driver (Andy Shevchenko).

 - Use swap() instead of open coding it in the ACPI processor idle
   driver (Guo Zhengkui).

* acpi-ec:
  ACPI: EC: Mark the ec_sys write_support param as module_param_hw()
  ACPI: EC: Relocate acpi_ec_create_query() and drop acpi_ec_delete_query()
  ACPI: EC: Make the event work state machine visible
  ACPI: EC: Avoid queuing unnecessary work in acpi_ec_submit_event()
  ACPI: EC: Rename three functions
  ACPI: EC: Simplify locking in acpi_ec_event_handler()
  ACPI: EC: Rearrange the loop in acpi_ec_event_handler()
  ACPI: EC: Fold acpi_ec_check_event() into acpi_ec_event_handler()
  ACPI: EC: Pass one argument to acpi_ec_query()
  ACPI: EC: Call advance_transaction() from acpi_ec_dispatch_gpe()
  ACPI: EC: Rework flushing of EC work while suspended to idle

* acpi-processor:
  ACPI: processor: thermal: avoid cpufreq_get_policy()
  ACPI: processor: idle: Only flush cache on entering C3
  ACPI: processor idle: Use swap() instead of open coding it
  ACPI: processor: Replace kernel.h with the necessary inclusions

1019 files changed:
.mailmap
Documentation/admin-guide/blockdev/drbd/figures.rst
Documentation/admin-guide/blockdev/drbd/peer-states-8.dot [moved from Documentation/admin-guide/blockdev/drbd/node-states-8.dot with 71% similarity]
Documentation/admin-guide/kernel-parameters.txt
Documentation/arm64/pointer-authentication.rst
Documentation/conf.py
Documentation/cpu-freq/core.rst
Documentation/devicetree/bindings/i2c/apple,i2c.yaml
Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml
Documentation/devicetree/bindings/input/gpio-keys.yaml
Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml
Documentation/devicetree/bindings/net/ethernet-phy.yaml
Documentation/devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml
Documentation/devicetree/bindings/power/supply/bq25980.yaml
Documentation/devicetree/bindings/regulator/samsung,s5m8767.yaml
Documentation/devicetree/bindings/sound/wlf,wm8962.yaml
Documentation/devicetree/bindings/spi/spi-rockchip.yaml
Documentation/filesystems/netfs_library.rst
Documentation/locking/locktypes.rst
Documentation/networking/bonding.rst
Documentation/networking/device_drivers/ethernet/freescale/dpaa2/overview.rst
Documentation/networking/device_drivers/ethernet/intel/ixgbe.rst
Documentation/networking/timestamping.rst
Documentation/process/changes.rst
Documentation/process/submitting-patches.rst
Documentation/sound/hd-audio/models.rst
MAINTAINERS
Makefile
arch/arm/boot/dts/imx6qdl-wandboard.dtsi
arch/arm/boot/dts/imx6qp-prtwd3.dts
arch/arm/boot/dts/imx6ull-pinfunc.h
arch/arm/boot/dts/ls1021a-tsn.dts
arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts
arch/arm/boot/dts/socfpga_arria5_socdk.dts
arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
arch/arm/boot/dts/socfpga_cyclone5_socrates.dts
arch/arm/boot/dts/socfpga_cyclone5_sodia.dts
arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
arch/arm/kernel/entry-armv.S
arch/arm/kernel/head-nommu.S
arch/arm/mach-rockchip/platsmp.c
arch/arm64/Kconfig.platforms
arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts
arch/arm64/boot/dts/amlogic/meson-axg-jethome-jethub-j100.dts
arch/arm64/boot/dts/apple/t8103-j274.dts
arch/arm64/boot/dts/apple/t8103.dtsi
arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts
arch/arm64/boot/dts/freescale/fsl-lx2160a-bluebox3.dts
arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
arch/arm64/boot/dts/freescale/imx8mq.dtsi
arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts
arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts
arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
arch/arm64/include/asm/kvm_arm.h
arch/arm64/kernel/entry-ftrace.S
arch/arm64/kernel/machine_kexec.c
arch/arm64/kernel/machine_kexec_file.c
arch/arm64/kvm/hyp/include/hyp/switch.h
arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
arch/arm64/kvm/hyp/nvhe/switch.c
arch/arm64/kvm/hyp/vhe/switch.c
arch/csky/kernel/traps.c
arch/mips/include/asm/mach-ralink/spaces.h
arch/mips/include/asm/pci.h
arch/mips/net/bpf_jit_comp.h
arch/mips/pci/pci-generic.c
arch/parisc/Kconfig
arch/parisc/Makefile
arch/parisc/configs/generic-64bit_defconfig
arch/parisc/include/asm/futex.h
arch/parisc/install.sh
arch/parisc/kernel/syscall.S
arch/parisc/kernel/time.c
arch/parisc/kernel/traps.c
arch/powerpc/kernel/module_64.c
arch/powerpc/platforms/85xx/smp.c
arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
arch/riscv/include/asm/kvm_host.h
arch/riscv/kvm/mmu.c
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/include/asm/pci_io.h
arch/s390/kernel/ftrace.c
arch/s390/kernel/irq.c
arch/s390/kernel/machine_kexec_file.c
arch/s390/lib/test_unwind.c
arch/x86/Kconfig
arch/x86/entry/entry_64.S
arch/x86/include/asm/intel-family.h
arch/x86/include/asm/kvm-x86-ops.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/pkru.h
arch/x86/include/asm/sev-common.h
arch/x86/kernel/fpu/signal.c
arch/x86/kernel/setup.c
arch/x86/kernel/sev.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/tsc.c
arch/x86/kernel/tsc_sync.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/ioapic.h
arch/x86/kvm/irq.h
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/paging_tmpl.h
arch/x86/kvm/mmu/tdp_iter.c
arch/x86/kvm/mmu/tdp_iter.h
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/mmu/tdp_mmu.h
arch/x86/kvm/svm/avic.c
arch/x86/kvm/svm/pmu.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/posted_intr.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/net/bpf_jit_comp.c
arch/x86/platform/efi/quirks.c
arch/x86/realmode/init.c
arch/x86/xen/xen-asm.S
block/blk-iocost.c
block/fops.c
block/ioprio.c
drivers/Makefile
drivers/acpi/Kconfig
drivers/acpi/Makefile
drivers/acpi/acpi_pcc.c [new file with mode: 0644]
drivers/acpi/acpi_video.c
drivers/acpi/acpica/acevents.h
drivers/acpi/acpica/acobject.h
drivers/acpi/acpica/actables.h
drivers/acpi/acpica/dsopcode.c
drivers/acpi/acpica/evhandler.c
drivers/acpi/acpica/evregion.c
drivers/acpi/acpica/evrgnini.c
drivers/acpi/acpica/exconfig.c
drivers/acpi/acpica/excreate.c
drivers/acpi/acpica/exfield.c
drivers/acpi/acpica/exoparg1.c
drivers/acpi/acpica/exregion.c
drivers/acpi/acpica/hwesleep.c
drivers/acpi/acpica/hwsleep.c
drivers/acpi/acpica/hwxfsleep.c
drivers/acpi/acpica/tbdata.c
drivers/acpi/acpica/tbfadt.c
drivers/acpi/acpica/tbinstal.c
drivers/acpi/acpica/tbprint.c
drivers/acpi/acpica/tbutils.c
drivers/acpi/acpica/tbxfload.c
drivers/acpi/acpica/utdelete.c
drivers/acpi/bus.c
drivers/acpi/device_pm.c
drivers/acpi/dock.c
drivers/acpi/pci_link.c
drivers/acpi/pci_root.c
drivers/acpi/power.c
drivers/acpi/processor_driver.c
drivers/acpi/processor_idle.c
drivers/acpi/processor_thermal.c
drivers/acpi/property.c
drivers/acpi/resource.c
drivers/acpi/scan.c
drivers/acpi/sleep.c
drivers/acpi/tables.c
drivers/acpi/thermal.c
drivers/acpi/video_detect.c
drivers/acpi/x86/s2idle.c
drivers/acpi/x86/utils.c
drivers/android/binder.c
drivers/ata/ahci_ceva.c
drivers/ata/libata-core.c
drivers/ata/libata-sata.c
drivers/ata/libata-scsi.c
drivers/ata/pata_falcon.c
drivers/ata/sata_fsl.c
drivers/base/power/main.c
drivers/block/loop.c
drivers/block/xen-blkfront.c
drivers/bus/mhi/core/pm.c
drivers/bus/mhi/pci_generic.c
drivers/bus/sunxi-rsb.c
drivers/char/agp/parisc-agp.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/ipmi/ipmi_ssif.c
drivers/clk/clk.c
drivers/clk/imx/clk-imx8qxp-lpcg.c
drivers/clk/imx/clk-imx8qxp.c
drivers/clk/qcom/clk-alpha-pll.c
drivers/clk/qcom/clk-regmap-mux.c
drivers/clk/qcom/common.c
drivers/clk/qcom/common.h
drivers/clk/qcom/gcc-sm6125.c
drivers/clk/versatile/clk-icst.c
drivers/clocksource/arm_arch_timer.c
drivers/clocksource/dw_apb_timer_of.c
drivers/cpufreq/cpufreq.c
drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
drivers/dma-buf/heaps/system_heap.c
drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
drivers/dma/dw-edma/dw-edma-pcie.c
drivers/dma/idxd/irq.c
drivers/dma/idxd/submit.c
drivers/dma/st_fdma.c
drivers/dma/ti/k3-udma.c
drivers/firmware/scpi_pm_domain.c
drivers/firmware/tegra/bpmp-debugfs.c
drivers/gpio/gpio-dln2.c
drivers/gpio/gpio-virtio.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
drivers/gpu/drm/amd/amdgpu/nv.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_link.h
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
drivers/gpu/drm/ast/ast_mode.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_gem_shmem_helper.c
drivers/gpu/drm/drm_syncobj.c
drivers/gpu/drm/i915/display/intel_display_types.h
drivers/gpu/drm/i915/display/intel_dmc.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp.h
drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gt/intel_gtt.c
drivers/gpu/drm/i915/gt/intel_workarounds.c
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/lima/lima_device.c
drivers/gpu/drm/mediatek/mtk_hdmi.c
drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/msm/Makefile
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
drivers/gpu/drm/msm/dp/dp_aux.c
drivers/gpu/drm/msm/dsi/dsi_host.c
drivers/gpu/drm/msm/msm_debugfs.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem_shrinker.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_gpu.h
drivers/gpu/drm/msm/msm_gpu_devfreq.c
drivers/gpu/drm/tiny/simpledrm.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/vc4/vc4_kms.c
drivers/gpu/drm/virtio/virtgpu_drv.c
drivers/gpu/drm/virtio/virtgpu_drv.h
drivers/gpu/drm/virtio/virtgpu_ioctl.c
drivers/hid/Kconfig
drivers/hid/hid-asus.c
drivers/hid/hid-bigbenff.c
drivers/hid/hid-chicony.c
drivers/hid/hid-corsair.c
drivers/hid/hid-elan.c
drivers/hid/hid-elo.c
drivers/hid/hid-ft260.c
drivers/hid/hid-google-hammer.c
drivers/hid/hid-holtek-kbd.c
drivers/hid/hid-holtek-mouse.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-lg.c
drivers/hid/hid-logitech-dj.c
drivers/hid/hid-prodikeys.c
drivers/hid/hid-quirks.c
drivers/hid/hid-roccat-arvo.c
drivers/hid/hid-roccat-isku.c
drivers/hid/hid-roccat-kone.c
drivers/hid/hid-roccat-koneplus.c
drivers/hid/hid-roccat-konepure.c
drivers/hid/hid-roccat-kovaplus.c
drivers/hid/hid-roccat-lua.c
drivers/hid/hid-roccat-pyra.c
drivers/hid/hid-roccat-ryos.c
drivers/hid/hid-roccat-savu.c
drivers/hid/hid-samsung.c
drivers/hid/hid-sony.c
drivers/hid/hid-thrustmaster.c
drivers/hid/hid-u2fzero.c
drivers/hid/hid-uclogic-core.c
drivers/hid/hid-uclogic-params.c
drivers/hid/hid-vivaldi.c
drivers/hid/intel-ish-hid/ipc/pci-ish.c
drivers/hid/wacom_sys.c
drivers/hv/Kconfig
drivers/hwmon/corsair-psu.c
drivers/hwmon/dell-smm-hwmon.c
drivers/hwmon/lm90.c
drivers/hwmon/nct6775.c
drivers/hwmon/pwm-fan.c
drivers/hwmon/sht4x.c
drivers/i2c/busses/i2c-cbus-gpio.c
drivers/i2c/busses/i2c-mpc.c
drivers/i2c/busses/i2c-rk3x.c
drivers/i2c/busses/i2c-stm32f7.c
drivers/i2c/busses/i2c-virtio.c
drivers/i2c/i2c-core-acpi.c
drivers/iio/accel/kxcjk-1013.c
drivers/iio/accel/kxsd9.c
drivers/iio/accel/mma8452.c
drivers/iio/adc/Kconfig
drivers/iio/adc/ad7768-1.c
drivers/iio/adc/at91-sama5d2_adc.c
drivers/iio/adc/axp20x_adc.c
drivers/iio/adc/dln2-adc.c
drivers/iio/adc/stm32-adc.c
drivers/iio/gyro/adxrs290.c
drivers/iio/gyro/itg3200_buffer.c
drivers/iio/industrialio-trigger.c
drivers/iio/light/ltr501.c
drivers/iio/light/stk3310.c
drivers/iio/trigger/stm32-timer-trigger.c
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/driver.c
drivers/infiniband/hw/hfi1/init.c
drivers/infiniband/hw/hfi1/sdma.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.h
drivers/infiniband/hw/hns/hns_roce_srq.c
drivers/infiniband/hw/irdma/hw.c
drivers/infiniband/hw/irdma/main.h
drivers/infiniband/hw/irdma/pble.c
drivers/infiniband/hw/irdma/pble.h
drivers/infiniband/hw/irdma/utils.c
drivers/infiniband/hw/irdma/verbs.c
drivers/infiniband/hw/irdma/verbs.h
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/qib/qib_user_sdma.c
drivers/infiniband/sw/rxe/rxe_qp.c
drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
drivers/input/misc/iqs626a.c
drivers/input/mouse/elantech.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/serio/i8042.c
drivers/input/touchscreen/atmel_mxt_ts.c
drivers/input/touchscreen/elants_i2c.c
drivers/input/touchscreen/goodix.c
drivers/input/touchscreen/goodix.h
drivers/input/touchscreen/goodix_fwupload.c
drivers/irqchip/irq-apple-aic.c
drivers/irqchip/irq-armada-370-xp.c
drivers/irqchip/irq-aspeed-scu-ic.c
drivers/irqchip/irq-bcm7120-l2.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-mips-gic.c
drivers/irqchip/irq-nvic.c
drivers/md/bcache/super.c
drivers/md/dm-integrity.c
drivers/md/md.c
drivers/md/persistent-data/dm-btree-remove.c
drivers/misc/cardreader/rtsx_pcr.c
drivers/misc/eeprom/at25.c
drivers/misc/fastrpc.c
drivers/mmc/core/core.c
drivers/mmc/core/core.h
drivers/mmc/core/host.c
drivers/mmc/host/meson-mx-sdhc-mmc.c
drivers/mmc/host/mmci_stm32_sdmmc.c
drivers/mmc/host/mtk-sd.c
drivers/mmc/host/renesas_sdhi_core.c
drivers/mmc/host/sdhci-tegra.c
drivers/mtd/devices/mtd_dataflash.c
drivers/mtd/nand/raw/Kconfig
drivers/mtd/nand/raw/fsmc_nand.c
drivers/mtd/nand/raw/nand_base.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_options.c
drivers/net/can/kvaser_pciefd.c
drivers/net/can/m_can/m_can.c
drivers/net/can/m_can/m_can.h
drivers/net/can/m_can/m_can_pci.c
drivers/net/can/pch_can.c
drivers/net/can/sja1000/ems_pcmcia.c
drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
drivers/net/dsa/b53/b53_spi.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/mv88e6xxx/port.c
drivers/net/dsa/mv88e6xxx/serdes.c
drivers/net/dsa/mv88e6xxx/serdes.h
drivers/net/dsa/ocelot/felix.c
drivers/net/dsa/rtl8365mb.c
drivers/net/ethernet/altera/altera_tse_main.c
drivers/net/ethernet/aquantia/atlantic/aq_common.h
drivers/net/ethernet/aquantia/atlantic/aq_hw.h
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
drivers/net/ethernet/aquantia/atlantic/aq_vec.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
drivers/net/ethernet/broadcom/bcm4908_enet.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bcmsysport.h
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/google/gve/gve_adminq.c
drivers/net/ethernet/google/gve/gve_utils.c
drivers/net/ethernet/hisilicon/hns3/hnae3.h
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
drivers/net/ethernet/huawei/hinic/hinic_sriov.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
drivers/net/ethernet/intel/iavf/iavf_ethtool.c
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/ice/ice_base.c
drivers/net/ethernet/intel/ice/ice_dcb_nl.c
drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
drivers/net/ethernet/intel/ice/ice_fdir.c
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
drivers/net/ethernet/intel/ice/ice_flex_pipe.h
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_ptp.c
drivers/net/ethernet/intel/ice/ice_ptp.h
drivers/net/ethernet/intel/ice/ice_switch.c
drivers/net/ethernet/intel/ice/ice_tc_lib.c
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/ice/ice_txrx.h
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
drivers/net/ethernet/intel/ice/ice_xsk.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igbvf/netdev.c
drivers/net/ethernet/intel/igc/igc_i225.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
drivers/net/ethernet/lantiq_xrx200.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
drivers/net/ethernet/marvell/prestera/prestera_main.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/micrel/ks8851_par.c
drivers/net/ethernet/microsoft/mana/hw_channel.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/natsemi/xtsonic.c
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
drivers/net/ethernet/qlogic/qede/qede_fp.c
drivers/net/ethernet/qlogic/qla3xxx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
drivers/net/ethernet/sfc/ef100_nic.c
drivers/net/ethernet/sfc/falcon/rx.c
drivers/net/ethernet/sfc/rx_common.c
drivers/net/ethernet/smsc/smc911x.c
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
drivers/net/ethernet/ti/am65-cpsw-nuss.c
drivers/net/fjes/fjes_main.c
drivers/net/hamradio/mkiss.c
drivers/net/netdevsim/bpf.c
drivers/net/netdevsim/ethtool.c
drivers/net/phy/mdio_bus.c
drivers/net/phy/phylink.c
drivers/net/tun.c
drivers/net/usb/asix_common.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/lan78xx.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vrf.c
drivers/net/wireguard/allowedips.c
drivers/net/wireguard/device.c
drivers/net/wireguard/device.h
drivers/net/wireguard/main.c
drivers/net/wireguard/queueing.c
drivers/net/wireguard/queueing.h
drivers/net/wireguard/ratelimiter.c
drivers/net/wireguard/receive.c
drivers/net/wireguard/socket.c
drivers/net/wireless/ath/ath11k/mhi.c
drivers/net/wireless/broadcom/brcm80211/Kconfig
drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.h
drivers/net/wireless/intel/iwlegacy/Kconfig
drivers/net/wireless/intel/iwlwifi/Kconfig
drivers/net/wireless/intel/iwlwifi/fw/uefi.c
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/iwl-drv.h
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/mediatek/mt76/Makefile
drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
drivers/net/wireless/mediatek/mt76/tx.c
drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
drivers/net/wireless/realtek/rtw89/fw.c
drivers/net/wireless/realtek/rtw89/fw.h
drivers/net/wwan/iosm/iosm_ipc_imem.c
drivers/net/wwan/iosm/iosm_ipc_imem.h
drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
drivers/net/xen-netback/common.h
drivers/net/xen-netback/rx.c
drivers/net/xen-netfront.c
drivers/nvme/host/core.c
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/zns.c
drivers/nvme/target/tcp.c
drivers/of/irq.c
drivers/pci/controller/Kconfig
drivers/pci/controller/dwc/pci-exynos.c
drivers/pci/controller/dwc/pcie-qcom-ep.c
drivers/pci/controller/pci-aardvark.c
drivers/pci/controller/pcie-apple.c
drivers/pci/msi.c
drivers/phy/hisilicon/phy-hi3670-pcie.c
drivers/phy/marvell/phy-mvebu-cp110-utmi.c
drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
drivers/phy/qualcomm/phy-qcom-qmp.c
drivers/phy/qualcomm/phy-qcom-usb-hsic.c
drivers/phy/st/phy-stm32-usbphyc.c
drivers/phy/ti/phy-am654-serdes.c
drivers/phy/ti/phy-j721e-wiz.c
drivers/phy/ti/phy-omap-usb2.c
drivers/phy/ti/phy-tusb1210.c
drivers/pinctrl/bcm/pinctrl-bcm2835.c
drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
drivers/pinctrl/stm32/pinctrl-stm32.c
drivers/platform/mellanox/mlxbf-pmc.c
drivers/platform/x86/Makefile
drivers/platform/x86/amd-pmc.c
drivers/platform/x86/apple-gmux.c
drivers/platform/x86/intel/Kconfig
drivers/platform/x86/intel/hid.c
drivers/platform/x86/intel/pmc/pltdrv.c
drivers/platform/x86/lg-laptop.c
drivers/platform/x86/system76_acpi.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/touchscreen_dmi.c
drivers/powercap/dtpm.c
drivers/reset/tegra/reset-bpmp.c
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/pm8001/pm8001_init.c
drivers/scsi/pm8001/pm80xx_hwi.c
drivers/scsi/qedi/qedi_fw.c
drivers/scsi/qedi/qedi_iscsi.c
drivers/scsi/qedi/qedi_iscsi.h
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/scsi_debug.c
drivers/scsi/ufs/ufshcd-pci.c
drivers/soc/imx/imx8m-blk-ctrl.c
drivers/soc/imx/soc-imx.c
drivers/soc/tegra/fuse/fuse-tegra.c
drivers/soc/tegra/fuse/fuse.h
drivers/spi/spi-armada-3700.c
drivers/tee/amdtee/core.c
drivers/tee/optee/core.c
drivers/tee/optee/smc_abi.c
drivers/tee/tee_shm.c
drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
drivers/tty/hvc/hvc_xen.c
drivers/tty/n_hdlc.c
drivers/tty/serdev/core.c
drivers/tty/serial/8250/8250_bcm7271.c
drivers/tty/serial/8250/8250_fintek.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/8250/8250_port.c
drivers/tty/serial/Kconfig
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/fsl_lpuart.c
drivers/tty/serial/liteuart.c
drivers/tty/serial/msm_serial.c
drivers/tty/serial/serial-tegra.c
drivers/tty/serial/serial_core.c
drivers/usb/cdns3/cdns3-gadget.c
drivers/usb/cdns3/cdnsp-gadget.c
drivers/usb/cdns3/cdnsp-mem.c
drivers/usb/cdns3/cdnsp-ring.c
drivers/usb/cdns3/cdnsp-trace.h
drivers/usb/cdns3/host.c
drivers/usb/core/config.c
drivers/usb/core/quirks.c
drivers/usb/dwc2/platform.c
drivers/usb/dwc3/dwc3-qcom.c
drivers/usb/early/xhci-dbc.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/function/u_ether.c
drivers/usb/gadget/legacy/dbgp.c
drivers/usb/gadget/legacy/inode.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-mtk-sch.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/option.c
drivers/usb/typec/tcpm/tcpm.c
drivers/vdpa/vdpa.c
drivers/vdpa/vdpa_user/vduse_dev.c
drivers/vfio/pci/vfio_pci_igd.c
drivers/vfio/vfio.c
drivers/vhost/vdpa.c
drivers/video/console/vgacon.c
drivers/virtio/virtio_ring.c
drivers/xen/events/events_base.c
fs/afs/file.c
fs/afs/super.c
fs/aio.c
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/delalloc-space.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/free-space-tree.c
fs/btrfs/ioctl.c
fs/btrfs/qgroup.c
fs/btrfs/root-tree.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/btrfs/zoned.c
fs/ceph/caps.c
fs/ceph/file.c
fs/ceph/mds_client.c
fs/cifs/connect.c
fs/cifs/fs_context.c
fs/cifs/fscache.c
fs/cifs/inode.c
fs/cifs/sess.c
fs/file.c
fs/gfs2/glock.c
fs/gfs2/inode.c
fs/io-wq.c
fs/io_uring.c
fs/ksmbd/ndr.c
fs/ksmbd/smb2ops.c
fs/ksmbd/smb2pdu.c
fs/netfs/read_helper.c
fs/nfsd/nfs3proc.c
fs/nfsd/nfs4recover.c
fs/nfsd/nfs4state.c
fs/nfsd/nfsctl.c
fs/nfsd/nfsproc.c
fs/signalfd.c
fs/smbfs_common/cifs_arc4.c
fs/tracefs/inode.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_super.c
fs/zonefs/super.c
include/acpi/acpi_bus.h
include/acpi/acpi_numa.h
include/acpi/acpixf.h
include/acpi/actbl2.h
include/acpi/actypes.h
include/acpi/processor.h
include/linux/acpi.h
include/linux/bpf.h
include/linux/btf.h
include/linux/cacheinfo.h
include/linux/compiler.h
include/linux/delay.h
include/linux/device/driver.h
include/linux/filter.h
include/linux/gfp.h
include/linux/hid.h
include/linux/instrumentation.h
include/linux/kprobes.h
include/linux/memblock.h
include/linux/mhi.h
include/linux/mlx5/mlx5_ifc.h
include/linux/netdevice.h
include/linux/pagemap.h
include/linux/percpu-refcount.h
include/linux/phy.h
include/linux/pm_runtime.h
include/linux/regulator/driver.h
include/linux/sched/cputime.h
include/linux/siphash.h
include/linux/skbuff.h
include/linux/tee_drv.h
include/linux/virtio_net.h
include/linux/wait.h
include/net/bond_alb.h
include/net/busy_poll.h
include/net/dst_cache.h
include/net/fib_rules.h
include/net/ip_fib.h
include/net/netfilter/nf_conntrack.h
include/net/netns/ipv4.h
include/net/pkt_sched.h
include/net/sch_generic.h
include/net/sock.h
include/sound/soc-acpi.h
include/uapi/asm-generic/poll.h
include/uapi/drm/virtgpu_drm.h
include/uapi/linux/byteorder/big_endian.h
include/uapi/linux/byteorder/little_endian.h
include/uapi/linux/if_ether.h
include/uapi/linux/mptcp.h
include/uapi/linux/resource.h
include/xen/events.h
kernel/audit.c
kernel/bpf/btf.c
kernel/bpf/verifier.c
kernel/crash_core.c
kernel/kprobes.c
kernel/locking/rtmutex.c
kernel/sched/core.c
kernel/sched/cputime.c
kernel/sched/wait.c
kernel/signal.c
kernel/softirq.c
kernel/time/tick-sched.c
kernel/time/timekeeping.c
kernel/time/timer.c
kernel/trace/ftrace.c
kernel/trace/trace_events_hist.c
kernel/trace/trace_events_synth.c
kernel/trace/tracing_map.c
kernel/ucount.c
lib/Kconfig.debug
lib/siphash.c
mm/Kconfig
mm/backing-dev.c
mm/damon/core.c
mm/damon/dbgfs.c
mm/damon/vaddr-test.h
mm/damon/vaddr.c
mm/filemap.c
mm/hugetlb.c
mm/kfence/core.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/slub.c
mm/swap_slots.c
net/ax25/af_ax25.c
net/bridge/br_ioctl.c
net/core/dev.c
net/core/devlink.c
net/core/dst_cache.c
net/core/fib_rules.c
net/core/flow_dissector.c
net/core/neighbour.c
net/core/skbuff.c
net/core/skmsg.c
net/core/sock_map.c
net/dsa/tag_ocelot.c
net/ethtool/netlink.c
net/ipv4/af_inet.c
net/ipv4/fib_frontend.c
net/ipv4/fib_rules.c
net/ipv4/fib_semantics.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_diag.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv4/udp.c
net/ipv6/fib6_rules.c
net/ipv6/ip6_offload.c
net/ipv6/seg6_iptunnel.c
net/ipv6/sit.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/mac80211/agg-rx.c
net/mac80211/agg-tx.c
net/mac80211/cfg.c
net/mac80211/driver-ops.h
net/mac80211/mlme.c
net/mac80211/rx.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/tx.c
net/mac80211/util.c
net/mctp/route.c
net/mctp/test/utils.c
net/mpls/af_mpls.c
net/mpls/internal.h
net/mptcp/pm_netlink.c
net/mptcp/protocol.c
net/mptcp/sockopt.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue.c
net/netfilter/nft_exthdr.c
net/netfilter/nft_set_pipapo_avx2.c
net/netlink/af_netlink.c
net/nfc/netlink.c
net/openvswitch/flow.c
net/packet/af_packet.c
net/phonet/pep.c
net/rds/connection.c
net/rds/tcp.c
net/rxrpc/conn_client.c
net/rxrpc/peer_object.c
net/sched/act_ct.c
net/sched/cls_api.c
net/sched/cls_flower.c
net/sched/sch_cake.c
net/sched/sch_ets.c
net/sched/sch_fq_pie.c
net/sched/sch_frag.c
net/smc/af_smc.c
net/smc/smc_close.c
net/smc/smc_core.c
net/tipc/crypto.c
net/tls/tls_sw.c
net/vmw_vsock/virtio_transport_common.c
net/wireless/reg.c
net/xdp/xsk.c
samples/ftrace/Makefile
samples/ftrace/ftrace-direct-multi-modify.c [new file with mode: 0644]
scripts/recordmcount.pl
security/selinux/hooks.c
security/tomoyo/util.c
sound/core/control_compat.c
sound/core/jack.c
sound/core/oss/pcm_oss.c
sound/core/rawmidi.c
sound/drivers/opl3/opl3_midi.c
sound/hda/intel-dsp-config.c
sound/hda/intel-sdw-acpi.c
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_local.h
sound/pci/hda/patch_cs8409.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/amd/yc/pci-acp6x.c
sound/soc/codecs/cs35l41-spi.c
sound/soc/codecs/cs35l41.c
sound/soc/codecs/cs35l41.h
sound/soc/codecs/rk817_codec.c
sound/soc/codecs/rt5682.c
sound/soc/codecs/rt5682s.c
sound/soc/codecs/tas2770.c
sound/soc/codecs/wcd934x.c
sound/soc/codecs/wsa881x.c
sound/soc/intel/common/soc-acpi-intel-cml-match.c
sound/soc/meson/aiu-encoder-i2s.c
sound/soc/meson/aiu-fifo-i2s.c
sound/soc/meson/aiu-fifo.c
sound/soc/qcom/qdsp6/q6routing.c
sound/soc/rockchip/rockchip_i2s_tdm.c
sound/soc/soc-acpi.c
sound/soc/sof/intel/hda-codec.c
sound/soc/sof/intel/hda.c
sound/soc/sof/intel/pci-tgl.c
sound/soc/tegra/tegra186_dspk.c
sound/soc/tegra/tegra210_admaif.c
sound/soc/tegra/tegra210_adx.c
sound/soc/tegra/tegra210_ahub.c
sound/soc/tegra/tegra210_amx.c
sound/soc/tegra/tegra210_dmic.c
sound/soc/tegra/tegra210_i2s.c
sound/soc/tegra/tegra210_mixer.c
sound/soc/tegra/tegra210_mvc.c
sound/soc/tegra/tegra210_sfc.c
sound/soc/tegra/tegra_asoc_machine.c
sound/soc/tegra/tegra_asoc_machine.h
sound/usb/mixer_quirks.c
tools/bpf/resolve_btfids/main.c
tools/build/Makefile.feature
tools/build/feature/Makefile
tools/build/feature/test-all.c
tools/build/feature/test-libpython-version.c [deleted file]
tools/include/linux/debug_locks.h [deleted file]
tools/include/linux/hardirq.h [deleted file]
tools/include/linux/irqflags.h [deleted file]
tools/include/linux/kernel.h
tools/include/linux/lockdep.h [deleted file]
tools/include/linux/math.h [new file with mode: 0644]
tools/include/linux/proc_fs.h [deleted file]
tools/include/linux/spinlock.h
tools/include/linux/stacktrace.h [deleted file]
tools/objtool/elf.c
tools/objtool/objtool.c
tools/perf/Makefile.config
tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
tools/perf/arch/s390/entry/syscalls/syscall.tbl
tools/perf/bench/sched-messaging.c
tools/perf/builtin-inject.c
tools/perf/tests/expr.c
tools/perf/tests/parse-metric.c
tools/perf/util/bpf_skel/bperf.h [deleted file]
tools/perf/util/bpf_skel/bperf_follower.bpf.c
tools/perf/util/bpf_skel/bperf_leader.bpf.c
tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c
tools/perf/util/event.h
tools/perf/util/expr.c
tools/perf/util/header.c
tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
tools/perf/util/intel-pt.c
tools/perf/util/perf_regs.c
tools/perf/util/python.c
tools/perf/util/smt.c
tools/power/acpi/Makefile.config
tools/power/acpi/Makefile.rules
tools/testing/radix-tree/linux/lockdep.h
tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c
tools/testing/selftests/bpf/progs/test_module_attach.c
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c
tools/testing/selftests/bpf/verifier/atomic_fetch.c
tools/testing/selftests/bpf/verifier/search_pruning.c
tools/testing/selftests/bpf/verifier/spill_fill.c
tools/testing/selftests/bpf/verifier/value_ptr_arith.c
tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c
tools/testing/selftests/damon/.gitignore [new file with mode: 0644]
tools/testing/selftests/damon/Makefile
tools/testing/selftests/damon/_debugfs_common.sh [new file with mode: 0644]
tools/testing/selftests/damon/debugfs_attrs.sh
tools/testing/selftests/damon/debugfs_empty_targets.sh [new file with mode: 0644]
tools/testing/selftests/damon/debugfs_huge_count_read_write.sh [new file with mode: 0644]
tools/testing/selftests/damon/debugfs_schemes.sh [new file with mode: 0644]
tools/testing/selftests/damon/debugfs_target_ids.sh [new file with mode: 0644]
tools/testing/selftests/damon/huge_count_read_write.c [new file with mode: 0644]
tools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles_occ.sh
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/include/kvm_util.h
tools/testing/selftests/kvm/kvm_create_max_vcpus.c
tools/testing/selftests/kvm/kvm_page_table_test.c
tools/testing/selftests/kvm/lib/kvm_util.c
tools/testing/selftests/kvm/lib/x86_64/processor.c
tools/testing/selftests/kvm/x86_64/hyperv_features.c
tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c
tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c
tools/testing/selftests/kvm/x86_64/userspace_io_test.c [new file with mode: 0644]
tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c [new file with mode: 0644]
tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c
tools/testing/selftests/net/fcnal-test.sh
tools/testing/selftests/net/fib_tests.sh
tools/testing/selftests/net/forwarding/forwarding.config.sample
tools/testing/selftests/net/icmp_redirect.sh
tools/testing/selftests/net/tls.c
tools/testing/selftests/net/toeplitz.c
tools/testing/selftests/netfilter/conntrack_vrf.sh
tools/testing/selftests/netfilter/nft_concat_range.sh
tools/testing/selftests/netfilter/nft_zones_many.sh
tools/testing/selftests/tc-testing/config
tools/testing/selftests/tc-testing/tdc.py
tools/testing/selftests/tc-testing/tdc.sh
tools/testing/selftests/wireguard/netns.sh
tools/testing/selftests/wireguard/qemu/debug.config
tools/testing/selftests/wireguard/qemu/kernel.config
virt/kvm/kvm_main.c

index 6277bb27b4bfe77756a272e63e83c80e42ab5789..b344067e0acb665b66bb7a299cba69fdf15a82fd 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -126,6 +126,8 @@ Greg Kroah-Hartman <gregkh@suse.de>
 Greg Kroah-Hartman <greg@kroah.com>
 Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com>
 Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com>
+Guo Ren <guoren@kernel.org> <guoren@linux.alibaba.com>
+Guo Ren <guoren@kernel.org> <ren_guo@c-sky.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
 Gustavo Padovan <padovan@profusion.mobi>
 Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org>
index bd9a4901fe46d7856d1a1cee33ccd78738cad29e..9f73253ea35324231f2e90f1eab75e438ba63654 100644 (file)
@@ -25,6 +25,6 @@ Sub graphs of DRBD's state transitions
     :alt:   disk-states-8.dot
     :align: center
 
-.. kernel-figure:: node-states-8.dot
-    :alt:   node-states-8.dot
+.. kernel-figure:: peer-states-8.dot
+    :alt:   peer-states-8.dot
     :align: center
similarity index 71%
rename from Documentation/admin-guide/blockdev/drbd/node-states-8.dot
rename to Documentation/admin-guide/blockdev/drbd/peer-states-8.dot
index bfa54e1f8016a183df30fe2523f77848eaf4d2eb..6dc3954954d6afef4e2b64079e99f918f3eb71f8 100644 (file)
@@ -1,8 +1,3 @@
-digraph node_states {
-       Secondary -> Primary           [ label = "ioctl_set_state()" ]
-       Primary   -> Secondary         [ label = "ioctl_set_state()" ]
-}
-
 digraph peer_states {
        Secondary -> Primary           [ label = "recv state packet" ]
        Primary   -> Secondary         [ label = "recv state packet" ]
index 9725c546a0d46db7eda7b138f34511af5558d453..2fba82431efbe7225cfbd7f4ab7ae06e9e4f1e36 100644 (file)
                        architectures force reset to be always executed
        i8042.unlock    [HW] Unlock (ignore) the keylock
        i8042.kbdreset  [HW] Reset device connected to KBD port
+       i8042.probe_defer
+                       [HW] Allow deferred probing upon i8042 probe errors
 
        i810=           [HW,DRM]
 
                        Default is 1 (enabled)
 
        kvm-intel.emulate_invalid_guest_state=
-                       [KVM,Intel] Enable emulation of invalid guest states
-                       Default is 0 (disabled)
+                       [KVM,Intel] Disable emulation of invalid guest state.
+                       Ignored if kvm-intel.enable_unrestricted_guest=1, as
+                       guest state is never invalid for unrestricted guests.
+                       This param doesn't apply to nested guests (L2), as KVM
+                       never emulates invalid L2 guest state.
+                       Default is 1 (enabled)
 
        kvm-intel.flexpriority=
                        [KVM,Intel] Disable FlexPriority feature (TPR shadow).
index f127666ea3a81659daf3bba59c4c64c38bb6ff20..e5dad2e40aa8937b100cfc7def72bf371b89b72a 100644 (file)
@@ -53,11 +53,10 @@ The number of bits that the PAC occupies in a pointer is 55 minus the
 virtual address size configured by the kernel. For example, with a
 virtual address size of 48, the PAC is 7 bits wide.
 
-Recent versions of GCC can compile code with APIAKey-based return
-address protection when passed the -msign-return-address option. This
-uses instructions in the HINT space (unless -march=armv8.3-a or higher
-is also passed), and such code can run on systems without the pointer
-authentication extension.
+When ARM64_PTR_AUTH_KERNEL is selected, the kernel will be compiled
+with HINT space pointer authentication instructions protecting
+function returns. Kernels built with this option will work on hardware
+with or without pointer authentication support.
 
 In addition to exec(), keys can also be reinitialized to random values
 using the PR_PAC_RESET_KEYS prctl. A bitmask of PR_PAC_APIAKEY,
index 17f7cee569879e5d018e8b38c9317d4b88a714d0..76e5eb5cb62b045e27079d0bf3c1675759484847 100644 (file)
@@ -249,11 +249,16 @@ except ImportError:
 
 html_static_path = ['sphinx-static']
 
-html_context = {
-    'css_files': [
-        '_static/theme_overrides.css',
-    ],
-}
+html_css_files = [
+    'theme_overrides.css',
+]
+
+if major <= 1 and minor < 8:
+    html_context = {
+        'css_files': [
+            '_static/theme_overrides.css',
+        ],
+    }
 
 # Add any extra paths that contain custom files (such as robots.txt or
 # .htaccess) here, relative to this directory. These files are copied
index 33cb90bd1d8f9fac3b21cdb8190b768c30980224..4ceef8e7217c38fc3e07606ce24b05fbed57d71d 100644 (file)
@@ -73,12 +73,12 @@ CPUFREQ_POSTCHANGE.
 The third argument is a struct cpufreq_freqs with the following
 values:
 
-=====  ===========================
-cpu    number of the affected CPU
+====== ======================================
+policy a pointer to the struct cpufreq_policy
 old    old frequency
 new    new frequency
 flags  flags of the cpufreq driver
-=====  ===========================
+====== ======================================
 
 3. CPUFreq Table Generation with Operating Performance Point (OPP)
 ==================================================================
index 22fc8483256f1cdbca2438bbcbc0a313a78a1988..82b953181a5225a03d20e9d735fa6c3be38beb89 100644 (file)
@@ -20,9 +20,9 @@ allOf:
 
 properties:
   compatible:
-    enum:
-      - apple,t8103-i2c
-      - apple,i2c
+    items:
+      - const: apple,t8103-i2c
+      - const: apple,i2c
 
   reg:
     maxItems: 1
@@ -51,7 +51,7 @@ unevaluatedProperties: false
 examples:
   - |
     i2c@35010000 {
-      compatible = "apple,t8103-i2c";
+      compatible = "apple,t8103-i2c", "apple,i2c";
       reg = <0x35010000 0x4000>;
       interrupt-parent = <&aic>;
       interrupts = <0 627 4>;
index c65921e66dc1f4d0f42101c67ebf06488fa5cf12..81c87295912cec047ac6abcdb64e61997ad826ff 100644 (file)
@@ -136,7 +136,7 @@ examples:
         samsung,syscon-phandle = <&pmu_system_controller>;
 
         /* NTC thermistor is a hwmon device */
-        ncp15wb473 {
+        thermistor {
             compatible = "murata,ncp15wb473";
             pullup-uv = <1800000>;
             pullup-ohm = <47000>;
index 060a309ff8e7c757cddc59e083c00a8194db4868..dbe7ecc19ccb94236b5f991a585e127533ccb4ba 100644 (file)
@@ -142,7 +142,7 @@ examples:
         down {
             label = "GPIO Key DOWN";
             linux,code = <108>;
-            interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+            interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
         };
     };
 
index 877183cf42787d5760c052ad2fb549d30464d0e3..1ef849dc74d7ee3c2110de419c514c8acd2fd1af 100644 (file)
@@ -79,6 +79,8 @@ properties:
 
             properties:
               data-lanes:
+                description:
+                  Note that 'fsl,imx7-mipi-csi2' only supports up to 2 data lines.
                 items:
                   minItems: 1
                   maxItems: 4
@@ -91,18 +93,6 @@ properties:
             required:
               - data-lanes
 
-            allOf:
-              - if:
-                  properties:
-                    compatible:
-                      contains:
-                        const: fsl,imx7-mipi-csi2
-                then:
-                  properties:
-                    data-lanes:
-                      items:
-                        maxItems: 2
-
       port@1:
         $ref: /schemas/graph.yaml#/properties/port
         description:
index 2766fe45bb98b5307065ea2d79c5b9f2b13d6156..ee42328a109dca28f89f3942f8068140172d5076 100644 (file)
@@ -91,6 +91,14 @@ properties:
       compensate for the board being designed with the lanes
       swapped.
 
+  enet-phy-lane-no-swap:
+    $ref: /schemas/types.yaml#/definitions/flag
+    description:
+      If set, indicates that PHY will disable swap of the
+      TX/RX lanes. This property allows the PHY to work correctly after
+      e.g. wrong bootstrap configuration caused by issues in PCB
+      layout design.
+
   eee-broken-100tx:
     $ref: /schemas/types.yaml#/definitions/flag
     description:
index 04d5654efb38070b482a41a8d593cec115b115d5..79906519c6522c34aa93d1710fb386371ac78327 100644 (file)
@@ -29,7 +29,7 @@ properties:
           - PHY_TYPE_PCIE
           - PHY_TYPE_SATA
           - PHY_TYPE_SGMII
-          - PHY_TYPE_USB
+          - PHY_TYPE_USB3
       - description: The PHY instance
         minimum: 0
         maximum: 1 # for DP, SATA or USB
index 06eca6667f67899d0cfe8b10634957b3a1220ed5..8367a1fd40571c8626ec5ad5149272b5a857c1d6 100644 (file)
@@ -105,7 +105,7 @@ examples:
           reg = <0x65>;
           interrupt-parent = <&gpio1>;
           interrupts = <16 IRQ_TYPE_EDGE_FALLING>;
-          ti,watchdog-timer = <0>;
+          ti,watchdog-timeout-ms = <0>;
           ti,sc-ocp-limit-microamp = <2000000>;
           ti,sc-ovp-limit-microvolt = <17800000>;
           monitored-battery = <&bat>;
index 80a63d47790a24d82dfc7592d28f09fc32b77c12..c98929a213e93aeba51bbb9433a5234f1747295b 100644 (file)
@@ -51,6 +51,19 @@ patternProperties:
     description:
       Properties for single BUCK regulator.
 
+    properties:
+      op_mode:
+        $ref: /schemas/types.yaml#/definitions/uint32
+        enum: [0, 1, 2, 3]
+        default: 1
+        description: |
+          Describes the different operating modes of the regulator with power
+          mode change in SOC. The different possible values are:
+            0 - always off mode
+            1 - on in normal mode
+            2 - low power mode
+            3 - suspend mode
+
     required:
       - regulator-name
 
@@ -63,6 +76,18 @@ patternProperties:
       Properties for single BUCK regulator.
 
     properties:
+      op_mode:
+        $ref: /schemas/types.yaml#/definitions/uint32
+        enum: [0, 1, 2, 3]
+        default: 1
+        description: |
+          Describes the different operating modes of the regulator with power
+          mode change in SOC. The different possible values are:
+            0 - always off mode
+            1 - on in normal mode
+            2 - low power mode
+            3 - suspend mode
+
       s5m8767,pmic-ext-control-gpios:
         maxItems: 1
         description: |
index 0e6249d7c1330f7e2cfc916b7c1cabbd9c9e5a4e..5e172e9462b99236f8b4b76eb976ec939444ecc4 100644 (file)
@@ -19,6 +19,9 @@ properties:
   clocks:
     maxItems: 1
 
+  interrupts:
+    maxItems: 1
+
   "#sound-dai-cells":
     const: 0
 
index 7f987e79337c89640f066fb868f7f2a3458b279d..52a78a2e362e0f2e83418bf1c881020140171b37 100644 (file)
@@ -33,6 +33,7 @@ properties:
               - rockchip,rk3328-spi
               - rockchip,rk3368-spi
               - rockchip,rk3399-spi
+              - rockchip,rk3568-spi
               - rockchip,rv1126-spi
           - const: rockchip,rk3066-spi
 
index bb68d39f03b789c0a78655ab009b93a24acee862..375baca7edcdc299628c044afe3a7ab8e79c850c 100644 (file)
@@ -1,7 +1,7 @@
 .. SPDX-License-Identifier: GPL-2.0
 
 =================================
-NETWORK FILESYSTEM HELPER LIBRARY
+Network Filesystem Helper Library
 =================================
 
 .. Contents:
@@ -37,22 +37,22 @@ into a common call framework.
 
 The following services are provided:
 
- * Handles transparent huge pages (THPs).
+ * Handle folios that span multiple pages.
 
- * Insulates the netfs from VM interface changes.
+ * Insulate the netfs from VM interface changes.
 
- * Allows the netfs to arbitrarily split reads up into pieces, even ones that
-   don't match page sizes or page alignments and that may cross pages.
+ * Allow the netfs to arbitrarily split reads up into pieces, even ones that
+   don't match folio sizes or folio alignments and that may cross folios.
 
- * Allows the netfs to expand a readahead request in both directions to meet
-   its needs.
+ * Allow the netfs to expand a readahead request in both directions to meet its
+   needs.
 
- * Allows the netfs to partially fulfil a read, which will then be resubmitted.
+ * Allow the netfs to partially fulfil a read, which will then be resubmitted.
 
- * Handles local caching, allowing cached data and server-read data to be
+ * Handle local caching, allowing cached data and server-read data to be
    interleaved for a single request.
 
- * Handles clearing of bufferage that aren't on the server.
+ * Handle clearing of bufferage that aren't on the server.
 
  * Handle retrying of reads that failed, switching reads from the cache to the
    server as necessary.
@@ -70,22 +70,22 @@ Read Helper Functions
 
 Three read helpers are provided::
 
* void netfs_readahead(struct readahead_control *ractl,
-                       const struct netfs_read_request_ops *ops,
-                       void *netfs_priv);``
* int netfs_readpage(struct file *file,
-                     struct page *page,
-                     const struct netfs_read_request_ops *ops,
-                     void *netfs_priv);
* int netfs_write_begin(struct file *file,
-                        struct address_space *mapping,
-                        loff_t pos,
-                        unsigned int len,
-                        unsigned int flags,
-                        struct page **_page,
-                        void **_fsdata,
-                        const struct netfs_read_request_ops *ops,
-                        void *netfs_priv);
      void netfs_readahead(struct readahead_control *ractl,
+                            const struct netfs_read_request_ops *ops,
+                            void *netfs_priv);
      int netfs_readpage(struct file *file,
+                          struct folio *folio,
+                          const struct netfs_read_request_ops *ops,
+                          void *netfs_priv);
      int netfs_write_begin(struct file *file,
+                             struct address_space *mapping,
+                             loff_t pos,
+                             unsigned int len,
+                             unsigned int flags,
+                             struct folio **_folio,
+                             void **_fsdata,
+                             const struct netfs_read_request_ops *ops,
+                             void *netfs_priv);
 
 Each corresponds to a VM operation, with the addition of a couple of parameters
 for the use of the read helpers:
@@ -103,8 +103,8 @@ Both of these values will be stored into the read request structure.
 For ->readahead() and ->readpage(), the network filesystem should just jump
 into the corresponding read helper; whereas for ->write_begin(), it may be a
 little more complicated as the network filesystem might want to flush
-conflicting writes or track dirty data and needs to put the acquired page if an
-error occurs after calling the helper.
+conflicting writes or track dirty data and needs to put the acquired folio if
+an error occurs after calling the helper.
 
 The helpers manage the read request, calling back into the network filesystem
 through the supplied table of operations.  Waits will be performed as
@@ -253,7 +253,7 @@ through which it can issue requests and negotiate::
                void (*issue_op)(struct netfs_read_subrequest *subreq);
                bool (*is_still_valid)(struct netfs_read_request *rreq);
                int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
-                                        struct page *page, void **_fsdata);
+                                        struct folio *folio, void **_fsdata);
                void (*done)(struct netfs_read_request *rreq);
                void (*cleanup)(struct address_space *mapping, void *netfs_priv);
        };
@@ -313,13 +313,14 @@ The operations are as follows:
 
    There is no return value; the netfs_subreq_terminated() function should be
    called to indicate whether or not the operation succeeded and how much data
-   it transferred.  The filesystem also should not deal with setting pages
+   it transferred.  The filesystem also should not deal with setting folios
    uptodate, unlocking them or dropping their refs - the helpers need to deal
    with this as they have to coordinate with copying to the local cache.
 
-   Note that the helpers have the pages locked, but not pinned.  It is possible
-   to use the ITER_XARRAY iov iterator to refer to the range of the inode that
-   is being operated upon without the need to allocate large bvec tables.
+   Note that the helpers have the folios locked, but not pinned.  It is
+   possible to use the ITER_XARRAY iov iterator to refer to the range of the
+   inode that is being operated upon without the need to allocate large bvec
+   tables.
 
  * ``is_still_valid()``
 
@@ -330,15 +331,15 @@ The operations are as follows:
  * ``check_write_begin()``
 
    [Optional] This is called from the netfs_write_begin() helper once it has
-   allocated/grabbed the page to be modified to allow the filesystem to flush
+   allocated/grabbed the folio to be modified to allow the filesystem to flush
    conflicting state before allowing it to be modified.
 
-   It should return 0 if everything is now fine, -EAGAIN if the page should be
+   It should return 0 if everything is now fine, -EAGAIN if the folio should be
    regrabbed and any other error code to abort the operation.
 
  * ``done``
 
-   [Optional] This is called after the pages in the request have all been
+   [Optional] This is called after the folios in the request have all been
    unlocked (and marked uptodate if applicable).
 
  * ``cleanup``
@@ -390,7 +391,7 @@ The read helpers work by the following general procedure:
      * If NETFS_SREQ_CLEAR_TAIL was set, a short read will be cleared to the
        end of the slice instead of reissuing.
 
- * Once the data is read, the pages that have been fully read/cleared:
+ * Once the data is read, the folios that have been fully read/cleared:
 
    * Will be marked uptodate.
 
@@ -398,11 +399,11 @@ The read helpers work by the following general procedure:
 
    * Unlocked
 
- * Any pages that need writing to the cache will then have DIO writes issued.
+ * Any folios that need writing to the cache will then have DIO writes issued.
 
  * Synchronous operations will wait for reading to be complete.
 
- * Writes to the cache will proceed asynchronously and the pages will have the
+ * Writes to the cache will proceed asynchronously and the folios will have the
    PG_fscache mark removed when that completes.
 
  * The request structures will be cleaned up when everything has completed.
@@ -452,6 +453,9 @@ operation table looks like the following::
                            netfs_io_terminated_t term_func,
                            void *term_func_priv);
 
+               int (*prepare_write)(struct netfs_cache_resources *cres,
+                                    loff_t *_start, size_t *_len, loff_t i_size);
+
                int (*write)(struct netfs_cache_resources *cres,
                             loff_t start_pos,
                             struct iov_iter *iter,
@@ -509,6 +513,14 @@ The methods defined in the table are:
    indicating whether the termination is definitely happening in the caller's
    context.
 
+ * ``prepare_write()``
+
+   [Required] Called to adjust a write to the cache and check that there is
+   sufficient space in the cache.  The start and length values indicate the
+   size of the write that netfslib is proposing, and this can be adjusted by
+   the cache to respect DIO boundaries.  The file size is passed for
+   information.
+
  * ``write()``
 
    [Required] Called to write to the cache.  The start file offset is given
@@ -525,4 +537,9 @@ not the read request structure as they could be used in other situations where
 there isn't a read request structure as well, such as writing dirty data to the
 cache.
 
+
+API Function Reference
+======================
+
 .. kernel-doc:: include/linux/netfs.h
+.. kernel-doc:: fs/netfs/read_helper.c
index ddada4a53749364b86a9d11f11d2dcf4b7470ca6..4fd7b70fcde19737652a75a754c1ab6b5d99f46c 100644 (file)
@@ -439,11 +439,9 @@ preemption. The following substitution works on both kernels::
   spin_lock(&p->lock);
   p->count += this_cpu_read(var2);
 
-On a non-PREEMPT_RT kernel migrate_disable() maps to preempt_disable()
-which makes the above code fully equivalent. On a PREEMPT_RT kernel
 migrate_disable() ensures that the task is pinned on the current CPU which
 in turn guarantees that the per-CPU access to var1 and var2 are staying on
-the same CPU.
+the same CPU while the task remains preemptible.
 
 The migrate_disable() substitution is not valid for the following
 scenario::
@@ -456,9 +454,8 @@ scenario::
     p = this_cpu_ptr(&var1);
     p->val = func2();
 
-While correct on a non-PREEMPT_RT kernel, this breaks on PREEMPT_RT because
-here migrate_disable() does not protect against reentrancy from a
-preempting task. A correct substitution for this case is::
+This breaks because migrate_disable() does not protect against reentrancy from
+a preempting task. A correct substitution for this case is::
 
   func()
   {
index 31cfd7d674a6cd767e31e0feb36e3fd1f4b127db..c0a789b0080635afdd208dc7600a9811f700347d 100644 (file)
@@ -196,11 +196,12 @@ ad_actor_sys_prio
 ad_actor_system
 
        In an AD system, this specifies the mac-address for the actor in
-       protocol packet exchanges (LACPDUs). The value cannot be NULL or
-       multicast. It is preferred to have the local-admin bit set for this
-       mac but driver does not enforce it. If the value is not given then
-       system defaults to using the masters' mac address as actors' system
-       address.
+       protocol packet exchanges (LACPDUs). The value cannot be a multicast
+       address. If the all-zeroes MAC is specified, bonding will internally
+       use the MAC of the bond itself. It is preferred to have the
+       local-admin bit set for this mac but driver does not enforce it. If
+       the value is not given then system defaults to using the masters'
+       mac address as actors' system address.
 
        This parameter has effect only in 802.3ad mode and is available through
        SysFs interface.
index d638b5a8aadd4da77c24849ebfc2f862ed0bb9c6..199647729251e642ce76b7e81bd1e21a1a754a75 100644 (file)
@@ -183,6 +183,7 @@ PHY and allows physical transmission and reception of Ethernet frames.
   IRQ config, enable, reset
 
 DPNI (Datapath Network Interface)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 Contains TX/RX queues, network interface configuration, and RX buffer pool
 configuration mechanisms.  The TX/RX queues are in memory and are identified
 by queue number.
index f1d5233e5e510929ba6d3f4f8cd049dc8767677c..0a233b17c664e202b245e65061cb28fd0f8621ea 100644 (file)
@@ -440,6 +440,22 @@ NOTE: For 82599-based network connections, if you are enabling jumbo frames in
 a virtual function (VF), jumbo frames must first be enabled in the physical
 function (PF). The VF MTU setting cannot be larger than the PF MTU.
 
+NBASE-T Support
+---------------
+The ixgbe driver supports NBASE-T on some devices. However, the advertisement
+of NBASE-T speeds is suppressed by default, to accommodate broken network
+switches which cannot cope with advertised NBASE-T speeds. Use the ethtool
+command to enable advertising NBASE-T speeds on devices which support it::
+
+  ethtool -s eth? advertise 0x1800000001028
+
+On Linux systems with INTERFACES(5), this can be specified as a pre-up command
+in /etc/network/interfaces so that the interface is always brought up with
+NBASE-T support, e.g.::
+
+  iface eth? inet dhcp
+       pre-up ethtool -s eth? advertise 0x1800000001028 || true
+
 Generic Receive Offload, aka GRO
 --------------------------------
 The driver supports the in-kernel software implementation of GRO. GRO has
index 80b13353254a09c4577a096a73ce32b78f68a8dd..f5809206eb93d164b266d4f2c4ad5c1f71c2a573 100644 (file)
@@ -582,8 +582,8 @@ Time stamps for outgoing packets are to be generated as follows:
   and hardware timestamping is not possible (SKBTX_IN_PROGRESS not set).
 - As soon as the driver has sent the packet and/or obtained a
   hardware time stamp for it, it passes the time stamp back by
-  calling skb_hwtstamp_tx() with the original skb, the raw
-  hardware time stamp. skb_hwtstamp_tx() clones the original skb and
+  calling skb_tstamp_tx() with the original skb, the raw
+  hardware time stamp. skb_tstamp_tx() clones the original skb and
   adds the timestamps, therefore the original skb has to be freed now.
   If obtaining the hardware time stamp somehow fails, then the driver
   should not fall back to software time stamping. The rationale is that
index b398b857641758aa81588e6acc3690d81c6a8677..cf908d79666e423b27e352e68a093b44395bd019 100644 (file)
@@ -35,6 +35,7 @@ GNU make               3.81             make --version
 binutils               2.23             ld -v
 flex                   2.5.35           flex --version
 bison                  2.0              bison --version
+pahole                 1.16             pahole --version
 util-linux             2.10o            fdformat --version
 kmod                   13               depmod -V
 e2fsprogs              1.41.4           e2fsck -V
@@ -108,6 +109,16 @@ Bison
 Since Linux 4.16, the build system generates parsers
 during build.  This requires bison 2.0 or later.
 
+pahole:
+-------
+
+Since Linux 5.2, if CONFIG_DEBUG_INFO_BTF is selected, the build system
+generates BTF (BPF Type Format) from DWARF in vmlinux, a bit later from kernel
+modules as well.  This requires pahole v1.16 or later.
+
+It is found in the 'dwarves' or 'pahole' distro packages or from
+https://fedorapeople.org/~acme/dwarves/.
+
 Perl
 ----
 
index da085d63af9b818cec2b87d3b69df0c23d677320..6b3aaed66fba105a4c4afdf418af038b086ef2c8 100644 (file)
@@ -14,7 +14,8 @@ works, see Documentation/process/development-process.rst. Also, read
 Documentation/process/submit-checklist.rst
 for a list of items to check before submitting code.  If you are submitting
 a driver, also read Documentation/process/submitting-drivers.rst; for device
-tree binding patches, read Documentation/process/submitting-patches.rst.
+tree binding patches, read
+Documentation/devicetree/bindings/submitting-patches.rst.
 
 This documentation assumes that you're using ``git`` to prepare your patches.
 If you're unfamiliar with ``git``, you would be well-advised to learn how to
index 0ea967d34583878dc48dba1493b4157945fd727d..d25335993e55309d8c7799178231a251ec890e42 100644 (file)
@@ -326,6 +326,8 @@ usi-headset
     Headset support on USI machines
 dual-codecs
     Lenovo laptops with dual codecs
+alc285-hp-amp-init
+    HP laptops which require speaker amplifier initialization (ALC285)
 
 ALC680
 ======
index 360e9aa0205d69c77a011e072d3b30b009bad27b..fb18ce7168aa72b57305940b838794507e3a505b 100644 (file)
@@ -3066,7 +3066,7 @@ F:        Documentation/devicetree/bindings/phy/phy-ath79-usb.txt
 F:     drivers/phy/qualcomm/phy-ath79-usb.c
 
 ATHEROS ATH GENERIC UTILITIES
-M:     Kalle Valo <kvalo@codeaurora.org>
+M:     Kalle Valo <kvalo@kernel.org>
 L:     linux-wireless@vger.kernel.org
 S:     Supported
 F:     drivers/net/wireless/ath/*
@@ -3081,7 +3081,7 @@ W:        https://wireless.wiki.kernel.org/en/users/Drivers/ath5k
 F:     drivers/net/wireless/ath/ath5k/
 
 ATHEROS ATH6KL WIRELESS DRIVER
-M:     Kalle Valo <kvalo@codeaurora.org>
+M:     Kalle Valo <kvalo@kernel.org>
 L:     linux-wireless@vger.kernel.org
 S:     Supported
 W:     https://wireless.wiki.kernel.org/en/users/Drivers/ath6kl
@@ -9329,7 +9329,6 @@ S:        Maintained
 F:     drivers/iio/pressure/dps310.c
 
 INFINIBAND SUBSYSTEM
-M:     Doug Ledford <dledford@redhat.com>
 M:     Jason Gunthorpe <jgg@nvidia.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
@@ -10280,9 +10279,9 @@ F:      lib/Kconfig.kcsan
 F:     scripts/Makefile.kcsan
 
 KDUMP
-M:     Dave Young <dyoung@redhat.com>
 M:     Baoquan He <bhe@redhat.com>
 R:     Vivek Goyal <vgoyal@redhat.com>
+R:     Dave Young <dyoung@redhat.com>
 L:     kexec@lists.infradead.org
 S:     Maintained
 W:     http://lse.sourceforge.net/kdump/
@@ -12180,8 +12179,8 @@ F:      drivers/net/ethernet/mellanox/mlx5/core/fpga/*
 F:     include/linux/mlx5/mlx5_ifc_fpga.h
 
 MELLANOX ETHERNET SWITCH DRIVERS
-M:     Jiri Pirko <jiri@nvidia.com>
 M:     Ido Schimmel <idosch@nvidia.com>
+M:     Petr Machata <petrm@nvidia.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 W:     http://www.mellanox.com
@@ -13249,7 +13248,7 @@ F:      include/uapi/linux/if_*
 F:     include/uapi/linux/netdevice.h
 
 NETWORKING DRIVERS (WIRELESS)
-M:     Kalle Valo <kvalo@codeaurora.org>
+M:     Kalle Valo <kvalo@kernel.org>
 L:     linux-wireless@vger.kernel.org
 S:     Maintained
 Q:     http://patchwork.kernel.org/project/linux-wireless/list/
@@ -14846,7 +14845,7 @@ PCIE DRIVER FOR MEDIATEK
 M:     Ryder Lee <ryder.lee@mediatek.com>
 M:     Jianjun Wang <jianjun.wang@mediatek.com>
 L:     linux-pci@vger.kernel.org
-L:     linux-mediatek@lists.infradead.org
+L:     linux-mediatek@lists.infradead.org (moderated for non-subscribers)
 S:     Supported
 F:     Documentation/devicetree/bindings/pci/mediatek*
 F:     drivers/pci/controller/*mediatek*
@@ -15705,7 +15704,7 @@ T:      git git://linuxtv.org/anttip/media_tree.git
 F:     drivers/media/tuners/qt1010*
 
 QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
-M:     Kalle Valo <kvalo@codeaurora.org>
+M:     Kalle Valo <kvalo@kernel.org>
 L:     ath10k@lists.infradead.org
 S:     Supported
 W:     https://wireless.wiki.kernel.org/en/users/Drivers/ath10k
@@ -15713,7 +15712,7 @@ T:      git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
 F:     drivers/net/wireless/ath/ath10k/
 
 QUALCOMM ATHEROS ATH11K WIRELESS DRIVER
-M:     Kalle Valo <kvalo@codeaurora.org>
+M:     Kalle Valo <kvalo@kernel.org>
 L:     ath11k@lists.infradead.org
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
@@ -15771,6 +15770,15 @@ S:     Maintained
 F:     Documentation/devicetree/bindings/net/qcom,ethqos.txt
 F:     drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
 
+QUALCOMM FASTRPC DRIVER
+M:     Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+M:     Amol Maheshwari <amahesh@qti.qualcomm.com>
+L:     linux-arm-msm@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/misc/qcom,fastrpc.txt
+F:     drivers/misc/fastrpc.c
+F:     include/uapi/misc/fastrpc.h
+
 QUALCOMM GENERIC INTERFACE I2C DRIVER
 M:     Akash Asthana <akashast@codeaurora.org>
 M:     Mukesh Savaliya <msavaliy@codeaurora.org>
@@ -15877,7 +15885,7 @@ F:      Documentation/devicetree/bindings/media/*venus*
 F:     drivers/media/platform/qcom/venus/
 
 QUALCOMM WCN36XX WIRELESS DRIVER
-M:     Kalle Valo <kvalo@codeaurora.org>
+M:     Kalle Valo <kvalo@kernel.org>
 L:     wcn36xx@lists.infradead.org
 S:     Supported
 W:     https://wireless.wiki.kernel.org/en/users/Drivers/wcn36xx
@@ -15979,6 +15987,7 @@ F:      arch/mips/generic/board-ranchu.c
 
 RANDOM NUMBER DRIVER
 M:     "Theodore Ts'o" <tytso@mit.edu>
+M:     Jason A. Donenfeld <Jason@zx2c4.com>
 S:     Maintained
 F:     drivers/char/random.c
 
@@ -16501,6 +16510,12 @@ T:     git git://linuxtv.org/media_tree.git
 F:     Documentation/devicetree/bindings/media/allwinner,sun8i-a83t-de2-rotate.yaml
 F:     drivers/media/platform/sunxi/sun8i-rotate/
 
+RPMSG TTY DRIVER
+M:     Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
+L:     linux-remoteproc@vger.kernel.org
+S:     Maintained
+F:     drivers/tty/rpmsg_tty.c
+
 RTL2830 MEDIA DRIVER
 M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
@@ -16622,8 +16637,8 @@ W:      http://www.ibm.com/developerworks/linux/linux390/
 F:     drivers/iommu/s390-iommu.c
 
 S390 IUCV NETWORK LAYER
-M:     Julian Wiedmann <jwi@linux.ibm.com>
-M:     Karsten Graul <kgraul@linux.ibm.com>
+M:     Alexandra Winter <wintera@linux.ibm.com>
+M:     Wenjia Zhang <wenjia@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 L:     netdev@vger.kernel.org
 S:     Supported
@@ -16633,8 +16648,8 @@ F:      include/net/iucv/
 F:     net/iucv/
 
 S390 NETWORK DRIVERS
-M:     Julian Wiedmann <jwi@linux.ibm.com>
-M:     Karsten Graul <kgraul@linux.ibm.com>
+M:     Alexandra Winter <wintera@linux.ibm.com>
+M:     Wenjia Zhang <wenjia@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 L:     netdev@vger.kernel.org
 S:     Supported
@@ -17408,7 +17423,7 @@ F:      drivers/video/fbdev/sm712*
 SILVACO I3C DUAL-ROLE MASTER
 M:     Miquel Raynal <miquel.raynal@bootlin.com>
 M:     Conor Culhane <conor.culhane@silvaco.com>
-L:     linux-i3c@lists.infradead.org
+L:     linux-i3c@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     Documentation/devicetree/bindings/i3c/silvaco,i3c-master.yaml
 F:     drivers/i3c/master/svc-i3c-master.c
@@ -21044,7 +21059,7 @@ S:      Maintained
 F:     arch/x86/kernel/cpu/zhaoxin.c
 
 ZONEFS FILESYSTEM
-M:     Damien Le Moal <damien.lemoal@wdc.com>
+M:     Damien Le Moal <damien.lemoal@opensource.wdc.com>
 M:     Naohiro Aota <naohiro.aota@wdc.com>
 R:     Johannes Thumshirn <jth@kernel.org>
 L:     linux-fsdevel@vger.kernel.org
index 0a6ecc8bb2d2b71f7d1c000c157f856925159941..17b4319ad2ff23e8c1cde951ff6a0afbeebee44f 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc7
 NAME = Gobble Gobble
 
 # *DOCUMENTATION*
@@ -789,7 +789,7 @@ stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG)      := -fstack-protector-strong
 KBUILD_CFLAGS += $(stackp-flags-y)
 
 KBUILD_CFLAGS-$(CONFIG_WERROR) += -Werror
-KBUILD_CFLAGS += $(KBUILD_CFLAGS-y) $(CONFIG_CC_IMPLICIT_FALLTHROUGH)
+KBUILD_CFLAGS += $(KBUILD_CFLAGS-y) $(CONFIG_CC_IMPLICIT_FALLTHROUGH:"%"=%)
 
 ifdef CONFIG_CC_IS_CLANG
 KBUILD_CPPFLAGS += -Qunused-arguments
@@ -1374,17 +1374,17 @@ endif
 
 ifneq ($(dtstree),)
 
-%.dtb: dt_binding_check include/config/kernel.release scripts_dtc
-       $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@ $(dtstree)/$*.dt.yaml
+%.dtb: include/config/kernel.release scripts_dtc
+       $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@
 
-%.dtbo: dt_binding_check include/config/kernel.release scripts_dtc
-       $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@ $(dtstree)/$*.dt.yaml
+%.dtbo: include/config/kernel.release scripts_dtc
+       $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@
 
 PHONY += dtbs dtbs_install dtbs_check
 dtbs: include/config/kernel.release scripts_dtc
        $(Q)$(MAKE) $(build)=$(dtstree)
 
-ifneq ($(filter dtbs_check %.dtb %.dtbo, $(MAKECMDGOALS)),)
+ifneq ($(filter dtbs_check, $(MAKECMDGOALS)),)
 export CHECK_DTBS=y
 dtbs: dt_binding_check
 endif
index b62a0dbb033ff19487bdd0083e033c01d6d05c43..ec6fba5ee8fdeb890fb7c87edcd05622550faa05 100644 (file)
 
                ethphy: ethernet-phy@1 {
                        reg = <1>;
+                       qca,clk-out-frequency = <125000000>;
                };
        };
 };
index 7648e8a02000a3f37e1b14bb8b83f9683f94be38..cf6571cc4682e226773b5c996a44cb45b23e57ff 100644 (file)
                                label = "cpu";
                                ethernet = <&fec>;
                                phy-mode = "rgmii-id";
+                               rx-internal-delay-ps = <2000>;
+                               tx-internal-delay-ps = <2000>;
 
                                fixed-link {
                                        speed = <100>;
index eb025a9d4759255e57a3a3ba9b491167809278a2..7328d4ef8559f021aa087e5159f3275aef345452 100644 (file)
@@ -82,6 +82,6 @@
 #define MX6ULL_PAD_CSI_DATA04__ESAI_TX_FS                         0x01F4 0x0480 0x0000 0x9 0x0
 #define MX6ULL_PAD_CSI_DATA05__ESAI_TX_CLK                        0x01F8 0x0484 0x0000 0x9 0x0
 #define MX6ULL_PAD_CSI_DATA06__ESAI_TX5_RX0                       0x01FC 0x0488 0x0000 0x9 0x0
-#define MX6ULL_PAD_CSI_DATA07__ESAI_T                           0x0200 0x048C 0x0000 0x9 0x0
+#define MX6ULL_PAD_CSI_DATA07__ESAI_TX0                           0x0200 0x048C 0x0000 0x9 0x0
 
 #endif /* __DTS_IMX6ULL_PINFUNC_H */
index ff0ffb22768b377aee7e594412989d75a84ae5cc..1ea32fff41201b7a787dd116eef675318861ad19 100644 (file)
@@ -91,6 +91,8 @@
                                /* Internal port connected to eth2 */
                                ethernet = <&enet2>;
                                phy-mode = "rgmii";
+                               rx-internal-delay-ps = <0>;
+                               tx-internal-delay-ps = <0>;
                                reg = <4>;
 
                                fixed-link {
index 2b645642b9352c13d2d0797d68a2053ec35df597..2a745522404d6b065ef71bbde83e648875d95014 100644 (file)
@@ -12,7 +12,7 @@
        flash0: n25q00@0 {
                #address-cells = <1>;
                #size-cells = <1>;
-               compatible = "n25q00aa";
+               compatible = "micron,mt25qu02g", "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <100000000>;
 
index 90e676e7019f23377dd05bbf70b45f7af0d47ea5..1b02d46496a852786705a5875e5e5adc20d9540d 100644 (file)
        flash: flash@0 {
                #address-cells = <1>;
                #size-cells = <1>;
-               compatible = "n25q256a";
+               compatible = "micron,n25q256a", "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <100000000>;
 
index 6f138b2b26163ada37d0c1cb0462ed70d44a4b4e..51bb436784e241470d7aba219b38323a37357007 100644 (file)
        flash0: n25q00@0 {
                #address-cells = <1>;
                #size-cells = <1>;
-               compatible = "n25q00";
+               compatible = "micron,mt25qu02g", "jedec,spi-nor";
                reg = <0>;      /* chip select */
                spi-max-frequency = <100000000>;
 
index c155ff02eb6e035a7c7e526635a80a5feda9b4d8..cae9ddd5ed38bbd57efe8371724e59aa49fb0ac2 100644 (file)
        flash: flash@0 {
                #address-cells = <1>;
                #size-cells = <1>;
-               compatible = "n25q00";
+               compatible = "micron,mt25qu02g", "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <100000000>;
 
index 8d5d3996f6f27122412d68072767d621fa11ae8f..ca18b959e6559ebdd4515d71877211022559be19 100644 (file)
@@ -80,7 +80,7 @@
        flash: flash@0 {
                #address-cells = <1>;
                #size-cells = <1>;
-               compatible = "n25q256a";
+               compatible = "micron,n25q256a", "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <100000000>;
                m25p,fast-read;
index 99a71757cdf46330418b65aeae3646668229c378..3f7aa7bf0863aa1150b64ad4a58e120a1bdf7fc1 100644 (file)
        flash0: n25q512a@0 {
                #address-cells = <1>;
                #size-cells = <1>;
-               compatible = "n25q512a";
+               compatible = "micron,n25q512a", "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <100000000>;
 
index a060718758b6755a154c1842f95301d1a8c93806..25874e1b9c82987e090c7229cad34a07edb3c674 100644 (file)
        n25q128@0 {
                #address-cells = <1>;
                #size-cells = <1>;
-               compatible = "n25q128";
+               compatible = "micron,n25q128", "jedec,spi-nor";
                reg = <0>;              /* chip select */
                spi-max-frequency = <100000000>;
                m25p,fast-read;
        n25q00@1 {
                #address-cells = <1>;
                #size-cells = <1>;
-               compatible = "n25q00";
+               compatible = "micron,mt25qu02g", "jedec,spi-nor";
                reg = <1>;              /* chip select */
                spi-max-frequency = <100000000>;
                m25p,fast-read;
index deff286eb5ea00486e1b18a01696fa704d67f728..5cd057859fe909ca573f21980ca1b3585fe6050c 100644 (file)
@@ -596,11 +596,9 @@ call_fpe:
        tstne   r0, #0x04000000                 @ bit 26 set on both ARM and Thumb-2
        reteq   lr
        and     r8, r0, #0x00000f00             @ mask out CP number
- THUMB(        lsr     r8, r8, #8              )
        mov     r7, #1
-       add     r6, r10, #TI_USED_CP
- ARM(  strb    r7, [r6, r8, lsr #8]    )       @ set appropriate used_cp[]
- THUMB(        strb    r7, [r6, r8]            )       @ set appropriate used_cp[]
+       add     r6, r10, r8, lsr #8             @ add used_cp[] array offset first
+       strb    r7, [r6, #TI_USED_CP]           @ set appropriate used_cp[]
 #ifdef CONFIG_IWMMXT
        @ Test if we need to give access to iWMMXt coprocessors
        ldr     r5, [r10, #TI_FLAGS]
@@ -609,7 +607,7 @@ call_fpe:
        bcs     iwmmxt_task_enable
 #endif
  ARM(  add     pc, pc, r8, lsr #6      )
- THUMB(        lsl     r8, r8, #2              )
+ THUMB(        lsr     r8, r8, #6              )
  THUMB(        add     pc, r8                  )
        nop
 
index fadfee9e2b45e417dff13f27ed4156822cd99e20..950bef83339f5e5bcde33333b2f081a546f98297 100644 (file)
@@ -114,6 +114,7 @@ ENTRY(secondary_startup)
        add     r12, r12, r10
        ret     r12
 1:     bl      __after_proc_init
+       ldr     r7, __secondary_data            @ reload r7
        ldr     sp, [r7, #12]                   @ set up the stack pointer
        ldr     r0, [r7, #16]                   @ set up task pointer
        mov     fp, #0
index d60856898d97acaa51912e6097a5d968200b45bd..5ec58d004b7de85793fbbe9a01febf7dd0e399f6 100644 (file)
@@ -189,7 +189,7 @@ static int __init rockchip_smp_prepare_sram(struct device_node *node)
        rockchip_boot_fn = __pa_symbol(secondary_startup);
 
        /* copy the trampoline to sram, that runs during startup of the core */
-       memcpy(sram_base_addr, &rockchip_secondary_trampoline, trampoline_sz);
+       memcpy_toio(sram_base_addr, &rockchip_secondary_trampoline, trampoline_sz);
        flush_cache_all();
        outer_clean_range(0, trampoline_sz);
 
index 1aa8b70732186e167f065b7175030529d2d1e293..54e3910e8b9bd4332e3ab689181de8c6ecbe9245 100644 (file)
@@ -161,7 +161,6 @@ config ARCH_MEDIATEK
 
 config ARCH_MESON
        bool "Amlogic Platforms"
-       select COMMON_CLK
        help
          This enables support for the arm64 based Amlogic SoCs
          such as the s905, S905X/D, S912, A113X/D or S905X/D2
index d13980ed7a79a41c2c9024f7b2d0fe3ba082fbc9..7ec5ac850a0dc576a9680aac773ad9792387a9c6 100644 (file)
@@ -69,7 +69,7 @@
        pinctrl-0 = <&emac_rgmii_pins>;
        phy-supply = <&reg_gmac_3v3>;
        phy-handle = <&ext_rgmii_phy>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
        status = "okay";
 };
 
index 52ebe371df2685ba89580883ce81149aa401398e..561eec21b4deb894ef206c43c8695cc0cb9655b2 100644 (file)
                                        type = "critical";
                                };
                        };
-               };
 
-               cpu_cooling_maps: cooling-maps {
-                       map0 {
-                               trip = <&cpu_passive>;
-                               cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
-                                               <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
-                                               <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
-                                               <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
-                       };
+                       cpu_cooling_maps: cooling-maps {
+                               map0 {
+                                       trip = <&cpu_passive>;
+                                       cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+                                                       <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+                                                       <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+                                                       <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+                               };
 
-                       map1 {
-                               trip = <&cpu_hot>;
-                               cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
-                                               <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
-                                               <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
-                                               <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+                               map1 {
+                                       trip = <&cpu_hot>;
+                                       cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+                                                       <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+                                                       <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+                                                       <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+                               };
                        };
                };
        };
index 33a80f9501dcaec0f986d1bdaa91ae7d0c6a7027..02c36301e985087d73ebed31f465f5c8b6d0a6f5 100644 (file)
@@ -60,7 +60,7 @@
 
 &port02 {
        bus-range = <3 3>;
-       ethernet0: pci@0,0 {
+       ethernet0: ethernet@0,0 {
                reg = <0x30000 0x0 0x0 0x0 0x0>;
                /* To be filled by the loader */
                local-mac-address = [00 10 18 00 00 00];
index fc8b2bb06ffe83b135daa2c041f6ddc6904c7070..8b61e7fd3e9c9844f8a02b747a3135474391c268 100644 (file)
@@ -7,6 +7,7 @@
  * Copyright The Asahi Linux Contributors
  */
 
+#include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/interrupt-controller/apple-aic.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/pinctrl/apple.h>
                        apple,npins = <212>;
 
                        interrupt-controller;
+                       #interrupt-cells = <2>;
                        interrupt-parent = <&aic>;
                        interrupts = <AIC_IRQ 190 IRQ_TYPE_LEVEL_HIGH>,
                                     <AIC_IRQ 191 IRQ_TYPE_LEVEL_HIGH>,
                        apple,npins = <42>;
 
                        interrupt-controller;
+                       #interrupt-cells = <2>;
                        interrupt-parent = <&aic>;
                        interrupts = <AIC_IRQ 268 IRQ_TYPE_LEVEL_HIGH>,
                                     <AIC_IRQ 269 IRQ_TYPE_LEVEL_HIGH>,
                        apple,npins = <23>;
 
                        interrupt-controller;
+                       #interrupt-cells = <2>;
                        interrupt-parent = <&aic>;
                        interrupts = <AIC_IRQ 330 IRQ_TYPE_LEVEL_HIGH>,
                                     <AIC_IRQ 331 IRQ_TYPE_LEVEL_HIGH>,
                        apple,npins = <16>;
 
                        interrupt-controller;
+                       #interrupt-cells = <2>;
                        interrupt-parent = <&aic>;
                        interrupts = <AIC_IRQ 391 IRQ_TYPE_LEVEL_HIGH>,
                                     <AIC_IRQ 392 IRQ_TYPE_LEVEL_HIGH>,
                        port00: pci@0,0 {
                                device_type = "pci";
                                reg = <0x0 0x0 0x0 0x0 0x0>;
-                               reset-gpios = <&pinctrl_ap 152 0>;
+                               reset-gpios = <&pinctrl_ap 152 GPIO_ACTIVE_LOW>;
                                max-link-speed = <2>;
 
                                #address-cells = <3>;
                        port01: pci@1,0 {
                                device_type = "pci";
                                reg = <0x800 0x0 0x0 0x0 0x0>;
-                               reset-gpios = <&pinctrl_ap 153 0>;
+                               reset-gpios = <&pinctrl_ap 153 GPIO_ACTIVE_LOW>;
                                max-link-speed = <2>;
 
                                #address-cells = <3>;
                        port02: pci@2,0 {
                                device_type = "pci";
                                reg = <0x1000 0x0 0x0 0x0 0x0>;
-                               reset-gpios = <&pinctrl_ap 33 0>;
+                               reset-gpios = <&pinctrl_ap 33 GPIO_ACTIVE_LOW>;
                                max-link-speed = <1>;
 
                                #address-cells = <3>;
index 3063851c2fb91e87248cc5c09b9148675b993b85..d3f03dcbb8c381c5a083bb0cc2f4e163ccb8e59e 100644 (file)
@@ -38,7 +38,6 @@
                powerdn {
                        label = "External Power Down";
                        gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
-                       interrupts = <&gpio1 17 IRQ_TYPE_EDGE_FALLING>;
                        linux,code = <KEY_POWER>;
                };
 
@@ -46,7 +45,6 @@
                admin {
                        label = "ADMIN button";
                        gpios = <&gpio3 8 GPIO_ACTIVE_HIGH>;
-                       interrupts = <&gpio3 8 IRQ_TYPE_EDGE_RISING>;
                        linux,code = <KEY_WPS_BUTTON>;
                };
        };
index b21be03da0af117074f86700d15db6fef9d8d648..042c486bdda289333d953000b1d72d26cbc183da 100644 (file)
                                reg = <2>;
                                ethernet = <&dpmac17>;
                                phy-mode = "rgmii-id";
+                               rx-internal-delay-ps = <2000>;
+                               tx-internal-delay-ps = <2000>;
 
                                fixed-link {
                                        speed = <1000>;
                                reg = <2>;
                                ethernet = <&dpmac18>;
                                phy-mode = "rgmii-id";
+                               rx-internal-delay-ps = <2000>;
+                               tx-internal-delay-ps = <2000>;
 
                                fixed-link {
                                        speed = <1000>;
index dc8661ebd1f6651b4eaea14675013d18245fb17c..2433e6f2eda8b706d55b61b2a1c47cc785c19466 100644 (file)
                        clock-names = "i2c";
                        clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
                                            QORIQ_CLK_PLL_DIV(16)>;
-                       scl-gpio = <&gpio2 15 GPIO_ACTIVE_HIGH>;
+                       scl-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>;
                        status = "disabled";
                };
 
                        clock-names = "i2c";
                        clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
                                            QORIQ_CLK_PLL_DIV(16)>;
-                       scl-gpio = <&gpio2 16 GPIO_ACTIVE_HIGH>;
+                       scl-gpios = <&gpio2 16 GPIO_ACTIVE_HIGH>;
                        status = "disabled";
                };
 
index 972766b67a15e2e6c40a9dbe4f2bbd03d52fe83d..71bf497f99c251fcc1723b172c5af47e1b23d3ff 100644 (file)
                                                  <&clk IMX8MQ_VIDEO_PLL1>,
                                                  <&clk IMX8MQ_VIDEO_PLL1_OUT>;
                                assigned-clock-rates = <0>, <0>, <0>, <594000000>;
-                               interconnects = <&noc IMX8MQ_ICM_LCDIF &noc IMX8MQ_ICS_DRAM>;
-                               interconnect-names = "dram";
                                status = "disabled";
 
                                port@0 {
index 665b2e69455dd1c8363602db9f9b63219f7d4063..ea6820902ede0401ccc7161025084ddbed6c8b34 100644 (file)
@@ -97,7 +97,7 @@
                regulator-max-microvolt = <3300000>;
                regulator-always-on;
                regulator-boot-on;
-               vim-supply = <&vcc_io>;
+               vin-supply = <&vcc_io>;
        };
 
        vdd_core: vdd-core {
index d5c7648c841dc7d12ca42aae4d0ab85b774caa3f..f1fcc6b5b402c7e95f72bf2571587951bdedfe2e 100644 (file)
 &sdhci {
        bus-width = <8>;
        mmc-hs400-1_8v;
-       mmc-hs400-enhanced-strobe;
        non-removable;
        status = "okay";
 };
index 63c7681843daa60624ebf29e94f4aa335f440b63..b6ac00f6461370782a616643300746a9be011d4a 100644 (file)
                clock-output-names = "xin32k", "rk808-clkout2";
                pinctrl-names = "default";
                pinctrl-0 = <&pmic_int_l>;
+               rockchip,system-power-controller;
                vcc1-supply = <&vcc5v0_sys>;
                vcc2-supply = <&vcc5v0_sys>;
                vcc3-supply = <&vcc5v0_sys>;
index 7c93f840bc64f9f2cca0f1bb7fae3eff9a452b23..e890166e7fd43701c0c5febe1d78acce77ccf354 100644 (file)
@@ -55,7 +55,7 @@
                regulator-boot-on;
                regulator-min-microvolt = <3300000>;
                regulator-max-microvolt = <3300000>;
-               vim-supply = <&vcc3v3_sys>;
+               vin-supply = <&vcc3v3_sys>;
        };
 
        vcc3v3_sys: vcc3v3-sys {
index 98136c88fa49792e8bff5f6de2dcc147a454b3a8..6a434be6281930aa4b8d50634d52915717c64ab4 100644 (file)
        status = "okay";
 
        bt656-supply = <&vcc_3v0>;
-       audio-supply = <&vcc_3v0>;
+       audio-supply = <&vcc1v8_codec>;
        sdmmc-supply = <&vcc_sdio>;
        gpio1830-supply = <&vcc_3v0>;
 };
index a39fcf318c774df52e72ceebbdf139756328c1a0..01d47c5886dc43a6925116e3ba38c0a52aaf5657 100644 (file)
@@ -91,7 +91,7 @@
 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
 
 /* TCR_EL2 Registers bits */
-#define TCR_EL2_RES1           ((1 << 31) | (1 << 23))
+#define TCR_EL2_RES1           ((1U << 31) | (1 << 23))
 #define TCR_EL2_TBI            (1 << 20)
 #define TCR_EL2_PS_SHIFT       16
 #define TCR_EL2_PS_MASK                (7 << TCR_EL2_PS_SHIFT)
 #define CPTR_EL2_TFP_SHIFT 10
 
 /* Hyp Coprocessor Trap Register */
-#define CPTR_EL2_TCPAC (1 << 31)
+#define CPTR_EL2_TCPAC (1U << 31)
 #define CPTR_EL2_TAM   (1 << 30)
 #define CPTR_EL2_TTA   (1 << 20)
 #define CPTR_EL2_TFP   (1 << CPTR_EL2_TFP_SHIFT)
index b3e4f9a088b1a76118205f0065fda2cc6859aa2f..8cf970d219f5d896a7805c446e9854dadf7d0cd9 100644 (file)
        .endm
 
 SYM_CODE_START(ftrace_regs_caller)
+#ifdef BTI_C
+       BTI_C
+#endif
        ftrace_regs_entry       1
        b       ftrace_common
 SYM_CODE_END(ftrace_regs_caller)
 
 SYM_CODE_START(ftrace_caller)
+#ifdef BTI_C
+       BTI_C
+#endif
        ftrace_regs_entry       0
        b       ftrace_common
 SYM_CODE_END(ftrace_caller)
index 1038494135c8cef847829ebcf43b6fa58d596770..6fb31c117ebe08cab0898cd9a8ca552e3c4a7026 100644 (file)
@@ -147,7 +147,7 @@ int machine_kexec_post_load(struct kimage *kimage)
        if (rc)
                return rc;
        kimage->arch.ttbr1 = __pa(trans_pgd);
-       kimage->arch.zero_page = __pa(empty_zero_page);
+       kimage->arch.zero_page = __pa_symbol(empty_zero_page);
 
        reloc_size = __relocate_new_kernel_end - __relocate_new_kernel_start;
        memcpy(reloc_code, __relocate_new_kernel_start, reloc_size);
index 63634b4d72c158f3e3ea487b3ae36d8a75f0a398..59c648d518488869b6cc4c6b227933aa3419bedd 100644 (file)
@@ -149,6 +149,7 @@ int load_other_segments(struct kimage *image,
                                           initrd_len, cmdline, 0);
        if (!dtb) {
                pr_err("Preparing for new dtb failed\n");
+               ret = -EINVAL;
                goto out_err;
        }
 
index 7a0af1d39303cd580a097e42b14fc287f6ff0c92..96c5f3fb78389ef8be01890df1d8c8fa45d1c6e2 100644 (file)
@@ -403,6 +403,8 @@ typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);
 
 static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
 
+static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
+
 /*
  * Allow the hypervisor to handle the exit with an exit handler if it has one.
  *
@@ -429,6 +431,18 @@ static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
  */
 static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
+       /*
+        * Save PSTATE early so that we can evaluate the vcpu mode
+        * early on.
+        */
+       vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
+
+       /*
+        * Check whether we want to repaint the state one way or
+        * another.
+        */
+       early_exit_filter(vcpu, exit_code);
+
        if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
                vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
 
index de7e14c862e6c9b5415df6a7daf815142a302a16..7ecca8b078519fd315c92cbc7cc059c0d2269bed 100644 (file)
@@ -70,7 +70,12 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
 static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
 {
        ctxt->regs.pc                   = read_sysreg_el2(SYS_ELR);
-       ctxt->regs.pstate               = read_sysreg_el2(SYS_SPSR);
+       /*
+        * Guest PSTATE gets saved at guest fixup time in all
+        * cases. We still need to handle the nVHE host side here.
+        */
+       if (!has_vhe() && ctxt->__hyp_running_vcpu)
+               ctxt->regs.pstate       = read_sysreg_el2(SYS_SPSR);
 
        if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
                ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2);
index c0e3fed26d93068bb1953fed35be5e076f42b096..d13115a124341601f6d0f3367dc5d7c6f7095806 100644 (file)
@@ -233,7 +233,7 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
  * Returns false if the guest ran in AArch32 when it shouldn't have, and
  * thus should exit to the host, or true if a the guest run loop can continue.
  */
-static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code)
+static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
        struct kvm *kvm = kern_hyp_va(vcpu->kvm);
 
@@ -248,10 +248,7 @@ static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code)
                vcpu->arch.target = -1;
                *exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
                *exit_code |= ARM_EXCEPTION_IL;
-               return false;
        }
-
-       return true;
 }
 
 /* Switch to the guest for legacy non-VHE systems */
@@ -316,9 +313,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
                /* Jump in the fire! */
                exit_code = __guest_enter(vcpu);
 
-               if (unlikely(!handle_aarch32_guest(vcpu, &exit_code)))
-                       break;
-
                /* And we're baaack! */
        } while (fixup_guest_exit(vcpu, &exit_code));
 
index 5a2cb5d9bc4b22a55e7afb591962b741619c8446..fbb26b93c347738ce85f27e090ddff776c08e16d 100644 (file)
@@ -112,6 +112,10 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
        return hyp_exit_handlers;
 }
 
+static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+}
+
 /* Switch to the guest for VHE systems running in EL2 */
 static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 {
index e5fbf8653a2158825d660b4e8332f0baab836cda..2020af88b6361df9570f878d08512f08563d2362 100644 (file)
@@ -209,7 +209,7 @@ asmlinkage void do_trap_illinsn(struct pt_regs *regs)
 
 asmlinkage void do_trap_fpe(struct pt_regs *regs)
 {
-#ifdef CONFIG_CPU_HAS_FP
+#ifdef CONFIG_CPU_HAS_FPU
        return fpu_fpe(regs);
 #else
        do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->pc,
@@ -219,7 +219,7 @@ asmlinkage void do_trap_fpe(struct pt_regs *regs)
 
 asmlinkage void do_trap_priv(struct pt_regs *regs)
 {
-#ifdef CONFIG_CPU_HAS_FP
+#ifdef CONFIG_CPU_HAS_FPU
        if (user_mode(regs) && fpu_libc_helper(regs))
                return;
 #endif
index 05d14c21c41784e6b9ad2ea8ed68feed7079bbb5..f7af11ea2d61297845773df23a88ee258a4664f5 100644 (file)
@@ -6,5 +6,7 @@
 #define PCI_IOSIZE     SZ_64K
 #define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
 
+#define pci_remap_iospace pci_remap_iospace
+
 #include <asm/mach-generic/spaces.h>
 #endif
index 421231f55935af9c89021e5f9a8ab5a92f4de3b4..9ffc8192adae86d79434481997fad100917295e7 100644 (file)
 #include <linux/list.h>
 #include <linux/of.h>
 
-#ifdef CONFIG_PCI_DRIVERS_GENERIC
-#define pci_remap_iospace pci_remap_iospace
-#endif
-
 #ifdef CONFIG_PCI_DRIVERS_LEGACY
 
 /*
index 6f3a7b07294b8340707f01e0f22491a04a1d0e71..a37fe20818eb9c02d971cd42d7de3c69628b6ca2 100644 (file)
@@ -98,7 +98,7 @@ do {                                                          \
 #define emit(...) __emit(__VA_ARGS__)
 
 /* Workaround for R10000 ll/sc errata */
-#ifdef CONFIG_WAR_R10000
+#ifdef CONFIG_WAR_R10000_LLSC
 #define LLSC_beqz      beqzl
 #else
 #define LLSC_beqz      beqz
index 18eb8a453a86231d458942b84953f79224a72ac0..d2d68bac3d250aa1eca9d7180a5891cf6c076aa9 100644 (file)
@@ -47,6 +47,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
        pci_read_bridge_bases(bus);
 }
 
+#ifdef pci_remap_iospace
 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
 {
        unsigned long vaddr;
@@ -60,3 +61,4 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
        set_io_port_base(vaddr);
        return 0;
 }
+#endif
index b2188da09c732d7d4c883760c85803cc7f130e01..011dc32fdb4d5a5e63ae740e6fe944bade41dd9d 100644 (file)
@@ -85,11 +85,6 @@ config MMU
 config STACK_GROWSUP
        def_bool y
 
-config ARCH_DEFCONFIG
-       string
-       default "arch/parisc/configs/generic-32bit_defconfig" if !64BIT
-       default "arch/parisc/configs/generic-64bit_defconfig" if 64BIT
-
 config GENERIC_LOCKBREAK
        bool
        default y
index 8db4af4879d02f63c84a72e9304a71586bf70f7c..82d77f4b0d0832b92891899aff9f1b797114d37c 100644 (file)
 # Mike Shaver, Helge Deller and Martin K. Petersen
 #
 
+ifdef CONFIG_PARISC_SELF_EXTRACT
+boot := arch/parisc/boot
+KBUILD_IMAGE := $(boot)/bzImage
+else
 KBUILD_IMAGE := vmlinuz
+endif
 
 NM             = sh $(srctree)/arch/parisc/nm
 CHECKFLAGS     += -D__hppa__=1
index d2daeac2b21757cdf62c3c13bf175a58dabc5a44..1b8fd80cbe7f89ad43a89beef21bdadb813b56dd 100644 (file)
@@ -1,7 +1,9 @@
 CONFIG_LOCALVERSION="-64bit"
 # CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_KERNEL_LZ4=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
@@ -35,6 +37,7 @@ CONFIG_MODVERSIONS=y
 CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BINFMT_MISC=m
 # CONFIG_COMPACTION is not set
+CONFIG_MEMORY_FAILURE=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -65,12 +68,15 @@ CONFIG_SCSI_ISCSI_ATTRS=y
 CONFIG_SCSI_SRP_ATTRS=y
 CONFIG_ISCSI_BOOT_SYSFS=y
 CONFIG_SCSI_MPT2SAS=y
-CONFIG_SCSI_LASI700=m
+CONFIG_SCSI_LASI700=y
 CONFIG_SCSI_SYM53C8XX_2=y
 CONFIG_SCSI_ZALON=y
 CONFIG_SCSI_QLA_ISCSI=m
 CONFIG_SCSI_DH=y
 CONFIG_ATA=y
+CONFIG_SATA_SIL=y
+CONFIG_SATA_SIS=y
+CONFIG_SATA_VIA=y
 CONFIG_PATA_NS87415=y
 CONFIG_PATA_SIL680=y
 CONFIG_ATA_GENERIC=y
@@ -79,6 +85,7 @@ CONFIG_MD_LINEAR=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_RAID=m
 CONFIG_DM_UEVENT=y
+CONFIG_DM_AUDIT=y
 CONFIG_FUSION=y
 CONFIG_FUSION_SPI=y
 CONFIG_FUSION_SAS=y
@@ -196,10 +203,15 @@ CONFIG_FB_MATROX_G=y
 CONFIG_FB_MATROX_I2C=y
 CONFIG_FB_MATROX_MAVEN=y
 CONFIG_FB_RADEON=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_CLUT224 is not set
 CONFIG_HIDRAW=y
 CONFIG_HID_PID=y
 CONFIG_USB_HIDDEV=y
 CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
 CONFIG_UIO=y
 CONFIG_UIO_PDRV_GENIRQ=m
 CONFIG_UIO_AEC=m
index 70cf8f0a7617b3465324e6eeceabb48503852781..9cd4dd6e63ad9dc578721add9beebbc9ccacd9cd 100644 (file)
@@ -14,7 +14,7 @@ static inline void
 _futex_spin_lock(u32 __user *uaddr)
 {
        extern u32 lws_lock_start[];
-       long index = ((long)uaddr & 0x3f8) >> 1;
+       long index = ((long)uaddr & 0x7f8) >> 1;
        arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
        preempt_disable();
        arch_spin_lock(s);
@@ -24,7 +24,7 @@ static inline void
 _futex_spin_unlock(u32 __user *uaddr)
 {
        extern u32 lws_lock_start[];
-       long index = ((long)uaddr & 0x3f8) >> 1;
+       long index = ((long)uaddr & 0x7f8) >> 1;
        arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
        arch_spin_unlock(s);
        preempt_enable();
index 056d588befdd63671fbaf5a2e3f1afc498d9f8b0..70d3cffb02515c7e8f99a1f493994fca16ce648b 100644 (file)
@@ -39,6 +39,7 @@ verify "$3"
 if [ -n "${INSTALLKERNEL}" ]; then
   if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
   if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
+  if [ -x /usr/sbin/${INSTALLKERNEL} ]; then exec /usr/sbin/${INSTALLKERNEL} "$@"; fi
 fi
 
 # Default install
index d2497b339d139295a42a45c895a01dfd184cb352..65c88ca7a7acd381c13a8193b6d57a6a8be9d66f 100644 (file)
@@ -472,7 +472,7 @@ lws_start:
        extrd,u %r1,PSW_W_BIT,1,%r1
        /* sp must be aligned on 4, so deposit the W bit setting into
         * the bottom of sp temporarily */
-       or,ev   %r1,%r30,%r30
+       or,od   %r1,%r30,%r30
 
        /* Clip LWS number to a 32-bit value for 32-bit processes */
        depdi   0, 31, 32, %r20
index 9fb1e794831b0eb1db779c220041e04b964c883c..061119a56fbe81b0a64540de3ff6d2596f5ec112 100644 (file)
@@ -249,30 +249,16 @@ void __init time_init(void)
 static int __init init_cr16_clocksource(void)
 {
        /*
-        * The cr16 interval timers are not syncronized across CPUs on
-        * different sockets, so mark them unstable and lower rating on
-        * multi-socket SMP systems.
+        * The cr16 interval timers are not syncronized across CPUs, even if
+        * they share the same socket.
         */
        if (num_online_cpus() > 1 && !running_on_qemu) {
-               int cpu;
-               unsigned long cpu0_loc;
-               cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
-
-               for_each_online_cpu(cpu) {
-                       if (cpu == 0)
-                               continue;
-                       if ((cpu0_loc != 0) &&
-                           (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc))
-                               continue;
-
-                       /* mark sched_clock unstable */
-                       clear_sched_clock_stable();
-
-                       clocksource_cr16.name = "cr16_unstable";
-                       clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
-                       clocksource_cr16.rating = 0;
-                       break;
-               }
+               /* mark sched_clock unstable */
+               clear_sched_clock_stable();
+
+               clocksource_cr16.name = "cr16_unstable";
+               clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
+               clocksource_cr16.rating = 0;
        }
 
        /* register at clocksource framework */
index b11fb26ce2998d7b8884c75ff05129c1f48c1db2..892b7fc8f3c45880de741d552d20c187ad0231c8 100644 (file)
@@ -730,6 +730,8 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
                        }
                        mmap_read_unlock(current->mm);
                }
+               /* CPU could not fetch instruction, so clear stale IIR value. */
+               regs->iir = 0xbaadf00d;
                fallthrough;
        case 27: 
                /* Data memory protection ID trap */
index 6baa676e7cb60219795d6b40a99b27a674357723..5d77d3f5fbb561192cb8892158d5ec6297e17699 100644 (file)
@@ -422,11 +422,17 @@ static inline int create_stub(const Elf64_Shdr *sechdrs,
                              const char *name)
 {
        long reladdr;
+       func_desc_t desc;
+       int i;
 
        if (is_mprofile_ftrace_call(name))
                return create_ftrace_stub(entry, addr, me);
 
-       memcpy(entry->jump, ppc64_stub_insns, sizeof(ppc64_stub_insns));
+       for (i = 0; i < sizeof(ppc64_stub_insns) / sizeof(u32); i++) {
+               if (patch_instruction(&entry->jump[i],
+                                     ppc_inst(ppc64_stub_insns[i])))
+                       return 0;
+       }
 
        /* Stub uses address relative to r2. */
        reladdr = (unsigned long)entry - my_r2(sechdrs, me);
@@ -437,10 +443,24 @@ static inline int create_stub(const Elf64_Shdr *sechdrs,
        }
        pr_debug("Stub %p get data from reladdr %li\n", entry, reladdr);
 
-       entry->jump[0] |= PPC_HA(reladdr);
-       entry->jump[1] |= PPC_LO(reladdr);
-       entry->funcdata = func_desc(addr);
-       entry->magic = STUB_MAGIC;
+       if (patch_instruction(&entry->jump[0],
+                             ppc_inst(entry->jump[0] | PPC_HA(reladdr))))
+               return 0;
+
+       if (patch_instruction(&entry->jump[1],
+                         ppc_inst(entry->jump[1] | PPC_LO(reladdr))))
+               return 0;
+
+       // func_desc_t is 8 bytes if ABIv2, else 16 bytes
+       desc = func_desc(addr);
+       for (i = 0; i < sizeof(func_desc_t) / sizeof(u32); i++) {
+               if (patch_instruction(((u32 *)&entry->funcdata) + i,
+                                     ppc_inst(((u32 *)(&desc))[i])))
+                       return 0;
+       }
+
+       if (patch_instruction(&entry->magic, ppc_inst(STUB_MAGIC)))
+               return 0;
 
        return 1;
 }
@@ -495,8 +515,11 @@ static int restore_r2(const char *name, u32 *instruction, struct module *me)
                        me->name, *instruction, instruction);
                return 0;
        }
+
        /* ld r2,R2_STACK_OFFSET(r1) */
-       *instruction = PPC_INST_LD_TOC;
+       if (patch_instruction(instruction, ppc_inst(PPC_INST_LD_TOC)))
+               return 0;
+
        return 1;
 }
 
@@ -636,9 +659,12 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                        }
 
                        /* Only replace bits 2 through 26 */
-                       *(uint32_t *)location
-                               = (*(uint32_t *)location & ~0x03fffffc)
+                       value = (*(uint32_t *)location & ~0x03fffffc)
                                | (value & 0x03fffffc);
+
+                       if (patch_instruction((u32 *)location, ppc_inst(value)))
+                               return -EFAULT;
+
                        break;
 
                case R_PPC64_REL64:
index 83f4a6389a282772f794fc3be09757791ac443a4..d7081e9af65c73781bce016c65841208ef91d177 100644 (file)
@@ -220,7 +220,7 @@ static int smp_85xx_start_cpu(int cpu)
        local_irq_save(flags);
        hard_irq_disable();
 
-       if (qoriq_pm_ops)
+       if (qoriq_pm_ops && qoriq_pm_ops->cpu_up_prepare)
                qoriq_pm_ops->cpu_up_prepare(cpu);
 
        /* if cpu is not spinning, reset it */
@@ -292,7 +292,7 @@ static int smp_85xx_kick_cpu(int nr)
                booting_thread_hwid = cpu_thread_in_core(nr);
                primary = cpu_first_thread_sibling(nr);
 
-               if (qoriq_pm_ops)
+               if (qoriq_pm_ops && qoriq_pm_ops->cpu_up_prepare)
                        qoriq_pm_ops->cpu_up_prepare(nr);
 
                /*
index ba304d4c455c2a449a737d56c65f3a1e4c09b37b..ced0d4e479385aa54ae571be1a05c2bd519f0286 100644 (file)
@@ -76,6 +76,7 @@
                spi-max-frequency = <20000000>;
                voltage-ranges = <3300 3300>;
                disable-wp;
+               gpios = <&gpio 11 GPIO_ACTIVE_LOW>;
        };
 };
 
index 4f66919215f6eb1478d379a675d8c61e11f977fc..6bfa1f24d3deb45e493a0d4bb8bc75b1f06b6ffa 100644 (file)
@@ -2,6 +2,7 @@
 /* Copyright (c) 2020 SiFive, Inc */
 
 #include "fu740-c000.dtsi"
+#include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 
 /* Clock frequency (in Hz) of the PCB crystal for rtcclk */
        temperature-sensor@4c {
                compatible = "ti,tmp451";
                reg = <0x4c>;
+               vcc-supply = <&vdd_bpro>;
                interrupt-parent = <&gpio>;
                interrupts = <6 IRQ_TYPE_LEVEL_LOW>;
        };
 
+       eeprom@54 {
+               compatible = "microchip,24c02", "atmel,24c02";
+               reg = <0x54>;
+               vcc-supply = <&vdd_bpro>;
+               label = "board-id";
+               pagesize = <16>;
+               read-only;
+               size = <256>;
+       };
+
        pmic@58 {
                compatible = "dlg,da9063";
                reg = <0x58>;
                interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
                interrupt-controller;
 
-               regulators {
-                       vdd_bcore1: bcore1 {
-                               regulator-min-microvolt = <900000>;
-                               regulator-max-microvolt = <900000>;
-                               regulator-min-microamp = <5000000>;
-                               regulator-max-microamp = <5000000>;
-                               regulator-always-on;
-                       };
+               onkey {
+                       compatible = "dlg,da9063-onkey";
+               };
 
-                       vdd_bcore2: bcore2 {
-                               regulator-min-microvolt = <900000>;
-                               regulator-max-microvolt = <900000>;
-                               regulator-min-microamp = <5000000>;
-                               regulator-max-microamp = <5000000>;
+               rtc {
+                       compatible = "dlg,da9063-rtc";
+               };
+
+               wdt {
+                       compatible = "dlg,da9063-watchdog";
+               };
+
+               regulators {
+                       vdd_bcore: bcores-merged {
+                               regulator-min-microvolt = <1050000>;
+                               regulator-max-microvolt = <1050000>;
+                               regulator-min-microamp = <4800000>;
+                               regulator-max-microamp = <4800000>;
                                regulator-always-on;
                        };
 
                        vdd_bpro: bpro {
                                regulator-min-microvolt = <1800000>;
                                regulator-max-microvolt = <1800000>;
-                               regulator-min-microamp = <2500000>;
-                               regulator-max-microamp = <2500000>;
+                               regulator-min-microamp = <2400000>;
+                               regulator-max-microamp = <2400000>;
                                regulator-always-on;
                        };
 
                        vdd_bperi: bperi {
-                               regulator-min-microvolt = <1050000>;
-                               regulator-max-microvolt = <1050000>;
+                               regulator-min-microvolt = <1060000>;
+                               regulator-max-microvolt = <1060000>;
                                regulator-min-microamp = <1500000>;
                                regulator-max-microamp = <1500000>;
                                regulator-always-on;
                        };
 
-                       vdd_bmem: bmem {
-                               regulator-min-microvolt = <1200000>;
-                               regulator-max-microvolt = <1200000>;
-                               regulator-min-microamp = <3000000>;
-                               regulator-max-microamp = <3000000>;
-                               regulator-always-on;
-                       };
-
-                       vdd_bio: bio {
+                       vdd_bmem_bio: bmem-bio-merged {
                                regulator-min-microvolt = <1200000>;
                                regulator-max-microvolt = <1200000>;
                                regulator-min-microamp = <3000000>;
                        vdd_ldo1: ldo1 {
                                regulator-min-microvolt = <1800000>;
                                regulator-max-microvolt = <1800000>;
-                               regulator-min-microamp = <100000>;
-                               regulator-max-microamp = <100000>;
                                regulator-always-on;
                        };
 
                        vdd_ldo2: ldo2 {
                                regulator-min-microvolt = <1800000>;
                                regulator-max-microvolt = <1800000>;
-                               regulator-min-microamp = <200000>;
-                               regulator-max-microamp = <200000>;
                                regulator-always-on;
                        };
 
                        vdd_ldo3: ldo3 {
-                               regulator-min-microvolt = <1800000>;
-                               regulator-max-microvolt = <1800000>;
-                               regulator-min-microamp = <200000>;
-                               regulator-max-microamp = <200000>;
+                               regulator-min-microvolt = <3300000>;
+                               regulator-max-microvolt = <3300000>;
                                regulator-always-on;
                        };
 
                        vdd_ldo4: ldo4 {
-                               regulator-min-microvolt = <1800000>;
-                               regulator-max-microvolt = <1800000>;
-                               regulator-min-microamp = <200000>;
-                               regulator-max-microamp = <200000>;
+                               regulator-min-microvolt = <2500000>;
+                               regulator-max-microvolt = <2500000>;
                                regulator-always-on;
                        };
 
                        vdd_ldo5: ldo5 {
-                               regulator-min-microvolt = <1800000>;
-                               regulator-max-microvolt = <1800000>;
-                               regulator-min-microamp = <100000>;
-                               regulator-max-microamp = <100000>;
+                               regulator-min-microvolt = <3300000>;
+                               regulator-max-microvolt = <3300000>;
                                regulator-always-on;
                        };
 
                        vdd_ldo6: ldo6 {
-                               regulator-min-microvolt = <3300000>;
-                               regulator-max-microvolt = <3300000>;
-                               regulator-min-microamp = <200000>;
-                               regulator-max-microamp = <200000>;
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <1800000>;
                                regulator-always-on;
                        };
 
                        vdd_ldo7: ldo7 {
-                               regulator-min-microvolt = <1800000>;
-                               regulator-max-microvolt = <1800000>;
-                               regulator-min-microamp = <200000>;
-                               regulator-max-microamp = <200000>;
+                               regulator-min-microvolt = <3300000>;
+                               regulator-max-microvolt = <3300000>;
                                regulator-always-on;
                        };
 
                        vdd_ldo8: ldo8 {
-                               regulator-min-microvolt = <1800000>;
-                               regulator-max-microvolt = <1800000>;
-                               regulator-min-microamp = <200000>;
-                               regulator-max-microamp = <200000>;
+                               regulator-min-microvolt = <3300000>;
+                               regulator-max-microvolt = <3300000>;
                                regulator-always-on;
                        };
 
                        vdd_ld09: ldo9 {
                                regulator-min-microvolt = <1050000>;
                                regulator-max-microvolt = <1050000>;
-                               regulator-min-microamp = <200000>;
-                               regulator-max-microamp = <200000>;
+                               regulator-always-on;
                        };
 
                        vdd_ldo10: ldo10 {
                                regulator-min-microvolt = <1000000>;
                                regulator-max-microvolt = <1000000>;
-                               regulator-min-microamp = <300000>;
-                               regulator-max-microamp = <300000>;
+                               regulator-always-on;
                        };
 
                        vdd_ldo11: ldo11 {
                                regulator-min-microvolt = <2500000>;
                                regulator-max-microvolt = <2500000>;
-                               regulator-min-microamp = <300000>;
-                               regulator-max-microamp = <300000>;
                                regulator-always-on;
                        };
                };
                spi-max-frequency = <20000000>;
                voltage-ranges = <3300 3300>;
                disable-wp;
+               gpios = <&gpio 15 GPIO_ACTIVE_LOW>;
        };
 };
 
 
 &gpio {
        status = "okay";
+       gpio-line-names = "J29.1", "PMICNTB", "PMICSHDN", "J8.1", "J8.3",
+               "PCIe_PWREN", "THERM", "UBRDG_RSTN", "PCIe_PERSTN",
+               "ULPI_RSTN", "J8.2", "UHUB_RSTN", "GEMGXL_RST", "J8.4",
+               "EN_VDD_SD", "SD_CD";
 };
index 25ba21f98504120d8a723c13655d5bd583d35b4f..2639b9ee48f97d0b69c72177d55eb2dd1e9be1d2 100644 (file)
 #include <linux/types.h>
 #include <linux/kvm.h>
 #include <linux/kvm_types.h>
+#include <asm/csr.h>
 #include <asm/kvm_vcpu_fp.h>
 #include <asm/kvm_vcpu_timer.h>
 
-#ifdef CONFIG_64BIT
-#define KVM_MAX_VCPUS                  (1U << 16)
-#else
-#define KVM_MAX_VCPUS                  (1U << 9)
-#endif
+#define KVM_MAX_VCPUS                  \
+       ((HGATP_VMID_MASK >> HGATP_VMID_SHIFT) + 1)
 
 #define KVM_HALT_POLL_NS_DEFAULT       500000
 
index d81bae8eb55ea0a0f81bed64b5c3ab589a99a00a..fc058ff5f4b6f3ac393d58ea25d0446a19fc7664 100644 (file)
@@ -453,6 +453,12 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
 {
+       gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
+       phys_addr_t size = slot->npages << PAGE_SHIFT;
+
+       spin_lock(&kvm->mmu_lock);
+       stage2_unmap_range(kvm, gpa, size, false);
+       spin_unlock(&kvm->mmu_lock);
 }
 
 void kvm_arch_commit_memory_region(struct kvm *kvm,
index fd825097cf048b59d8cc7486ae345e9cbec08b07..e45cc27716deee2270a796615982aba8e6c91ca3 100644 (file)
@@ -117,6 +117,7 @@ CONFIG_UNIX=y
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
+CONFIG_NET_SWITCHDEV=y
 CONFIG_SMC=m
 CONFIG_SMC_DIAG=m
 CONFIG_INET=y
@@ -403,7 +404,6 @@ CONFIG_DEVTMPFS=y
 CONFIG_CONNECTOR=y
 CONFIG_ZRAM=y
 CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
@@ -476,6 +476,7 @@ CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_VXLAN=m
 CONFIG_BAREUDP=m
+CONFIG_AMT=m
 CONFIG_TUN=m
 CONFIG_VETH=m
 CONFIG_VIRTIO_NET=m
@@ -489,6 +490,7 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_AMD is not set
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ASIX is not set
 # CONFIG_NET_VENDOR_ATHEROS is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
 # CONFIG_NET_VENDOR_BROCADE is not set
@@ -510,6 +512,7 @@ CONFIG_NLMON=m
 CONFIG_MLX4_EN=m
 CONFIG_MLX5_CORE=m
 CONFIG_MLX5_CORE_EN=y
+CONFIG_MLX5_ESWITCH=y
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_MICROCHIP is not set
 # CONFIG_NET_VENDOR_MICROSEMI is not set
@@ -571,6 +574,7 @@ CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
 CONFIG_DIAG288_WATCHDOG=m
+# CONFIG_DRM_DEBUG_MODESET_LOCK is not set
 CONFIG_FB=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
@@ -775,12 +779,14 @@ CONFIG_CRC4=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
 CONFIG_RANDOM32_SELFTEST=y
+CONFIG_XZ_DEC_MICROLZMA=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_DEBUG_INFO_BTF=y
 CONFIG_GDB_SCRIPTS=y
 CONFIG_HEADERS_INSTALL=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
@@ -807,6 +813,7 @@ CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
 CONFIG_DEBUG_PER_CPU_MAPS=y
 CONFIG_KFENCE=y
+CONFIG_KFENCE_STATIC_KEYS=y
 CONFIG_DEBUG_SHIRQ=y
 CONFIG_PANIC_ON_OOPS=y
 CONFIG_DETECT_HUNG_TASK=y
@@ -842,6 +849,7 @@ CONFIG_FTRACE_STARTUP_TEST=y
 CONFIG_SAMPLES=y
 CONFIG_SAMPLE_TRACE_PRINTK=m
 CONFIG_SAMPLE_FTRACE_DIRECT=m
+CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m
 CONFIG_DEBUG_ENTRY=y
 CONFIG_CIO_INJECT=y
 CONFIG_KUNIT=m
@@ -860,7 +868,7 @@ CONFIG_FAIL_FUNCTION=y
 CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
 CONFIG_LKDTM=m
 CONFIG_TEST_MIN_HEAP=y
-CONFIG_KPROBES_SANITY_TEST=y
+CONFIG_KPROBES_SANITY_TEST=m
 CONFIG_RBTREE_TEST=y
 CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
index c9c3cedff2d85634327af0d5c36d795da74ddb01..1c750bfca2d8dd7750a7f71e027d8467e91401af 100644 (file)
@@ -109,6 +109,7 @@ CONFIG_UNIX=y
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
+CONFIG_NET_SWITCHDEV=y
 CONFIG_SMC=m
 CONFIG_SMC_DIAG=m
 CONFIG_INET=y
@@ -394,7 +395,6 @@ CONFIG_DEVTMPFS=y
 CONFIG_CONNECTOR=y
 CONFIG_ZRAM=y
 CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
@@ -467,6 +467,7 @@ CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
 CONFIG_VXLAN=m
 CONFIG_BAREUDP=m
+CONFIG_AMT=m
 CONFIG_TUN=m
 CONFIG_VETH=m
 CONFIG_VIRTIO_NET=m
@@ -480,6 +481,7 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_AMD is not set
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ASIX is not set
 # CONFIG_NET_VENDOR_ATHEROS is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
 # CONFIG_NET_VENDOR_BROCADE is not set
@@ -501,6 +503,7 @@ CONFIG_NLMON=m
 CONFIG_MLX4_EN=m
 CONFIG_MLX5_CORE=m
 CONFIG_MLX5_CORE_EN=y
+CONFIG_MLX5_ESWITCH=y
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_MICROCHIP is not set
 # CONFIG_NET_VENDOR_MICROSEMI is not set
@@ -762,12 +765,14 @@ CONFIG_PRIME_NUMBERS=m
 CONFIG_CRC4=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
+CONFIG_XZ_DEC_MICROLZMA=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_DEBUG_INFO_BTF=y
 CONFIG_GDB_SCRIPTS=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_MAGIC_SYSRQ=y
@@ -792,9 +797,11 @@ CONFIG_HIST_TRIGGERS=y
 CONFIG_SAMPLES=y
 CONFIG_SAMPLE_TRACE_PRINTK=m
 CONFIG_SAMPLE_FTRACE_DIRECT=m
+CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m
 CONFIG_KUNIT=m
 CONFIG_KUNIT_DEBUGFS=y
 CONFIG_LKDTM=m
+CONFIG_KPROBES_SANITY_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
 CONFIG_TEST_BPF=m
index aceccf3b9a882a57ecd15dc24324b0a63434f17c..eed3b9acfa71aaf59f8fdd2a2dd58517c0f2e144 100644 (file)
@@ -65,9 +65,11 @@ CONFIG_ZFCP=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_LSM="yama,loadpin,safesetid,integrity"
 # CONFIG_ZLIB_DFLTCC is not set
+CONFIG_XZ_DEC_MICROLZMA=y
 CONFIG_PRINTK_TIME=y
 # CONFIG_SYMBOLIC_ERRNAME is not set
 CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_BTF=y
 CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_PANIC_ON_OOPS=y
index e4dc64cc9c555c11abb747e642faf6fecc910d71..287bb88f76986e127388efd03c18d117bf4c417e 100644 (file)
 
 /* I/O Map */
 #define ZPCI_IOMAP_SHIFT               48
-#define ZPCI_IOMAP_ADDR_BASE           0x8000000000000000UL
+#define ZPCI_IOMAP_ADDR_SHIFT          62
+#define ZPCI_IOMAP_ADDR_BASE           (1UL << ZPCI_IOMAP_ADDR_SHIFT)
 #define ZPCI_IOMAP_ADDR_OFF_MASK       ((1UL << ZPCI_IOMAP_SHIFT) - 1)
 #define ZPCI_IOMAP_MAX_ENTRIES                                                 \
-       ((ULONG_MAX - ZPCI_IOMAP_ADDR_BASE + 1) / (1UL << ZPCI_IOMAP_SHIFT))
+       (1UL << (ZPCI_IOMAP_ADDR_SHIFT - ZPCI_IOMAP_SHIFT))
 #define ZPCI_IOMAP_ADDR_IDX_MASK                                               \
-       (~ZPCI_IOMAP_ADDR_OFF_MASK - ZPCI_IOMAP_ADDR_BASE)
+       ((ZPCI_IOMAP_ADDR_BASE - 1) & ~ZPCI_IOMAP_ADDR_OFF_MASK)
 
 struct zpci_iomap_entry {
        u32 fh;
index 5510c7d10ddc31fed07af20d57e793bc4a3ae97a..21d62d8b6b9afe7f8698a5d2e796a7fdef0d1639 100644 (file)
@@ -290,7 +290,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                return;
 
        regs = ftrace_get_regs(fregs);
-       preempt_disable_notrace();
        p = get_kprobe((kprobe_opcode_t *)ip);
        if (unlikely(!p) || kprobe_disabled(p))
                goto out;
@@ -318,7 +317,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
        }
        __this_cpu_write(current_kprobe, NULL);
 out:
-       preempt_enable_notrace();
        ftrace_test_recursion_unlock(bit);
 }
 NOKPROBE_SYMBOL(kprobe_ftrace_handler);
index 0df83ecaa2e0c0c6e94199fe33b4f6db64baa798..cb70996823401c1b7aef7223f76225c8849b93de 100644 (file)
@@ -138,7 +138,7 @@ void noinstr do_io_irq(struct pt_regs *regs)
        struct pt_regs *old_regs = set_irq_regs(regs);
        int from_idle;
 
-       irq_enter();
+       irq_enter_rcu();
 
        if (user_mode(regs)) {
                update_timer_sys();
@@ -158,7 +158,8 @@ void noinstr do_io_irq(struct pt_regs *regs)
                        do_irq_async(regs, IO_INTERRUPT);
        } while (MACHINE_IS_LPAR && irq_pending(regs));
 
-       irq_exit();
+       irq_exit_rcu();
+
        set_irq_regs(old_regs);
        irqentry_exit(regs, state);
 
@@ -172,7 +173,7 @@ void noinstr do_ext_irq(struct pt_regs *regs)
        struct pt_regs *old_regs = set_irq_regs(regs);
        int from_idle;
 
-       irq_enter();
+       irq_enter_rcu();
 
        if (user_mode(regs)) {
                update_timer_sys();
@@ -190,7 +191,7 @@ void noinstr do_ext_irq(struct pt_regs *regs)
 
        do_irq_async(regs, EXT_INTERRUPT);
 
-       irq_exit();
+       irq_exit_rcu();
        set_irq_regs(old_regs);
        irqentry_exit(regs, state);
 
index 9975ad200d74790d5aebc2c9c4f72d78a0bdf39b..8f43575a4dd32a33df9a3984880d0a05f0ef480a 100644 (file)
@@ -7,6 +7,8 @@
  * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com>
  */
 
+#define pr_fmt(fmt)    "kexec: " fmt
+
 #include <linux/elf.h>
 #include <linux/errno.h>
 #include <linux/kexec.h>
@@ -290,8 +292,16 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
                                     const Elf_Shdr *relsec,
                                     const Elf_Shdr *symtab)
 {
+       const char *strtab, *name, *shstrtab;
+       const Elf_Shdr *sechdrs;
        Elf_Rela *relas;
        int i, r_type;
+       int ret;
+
+       /* String & section header string table */
+       sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
+       strtab = (char *)pi->ehdr + sechdrs[symtab->sh_link].sh_offset;
+       shstrtab = (char *)pi->ehdr + sechdrs[pi->ehdr->e_shstrndx].sh_offset;
 
        relas = (void *)pi->ehdr + relsec->sh_offset;
 
@@ -304,15 +314,27 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
                sym = (void *)pi->ehdr + symtab->sh_offset;
                sym += ELF64_R_SYM(relas[i].r_info);
 
-               if (sym->st_shndx == SHN_UNDEF)
+               if (sym->st_name)
+                       name = strtab + sym->st_name;
+               else
+                       name = shstrtab + sechdrs[sym->st_shndx].sh_name;
+
+               if (sym->st_shndx == SHN_UNDEF) {
+                       pr_err("Undefined symbol: %s\n", name);
                        return -ENOEXEC;
+               }
 
-               if (sym->st_shndx == SHN_COMMON)
+               if (sym->st_shndx == SHN_COMMON) {
+                       pr_err("symbol '%s' in common section\n", name);
                        return -ENOEXEC;
+               }
 
                if (sym->st_shndx >= pi->ehdr->e_shnum &&
-                   sym->st_shndx != SHN_ABS)
+                   sym->st_shndx != SHN_ABS) {
+                       pr_err("Invalid section %d for symbol %s\n",
+                              sym->st_shndx, name);
                        return -ENOEXEC;
+               }
 
                loc = pi->purgatory_buf;
                loc += section->sh_offset;
@@ -326,7 +348,15 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
                addr = section->sh_addr + relas[i].r_offset;
 
                r_type = ELF64_R_TYPE(relas[i].r_info);
-               arch_kexec_do_relocs(r_type, loc, val, addr);
+
+               if (r_type == R_390_PLT32DBL)
+                       r_type = R_390_PC32DBL;
+
+               ret = arch_kexec_do_relocs(r_type, loc, val, addr);
+               if (ret) {
+                       pr_err("Unknown rela relocation: %d\n", r_type);
+                       return -ENOEXEC;
+               }
        }
        return 0;
 }
index cfc5f5557c06756236b935eacee5313da1f663d3..bc7973359ae2786b71f8386bf9903b0daaf66e1f 100644 (file)
@@ -173,10 +173,11 @@ static noinline int unwindme_func4(struct unwindme *u)
                }
 
                /*
-                * trigger specification exception
+                * Trigger operation exception; use insn notation to bypass
+                * llvm's integrated assembler sanity checks.
                 */
                asm volatile(
-                       "       mvcl    %%r1,%%r1\n"
+                       "       .insn   e,0x0000\n"     /* illegal opcode */
                        "0:     nopr    %%r7\n"
                        EX_TABLE(0b, 0b)
                        :);
index 7399327d1eff79da273ba40c65f5e4f64c48c2e2..5c2ccb85f2efb863fac8fca28a5bcff0f07ab900 100644 (file)
@@ -1932,6 +1932,7 @@ config EFI
        depends on ACPI
        select UCS2_STRING
        select EFI_RUNTIME_WRAPPERS
+       select ARCH_USE_MEMREMAP_PROT
        help
          This enables the kernel to use EFI runtime services that are
          available (such as the EFI variable services).
index e38a4cf795d962bbb8312dc6241333b8939a6b8b..97b1f84bb53f808b9bcddba8af67732030171026 100644 (file)
@@ -574,6 +574,10 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
        ud2
 1:
 #endif
+#ifdef CONFIG_XEN_PV
+       ALTERNATIVE "", "jmp xenpv_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
+#endif
+
        POP_REGS pop_rdi=0
 
        /*
@@ -890,6 +894,7 @@ SYM_CODE_START_LOCAL(paranoid_entry)
 .Lparanoid_entry_checkgs:
        /* EBX = 1 -> kernel GSBASE active, no restore required */
        movl    $1, %ebx
+
        /*
         * The kernel-enforced convention is a negative GSBASE indicates
         * a kernel value. No SWAPGS needed on entry and exit.
@@ -897,21 +902,14 @@ SYM_CODE_START_LOCAL(paranoid_entry)
        movl    $MSR_GS_BASE, %ecx
        rdmsr
        testl   %edx, %edx
-       jns     .Lparanoid_entry_swapgs
-       ret
+       js      .Lparanoid_kernel_gsbase
 
-.Lparanoid_entry_swapgs:
+       /* EBX = 0 -> SWAPGS required on exit */
+       xorl    %ebx, %ebx
        swapgs
+.Lparanoid_kernel_gsbase:
 
-       /*
-        * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
-        * unconditional CR3 write, even in the PTI case.  So do an lfence
-        * to prevent GS speculation, regardless of whether PTI is enabled.
-        */
        FENCE_SWAPGS_KERNEL_ENTRY
-
-       /* EBX = 0 -> SWAPGS required on exit */
-       xorl    %ebx, %ebx
        ret
 SYM_CODE_END(paranoid_entry)
 
@@ -993,11 +991,6 @@ SYM_CODE_START_LOCAL(error_entry)
        pushq   %r12
        ret
 
-.Lerror_entry_done_lfence:
-       FENCE_SWAPGS_KERNEL_ENTRY
-.Lerror_entry_done:
-       ret
-
        /*
         * There are two places in the kernel that can potentially fault with
         * usergs. Handle them here.  B stepping K8s sometimes report a
@@ -1020,8 +1013,14 @@ SYM_CODE_START_LOCAL(error_entry)
         * .Lgs_change's error handler with kernel gsbase.
         */
        SWAPGS
-       FENCE_SWAPGS_USER_ENTRY
-       jmp .Lerror_entry_done
+
+       /*
+        * Issue an LFENCE to prevent GS speculation, regardless of whether it is a
+        * kernel or user gsbase.
+        */
+.Lerror_entry_done_lfence:
+       FENCE_SWAPGS_KERNEL_ENTRY
+       ret
 
 .Lbstep_iret:
        /* Fix truncated RIP */
index 5a0bcf8b78d7c2026e93715153decec133a33869..048b6d5aff504f394baeca8d3bf1f39e816bfdd5 100644 (file)
 #define INTEL_FAM6_ALDERLAKE           0x97    /* Golden Cove / Gracemont */
 #define INTEL_FAM6_ALDERLAKE_L         0x9A    /* Golden Cove / Gracemont */
 
-#define INTEL_FAM6_RAPTOR_LAKE         0xB7
+#define INTEL_FAM6_RAPTORLAKE          0xB7
 
 /* "Small Core" Processors (Atom) */
 
index cefe1d81e2e8b7951a69ebee657c64008261730f..9e50da3ed01a3a2598f153884e1f0d5bc71a0abe 100644 (file)
@@ -47,6 +47,7 @@ KVM_X86_OP(set_dr7)
 KVM_X86_OP(cache_reg)
 KVM_X86_OP(get_rflags)
 KVM_X86_OP(set_rflags)
+KVM_X86_OP(get_if_flag)
 KVM_X86_OP(tlb_flush_all)
 KVM_X86_OP(tlb_flush_current)
 KVM_X86_OP_NULL(tlb_remote_flush)
index 6ac61f85e07b9971c40158d0f7d88cde0e3ba55c..555f4de47ef293f3ab97b6404270c8c950334adc 100644 (file)
@@ -97,7 +97,7 @@
        KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_TLB_FLUSH_CURRENT      KVM_ARCH_REQ(26)
 #define KVM_REQ_TLB_FLUSH_GUEST \
-       KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_NO_WAKEUP)
+       KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_APF_READY              KVM_ARCH_REQ(28)
 #define KVM_REQ_MSR_FILTER_CHANGED     KVM_ARCH_REQ(29)
 #define KVM_REQ_UPDATE_CPU_DIRTY_LOGGING \
@@ -1036,6 +1036,7 @@ struct kvm_x86_msr_filter {
 #define APICV_INHIBIT_REASON_PIT_REINJ  4
 #define APICV_INHIBIT_REASON_X2APIC    5
 #define APICV_INHIBIT_REASON_BLOCKIRQ  6
+#define APICV_INHIBIT_REASON_ABSENT    7
 
 struct kvm_arch {
        unsigned long n_used_mmu_pages;
@@ -1348,6 +1349,7 @@ struct kvm_x86_ops {
        void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
        unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
        void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
+       bool (*get_if_flag)(struct kvm_vcpu *vcpu);
 
        void (*tlb_flush_all)(struct kvm_vcpu *vcpu);
        void (*tlb_flush_current)(struct kvm_vcpu *vcpu);
index 4cd49afa0ca4ba5c13395f821027b1685afc31a8..74f0a2d34ffdd0ea26f73ad0b936240048babf6e 100644 (file)
@@ -4,8 +4,8 @@
 
 #include <asm/cpufeature.h>
 
-#define PKRU_AD_BIT 0x1
-#define PKRU_WD_BIT 0x2
+#define PKRU_AD_BIT 0x1u
+#define PKRU_WD_BIT 0x2u
 #define PKRU_BITS_PER_PKEY 2
 
 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
index 2cef6c5a52c2a71a550393c78111dfd3242f47d1..6acaf5af0a3d0657ed48b876c95ef0ad73358c71 100644 (file)
 
 #define GHCB_RESP_CODE(v)              ((v) & GHCB_MSR_INFO_MASK)
 
+/*
+ * Error codes related to GHCB input that can be communicated back to the guest
+ * by setting the lower 32-bits of the GHCB SW_EXITINFO1 field to 2.
+ */
+#define GHCB_ERR_NOT_REGISTERED                1
+#define GHCB_ERR_INVALID_USAGE         2
+#define GHCB_ERR_INVALID_SCRATCH_AREA  3
+#define GHCB_ERR_MISSING_INPUT         4
+#define GHCB_ERR_INVALID_INPUT         5
+#define GHCB_ERR_INVALID_EVENT         6
+
 #endif
index d5958278eba6d311d3af758e2ff9e99f60fa3b8a..91d4b6de58abef38792eef58546b525eae278cb3 100644 (file)
@@ -118,7 +118,7 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame,
                                      struct fpstate *fpstate)
 {
        struct xregs_state __user *x = buf;
-       struct _fpx_sw_bytes sw_bytes;
+       struct _fpx_sw_bytes sw_bytes = {};
        u32 xfeatures;
        int err;
 
index 6a190c7f4d71b05fe1dc9a476424a647999eeb82..e04f5e6eb33f453193c4769f8e39ded586e2a3fa 100644 (file)
@@ -713,9 +713,6 @@ static void __init early_reserve_memory(void)
 
        early_reserve_initrd();
 
-       if (efi_enabled(EFI_BOOT))
-               efi_memblock_x86_reserve_range();
-
        memblock_x86_reserve_range_setup_data();
 
        reserve_ibft_region();
@@ -742,28 +739,6 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
        return 0;
 }
 
-static char * __init prepare_command_line(void)
-{
-#ifdef CONFIG_CMDLINE_BOOL
-#ifdef CONFIG_CMDLINE_OVERRIDE
-       strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
-#else
-       if (builtin_cmdline[0]) {
-               /* append boot loader cmdline to builtin */
-               strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
-               strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
-               strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
-       }
-#endif
-#endif
-
-       strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
-
-       parse_early_param();
-
-       return command_line;
-}
-
 /*
  * Determine if we were loaded by an EFI loader.  If so, then we have also been
  * passed the efi memmap, systab, etc., so we should use these data structures
@@ -852,23 +827,6 @@ void __init setup_arch(char **cmdline_p)
 
        x86_init.oem.arch_setup();
 
-       /*
-        * x86_configure_nx() is called before parse_early_param() (called by
-        * prepare_command_line()) to detect whether hardware doesn't support
-        * NX (so that the early EHCI debug console setup can safely call
-        * set_fixmap()). It may then be called again from within noexec_setup()
-        * during parsing early parameters to honor the respective command line
-        * option.
-        */
-       x86_configure_nx();
-
-       /*
-        * This parses early params and it needs to run before
-        * early_reserve_memory() because latter relies on such settings
-        * supplied as early params.
-        */
-       *cmdline_p = prepare_command_line();
-
        /*
         * Do some memory reservations *before* memory is added to memblock, so
         * memblock allocations won't overwrite it.
@@ -902,6 +860,36 @@ void __init setup_arch(char **cmdline_p)
        bss_resource.start = __pa_symbol(__bss_start);
        bss_resource.end = __pa_symbol(__bss_stop)-1;
 
+#ifdef CONFIG_CMDLINE_BOOL
+#ifdef CONFIG_CMDLINE_OVERRIDE
+       strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+#else
+       if (builtin_cmdline[0]) {
+               /* append boot loader cmdline to builtin */
+               strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
+               strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+               strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+       }
+#endif
+#endif
+
+       strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+       *cmdline_p = command_line;
+
+       /*
+        * x86_configure_nx() is called before parse_early_param() to detect
+        * whether hardware doesn't support NX (so that the early EHCI debug
+        * console setup can safely call set_fixmap()). It may then be called
+        * again from within noexec_setup() during parsing early parameters
+        * to honor the respective command line option.
+        */
+       x86_configure_nx();
+
+       parse_early_param();
+
+       if (efi_enabled(EFI_BOOT))
+               efi_memblock_x86_reserve_range();
+
 #ifdef CONFIG_MEMORY_HOTPLUG
        /*
         * Memory used by the kernel cannot be hot-removed because Linux
index 74f0ec95538486a8dba1d4fab55866ed1b646afd..a9fc2ac7a8bd59cd06ca1c6764cabcd014a61aa3 100644 (file)
@@ -294,11 +294,6 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
                                   char *dst, char *buf, size_t size)
 {
        unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;
-       char __user *target = (char __user *)dst;
-       u64 d8;
-       u32 d4;
-       u16 d2;
-       u8  d1;
 
        /*
         * This function uses __put_user() independent of whether kernel or user
@@ -320,26 +315,42 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
         * instructions here would cause infinite nesting.
         */
        switch (size) {
-       case 1:
+       case 1: {
+               u8 d1;
+               u8 __user *target = (u8 __user *)dst;
+
                memcpy(&d1, buf, 1);
                if (__put_user(d1, target))
                        goto fault;
                break;
-       case 2:
+       }
+       case 2: {
+               u16 d2;
+               u16 __user *target = (u16 __user *)dst;
+
                memcpy(&d2, buf, 2);
                if (__put_user(d2, target))
                        goto fault;
                break;
-       case 4:
+       }
+       case 4: {
+               u32 d4;
+               u32 __user *target = (u32 __user *)dst;
+
                memcpy(&d4, buf, 4);
                if (__put_user(d4, target))
                        goto fault;
                break;
-       case 8:
+       }
+       case 8: {
+               u64 d8;
+               u64 __user *target = (u64 __user *)dst;
+
                memcpy(&d8, buf, 8);
                if (__put_user(d8, target))
                        goto fault;
                break;
+       }
        default:
                WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
                return ES_UNSUPPORTED;
@@ -362,11 +373,6 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
                                  char *src, char *buf, size_t size)
 {
        unsigned long error_code = X86_PF_PROT;
-       char __user *s = (char __user *)src;
-       u64 d8;
-       u32 d4;
-       u16 d2;
-       u8  d1;
 
        /*
         * This function uses __get_user() independent of whether kernel or user
@@ -388,26 +394,41 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
         * instructions here would cause infinite nesting.
         */
        switch (size) {
-       case 1:
+       case 1: {
+               u8 d1;
+               u8 __user *s = (u8 __user *)src;
+
                if (__get_user(d1, s))
                        goto fault;
                memcpy(buf, &d1, 1);
                break;
-       case 2:
+       }
+       case 2: {
+               u16 d2;
+               u16 __user *s = (u16 __user *)src;
+
                if (__get_user(d2, s))
                        goto fault;
                memcpy(buf, &d2, 2);
                break;
-       case 4:
+       }
+       case 4: {
+               u32 d4;
+               u32 __user *s = (u32 __user *)src;
+
                if (__get_user(d4, s))
                        goto fault;
                memcpy(buf, &d4, 4);
                break;
-       case 8:
+       }
+       case 8: {
+               u64 d8;
+               u64 __user *s = (u64 __user *)src;
                if (__get_user(d8, s))
                        goto fault;
                memcpy(buf, &d8, 8);
                break;
+       }
        default:
                WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
                return ES_UNSUPPORTED;
index ac2909f0cab3478c9428fc2a609759c6dd5af3ef..617012f4619f0ba76419207720fb3f5b326703b9 100644 (file)
@@ -579,6 +579,17 @@ static struct sched_domain_topology_level x86_numa_in_package_topology[] = {
        { NULL, },
 };
 
+static struct sched_domain_topology_level x86_hybrid_topology[] = {
+#ifdef CONFIG_SCHED_SMT
+       { cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
+#endif
+#ifdef CONFIG_SCHED_MC
+       { cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) },
+#endif
+       { cpu_cpu_mask, SD_INIT_NAME(DIE) },
+       { NULL, },
+};
+
 static struct sched_domain_topology_level x86_topology[] = {
 #ifdef CONFIG_SCHED_SMT
        { cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
@@ -1469,8 +1480,11 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
 
        calculate_max_logical_packages();
 
+       /* XXX for now assume numa-in-package and hybrid don't overlap */
        if (x86_has_numa_in_package)
                set_sched_topology(x86_numa_in_package_topology);
+       if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
+               set_sched_topology(x86_hybrid_topology);
 
        nmi_selftest();
        impress_friends();
index 2e076a459a0c084aa279f32f3fc644604713e595..a698196377be9bf650eb8bc1ea28692068568626 100644 (file)
@@ -1180,6 +1180,12 @@ void mark_tsc_unstable(char *reason)
 
 EXPORT_SYMBOL_GPL(mark_tsc_unstable);
 
+static void __init tsc_disable_clocksource_watchdog(void)
+{
+       clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+       clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+}
+
 static void __init check_system_tsc_reliable(void)
 {
 #if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
@@ -1196,6 +1202,23 @@ static void __init check_system_tsc_reliable(void)
 #endif
        if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
                tsc_clocksource_reliable = 1;
+
+       /*
+        * Disable the clocksource watchdog when the system has:
+        *  - TSC running at constant frequency
+        *  - TSC which does not stop in C-States
+        *  - the TSC_ADJUST register which allows to detect even minimal
+        *    modifications
+        *  - not more than two sockets. As the number of sockets cannot be
+        *    evaluated at the early boot stage where this has to be
+        *    invoked, check the number of online memory nodes as a
+        *    fallback solution which is a reasonable estimate.
+        */
+       if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
+           boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
+           boot_cpu_has(X86_FEATURE_TSC_ADJUST) &&
+           nr_online_nodes <= 2)
+               tsc_disable_clocksource_watchdog();
 }
 
 /*
@@ -1387,9 +1410,6 @@ static int __init init_tsc_clocksource(void)
        if (tsc_unstable)
                goto unreg;
 
-       if (tsc_clocksource_reliable || no_tsc_watchdog)
-               clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
-
        if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
                clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
 
@@ -1527,7 +1547,7 @@ void __init tsc_init(void)
        }
 
        if (tsc_clocksource_reliable || no_tsc_watchdog)
-               clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+               tsc_disable_clocksource_watchdog();
 
        clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
        detect_art();
index 50a4515fe0ad15ec241c257735022287094a4514..9452dc9664b51fddcfaeb6274935c91885814fda 100644 (file)
@@ -30,6 +30,7 @@ struct tsc_adjust {
 };
 
 static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
+static struct timer_list tsc_sync_check_timer;
 
 /*
  * TSC's on different sockets may be reset asynchronously.
@@ -77,6 +78,46 @@ void tsc_verify_tsc_adjust(bool resume)
        }
 }
 
+/*
+ * Normally the tsc_sync will be checked every time the system enters idle
+ * state, but there is still a caveat that a system won't enter idle,
+ * either because it's too busy or configured purposely to not enter
+ * idle.
+ *
+ * So set up a periodic timer (every 10 minutes) to make sure the check
+ * is always on.
+ */
+
+#define SYNC_CHECK_INTERVAL            (HZ * 600)
+
+static void tsc_sync_check_timer_fn(struct timer_list *unused)
+{
+       int next_cpu;
+
+       tsc_verify_tsc_adjust(false);
+
+       /* Run the check for all onlined CPUs in turn */
+       next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
+       if (next_cpu >= nr_cpu_ids)
+               next_cpu = cpumask_first(cpu_online_mask);
+
+       tsc_sync_check_timer.expires += SYNC_CHECK_INTERVAL;
+       add_timer_on(&tsc_sync_check_timer, next_cpu);
+}
+
+static int __init start_sync_check_timer(void)
+{
+       if (!cpu_feature_enabled(X86_FEATURE_TSC_ADJUST) || tsc_clocksource_reliable)
+               return 0;
+
+       timer_setup(&tsc_sync_check_timer, tsc_sync_check_timer_fn, 0);
+       tsc_sync_check_timer.expires = jiffies + SYNC_CHECK_INTERVAL;
+       add_timer(&tsc_sync_check_timer);
+
+       return 0;
+}
+late_initcall(start_sync_check_timer);
+
 static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
                                   unsigned int cpu, bool bootcpu)
 {
index 5e19e6e4c2ce0b5d357ce04eccb5a7cecf9c478b..8d8c1cc7cb539a048e5409512aefb0b941a5f526 100644 (file)
@@ -1922,11 +1922,13 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
 
                all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;
 
+               if (all_cpus)
+                       goto check_and_send_ipi;
+
                if (!sparse_banks_len)
                        goto ret_success;
 
-               if (!all_cpus &&
-                   kvm_read_guest(kvm,
+               if (kvm_read_guest(kvm,
                                   hc->ingpa + offsetof(struct hv_send_ipi_ex,
                                                        vp_set.bank_contents),
                                   sparse_banks,
@@ -1934,6 +1936,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
                        return HV_STATUS_INVALID_HYPERCALL_INPUT;
        }
 
+check_and_send_ipi:
        if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
                return HV_STATUS_INVALID_HYPERCALL_INPUT;
 
index e66e620c3bed9eafde705b0315ff977fb158d031..539333ac4b38082f01e0407908441aa39b9ca136 100644 (file)
@@ -81,7 +81,6 @@ struct kvm_ioapic {
        unsigned long irq_states[IOAPIC_NUM_PINS];
        struct kvm_io_device dev;
        struct kvm *kvm;
-       void (*ack_notifier)(void *opaque, int irq);
        spinlock_t lock;
        struct rtc_status rtc_status;
        struct delayed_work eoi_inject;
index 650642b18d151083e7120b81ef51dae690090333..c2d7cfe82d004b1ae4d9518b0a73a0755114bedd 100644 (file)
@@ -56,7 +56,6 @@ struct kvm_pic {
        struct kvm_io_device dev_master;
        struct kvm_io_device dev_slave;
        struct kvm_io_device dev_elcr;
-       void (*ack_notifier)(void *opaque, int irq);
        unsigned long irq_states[PIC_NUM_PINS];
 };
 
index 759952dd122284b183c3735bad40717495920304..f206fc35deff6ef4d0a236eddd4476635d4e5b1f 100644 (file)
@@ -707,7 +707,7 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
 static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
 {
        int highest_irr;
-       if (apic->vcpu->arch.apicv_active)
+       if (kvm_x86_ops.sync_pir_to_irr)
                highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
        else
                highest_irr = apic_find_highest_irr(apic);
index 3be9beea838d134a077a67e05f12a1c9aaa7c2ef..fcdf3f8bb59a6c2fb71ec5b5c2bbc0a89120534f 100644 (file)
@@ -1582,7 +1582,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
                flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
 
        if (is_tdp_mmu_enabled(kvm))
-               flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
+               flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
 
        return flush;
 }
@@ -1936,7 +1936,11 @@ static void mmu_audit_disable(void) { }
 
 static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
-       return sp->role.invalid ||
+       if (sp->role.invalid)
+               return true;
+
+       /* TDP MMU pages do not use the MMU generation. */
+       return !sp->tdp_mmu_page &&
               unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
 }
 
@@ -2173,10 +2177,10 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato
        iterator->shadow_addr = root;
        iterator->level = vcpu->arch.mmu->shadow_root_level;
 
-       if (iterator->level == PT64_ROOT_4LEVEL &&
+       if (iterator->level >= PT64_ROOT_4LEVEL &&
            vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
            !vcpu->arch.mmu->direct_map)
-               --iterator->level;
+               iterator->level = PT32E_ROOT_LEVEL;
 
        if (iterator->level == PT32E_ROOT_LEVEL) {
                /*
@@ -3976,6 +3980,34 @@ out_retry:
        return true;
 }
 
+/*
+ * Returns true if the page fault is stale and needs to be retried, i.e. if the
+ * root was invalidated by a memslot update or a relevant mmu_notifier fired.
+ */
+static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
+                               struct kvm_page_fault *fault, int mmu_seq)
+{
+       struct kvm_mmu_page *sp = to_shadow_page(vcpu->arch.mmu->root_hpa);
+
+       /* Special roots, e.g. pae_root, are not backed by shadow pages. */
+       if (sp && is_obsolete_sp(vcpu->kvm, sp))
+               return true;
+
+       /*
+        * Roots without an associated shadow page are considered invalid if
+        * there is a pending request to free obsolete roots.  The request is
+        * only a hint that the current root _may_ be obsolete and needs to be
+        * reloaded, e.g. if the guest frees a PGD that KVM is tracking as a
+        * previous root, then __kvm_mmu_prepare_zap_page() signals all vCPUs
+        * to reload even if no vCPU is actively using the root.
+        */
+       if (!sp && kvm_test_request(KVM_REQ_MMU_RELOAD, vcpu))
+               return true;
+
+       return fault->slot &&
+              mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva);
+}
+
 static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
        bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
@@ -4013,8 +4045,9 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
        else
                write_lock(&vcpu->kvm->mmu_lock);
 
-       if (fault->slot && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva))
+       if (is_page_fault_stale(vcpu, fault, mmu_seq))
                goto out_unlock;
+
        r = make_mmu_pages_available(vcpu);
        if (r)
                goto out_unlock;
@@ -4855,7 +4888,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
        struct kvm_mmu *context = &vcpu->arch.guest_mmu;
        struct kvm_mmu_role_regs regs = {
                .cr0 = cr0,
-               .cr4 = cr4,
+               .cr4 = cr4 & ~X86_CR4_PKE,
                .efer = efer,
        };
        union kvm_mmu_role new_role;
@@ -4919,7 +4952,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
        context->direct_map = false;
 
        update_permission_bitmask(context, true);
-       update_pkru_bitmask(context);
+       context->pkru_mask = 0;
        reset_rsvds_bits_mask_ept(vcpu, context, execonly);
        reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
 }
@@ -5025,6 +5058,14 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
        /*
         * Invalidate all MMU roles to force them to reinitialize as CPUID
         * information is factored into reserved bit calculations.
+        *
+        * Correctly handling multiple vCPU models (with respect to paging and
+        * physical address properties) in a single VM would require tracking
+        * all relevant CPUID information in kvm_mmu_page_role. That is very
+        * undesirable as it would increase the memory requirements for
+        * gfn_track (see struct kvm_mmu_page_role comments).  For now that
+        * problem is swept under the rug; KVM's CPUID API is horrific and
+        * it's all but impossible to solve it without introducing a new API.
         */
        vcpu->arch.root_mmu.mmu_role.ext.valid = 0;
        vcpu->arch.guest_mmu.mmu_role.ext.valid = 0;
@@ -5032,24 +5073,10 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
        kvm_mmu_reset_context(vcpu);
 
        /*
-        * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
-        * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
-        * tracked in kvm_mmu_page_role.  As a result, KVM may miss guest page
-        * faults due to reusing SPs/SPTEs.  Alert userspace, but otherwise
-        * sweep the problem under the rug.
-        *
-        * KVM's horrific CPUID ABI makes the problem all but impossible to
-        * solve, as correctly handling multiple vCPU models (with respect to
-        * paging and physical address properties) in a single VM would require
-        * tracking all relevant CPUID information in kvm_mmu_page_role.  That
-        * is very undesirable as it would double the memory requirements for
-        * gfn_track (see struct kvm_mmu_page_role comments), and in practice
-        * no sane VMM mucks with the core vCPU model on the fly.
+        * Changing guest CPUID after KVM_RUN is forbidden, see the comment in
+        * kvm_arch_vcpu_ioctl().
         */
-       if (vcpu->arch.last_vmentry_cpu != -1) {
-               pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} after KVM_RUN may cause guest instability\n");
-               pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} will fail after KVM_RUN starting with Linux 5.16\n");
-       }
+       KVM_BUG_ON(vcpu->arch.last_vmentry_cpu != -1, vcpu->kvm);
 }
 
 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
@@ -5369,7 +5396,7 @@ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
-       kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE);
+       kvm_mmu_invalidate_gva(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE);
        ++vcpu->stat.invlpg;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
@@ -5854,8 +5881,6 @@ restart:
 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
                                   const struct kvm_memory_slot *slot)
 {
-       bool flush = false;
-
        if (kvm_memslots_have_rmaps(kvm)) {
                write_lock(&kvm->mmu_lock);
                /*
@@ -5863,17 +5888,14 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
                 * logging at a 4k granularity and never creates collapsible
                 * 2m SPTEs during dirty logging.
                 */
-               flush = slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
-               if (flush)
+               if (slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true))
                        kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
                write_unlock(&kvm->mmu_lock);
        }
 
        if (is_tdp_mmu_enabled(kvm)) {
                read_lock(&kvm->mmu_lock);
-               flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
-               if (flush)
-                       kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
+               kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
                read_unlock(&kvm->mmu_lock);
        }
 }
@@ -6182,23 +6204,46 @@ void kvm_mmu_module_exit(void)
        mmu_audit_disable();
 }
 
+/*
+ * Calculate the effective recovery period, accounting for '0' meaning "let KVM
+ * select a halving time of 1 hour".  Returns true if recovery is enabled.
+ */
+static bool calc_nx_huge_pages_recovery_period(uint *period)
+{
+       /*
+        * Use READ_ONCE to get the params, this may be called outside of the
+        * param setters, e.g. by the kthread to compute its next timeout.
+        */
+       bool enabled = READ_ONCE(nx_huge_pages);
+       uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
+
+       if (!enabled || !ratio)
+               return false;
+
+       *period = READ_ONCE(nx_huge_pages_recovery_period_ms);
+       if (!*period) {
+               /* Make sure the period is not less than one second.  */
+               ratio = min(ratio, 3600u);
+               *period = 60 * 60 * 1000 / ratio;
+       }
+       return true;
+}
+
 static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp)
 {
        bool was_recovery_enabled, is_recovery_enabled;
        uint old_period, new_period;
        int err;
 
-       was_recovery_enabled = nx_huge_pages_recovery_ratio;
-       old_period = nx_huge_pages_recovery_period_ms;
+       was_recovery_enabled = calc_nx_huge_pages_recovery_period(&old_period);
 
        err = param_set_uint(val, kp);
        if (err)
                return err;
 
-       is_recovery_enabled = nx_huge_pages_recovery_ratio;
-       new_period = nx_huge_pages_recovery_period_ms;
+       is_recovery_enabled = calc_nx_huge_pages_recovery_period(&new_period);
 
-       if (READ_ONCE(nx_huge_pages) && is_recovery_enabled &&
+       if (is_recovery_enabled &&
            (!was_recovery_enabled || old_period > new_period)) {
                struct kvm *kvm;
 
@@ -6262,18 +6307,13 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
 
 static long get_nx_lpage_recovery_timeout(u64 start_time)
 {
-       uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
-       uint period = READ_ONCE(nx_huge_pages_recovery_period_ms);
+       bool enabled;
+       uint period;
 
-       if (!period && ratio) {
-               /* Make sure the period is not less than one second.  */
-               ratio = min(ratio, 3600u);
-               period = 60 * 60 * 1000 / ratio;
-       }
+       enabled = calc_nx_huge_pages_recovery_period(&period);
 
-       return READ_ONCE(nx_huge_pages) && ratio
-               ? start_time + msecs_to_jiffies(period) - get_jiffies_64()
-               : MAX_SCHEDULE_TIMEOUT;
+       return enabled ? start_time + msecs_to_jiffies(period) - get_jiffies_64()
+                      : MAX_SCHEDULE_TIMEOUT;
 }
 
 static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
index f87d36898c44e33db8dafabf0f603ae9e7ac3458..708a5d297fe1e370c9912da506cc316b84a9bb83 100644 (file)
@@ -911,7 +911,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 
        r = RET_PF_RETRY;
        write_lock(&vcpu->kvm->mmu_lock);
-       if (fault->slot && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva))
+
+       if (is_page_fault_stale(vcpu, fault, mmu_seq))
                goto out_unlock;
 
        kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
index b3ed302c1a359fbe5b46b9aad8f6c0bcc5e7944d..caa96c270b9541bd5769a976bbe9f41227c37b0e 100644 (file)
@@ -26,6 +26,7 @@ static gfn_t round_gfn_for_level(gfn_t gfn, int level)
  */
 void tdp_iter_restart(struct tdp_iter *iter)
 {
+       iter->yielded = false;
        iter->yielded_gfn = iter->next_last_level_gfn;
        iter->level = iter->root_level;
 
@@ -160,6 +161,11 @@ static bool try_step_up(struct tdp_iter *iter)
  */
 void tdp_iter_next(struct tdp_iter *iter)
 {
+       if (iter->yielded) {
+               tdp_iter_restart(iter);
+               return;
+       }
+
        if (try_step_down(iter))
                return;
 
index b1748b988d3aef556087b26ffad29accbd2756ff..e19cabbcb65c846e6065406c336848fb38a959fc 100644 (file)
@@ -45,6 +45,12 @@ struct tdp_iter {
         * iterator walks off the end of the paging structure.
         */
        bool valid;
+       /*
+        * True if KVM dropped mmu_lock and yielded in the middle of a walk, in
+        * which case tdp_iter_next() needs to restart the walk at the root
+        * level instead of advancing to the next entry.
+        */
+       bool yielded;
 };
 
 /*
index a54c3491af42c9fba8a894619ee7bd5c7f3f4628..1beb4ca9056092cca031b9ebd65c12112852bdd8 100644 (file)
@@ -317,9 +317,6 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
        struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
        int level = sp->role.level;
        gfn_t base_gfn = sp->gfn;
-       u64 old_child_spte;
-       u64 *sptep;
-       gfn_t gfn;
        int i;
 
        trace_kvm_mmu_prepare_zap_page(sp);
@@ -327,8 +324,9 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
        tdp_mmu_unlink_page(kvm, sp, shared);
 
        for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
-               sptep = rcu_dereference(pt) + i;
-               gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
+               u64 *sptep = rcu_dereference(pt) + i;
+               gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
+               u64 old_child_spte;
 
                if (shared) {
                        /*
@@ -374,7 +372,7 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
                                    shared);
        }
 
-       kvm_flush_remote_tlbs_with_address(kvm, gfn,
+       kvm_flush_remote_tlbs_with_address(kvm, base_gfn,
                                           KVM_PAGES_PER_HPAGE(level + 1));
 
        call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
@@ -504,6 +502,8 @@ static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
                                           struct tdp_iter *iter,
                                           u64 new_spte)
 {
+       WARN_ON_ONCE(iter->yielded);
+
        lockdep_assert_held_read(&kvm->mmu_lock);
 
        /*
@@ -577,6 +577,8 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
                                      u64 new_spte, bool record_acc_track,
                                      bool record_dirty_log)
 {
+       WARN_ON_ONCE(iter->yielded);
+
        lockdep_assert_held_write(&kvm->mmu_lock);
 
        /*
@@ -642,18 +644,19 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
  * If this function should yield and flush is set, it will perform a remote
  * TLB flush before yielding.
  *
- * If this function yields, it will also reset the tdp_iter's walk over the
- * paging structure and the calling function should skip to the next
- * iteration to allow the iterator to continue its traversal from the
- * paging structure root.
+ * If this function yields, iter->yielded is set and the caller must skip to
+ * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
+ * over the paging structures to allow the iterator to continue its traversal
+ * from the paging structure root.
  *
- * Return true if this function yielded and the iterator's traversal was reset.
- * Return false if a yield was not needed.
+ * Returns true if this function yielded.
  */
-static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
-                                            struct tdp_iter *iter, bool flush,
-                                            bool shared)
+static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
+                                                         struct tdp_iter *iter,
+                                                         bool flush, bool shared)
 {
+       WARN_ON(iter->yielded);
+
        /* Ensure forward progress has been made before yielding. */
        if (iter->next_last_level_gfn == iter->yielded_gfn)
                return false;
@@ -673,12 +676,10 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
 
                WARN_ON(iter->gfn > iter->next_last_level_gfn);
 
-               tdp_iter_restart(iter);
-
-               return true;
+               iter->yielded = true;
        }
 
-       return false;
+       return iter->yielded;
 }
 
 /*
@@ -1033,9 +1034,9 @@ bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
 {
        struct kvm_mmu_page *root;
 
-       for_each_tdp_mmu_root(kvm, root, range->slot->as_id)
-               flush |= zap_gfn_range(kvm, root, range->start, range->end,
-                                      range->may_block, flush, false);
+       for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false)
+               flush = zap_gfn_range(kvm, root, range->start, range->end,
+                                     range->may_block, flush, false);
 
        return flush;
 }
@@ -1364,10 +1365,9 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
  * Clear leaf entries which could be replaced by large mappings, for
  * GFNs within the slot.
  */
-static bool zap_collapsible_spte_range(struct kvm *kvm,
+static void zap_collapsible_spte_range(struct kvm *kvm,
                                       struct kvm_mmu_page *root,
-                                      const struct kvm_memory_slot *slot,
-                                      bool flush)
+                                      const struct kvm_memory_slot *slot)
 {
        gfn_t start = slot->base_gfn;
        gfn_t end = start + slot->npages;
@@ -1378,10 +1378,8 @@ static bool zap_collapsible_spte_range(struct kvm *kvm,
 
        tdp_root_for_each_pte(iter, root, start, end) {
 retry:
-               if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
-                       flush = false;
+               if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
                        continue;
-               }
 
                if (!is_shadow_present_pte(iter.old_spte) ||
                    !is_last_spte(iter.old_spte, iter.level))
@@ -1393,6 +1391,7 @@ retry:
                                                            pfn, PG_LEVEL_NUM))
                        continue;
 
+               /* Note, a successful atomic zap also does a remote TLB flush. */
                if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
                        /*
                         * The iter must explicitly re-read the SPTE because
@@ -1401,30 +1400,24 @@ retry:
                        iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
                        goto retry;
                }
-               flush = true;
        }
 
        rcu_read_unlock();
-
-       return flush;
 }
 
 /*
  * Clear non-leaf entries (and free associated page tables) which could
  * be replaced by large mappings, for GFNs within the slot.
  */
-bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
-                                      const struct kvm_memory_slot *slot,
-                                      bool flush)
+void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
+                                      const struct kvm_memory_slot *slot)
 {
        struct kvm_mmu_page *root;
 
        lockdep_assert_held_read(&kvm->mmu_lock);
 
        for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
-               flush = zap_collapsible_spte_range(kvm, root, slot, flush);
-
-       return flush;
+               zap_collapsible_spte_range(kvm, root, slot);
 }
 
 /*
index 476b133544dd94e8465258c91cfdba0f2ddb8029..3899004a5d91e70b8821656cc0715519dc770d36 100644 (file)
@@ -64,9 +64,8 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
                                       struct kvm_memory_slot *slot,
                                       gfn_t gfn, unsigned long mask,
                                       bool wrprot);
-bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
-                                      const struct kvm_memory_slot *slot,
-                                      bool flush);
+void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
+                                      const struct kvm_memory_slot *slot);
 
 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
                                   struct kvm_memory_slot *slot, gfn_t gfn,
index affc0ea98d302286303188c91bfdb73bb2cef7e2..8f9af7b7dbbe479fbf914d7c27afdb2d77eb7513 100644 (file)
@@ -900,6 +900,7 @@ out:
 bool svm_check_apicv_inhibit_reasons(ulong bit)
 {
        ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
+                         BIT(APICV_INHIBIT_REASON_ABSENT) |
                          BIT(APICV_INHIBIT_REASON_HYPERV) |
                          BIT(APICV_INHIBIT_REASON_NESTED) |
                          BIT(APICV_INHIBIT_REASON_IRQWIN) |
@@ -989,16 +990,18 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
 static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
+       int cpu = get_cpu();
 
+       WARN_ON(cpu != vcpu->cpu);
        svm->avic_is_running = is_run;
 
-       if (!kvm_vcpu_apicv_active(vcpu))
-               return;
-
-       if (is_run)
-               avic_vcpu_load(vcpu, vcpu->cpu);
-       else
-               avic_vcpu_put(vcpu);
+       if (kvm_vcpu_apicv_active(vcpu)) {
+               if (is_run)
+                       avic_vcpu_load(vcpu, cpu);
+               else
+                       avic_vcpu_put(vcpu);
+       }
+       put_cpu();
 }
 
 void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
index 871c426ec389a98632307b16d661c2abf5b856b0..b4095dfeeee62fa1702c3aa48d2af059d03e4d28 100644 (file)
@@ -281,7 +281,7 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
                pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;
 
        pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
-       pmu->reserved_bits = 0xffffffff00200000ull;
+       pmu->reserved_bits = 0xfffffff000280000ull;
        pmu->version = 1;
        /* not applicable to AMD; but clean them to prevent any fall out */
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
index 21ac0a5de4e0c8ba019fe943c0cc11e9533edfcc..7656a2c5662a68425716469b4f94cb164368cb01 100644 (file)
@@ -1543,28 +1543,50 @@ static bool is_cmd_allowed_from_mirror(u32 cmd_id)
        return false;
 }
 
-static int sev_lock_for_migration(struct kvm *kvm)
+static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
 {
-       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
+       struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;
+       int r = -EBUSY;
+
+       if (dst_kvm == src_kvm)
+               return -EINVAL;
 
        /*
-        * Bail if this VM is already involved in a migration to avoid deadlock
-        * between two VMs trying to migrate to/from each other.
+        * Bail if these VMs are already involved in a migration to avoid
+        * deadlock between two VMs trying to migrate to/from each other.
         */
-       if (atomic_cmpxchg_acquire(&sev->migration_in_progress, 0, 1))
+       if (atomic_cmpxchg_acquire(&dst_sev->migration_in_progress, 0, 1))
                return -EBUSY;
 
-       mutex_lock(&kvm->lock);
+       if (atomic_cmpxchg_acquire(&src_sev->migration_in_progress, 0, 1))
+               goto release_dst;
 
+       r = -EINTR;
+       if (mutex_lock_killable(&dst_kvm->lock))
+               goto release_src;
+       if (mutex_lock_killable(&src_kvm->lock))
+               goto unlock_dst;
        return 0;
+
+unlock_dst:
+       mutex_unlock(&dst_kvm->lock);
+release_src:
+       atomic_set_release(&src_sev->migration_in_progress, 0);
+release_dst:
+       atomic_set_release(&dst_sev->migration_in_progress, 0);
+       return r;
 }
 
-static void sev_unlock_after_migration(struct kvm *kvm)
+static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
 {
-       struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+       struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
+       struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;
 
-       mutex_unlock(&kvm->lock);
-       atomic_set_release(&sev->migration_in_progress, 0);
+       mutex_unlock(&dst_kvm->lock);
+       mutex_unlock(&src_kvm->lock);
+       atomic_set_release(&dst_sev->migration_in_progress, 0);
+       atomic_set_release(&src_sev->migration_in_progress, 0);
 }
 
 
@@ -1607,14 +1629,15 @@ static void sev_migrate_from(struct kvm_sev_info *dst,
        dst->asid = src->asid;
        dst->handle = src->handle;
        dst->pages_locked = src->pages_locked;
+       dst->enc_context_owner = src->enc_context_owner;
 
        src->asid = 0;
        src->active = false;
        src->handle = 0;
        src->pages_locked = 0;
+       src->enc_context_owner = NULL;
 
-       INIT_LIST_HEAD(&dst->regions_list);
-       list_replace_init(&src->regions_list, &dst->regions_list);
+       list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list);
 }
 
 static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
@@ -1666,15 +1689,6 @@ int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
        bool charged = false;
        int ret;
 
-       ret = sev_lock_for_migration(kvm);
-       if (ret)
-               return ret;
-
-       if (sev_guest(kvm)) {
-               ret = -EINVAL;
-               goto out_unlock;
-       }
-
        source_kvm_file = fget(source_fd);
        if (!file_is_kvm(source_kvm_file)) {
                ret = -EBADF;
@@ -1682,16 +1696,26 @@ int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
        }
 
        source_kvm = source_kvm_file->private_data;
-       ret = sev_lock_for_migration(source_kvm);
+       ret = sev_lock_two_vms(kvm, source_kvm);
        if (ret)
                goto out_fput;
 
-       if (!sev_guest(source_kvm)) {
+       if (sev_guest(kvm) || !sev_guest(source_kvm)) {
                ret = -EINVAL;
-               goto out_source;
+               goto out_unlock;
        }
 
        src_sev = &to_kvm_svm(source_kvm)->sev_info;
+
+       /*
+        * VMs mirroring src's encryption context rely on it to keep the
+        * ASID allocated, but below we are clearing src_sev->asid.
+        */
+       if (src_sev->num_mirrored_vms) {
+               ret = -EBUSY;
+               goto out_unlock;
+       }
+
        dst_sev->misc_cg = get_current_misc_cg();
        cg_cleanup_sev = dst_sev;
        if (dst_sev->misc_cg != src_sev->misc_cg) {
@@ -1728,13 +1752,11 @@ out_dst_cgroup:
                sev_misc_cg_uncharge(cg_cleanup_sev);
        put_misc_cg(cg_cleanup_sev->misc_cg);
        cg_cleanup_sev->misc_cg = NULL;
-out_source:
-       sev_unlock_after_migration(source_kvm);
+out_unlock:
+       sev_unlock_two_vms(kvm, source_kvm);
 out_fput:
        if (source_kvm_file)
                fput(source_kvm_file);
-out_unlock:
-       sev_unlock_after_migration(kvm);
        return ret;
 }
 
@@ -1953,76 +1975,60 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
 {
        struct file *source_kvm_file;
        struct kvm *source_kvm;
-       struct kvm_sev_info source_sev, *mirror_sev;
+       struct kvm_sev_info *source_sev, *mirror_sev;
        int ret;
 
        source_kvm_file = fget(source_fd);
        if (!file_is_kvm(source_kvm_file)) {
                ret = -EBADF;
-               goto e_source_put;
+               goto e_source_fput;
        }
 
        source_kvm = source_kvm_file->private_data;
-       mutex_lock(&source_kvm->lock);
-
-       if (!sev_guest(source_kvm)) {
-               ret = -EINVAL;
-               goto e_source_unlock;
-       }
+       ret = sev_lock_two_vms(kvm, source_kvm);
+       if (ret)
+               goto e_source_fput;
 
-       /* Mirrors of mirrors should work, but let's not get silly */
-       if (is_mirroring_enc_context(source_kvm) || source_kvm == kvm) {
+       /*
+        * Mirrors of mirrors should work, but let's not get silly.  Also
+        * disallow out-of-band SEV/SEV-ES init if the target is already an
+        * SEV guest, or if vCPUs have been created.  KVM relies on vCPUs being
+        * created after SEV/SEV-ES initialization, e.g. to init intercepts.
+        */
+       if (sev_guest(kvm) || !sev_guest(source_kvm) ||
+           is_mirroring_enc_context(source_kvm) || kvm->created_vcpus) {
                ret = -EINVAL;
-               goto e_source_unlock;
+               goto e_unlock;
        }
 
-       memcpy(&source_sev, &to_kvm_svm(source_kvm)->sev_info,
-              sizeof(source_sev));
-
        /*
         * The mirror kvm holds an enc_context_owner ref so its asid can't
         * disappear until we're done with it
         */
+       source_sev = &to_kvm_svm(source_kvm)->sev_info;
        kvm_get_kvm(source_kvm);
-
-       fput(source_kvm_file);
-       mutex_unlock(&source_kvm->lock);
-       mutex_lock(&kvm->lock);
-
-       /*
-        * Disallow out-of-band SEV/SEV-ES init if the target is already an
-        * SEV guest, or if vCPUs have been created.  KVM relies on vCPUs being
-        * created after SEV/SEV-ES initialization, e.g. to init intercepts.
-        */
-       if (sev_guest(kvm) || kvm->created_vcpus) {
-               ret = -EINVAL;
-               goto e_mirror_unlock;
-       }
+       source_sev->num_mirrored_vms++;
 
        /* Set enc_context_owner and copy its encryption context over */
        mirror_sev = &to_kvm_svm(kvm)->sev_info;
        mirror_sev->enc_context_owner = source_kvm;
        mirror_sev->active = true;
-       mirror_sev->asid = source_sev.asid;
-       mirror_sev->fd = source_sev.fd;
-       mirror_sev->es_active = source_sev.es_active;
-       mirror_sev->handle = source_sev.handle;
+       mirror_sev->asid = source_sev->asid;
+       mirror_sev->fd = source_sev->fd;
+       mirror_sev->es_active = source_sev->es_active;
+       mirror_sev->handle = source_sev->handle;
+       INIT_LIST_HEAD(&mirror_sev->regions_list);
+       ret = 0;
+
        /*
         * Do not copy ap_jump_table. Since the mirror does not share the same
         * KVM contexts as the original, and they may have different
         * memory-views.
         */
 
-       mutex_unlock(&kvm->lock);
-       return 0;
-
-e_mirror_unlock:
-       mutex_unlock(&kvm->lock);
-       kvm_put_kvm(source_kvm);
-       return ret;
-e_source_unlock:
-       mutex_unlock(&source_kvm->lock);
-e_source_put:
+e_unlock:
+       sev_unlock_two_vms(kvm, source_kvm);
+e_source_fput:
        if (source_kvm_file)
                fput(source_kvm_file);
        return ret;
@@ -2034,17 +2040,24 @@ void sev_vm_destroy(struct kvm *kvm)
        struct list_head *head = &sev->regions_list;
        struct list_head *pos, *q;
 
+       WARN_ON(sev->num_mirrored_vms);
+
        if (!sev_guest(kvm))
                return;
 
        /* If this is a mirror_kvm release the enc_context_owner and skip sev cleanup */
        if (is_mirroring_enc_context(kvm)) {
-               kvm_put_kvm(sev->enc_context_owner);
+               struct kvm *owner_kvm = sev->enc_context_owner;
+               struct kvm_sev_info *owner_sev = &to_kvm_svm(owner_kvm)->sev_info;
+
+               mutex_lock(&owner_kvm->lock);
+               if (!WARN_ON(!owner_sev->num_mirrored_vms))
+                       owner_sev->num_mirrored_vms--;
+               mutex_unlock(&owner_kvm->lock);
+               kvm_put_kvm(owner_kvm);
                return;
        }
 
-       mutex_lock(&kvm->lock);
-
        /*
         * Ensure that all guest tagged cache entries are flushed before
         * releasing the pages back to the system for use. CLFLUSH will
@@ -2064,8 +2077,6 @@ void sev_vm_destroy(struct kvm *kvm)
                }
        }
 
-       mutex_unlock(&kvm->lock);
-
        sev_unbind_asid(kvm, sev->handle);
        sev_asid_free(sev);
 }
@@ -2249,7 +2260,7 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
        __free_page(virt_to_page(svm->sev_es.vmsa));
 
        if (svm->sev_es.ghcb_sa_free)
-               kfree(svm->sev_es.ghcb_sa);
+               kvfree(svm->sev_es.ghcb_sa);
 }
 
 static void dump_ghcb(struct vcpu_svm *svm)
@@ -2341,24 +2352,29 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
        memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
 }
 
-static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
+static bool sev_es_validate_vmgexit(struct vcpu_svm *svm)
 {
        struct kvm_vcpu *vcpu;
        struct ghcb *ghcb;
-       u64 exit_code = 0;
+       u64 exit_code;
+       u64 reason;
 
        ghcb = svm->sev_es.ghcb;
 
-       /* Only GHCB Usage code 0 is supported */
-       if (ghcb->ghcb_usage)
-               goto vmgexit_err;
-
        /*
-        * Retrieve the exit code now even though is may not be marked valid
+        * Retrieve the exit code now even though it may not be marked valid
         * as it could help with debugging.
         */
        exit_code = ghcb_get_sw_exit_code(ghcb);
 
+       /* Only GHCB Usage code 0 is supported */
+       if (ghcb->ghcb_usage) {
+               reason = GHCB_ERR_INVALID_USAGE;
+               goto vmgexit_err;
+       }
+
+       reason = GHCB_ERR_MISSING_INPUT;
+
        if (!ghcb_sw_exit_code_is_valid(ghcb) ||
            !ghcb_sw_exit_info_1_is_valid(ghcb) ||
            !ghcb_sw_exit_info_2_is_valid(ghcb))
@@ -2437,30 +2453,34 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
        case SVM_VMGEXIT_UNSUPPORTED_EVENT:
                break;
        default:
+               reason = GHCB_ERR_INVALID_EVENT;
                goto vmgexit_err;
        }
 
-       return 0;
+       return true;
 
 vmgexit_err:
        vcpu = &svm->vcpu;
 
-       if (ghcb->ghcb_usage) {
+       if (reason == GHCB_ERR_INVALID_USAGE) {
                vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
                            ghcb->ghcb_usage);
+       } else if (reason == GHCB_ERR_INVALID_EVENT) {
+               vcpu_unimpl(vcpu, "vmgexit: exit code %#llx is not valid\n",
+                           exit_code);
        } else {
-               vcpu_unimpl(vcpu, "vmgexit: exit reason %#llx is not valid\n",
+               vcpu_unimpl(vcpu, "vmgexit: exit code %#llx input is not valid\n",
                            exit_code);
                dump_ghcb(svm);
        }
 
-       vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-       vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
-       vcpu->run->internal.ndata = 2;
-       vcpu->run->internal.data[0] = exit_code;
-       vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
+       /* Clear the valid entries fields */
+       memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
+
+       ghcb_set_sw_exit_info_1(ghcb, 2);
+       ghcb_set_sw_exit_info_2(ghcb, reason);
 
-       return -EINVAL;
+       return false;
 }
 
 void sev_es_unmap_ghcb(struct vcpu_svm *svm)
@@ -2482,7 +2502,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm)
                        svm->sev_es.ghcb_sa_sync = false;
                }
 
-               kfree(svm->sev_es.ghcb_sa);
+               kvfree(svm->sev_es.ghcb_sa);
                svm->sev_es.ghcb_sa = NULL;
                svm->sev_es.ghcb_sa_free = false;
        }
@@ -2530,14 +2550,14 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
        scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
        if (!scratch_gpa_beg) {
                pr_err("vmgexit: scratch gpa not provided\n");
-               return false;
+               goto e_scratch;
        }
 
        scratch_gpa_end = scratch_gpa_beg + len;
        if (scratch_gpa_end < scratch_gpa_beg) {
                pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
                       len, scratch_gpa_beg);
-               return false;
+               goto e_scratch;
        }
 
        if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {
@@ -2555,7 +2575,7 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
                    scratch_gpa_end > ghcb_scratch_end) {
                        pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
                               scratch_gpa_beg, scratch_gpa_end);
-                       return false;
+                       goto e_scratch;
                }
 
                scratch_va = (void *)svm->sev_es.ghcb;
@@ -2568,18 +2588,18 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
                if (len > GHCB_SCRATCH_AREA_LIMIT) {
                        pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",
                               len, GHCB_SCRATCH_AREA_LIMIT);
-                       return false;
+                       goto e_scratch;
                }
-               scratch_va = kzalloc(len, GFP_KERNEL_ACCOUNT);
+               scratch_va = kvzalloc(len, GFP_KERNEL_ACCOUNT);
                if (!scratch_va)
-                       return false;
+                       goto e_scratch;
 
                if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
                        /* Unable to copy scratch area from guest */
                        pr_err("vmgexit: kvm_read_guest for scratch area failed\n");
 
-                       kfree(scratch_va);
-                       return false;
+                       kvfree(scratch_va);
+                       goto e_scratch;
                }
 
                /*
@@ -2596,6 +2616,12 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
        svm->sev_es.ghcb_sa_len = len;
 
        return true;
+
+e_scratch:
+       ghcb_set_sw_exit_info_1(ghcb, 2);
+       ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);
+
+       return false;
 }
 
 static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
@@ -2646,7 +2672,7 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
 
                ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID);
                if (!ret) {
-                       ret = -EINVAL;
+                       /* Error, keep GHCB MSR value as-is */
                        break;
                }
 
@@ -2682,10 +2708,13 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
                                                GHCB_MSR_TERM_REASON_POS);
                pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
                        reason_set, reason_code);
-               fallthrough;
+
+               ret = -EINVAL;
+               break;
        }
        default:
-               ret = -EINVAL;
+               /* Error, keep GHCB MSR value as-is */
+               break;
        }
 
        trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,
@@ -2709,14 +2738,18 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 
        if (!ghcb_gpa) {
                vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");
-               return -EINVAL;
+
+               /* Without a GHCB, just return right back to the guest */
+               return 1;
        }
 
        if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
                /* Unable to map GHCB from guest */
                vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
                            ghcb_gpa);
-               return -EINVAL;
+
+               /* Without a GHCB, just return right back to the guest */
+               return 1;
        }
 
        svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
@@ -2726,15 +2759,14 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 
        exit_code = ghcb_get_sw_exit_code(ghcb);
 
-       ret = sev_es_validate_vmgexit(svm);
-       if (ret)
-               return ret;
+       if (!sev_es_validate_vmgexit(svm))
+               return 1;
 
        sev_es_sync_from_ghcb(svm);
        ghcb_set_sw_exit_info_1(ghcb, 0);
        ghcb_set_sw_exit_info_2(ghcb, 0);
 
-       ret = -EINVAL;
+       ret = 1;
        switch (exit_code) {
        case SVM_VMGEXIT_MMIO_READ:
                if (!setup_vmgexit_scratch(svm, true, control->exit_info_2))
@@ -2775,20 +2807,17 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
                default:
                        pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
                               control->exit_info_1);
-                       ghcb_set_sw_exit_info_1(ghcb, 1);
-                       ghcb_set_sw_exit_info_2(ghcb,
-                                               X86_TRAP_UD |
-                                               SVM_EVTINJ_TYPE_EXEPT |
-                                               SVM_EVTINJ_VALID);
+                       ghcb_set_sw_exit_info_1(ghcb, 2);
+                       ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_INPUT);
                }
 
-               ret = 1;
                break;
        }
        case SVM_VMGEXIT_UNSUPPORTED_EVENT:
                vcpu_unimpl(vcpu,
                            "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
                            control->exit_info_1, control->exit_info_2);
+               ret = -EINVAL;
                break;
        default:
                ret = svm_invoke_exit_handler(vcpu, exit_code);
@@ -2810,7 +2839,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
                return -EINVAL;
 
        if (!setup_vmgexit_scratch(svm, in, bytes))
-               return -EINVAL;
+               return 1;
 
        return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa,
                                    count, in);
index 5630c241d5f6e0bdf1899163cfdfef57c18b5d47..5151efa424acb3b441210014429ff1d60f692c14 100644 (file)
@@ -1585,6 +1585,15 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
        to_svm(vcpu)->vmcb->save.rflags = rflags;
 }
 
+static bool svm_get_if_flag(struct kvm_vcpu *vcpu)
+{
+       struct vmcb *vmcb = to_svm(vcpu)->vmcb;
+
+       return sev_es_guest(vcpu->kvm)
+               ? vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK
+               : kvm_get_rflags(vcpu) & X86_EFLAGS_IF;
+}
+
 static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 {
        switch (reg) {
@@ -3568,14 +3577,7 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
        if (!gif_set(svm))
                return true;
 
-       if (sev_es_guest(vcpu->kvm)) {
-               /*
-                * SEV-ES guests to not expose RFLAGS. Use the VMCB interrupt mask
-                * bit to determine the state of the IF flag.
-                */
-               if (!(vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK))
-                       return true;
-       } else if (is_guest_mode(vcpu)) {
+       if (is_guest_mode(vcpu)) {
                /* As long as interrupts are being delivered...  */
                if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
                    ? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF)
@@ -3586,7 +3588,7 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
                if (nested_exit_on_intr(svm))
                        return false;
        } else {
-               if (!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
+               if (!svm_get_if_flag(vcpu))
                        return true;
        }
 
@@ -4621,6 +4623,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
        .cache_reg = svm_cache_reg,
        .get_rflags = svm_get_rflags,
        .set_rflags = svm_set_rflags,
+       .get_if_flag = svm_get_if_flag,
 
        .tlb_flush_all = svm_flush_tlb,
        .tlb_flush_current = svm_flush_tlb,
@@ -4651,7 +4654,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
        .load_eoi_exitmap = svm_load_eoi_exitmap,
        .hwapic_irr_update = svm_hwapic_irr_update,
        .hwapic_isr_update = svm_hwapic_isr_update,
-       .sync_pir_to_irr = kvm_lapic_find_highest_irr,
        .apicv_post_state_restore = avic_post_state_restore,
 
        .set_tss_addr = svm_set_tss_addr,
index 5faad3dc10e27ac0dc987cd6c7041fd8b2ea4162..1c7306c370fa3c4924a83371c1a1b21da8f6c5b2 100644 (file)
@@ -79,6 +79,7 @@ struct kvm_sev_info {
        struct list_head regions_list;  /* List of registered regions */
        u64 ap_jump_table;      /* SEV-ES AP Jump Table address */
        struct kvm *enc_context_owner; /* Owner of copied encryption context */
+       unsigned long num_mirrored_vms; /* Number of VMs sharing this ASID */
        struct misc_cg *misc_cg; /* For misc cgroup accounting */
        atomic_t migration_in_progress;
 };
index 1e2f669515665b233e591343458c5ccce54ee32c..9c941535f78c050a45a5d134dbb3b020a06a755f 100644 (file)
@@ -1162,29 +1162,26 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
        WARN_ON(!enable_vpid);
 
        /*
-        * If VPID is enabled and used by vmc12, but L2 does not have a unique
-        * TLB tag (ASID), i.e. EPT is disabled and KVM was unable to allocate
-        * a VPID for L2, flush the current context as the effective ASID is
-        * common to both L1 and L2.
-        *
-        * Defer the flush so that it runs after vmcs02.EPTP has been set by
-        * KVM_REQ_LOAD_MMU_PGD (if nested EPT is enabled) and to avoid
-        * redundant flushes further down the nested pipeline.
-        *
-        * If a TLB flush isn't required due to any of the above, and vpid12 is
-        * changing then the new "virtual" VPID (vpid12) will reuse the same
-        * "real" VPID (vpid02), and so needs to be flushed.  There's no direct
-        * mapping between vpid02 and vpid12, vpid02 is per-vCPU and reused for
-        * all nested vCPUs.  Remember, a flush on VM-Enter does not invalidate
-        * guest-physical mappings, so there is no need to sync the nEPT MMU.
+        * VPID is enabled and in use by vmcs12.  If vpid12 is changing, then
+        * emulate a guest TLB flush as KVM does not track vpid12 history nor
+        * is the VPID incorporated into the MMU context.  I.e. KVM must assume
+        * that the new vpid12 has never been used and thus represents a new
+        * guest ASID that cannot have entries in the TLB.
         */
-       if (!nested_has_guest_tlb_tag(vcpu)) {
-               kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
-       } else if (is_vmenter &&
-                  vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
+       if (is_vmenter && vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
                vmx->nested.last_vpid = vmcs12->virtual_processor_id;
-               vpid_sync_context(nested_get_vpid02(vcpu));
+               kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
+               return;
        }
+
+       /*
+        * If VPID is enabled, used by vmcs12, and vpid12 is not changing but
+        * does not have a unique TLB tag (ASID), i.e. EPT is disabled and
+        * KVM was unable to allocate a VPID for L2, flush the current context
+        * as the effective ASID is common to both L1 and L2.
+        */
+       if (!nested_has_guest_tlb_tag(vcpu))
+               kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
 }
 
 static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
@@ -2594,8 +2591,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 
        if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
            WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
-                                    vmcs12->guest_ia32_perf_global_ctrl)))
+                                    vmcs12->guest_ia32_perf_global_ctrl))) {
+               *entry_failure_code = ENTRY_FAIL_DEFAULT;
                return -EINVAL;
+       }
 
        kvm_rsp_write(vcpu, vmcs12->guest_rsp);
        kvm_rip_write(vcpu, vmcs12->guest_rip);
@@ -3344,8 +3343,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
        };
        u32 failed_index;
 
-       if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
-               kvm_vcpu_flush_tlb_current(vcpu);
+       kvm_service_local_tlb_flush_requests(vcpu);
 
        evaluate_pending_interrupts = exec_controls_get(vmx) &
                (CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING);
@@ -4502,9 +4500,8 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
                (void)nested_get_evmcs_page(vcpu);
        }
 
-       /* Service the TLB flush request for L2 before switching to L1. */
-       if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
-               kvm_vcpu_flush_tlb_current(vcpu);
+       /* Service pending TLB flush requests for L2 before switching to L1. */
+       kvm_service_local_tlb_flush_requests(vcpu);
 
        /*
         * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between
@@ -4857,6 +4854,7 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
        if (!vmx->nested.cached_vmcs12)
                goto out_cached_vmcs12;
 
+       vmx->nested.shadow_vmcs12_cache.gpa = INVALID_GPA;
        vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
        if (!vmx->nested.cached_shadow_vmcs12)
                goto out_cached_shadow_vmcs12;
@@ -5289,8 +5287,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
                struct gfn_to_hva_cache *ghc = &vmx->nested.vmcs12_cache;
                struct vmcs_hdr hdr;
 
-               if (ghc->gpa != vmptr &&
-                   kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, vmptr, VMCS12_SIZE)) {
+               if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, vmptr, VMCS12_SIZE)) {
                        /*
                         * Reads from an unbacked page return all 1s,
                         * which means that the 32 bits located at the
index 5f81ef092bd436b1a25ded21fbab536d6743ca24..1c94783b5a54c5520466bb8b3753c89cfa1d5850 100644 (file)
@@ -5,6 +5,7 @@
 #include <asm/cpu.h>
 
 #include "lapic.h"
+#include "irq.h"
 #include "posted_intr.h"
 #include "trace.h"
 #include "vmx.h"
@@ -77,13 +78,18 @@ after_clear_sn:
                pi_set_on(pi_desc);
 }
 
+static bool vmx_can_use_vtd_pi(struct kvm *kvm)
+{
+       return irqchip_in_kernel(kvm) && enable_apicv &&
+               kvm_arch_has_assigned_device(kvm) &&
+               irq_remapping_cap(IRQ_POSTING_CAP);
+}
+
 void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
 {
        struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
 
-       if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
-               !irq_remapping_cap(IRQ_POSTING_CAP)  ||
-               !kvm_vcpu_apicv_active(vcpu))
+       if (!vmx_can_use_vtd_pi(vcpu->kvm))
                return;
 
        /* Set SN when the vCPU is preempted */
@@ -141,9 +147,7 @@ int pi_pre_block(struct kvm_vcpu *vcpu)
        struct pi_desc old, new;
        struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
 
-       if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
-               !irq_remapping_cap(IRQ_POSTING_CAP)  ||
-               !kvm_vcpu_apicv_active(vcpu))
+       if (!vmx_can_use_vtd_pi(vcpu->kvm))
                return 0;
 
        WARN_ON(irqs_disabled());
@@ -270,9 +274,7 @@ int pi_update_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq,
        struct vcpu_data vcpu_info;
        int idx, ret = 0;
 
-       if (!kvm_arch_has_assigned_device(kvm) ||
-           !irq_remapping_cap(IRQ_POSTING_CAP) ||
-           !kvm_vcpu_apicv_active(kvm->vcpus[0]))
+       if (!vmx_can_use_vtd_pi(kvm))
                return 0;
 
        idx = srcu_read_lock(&kvm->irq_srcu);
index ba66c171d951ba06308503570e4d247b40825914..0dbf94eb954fdc3e058777ed2f51551b1de4d25c 100644 (file)
@@ -1363,6 +1363,11 @@ void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
                vmx->emulation_required = vmx_emulation_required(vcpu);
 }
 
+static bool vmx_get_if_flag(struct kvm_vcpu *vcpu)
+{
+       return vmx_get_rflags(vcpu) & X86_EFLAGS_IF;
+}
+
 u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
 {
        u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
@@ -2646,15 +2651,6 @@ int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
                if (!loaded_vmcs->msr_bitmap)
                        goto out_vmcs;
                memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
-
-               if (IS_ENABLED(CONFIG_HYPERV) &&
-                   static_branch_unlikely(&enable_evmcs) &&
-                   (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
-                       struct hv_enlightened_vmcs *evmcs =
-                               (struct hv_enlightened_vmcs *)loaded_vmcs->vmcs;
-
-                       evmcs->hv_enlightenments_control.msr_bitmap = 1;
-               }
        }
 
        memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state));
@@ -2918,6 +2914,13 @@ static void vmx_flush_tlb_all(struct kvm_vcpu *vcpu)
        }
 }
 
+static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu)
+{
+       if (is_guest_mode(vcpu))
+               return nested_get_vpid02(vcpu);
+       return to_vmx(vcpu)->vpid;
+}
+
 static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
 {
        struct kvm_mmu *mmu = vcpu->arch.mmu;
@@ -2930,31 +2933,29 @@ static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
        if (enable_ept)
                ept_sync_context(construct_eptp(vcpu, root_hpa,
                                                mmu->shadow_root_level));
-       else if (!is_guest_mode(vcpu))
-               vpid_sync_context(to_vmx(vcpu)->vpid);
        else
-               vpid_sync_context(nested_get_vpid02(vcpu));
+               vpid_sync_context(vmx_get_current_vpid(vcpu));
 }
 
 static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
 {
        /*
-        * vpid_sync_vcpu_addr() is a nop if vmx->vpid==0, see the comment in
+        * vpid_sync_vcpu_addr() is a nop if vpid==0, see the comment in
         * vmx_flush_tlb_guest() for an explanation of why this is ok.
         */
-       vpid_sync_vcpu_addr(to_vmx(vcpu)->vpid, addr);
+       vpid_sync_vcpu_addr(vmx_get_current_vpid(vcpu), addr);
 }
 
 static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
 {
        /*
-        * vpid_sync_context() is a nop if vmx->vpid==0, e.g. if enable_vpid==0
-        * or a vpid couldn't be allocated for this vCPU.  VM-Enter and VM-Exit
-        * are required to flush GVA->{G,H}PA mappings from the TLB if vpid is
+        * vpid_sync_context() is a nop if vpid==0, e.g. if enable_vpid==0 or a
+        * vpid couldn't be allocated for this vCPU.  VM-Enter and VM-Exit are
+        * required to flush GVA->{G,H}PA mappings from the TLB if vpid is
         * disabled (VM-Enter with vpid enabled and vpid==0 is disallowed),
         * i.e. no explicit INVVPID is necessary.
         */
-       vpid_sync_context(to_vmx(vcpu)->vpid);
+       vpid_sync_context(vmx_get_current_vpid(vcpu));
 }
 
 void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu)
@@ -3963,8 +3964,7 @@ static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
        if (pi_test_and_set_on(&vmx->pi_desc))
                return 0;
 
-       if (vcpu != kvm_get_running_vcpu() &&
-           !kvm_vcpu_trigger_posted_interrupt(vcpu, false))
+       if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
                kvm_vcpu_kick(vcpu);
 
        return 0;
@@ -5881,18 +5881,14 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
                vmx_flush_pml_buffer(vcpu);
 
        /*
-        * We should never reach this point with a pending nested VM-Enter, and
-        * more specifically emulation of L2 due to invalid guest state (see
-        * below) should never happen as that means we incorrectly allowed a
-        * nested VM-Enter with an invalid vmcs12.
+        * KVM should never reach this point with a pending nested VM-Enter.
+        * More specifically, short-circuiting VM-Entry to emulate L2 due to
+        * invalid guest state should never happen as that means KVM knowingly
+        * allowed a nested VM-Enter with an invalid vmcs12.  More below.
         */
        if (KVM_BUG_ON(vmx->nested.nested_run_pending, vcpu->kvm))
                return -EIO;
 
-       /* If guest state is invalid, start emulating */
-       if (vmx->emulation_required)
-               return handle_invalid_guest_state(vcpu);
-
        if (is_guest_mode(vcpu)) {
                /*
                 * PML is never enabled when running L2, bail immediately if a
@@ -5914,10 +5910,30 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
                 */
                nested_mark_vmcs12_pages_dirty(vcpu);
 
+               /*
+                * Synthesize a triple fault if L2 state is invalid.  In normal
+                * operation, nested VM-Enter rejects any attempt to enter L2
+                * with invalid state.  However, those checks are skipped if
+                * state is being stuffed via RSM or KVM_SET_NESTED_STATE.  If
+                * L2 state is invalid, it means either L1 modified SMRAM state
+                * or userspace provided bad state.  Synthesize TRIPLE_FAULT as
+                * doing so is architecturally allowed in the RSM case, and is
+                * the least awful solution for the userspace case without
+                * risking false positives.
+                */
+               if (vmx->emulation_required) {
+                       nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0);
+                       return 1;
+               }
+
                if (nested_vmx_reflect_vmexit(vcpu))
                        return 1;
        }
 
+       /* If guest state is invalid, start emulating.  L2 is handled above. */
+       if (vmx->emulation_required)
+               return handle_invalid_guest_state(vcpu);
+
        if (exit_reason.failed_vmentry) {
                dump_vmcs(vcpu);
                vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
@@ -6262,9 +6278,9 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        int max_irr;
-       bool max_irr_updated;
+       bool got_posted_interrupt;
 
-       if (KVM_BUG_ON(!vcpu->arch.apicv_active, vcpu->kvm))
+       if (KVM_BUG_ON(!enable_apicv, vcpu->kvm))
                return -EIO;
 
        if (pi_test_on(&vmx->pi_desc)) {
@@ -6274,22 +6290,33 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
                 * But on x86 this is just a compiler barrier anyway.
                 */
                smp_mb__after_atomic();
-               max_irr_updated =
+               got_posted_interrupt =
                        kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr);
-
-               /*
-                * If we are running L2 and L1 has a new pending interrupt
-                * which can be injected, this may cause a vmexit or it may
-                * be injected into L2.  Either way, this interrupt will be
-                * processed via KVM_REQ_EVENT, not RVI, because we do not use
-                * virtual interrupt delivery to inject L1 interrupts into L2.
-                */
-               if (is_guest_mode(vcpu) && max_irr_updated)
-                       kvm_make_request(KVM_REQ_EVENT, vcpu);
        } else {
                max_irr = kvm_lapic_find_highest_irr(vcpu);
+               got_posted_interrupt = false;
        }
-       vmx_hwapic_irr_update(vcpu, max_irr);
+
+       /*
+        * Newly recognized interrupts are injected via either virtual interrupt
+        * delivery (RVI) or KVM_REQ_EVENT.  Virtual interrupt delivery is
+        * disabled in two cases:
+        *
+        * 1) If L2 is running and the vCPU has a new pending interrupt.  If L1
+        * wants to exit on interrupts, KVM_REQ_EVENT is needed to synthesize a
+        * VM-Exit to L1.  If L1 doesn't want to exit, the interrupt is injected
+        * into L2, but KVM doesn't use virtual interrupt delivery to inject
+        * interrupts into L2, and so KVM_REQ_EVENT is again needed.
+        *
+        * 2) If APICv is disabled for this vCPU, assigned devices may still
+        * attempt to post interrupts.  The posted interrupt vector will cause
+        * a VM-Exit and the subsequent entry will call sync_pir_to_irr.
+        */
+       if (!is_guest_mode(vcpu) && kvm_vcpu_apicv_active(vcpu))
+               vmx_set_rvi(max_irr);
+       else if (got_posted_interrupt)
+               kvm_make_request(KVM_REQ_EVENT, vcpu);
+
        return max_irr;
 }
 
@@ -6601,9 +6628,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
         * consistency check VM-Exit due to invalid guest state and bail.
         */
        if (unlikely(vmx->emulation_required)) {
-
-               /* We don't emulate invalid state of a nested guest */
-               vmx->fail = is_guest_mode(vcpu);
+               vmx->fail = 0;
 
                vmx->exit_reason.full = EXIT_REASON_INVALID_STATE;
                vmx->exit_reason.failed_vmentry = 1;
@@ -6826,6 +6851,19 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
        if (err < 0)
                goto free_pml;
 
+       /*
+        * Use Hyper-V 'Enlightened MSR Bitmap' feature when KVM runs as a
+        * nested (L1) hypervisor and Hyper-V in L0 supports it. Enable the
+        * feature only for vmcs01, KVM currently isn't equipped to realize any
+        * performance benefits from enabling it for vmcs02.
+        */
+       if (IS_ENABLED(CONFIG_HYPERV) && static_branch_unlikely(&enable_evmcs) &&
+           (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
+               struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
+
+               evmcs->hv_enlightenments_control.msr_bitmap = 1;
+       }
+
        /* The MSR bitmap starts with all ones */
        bitmap_fill(vmx->shadow_msr_intercept.read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
        bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
@@ -7509,6 +7547,7 @@ static void hardware_unsetup(void)
 static bool vmx_check_apicv_inhibit_reasons(ulong bit)
 {
        ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
+                         BIT(APICV_INHIBIT_REASON_ABSENT) |
                          BIT(APICV_INHIBIT_REASON_HYPERV) |
                          BIT(APICV_INHIBIT_REASON_BLOCKIRQ);
 
@@ -7558,6 +7597,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
        .cache_reg = vmx_cache_reg,
        .get_rflags = vmx_get_rflags,
        .set_rflags = vmx_set_rflags,
+       .get_if_flag = vmx_get_if_flag,
 
        .tlb_flush_all = vmx_flush_tlb_all,
        .tlb_flush_current = vmx_flush_tlb_current,
@@ -7761,10 +7801,10 @@ static __init int hardware_setup(void)
                ple_window_shrink = 0;
        }
 
-       if (!cpu_has_vmx_apicv()) {
+       if (!cpu_has_vmx_apicv())
                enable_apicv = 0;
+       if (!enable_apicv)
                vmx_x86_ops.sync_pir_to_irr = NULL;
-       }
 
        if (cpu_has_vmx_tsc_scaling()) {
                kvm_has_tsc_control = true;
index 5a403d92833f51e4f77d6fd67b38fd85efddb698..e50e97ac44084454f98fdcb01b293c21687ca944 100644 (file)
@@ -890,7 +890,8 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
            !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)))
                return 1;
 
-       if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
+       if (!(cr0 & X86_CR0_PG) &&
+           (is_64_bit_mode(vcpu) || kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)))
                return 1;
 
        static_call(kvm_x86_set_cr0)(vcpu, cr0);
@@ -1330,7 +1331,7 @@ static const u32 msrs_to_save_all[] = {
        MSR_IA32_UMWAIT_CONTROL,
 
        MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
-       MSR_ARCH_PERFMON_FIXED_CTR0 + 2, MSR_ARCH_PERFMON_FIXED_CTR0 + 3,
+       MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
        MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
        MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
        MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
@@ -3258,6 +3259,29 @@ static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
        static_call(kvm_x86_tlb_flush_guest)(vcpu);
 }
 
+
+static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
+{
+       ++vcpu->stat.tlb_flush;
+       static_call(kvm_x86_tlb_flush_current)(vcpu);
+}
+
+/*
+ * Service "local" TLB flush requests, which are specific to the current MMU
+ * context.  In addition to the generic event handling in vcpu_enter_guest(),
+ * TLB flushes that are targeted at an MMU context also need to be serviced
+ * prior to nested VM-Enter/VM-Exit.
+ */
+void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu)
+{
+       if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
+               kvm_vcpu_flush_tlb_current(vcpu);
+
+       if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
+               kvm_vcpu_flush_tlb_guest(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_service_local_tlb_flush_requests);
+
 static void record_steal_time(struct kvm_vcpu *vcpu)
 {
        struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
@@ -3389,7 +3413,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
                if (!msr_info->host_initiated)
                        return 1;
-               if (guest_cpuid_has(vcpu, X86_FEATURE_PDCM) && kvm_get_msr_feature(&msr_ent))
+               if (kvm_get_msr_feature(&msr_ent))
                        return 1;
                if (data & ~msr_ent.data)
                        return 1;
@@ -4133,6 +4157,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_SGX_ATTRIBUTE:
 #endif
        case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM:
+       case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM:
        case KVM_CAP_SREGS2:
        case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
        case KVM_CAP_VCPU_ATTRIBUTES:
@@ -4448,8 +4473,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
                                    struct kvm_lapic_state *s)
 {
-       if (vcpu->arch.apicv_active)
-               static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+       static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
 
        return kvm_apic_get_state(vcpu, s);
 }
@@ -5124,6 +5148,17 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                struct kvm_cpuid __user *cpuid_arg = argp;
                struct kvm_cpuid cpuid;
 
+               /*
+                * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
+                * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
+                * tracked in kvm_mmu_page_role.  As a result, KVM may miss guest page
+                * faults due to reusing SPs/SPTEs.  In practice no sane VMM mucks with
+                * the core vCPU model on the fly, so fail.
+                */
+               r = -EINVAL;
+               if (vcpu->arch.last_vmentry_cpu != -1)
+                       goto out;
+
                r = -EFAULT;
                if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
                        goto out;
@@ -5134,6 +5169,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                struct kvm_cpuid2 __user *cpuid_arg = argp;
                struct kvm_cpuid2 cpuid;
 
+               /*
+                * KVM_SET_CPUID{,2} after KVM_RUN is forbidden, see the comment in
+                * KVM_SET_CPUID case above.
+                */
+               r = -EINVAL;
+               if (vcpu->arch.last_vmentry_cpu != -1)
+                       goto out;
+
                r = -EFAULT;
                if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
                        goto out;
@@ -5698,6 +5741,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                smp_wmb();
                kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
                kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
+               kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT);
                r = 0;
 split_irqchip_unlock:
                mutex_unlock(&kvm->lock);
@@ -6078,6 +6122,7 @@ set_identity_unlock:
                /* Write kvm->irq_routing before enabling irqchip_in_kernel. */
                smp_wmb();
                kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
+               kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT);
        create_irqchip_unlock:
                mutex_unlock(&kvm->lock);
                break;
@@ -7077,7 +7122,13 @@ static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
                           unsigned short port, void *val, unsigned int count)
 {
        if (vcpu->arch.pio.count) {
-               /* Complete previous iteration.  */
+               /*
+                * Complete a previous iteration that required userspace I/O.
+                * Note, @count isn't guaranteed to match pio.count as userspace
+                * can modify ECX before rerunning the vCPU.  Ignore any such
+                * shenanigans as KVM doesn't support modifying the rep count,
+                * and the emulator ensures @count doesn't overflow the buffer.
+                */
        } else {
                int r = __emulator_pio_in(vcpu, size, port, count);
                if (!r)
@@ -7086,7 +7137,6 @@ static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
                /* Results already available, fall through.  */
        }
 
-       WARN_ON(count != vcpu->arch.pio.count);
        complete_emulator_pio_in(vcpu, val);
        return 1;
 }
@@ -8776,10 +8826,9 @@ static void kvm_apicv_init(struct kvm *kvm)
 {
        init_rwsem(&kvm->arch.apicv_update_lock);
 
-       if (enable_apicv)
-               clear_bit(APICV_INHIBIT_REASON_DISABLE,
-                         &kvm->arch.apicv_inhibit_reasons);
-       else
+       set_bit(APICV_INHIBIT_REASON_ABSENT,
+               &kvm->arch.apicv_inhibit_reasons);
+       if (!enable_apicv)
                set_bit(APICV_INHIBIT_REASON_DISABLE,
                        &kvm->arch.apicv_inhibit_reasons);
 }
@@ -8952,14 +9001,7 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
 {
        struct kvm_run *kvm_run = vcpu->run;
 
-       /*
-        * if_flag is obsolete and useless, so do not bother
-        * setting it for SEV-ES guests.  Userspace can just
-        * use kvm_run->ready_for_interrupt_injection.
-        */
-       kvm_run->if_flag = !vcpu->arch.guest_state_protected
-               && (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
-
+       kvm_run->if_flag = static_call(kvm_x86_get_if_flag)(vcpu);
        kvm_run->cr8 = kvm_get_cr8(vcpu);
        kvm_run->apic_base = kvm_get_apic_base(vcpu);
 
@@ -9528,8 +9570,7 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
        if (irqchip_split(vcpu->kvm))
                kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
        else {
-               if (vcpu->arch.apicv_active)
-                       static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+               static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
                if (ioapic_in_kernel(vcpu->kvm))
                        kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
        }
@@ -9648,10 +9689,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        /* Flushing all ASIDs flushes the current ASID... */
                        kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
                }
-               if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
-                       kvm_vcpu_flush_tlb_current(vcpu);
-               if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
-                       kvm_vcpu_flush_tlb_guest(vcpu);
+               kvm_service_local_tlb_flush_requests(vcpu);
 
                if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
                        vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
@@ -9802,10 +9840,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
        /*
         * This handles the case where a posted interrupt was
-        * notified with kvm_vcpu_kick.
+        * notified with kvm_vcpu_kick.  Assigned devices can
+        * use the POSTED_INTR_VECTOR even if APICv is disabled,
+        * so do it even if APICv is disabled on this vCPU.
         */
-       if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
-               static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+       if (kvm_lapic_enabled(vcpu))
+               static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
 
        if (kvm_vcpu_exit_request(vcpu)) {
                vcpu->mode = OUTSIDE_GUEST_MODE;
@@ -9849,8 +9889,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
                        break;
 
-               if (vcpu->arch.apicv_active)
-                       static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+               if (kvm_lapic_enabled(vcpu))
+                       static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
 
                if (unlikely(kvm_vcpu_exit_request(vcpu))) {
                        exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
index 997669ae9caa21749d2339a48e761132b04201fd..4abcd8d9836ddc2296748069f2bfcf9c26fe8c9c 100644 (file)
@@ -103,6 +103,7 @@ static inline unsigned int __shrink_ple_window(unsigned int val,
 
 #define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL
 
+void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
 int kvm_check_nested_events(struct kvm_vcpu *vcpu);
 
 static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
@@ -185,12 +186,6 @@ static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
        return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
 }
 
-static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
-{
-       ++vcpu->stat.tlb_flush;
-       static_call(kvm_x86_tlb_flush_current)(vcpu);
-}
-
 static inline int is_pae(struct kvm_vcpu *vcpu)
 {
        return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
index 726700fabca6d4f263b3b45f659a736e4c7013e4..bafe36e69227d81e30d81ef2680ace5b8d8da39b 100644 (file)
@@ -1252,19 +1252,54 @@ st:                     if (is_imm8(insn->off))
                case BPF_LDX | BPF_MEM | BPF_DW:
                case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
                        if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
-                               /* test src_reg, src_reg */
-                               maybe_emit_mod(&prog, src_reg, src_reg, true); /* always 1 byte */
-                               EMIT2(0x85, add_2reg(0xC0, src_reg, src_reg));
-                               /* jne start_of_ldx */
-                               EMIT2(X86_JNE, 0);
+                               /* Though the verifier prevents negative insn->off in BPF_PROBE_MEM,
+                                * add abs(insn->off) to the limit to make sure that negative
+                                * offset won't be an issue.
+                                * insn->off is s16, so it won't affect valid pointers.
+                                */
+                               u64 limit = TASK_SIZE_MAX + PAGE_SIZE + abs(insn->off);
+                               u8 *end_of_jmp1, *end_of_jmp2;
+
+                               /* Conservatively check that src_reg + insn->off is a kernel address:
+                                * 1. src_reg + insn->off >= limit
+                                * 2. src_reg + insn->off doesn't become small positive.
+                                * Cannot do src_reg + insn->off >= limit in one branch,
+                                * since it needs two spare registers, but JIT has only one.
+                                */
+
+                               /* movabsq r11, limit */
+                               EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG));
+                               EMIT((u32)limit, 4);
+                               EMIT(limit >> 32, 4);
+                               /* cmp src_reg, r11 */
+                               maybe_emit_mod(&prog, src_reg, AUX_REG, true);
+                               EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG));
+                               /* if unsigned '<' goto end_of_jmp2 */
+                               EMIT2(X86_JB, 0);
+                               end_of_jmp1 = prog;
+
+                               /* mov r11, src_reg */
+                               emit_mov_reg(&prog, true, AUX_REG, src_reg);
+                               /* add r11, insn->off */
+                               maybe_emit_1mod(&prog, AUX_REG, true);
+                               EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
+                               /* jmp if not carry to start_of_ldx
+                                * Otherwise ERR_PTR(-EINVAL) + 128 will be the user addr
+                                * that has to be rejected.
+                                */
+                               EMIT2(0x73 /* JNC */, 0);
+                               end_of_jmp2 = prog;
+
                                /* xor dst_reg, dst_reg */
                                emit_mov_imm32(&prog, false, dst_reg, 0);
                                /* jmp byte_after_ldx */
                                EMIT2(0xEB, 0);
 
-                               /* populate jmp_offset for JNE above */
-                               temp[4] = prog - temp - 5 /* sizeof(test + jne) */;
+                               /* populate jmp_offset for JB above to jump to xor dst_reg */
+                               end_of_jmp1[-1] = end_of_jmp2 - end_of_jmp1;
+                               /* populate jmp_offset for JNC above to jump to start_of_ldx */
                                start_of_ldx = prog;
+                               end_of_jmp2[-1] = start_of_ldx - end_of_jmp2;
                        }
                        emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
                        if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
@@ -1305,7 +1340,7 @@ st:                       if (is_imm8(insn->off))
                                 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
                                 * of 4 bytes will be ignored and rbx will be zero inited.
                                 */
-                               ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8);
+                               ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8);
                        }
                        break;
 
index b15ebfe40a73ea16660d68c6aa624094bbe8a8ad..b0b848d6933afbcf118415ea6689488e28675054 100644 (file)
@@ -277,7 +277,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
                return;
        }
 
-       new = early_memremap(data.phys_map, data.size);
+       new = early_memremap_prot(data.phys_map, data.size,
+                                 pgprot_val(pgprot_encrypted(FIXMAP_PAGE_NORMAL)));
        if (!new) {
                pr_err("Failed to map new boot services memmap\n");
                return;
index 4a3da7592b99c938eed72dd583475bef4ae131a1..38d24d2ab38b3329e3ec3f9f527b573688c7dbe9 100644 (file)
@@ -72,6 +72,7 @@ static void __init setup_real_mode(void)
 #ifdef CONFIG_X86_64
        u64 *trampoline_pgd;
        u64 efer;
+       int i;
 #endif
 
        base = (unsigned char *)real_mode_header;
@@ -128,8 +129,17 @@ static void __init setup_real_mode(void)
        trampoline_header->flags = 0;
 
        trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
+
+       /* Map the real mode stub as virtual == physical */
        trampoline_pgd[0] = trampoline_pgd_entry.pgd;
-       trampoline_pgd[511] = init_top_pgt[511].pgd;
+
+       /*
+        * Include the entirety of the kernel mapping into the trampoline
+        * PGD.  This way, all mappings present in the normal kernel page
+        * tables are usable while running on trampoline_pgd.
+        */
+       for (i = pgd_index(__PAGE_OFFSET); i < PTRS_PER_PGD; i++)
+               trampoline_pgd[i] = init_top_pgt[i].pgd;
 #endif
 
        sme_sev_setup_real_mode(trampoline_header);
index 220dd96784947624d9d43fb62dd72e4ae0614936..444d824775f6a9ccb10929c55ac980af7f73376e 100644 (file)
@@ -20,6 +20,7 @@
 
 #include <linux/init.h>
 #include <linux/linkage.h>
+#include <../entry/calling.h>
 
 .pushsection .noinstr.text, "ax"
 /*
@@ -192,6 +193,25 @@ SYM_CODE_START(xen_iret)
        jmp hypercall_iret
 SYM_CODE_END(xen_iret)
 
+/*
+ * XEN pv doesn't use trampoline stack, PER_CPU_VAR(cpu_tss_rw + TSS_sp0) is
+ * also the kernel stack.  Reusing swapgs_restore_regs_and_return_to_usermode()
+ * in XEN pv would cause %rsp to move up to the top of the kernel stack and
+ * leave the IRET frame below %rsp, which could be corrupted if an #NMI
+ * interrupt arrives. Also, swapgs_restore_regs_and_return_to_usermode()
+ * pushing the IRET frame at the same address would be pointless.
+ */
+SYM_CODE_START(xenpv_restore_regs_and_return_to_usermode)
+       UNWIND_HINT_REGS
+       POP_REGS
+
+       /* stackleak_erase() can work safely on the kernel stack. */
+       STACKLEAK_ERASE_NOCLOBBER
+
+       addq    $8, %rsp        /* skip regs->orig_ax */
+       jmp xen_iret
+SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode)
+
 /*
  * Xen handles syscall callbacks much like ordinary exceptions, which
  * means we have:
index a5b37cc65b171f3362ce87ab590cac8d2c65bee2..769b64394298995288a3005e9e3638897c395d24 100644 (file)
@@ -2311,7 +2311,14 @@ static void ioc_timer_fn(struct timer_list *timer)
                        hwm = current_hweight_max(iocg);
                        new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
                                                         usage, &now);
-                       if (new_hwi < hwm) {
+                       /*
+                        * Donation calculation assumes hweight_after_donation
+                        * to be positive, a condition that a donor w/ hwa < 2
+                        * can't meet. Don't bother with donation if hwa is
+                        * below 2. It's not gonna make a meaningful difference
+                        * anyway.
+                        */
+                       if (new_hwi < hwm && hwa >= 2) {
                                iocg->hweight_donating = hwa;
                                iocg->hweight_after_donation = new_hwi;
                                list_add(&iocg->surplus_list, &surpluses);
index ad732a36f9b303f7cb202eea48c00a488e800417..0da147edbd1864f59ce30c6c3f7af350d9a2cf4d 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/falloc.h>
 #include <linux/suspend.h>
 #include <linux/fs.h>
+#include <linux/module.h>
 #include "blk.h"
 
 static inline struct inode *bdev_file_inode(struct file *file)
@@ -340,8 +341,7 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
        } else {
                ret = bio_iov_iter_get_pages(bio, iter);
                if (unlikely(ret)) {
-                       bio->bi_status = BLK_STS_IOERR;
-                       bio_endio(bio);
+                       bio_put(bio);
                        return ret;
                }
        }
index 313c14a70bbd3985bd96a81767b0ebb776b3e341..6f01d35a5145a66ffac5e7852c943df3b0f66c10 100644 (file)
@@ -220,6 +220,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
                                pgrp = task_pgrp(current);
                        else
                                pgrp = find_vpid(who);
+                       read_lock(&tasklist_lock);
                        do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
                                tmpio = get_task_ioprio(p);
                                if (tmpio < 0)
@@ -229,6 +230,8 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
                                else
                                        ret = ioprio_best(ret, tmpio);
                        } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
+                       read_unlock(&tasklist_lock);
+
                        break;
                case IOPRIO_WHO_USER:
                        uid = make_kuid(current_user_ns(), who);
index be5d40ae14882ddf7daba9e3df16ce1499bffd3e..a110338c860c770d4affa688375dc2086007a6b5 100644 (file)
@@ -41,8 +41,7 @@ obj-$(CONFIG_DMADEVICES)      += dma/
 # SOC specific infrastructure drivers.
 obj-y                          += soc/
 
-obj-$(CONFIG_VIRTIO)           += virtio/
-obj-$(CONFIG_VIRTIO_PCI_LIB)   += virtio/
+obj-y                          += virtio/
 obj-$(CONFIG_VDPA)             += vdpa/
 obj-$(CONFIG_XEN)              += xen/
 
index cdbdf68bd98f5ce3e7ef485580e102fd16694e67..60b5424bd318bb44fc848cfad8dd5281ad1aa2b3 100644 (file)
@@ -524,6 +524,23 @@ config ACPI_PPTT
        bool
 endif
 
+config ACPI_PCC
+       bool "ACPI PCC Address Space"
+       depends on PCC
+       default y
+       help
+         The PCC Address Space, also referred to as the PCC Operation Region,
+         pertains to the region of PCC subspace that succeeds the PCC signature.
+
+         The PCC Operation Region works in conjunction with the PCC Table
+         (Platform Communications Channel Table). PCC subspaces that are
+         marked for use as PCC Operation Regions must not be used as PCC
+         subspaces for the standard ACPI features such as CPPC, RASF, PDTT and
+         MPST. These standard features must always use the PCC Table instead.
+
+         Enable this feature if you want to set up and install the PCC Address
+         Space handler to handle PCC OpRegion in the firmware.
+
 source "drivers/acpi/pmic/Kconfig"
 
 config ACPI_VIOT
index 3018714e87d9c0e8fc5801896241b4780bb1ca66..08c2d985c57cc5af62857882806532c79d3fb7f2 100644 (file)
@@ -67,6 +67,7 @@ acpi-$(CONFIG_ACPI_LPIT)      += acpi_lpit.o
 acpi-$(CONFIG_ACPI_GENERIC_GSI) += irq.o
 acpi-$(CONFIG_ACPI_WATCHDOG)   += acpi_watchdog.o
 acpi-$(CONFIG_ACPI_PRMT)       += prmt.o
+acpi-$(CONFIG_ACPI_PCC)                += acpi_pcc.o
 
 # Address translation
 acpi-$(CONFIG_ACPI_ADXL)       += acpi_adxl.o
diff --git a/drivers/acpi/acpi_pcc.c b/drivers/acpi/acpi_pcc.c
new file mode 100644 (file)
index 0000000..41e3ebd
--- /dev/null
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Author: Sudeep Holla <sudeep.holla@arm.com>
+ * Copyright 2021 Arm Limited
+ *
+ * The PCC Address Space, also referred to as the PCC Operation Region,
+ * pertains to the region of PCC subspace that succeeds the PCC signature.
+ * The PCC Operation Region works in conjunction with the PCC Table
+ * (Platform Communications Channel Table). PCC subspaces that are marked
+ * for use as PCC Operation Regions must not be used as PCC subspaces for
+ * the standard ACPI features such as CPPC, RASF, PDTT and MPST. These
+ * standard features must always use the PCC Table instead.
+ *
+ * This driver sets up the PCC Address Space and installs a handler to enable
+ * handling of PCC OpRegion in the firmware.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/acpi.h>
+#include <linux/completion.h>
+#include <linux/idr.h>
+#include <linux/io.h>
+
+#include <acpi/pcc.h>
+
+struct pcc_data {
+       struct pcc_mbox_chan *pcc_chan;
+       void __iomem *pcc_comm_addr;
+       struct completion done;
+       struct mbox_client cl;
+       struct acpi_pcc_info ctx;
+};
+
+struct acpi_pcc_info pcc_ctx;
+
+static void pcc_rx_callback(struct mbox_client *cl, void *m)
+{
+       struct pcc_data *data = container_of(cl, struct pcc_data, cl);
+
+       complete(&data->done);
+}
+
+static acpi_status
+acpi_pcc_address_space_setup(acpi_handle region_handle, u32 function,
+                            void *handler_context,  void **region_context)
+{
+       struct pcc_data *data;
+       struct acpi_pcc_info *ctx = handler_context;
+       struct pcc_mbox_chan *pcc_chan;
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return AE_NO_MEMORY;
+
+       data->cl.rx_callback = pcc_rx_callback;
+       data->cl.knows_txdone = true;
+       data->ctx.length = ctx->length;
+       data->ctx.subspace_id = ctx->subspace_id;
+       data->ctx.internal_buffer = ctx->internal_buffer;
+
+       init_completion(&data->done);
+       data->pcc_chan = pcc_mbox_request_channel(&data->cl, ctx->subspace_id);
+       if (IS_ERR(data->pcc_chan)) {
+               pr_err("Failed to find PCC channel for subspace %d\n",
+                      ctx->subspace_id);
+               return AE_NOT_FOUND;
+       }
+
+       pcc_chan = data->pcc_chan;
+       data->pcc_comm_addr = acpi_os_ioremap(pcc_chan->shmem_base_addr,
+                                             pcc_chan->shmem_size);
+       if (!data->pcc_comm_addr) {
+               pr_err("Failed to ioremap PCC comm region mem for %d\n",
+                      ctx->subspace_id);
+               return AE_NO_MEMORY;
+       }
+
+       *region_context = data;
+       return AE_OK;
+}
+
+static acpi_status
+acpi_pcc_address_space_handler(u32 function, acpi_physical_address addr,
+                              u32 bits, acpi_integer *value,
+                              void *handler_context, void *region_context)
+{
+       int ret;
+       struct pcc_data *data = region_context;
+
+       reinit_completion(&data->done);
+
+       /* Write to Shared Memory */
+       memcpy_toio(data->pcc_comm_addr, (void *)value, data->ctx.length);
+
+       ret = mbox_send_message(data->pcc_chan->mchan, NULL);
+       if (ret < 0)
+               return AE_ERROR;
+
+       if (data->pcc_chan->mchan->mbox->txdone_irq)
+               wait_for_completion(&data->done);
+
+       mbox_client_txdone(data->pcc_chan->mchan, ret);
+
+       memcpy_fromio(value, data->pcc_comm_addr, data->ctx.length);
+
+       return AE_OK;
+}
+
+void __init acpi_init_pcc(void)
+{
+       acpi_status status;
+
+       status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
+                                                   ACPI_ADR_SPACE_PLATFORM_COMM,
+                                                   &acpi_pcc_address_space_handler,
+                                                   &acpi_pcc_address_space_setup,
+                                                   &pcc_ctx);
+       if (ACPI_FAILURE(status))
+               pr_alert("OperationRegion handler could not be installed\n");
+}
index 42ede059728ce3e93c12f9d8f1eb54183ba51f2e..990ff5b0aeb875d919efad799aa36fb6d5183f8e 100644 (file)
@@ -1733,13 +1733,12 @@ acpi_video_bus_match(acpi_handle handle, u32 level, void *context,
 {
        struct acpi_device *device = context;
        struct acpi_device *sibling;
-       int result;
 
        if (handle == device->handle)
                return AE_CTRL_TERMINATE;
 
-       result = acpi_bus_get_device(handle, &sibling);
-       if (result)
+       sibling = acpi_fetch_acpi_dev(handle);
+       if (!sibling)
                return AE_OK;
 
        if (!strcmp(acpi_device_name(sibling), ACPI_VIDEO_BUS_NAME))
index 82a75964343b2211992e10dee21585a70fb0cd71..b29ba436944ad5a29065c326c409aa3eedee916c 100644 (file)
@@ -223,6 +223,11 @@ acpi_ev_pci_bar_region_setup(acpi_handle handle,
                             u32 function,
                             void *handler_context, void **region_context);
 
+acpi_status
+acpi_ev_data_table_region_setup(acpi_handle handle,
+                               u32 function,
+                               void *handler_context, void **region_context);
+
 acpi_status
 acpi_ev_default_region_setup(acpi_handle handle,
                             u32 function,
index 9db5ae0f79ea04b93dbd006c334a5370cbe7f97f..0aa0d847cb255002513f6c9f79235e378510a280 100644 (file)
@@ -138,6 +138,7 @@ struct acpi_object_region {
        union acpi_operand_object *next;
        acpi_physical_address address;
        u32 length;
+       void *pointer;          /* Only for data table regions */
 };
 
 struct acpi_object_method {
index e2d0046799a211a114fafa8169e218f770923cba..533802fe73e90d64658e0a7699d44e5727824f13 100644 (file)
@@ -35,7 +35,8 @@ acpi_tb_init_table_descriptor(struct acpi_table_desc *table_desc,
 
 acpi_status
 acpi_tb_acquire_temp_table(struct acpi_table_desc *table_desc,
-                          acpi_physical_address address, u8 flags);
+                          acpi_physical_address address,
+                          u8 flags, struct acpi_table_header *table);
 
 void acpi_tb_release_temp_table(struct acpi_table_desc *table_desc);
 
@@ -86,6 +87,7 @@ acpi_tb_release_table(struct acpi_table_header *table,
 acpi_status
 acpi_tb_install_standard_table(acpi_physical_address address,
                               u8 flags,
+                              struct acpi_table_header *table,
                               u8 reload, u8 override, u32 *table_index);
 
 void acpi_tb_uninstall_table(struct acpi_table_desc *table_desc);
@@ -95,7 +97,9 @@ acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node);
 
 acpi_status
 acpi_tb_install_and_load_table(acpi_physical_address address,
-                              u8 flags, u8 override, u32 *table_index);
+                              u8 flags,
+                              struct acpi_table_header *table,
+                              u8 override, u32 *table_index);
 
 acpi_status acpi_tb_unload_table(u32 table_index);
 
index 639635291ab76bb874a2000ff67f9296844d19d6..44c448269861a3aea8edbbe4ce837d4d815e4a6f 100644 (file)
@@ -531,6 +531,7 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
 
        obj_desc->region.address = ACPI_PTR_TO_PHYSADDR(table);
        obj_desc->region.length = table->length;
+       obj_desc->region.pointer = table;
 
        ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
                          obj_desc,
index c0cd7147a5a394cf2bb94f8c70c9ce23066a264a..8f43d38dc4ca203efed8352b608771e5f240b758 100644 (file)
@@ -386,7 +386,7 @@ acpi_ev_install_space_handler(struct acpi_namespace_node *node,
                case ACPI_ADR_SPACE_DATA_TABLE:
 
                        handler = acpi_ex_data_table_space_handler;
-                       setup = NULL;
+                       setup = acpi_ev_data_table_region_setup;
                        break;
 
                default:
index 4ef43c8ef5e78a0cc1fb8ba16745dba86dbbd9c0..b9d77d327d38fb25338ab7da7d1f86fe827370ae 100644 (file)
@@ -162,6 +162,16 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
                        return_ACPI_STATUS(AE_NOT_EXIST);
                }
 
+               if (region_obj->region.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+                       struct acpi_pcc_info *ctx =
+                           handler_desc->address_space.context;
+
+                       ctx->internal_buffer =
+                           field_obj->field.internal_pcc_buffer;
+                       ctx->length = (u16)region_obj->region.length;
+                       ctx->subspace_id = (u8)region_obj->region.address;
+               }
+
                /*
                 * We must exit the interpreter because the region setup will
                 * potentially execute control methods (for example, the _REG method
index 984c172453bfc5675f3115883389de9a0be647db..d28dee929e61a72c4fc9fb952ffd0cb07ee7a832 100644 (file)
@@ -406,6 +406,58 @@ acpi_ev_cmos_region_setup(acpi_handle handle,
        return_ACPI_STATUS(AE_OK);
 }
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_data_table_region_setup
+ *
+ * PARAMETERS:  handle              - Region we are interested in
+ *              function            - Start or stop
+ *              handler_context     - Address space handler context
+ *              region_context      - Region specific context
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Setup a data_table_region
+ *
+ * MUTEX:       Assumes namespace is not locked
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_data_table_region_setup(acpi_handle handle,
+                               u32 function,
+                               void *handler_context, void **region_context)
+{
+       union acpi_operand_object *region_desc =
+           (union acpi_operand_object *)handle;
+       struct acpi_data_table_space_context *local_region_context;
+
+       ACPI_FUNCTION_TRACE(ev_data_table_region_setup);
+
+       if (function == ACPI_REGION_DEACTIVATE) {
+               if (*region_context) {
+                       ACPI_FREE(*region_context);
+                       *region_context = NULL;
+               }
+               return_ACPI_STATUS(AE_OK);
+       }
+
+       /* Create a new context */
+
+       local_region_context =
+           ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_data_table_space_context));
+       if (!(local_region_context)) {
+               return_ACPI_STATUS(AE_NO_MEMORY);
+       }
+
+       /* Save the data table pointer for use in the handler */
+
+       local_region_context->pointer = region_desc->region.pointer;
+
+       *region_context = local_region_context;
+       return_ACPI_STATUS(AE_OK);
+}
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ev_default_region_setup
index 0cd9b3738e7656fb4172469fb1bf9ad6e1000584..6c2685a6a4c1cc89333df447c9c568dc454de936 100644 (file)
@@ -411,7 +411,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
        acpi_ex_exit_interpreter();
        status = acpi_tb_install_and_load_table(ACPI_PTR_TO_PHYSADDR(table),
                                                ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL,
-                                               TRUE, &table_index);
+                                               table, TRUE, &table_index);
        acpi_ex_enter_interpreter();
        if (ACPI_FAILURE(status)) {
 
index 80b52ad55775977b8a11df6f89305d6d2bd6f3c0..deb3674ae726feff83417ee56780a1a5cefb1b37 100644 (file)
@@ -279,6 +279,7 @@ acpi_ex_create_region(u8 * aml_start,
        obj_desc->region.space_id = space_id;
        obj_desc->region.address = 0;
        obj_desc->region.length = 0;
+       obj_desc->region.pointer = NULL;
        obj_desc->region.node = node;
        obj_desc->region.handler = NULL;
        obj_desc->common.flags &=
index 06f3c9df1e22d22f17d12b3c4732ad95fab118ae..8618500f23b3943e72aebc8a065993cb36266694 100644 (file)
@@ -330,12 +330,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
                       obj_desc->field.base_byte_offset,
                       source_desc->buffer.pointer, data_length);
 
-               if ((obj_desc->field.region_obj->region.address ==
-                    PCC_MASTER_SUBSPACE
-                    && MASTER_SUBSPACE_COMMAND(obj_desc->field.
-                                               base_byte_offset))
-                   || GENERIC_SUBSPACE_COMMAND(obj_desc->field.
-                                               base_byte_offset)) {
+               if (MASTER_SUBSPACE_COMMAND(obj_desc->field.base_byte_offset)) {
 
                        /* Perform the write */
 
index b639e930d64291e55461ac672079590533abeabe..44b7c350ed5ca4cd658ae5e55c410887871fb092 100644 (file)
@@ -1007,7 +1007,8 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
                                                    (walk_state, return_desc,
                                                     &temp_desc);
                                                if (ACPI_FAILURE(status)) {
-                                                       goto cleanup;
+                                                       return_ACPI_STATUS
+                                                           (status);
                                                }
 
                                                return_desc = temp_desc;
index 82b713a9a1939a7c25f196fe1a58d3f0a503d488..48c19908fa4e4b701b12f0538fedf8487dc21567 100644 (file)
@@ -509,8 +509,15 @@ acpi_ex_data_table_space_handler(u32 function,
                                 u64 *value,
                                 void *handler_context, void *region_context)
 {
+       struct acpi_data_table_space_context *mapping;
+       char *pointer;
+
        ACPI_FUNCTION_TRACE(ex_data_table_space_handler);
 
+       mapping = (struct acpi_data_table_space_context *) region_context;
+       pointer = ACPI_CAST_PTR(char, mapping->pointer) +
+           (address - ACPI_PTR_TO_PHYSADDR(mapping->pointer));
+
        /*
         * Perform the memory read or write. The bit_width was already
         * validated.
@@ -518,14 +525,14 @@ acpi_ex_data_table_space_handler(u32 function,
        switch (function) {
        case ACPI_READ:
 
-               memcpy(ACPI_CAST_PTR(char, value),
-                      ACPI_PHYSADDR_TO_PTR(address), ACPI_DIV_8(bit_width));
+               memcpy(ACPI_CAST_PTR(char, value), pointer,
+                      ACPI_DIV_8(bit_width));
                break;
 
        case ACPI_WRITE:
 
-               memcpy(ACPI_PHYSADDR_TO_PTR(address),
-                      ACPI_CAST_PTR(char, value), ACPI_DIV_8(bit_width));
+               memcpy(pointer, ACPI_CAST_PTR(char, value),
+                      ACPI_DIV_8(bit_width));
                break;
 
        default:
index 808fdf54aeebf2f30c351905977cc463ea2fece0..7ee2939c08cd4d6a06cbd7161253c12750ea65b1 100644 (file)
@@ -104,7 +104,9 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state)
 
        /* Flush caches, as per ACPI specification */
 
-       ACPI_FLUSH_CPU_CACHE();
+       if (sleep_state < ACPI_STATE_S4) {
+               ACPI_FLUSH_CPU_CACHE();
+       }
 
        status = acpi_os_enter_sleep(sleep_state, sleep_control, 0);
        if (status == AE_CTRL_TERMINATE) {
index 34a3825f25d37f6d8e0fda01207d0ad8b34e2c7d..5efa3d8e483e01c9236de20e19aad13b2a4ad0d1 100644 (file)
@@ -110,7 +110,9 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
 
        /* Flush caches, as per ACPI specification */
 
-       ACPI_FLUSH_CPU_CACHE();
+       if (sleep_state < ACPI_STATE_S4) {
+               ACPI_FLUSH_CPU_CACHE();
+       }
 
        status = acpi_os_enter_sleep(sleep_state, pm1a_control, pm1b_control);
        if (status == AE_CTRL_TERMINATE) {
index e4cde23a2906187f488f6af55732fee12c2908fd..ba77598ee43e8af16bf3e32c0e34f1beed1b14b9 100644 (file)
@@ -162,8 +162,6 @@ acpi_status acpi_enter_sleep_state_s4bios(void)
                return_ACPI_STATUS(status);
        }
 
-       ACPI_FLUSH_CPU_CACHE();
-
        status = acpi_hw_write_port(acpi_gbl_FADT.smi_command,
                                    (u32)acpi_gbl_FADT.s4_bios_request, 8);
        if (ACPI_FAILURE(status)) {
index ebbca109edcb4403c406b9e3f96988aebcc61bc0..20360a9db48278f1fc2c9a398f493412bf699081 100644 (file)
@@ -89,14 +89,27 @@ acpi_tb_init_table_descriptor(struct acpi_table_desc *table_desc,
 {
 
        /*
-        * Initialize the table descriptor. Set the pointer to NULL, since the
-        * table is not fully mapped at this time.
+        * Initialize the table descriptor. Set the pointer to NULL for external
+        * tables, since the table is not fully mapped at this time.
         */
        memset(table_desc, 0, sizeof(struct acpi_table_desc));
        table_desc->address = address;
        table_desc->length = table->length;
        table_desc->flags = flags;
        ACPI_MOVE_32_TO_32(table_desc->signature.ascii, table->signature);
+
+       switch (table_desc->flags & ACPI_TABLE_ORIGIN_MASK) {
+       case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL:
+       case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL:
+
+               table_desc->pointer = table;
+               break;
+
+       case ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL:
+       default:
+
+               break;
+       }
 }
 
 /*******************************************************************************
@@ -132,9 +145,7 @@ acpi_tb_acquire_table(struct acpi_table_desc *table_desc,
        case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL:
        case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL:
 
-               table = ACPI_CAST_PTR(struct acpi_table_header,
-                                     ACPI_PHYSADDR_TO_PTR(table_desc->
-                                                          address));
+               table = table_desc->pointer;
                break;
 
        default:
@@ -196,6 +207,8 @@ acpi_tb_release_table(struct acpi_table_header *table,
  * PARAMETERS:  table_desc          - Table descriptor to be acquired
  *              address             - Address of the table
  *              flags               - Allocation flags of the table
+ *              table               - Pointer to the table (required for virtual
+ *                                    origins, optional for physical)
  *
  * RETURN:      Status
  *
@@ -208,49 +221,52 @@ acpi_tb_release_table(struct acpi_table_header *table,
 
 acpi_status
 acpi_tb_acquire_temp_table(struct acpi_table_desc *table_desc,
-                          acpi_physical_address address, u8 flags)
+                          acpi_physical_address address,
+                          u8 flags, struct acpi_table_header *table)
 {
-       struct acpi_table_header *table_header;
+       u8 mapped_table = FALSE;
 
        switch (flags & ACPI_TABLE_ORIGIN_MASK) {
        case ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL:
 
                /* Get the length of the full table from the header */
 
-               table_header =
-                   acpi_os_map_memory(address,
-                                      sizeof(struct acpi_table_header));
-               if (!table_header) {
-                       return (AE_NO_MEMORY);
+               if (!table) {
+                       table =
+                           acpi_os_map_memory(address,
+                                              sizeof(struct
+                                                     acpi_table_header));
+                       if (!table) {
+                               return (AE_NO_MEMORY);
+                       }
+
+                       mapped_table = TRUE;
                }
 
-               acpi_tb_init_table_descriptor(table_desc, address, flags,
-                                             table_header);
-               acpi_os_unmap_memory(table_header,
-                                    sizeof(struct acpi_table_header));
-               return (AE_OK);
+               break;
 
        case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL:
        case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL:
 
-               table_header = ACPI_CAST_PTR(struct acpi_table_header,
-                                            ACPI_PHYSADDR_TO_PTR(address));
-               if (!table_header) {
-                       return (AE_NO_MEMORY);
+               if (!table) {
+                       return (AE_BAD_PARAMETER);
                }
 
-               acpi_tb_init_table_descriptor(table_desc, address, flags,
-                                             table_header);
-               return (AE_OK);
+               break;
 
        default:
 
-               break;
+               /* Table is not valid yet */
+
+               return (AE_NO_MEMORY);
        }
 
-       /* Table is not valid yet */
+       acpi_tb_init_table_descriptor(table_desc, address, flags, table);
+       if (mapped_table) {
+               acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
+       }
 
-       return (AE_NO_MEMORY);
+       return (AE_OK);
 }
 
 /*******************************************************************************
@@ -335,7 +351,19 @@ void acpi_tb_invalidate_table(struct acpi_table_desc *table_desc)
 
        acpi_tb_release_table(table_desc->pointer, table_desc->length,
                              table_desc->flags);
-       table_desc->pointer = NULL;
+
+       switch (table_desc->flags & ACPI_TABLE_ORIGIN_MASK) {
+       case ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL:
+
+               table_desc->pointer = NULL;
+               break;
+
+       case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL:
+       case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL:
+       default:
+
+               break;
+       }
 
        return_VOID;
 }
@@ -959,6 +987,9 @@ acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node)
  *
  * PARAMETERS:  address                 - Physical address of the table
  *              flags                   - Allocation flags of the table
+ *              table                   - Pointer to the table (required for
+ *                                        virtual origins, optional for
+ *                                        physical)
  *              override                - Whether override should be performed
  *              table_index             - Where table index is returned
  *
@@ -970,7 +1001,9 @@ acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node)
 
 acpi_status
 acpi_tb_install_and_load_table(acpi_physical_address address,
-                              u8 flags, u8 override, u32 *table_index)
+                              u8 flags,
+                              struct acpi_table_header *table,
+                              u8 override, u32 *table_index)
 {
        acpi_status status;
        u32 i;
@@ -979,7 +1012,7 @@ acpi_tb_install_and_load_table(acpi_physical_address address,
 
        /* Install the table and load it into the namespace */
 
-       status = acpi_tb_install_standard_table(address, flags, TRUE,
+       status = acpi_tb_install_standard_table(address, flags, table, TRUE,
                                                override, &i);
        if (ACPI_FAILURE(status)) {
                goto exit;
index 5174abfa8af975a18009e1304ec116390d94a6fb..047bd094ba68da012ec740cb91753e21d4ae3739 100644 (file)
@@ -313,7 +313,7 @@ void acpi_tb_parse_fadt(void)
        acpi_tb_install_standard_table((acpi_physical_address)acpi_gbl_FADT.
                                       Xdsdt,
                                       ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL,
-                                      FALSE, TRUE, &acpi_gbl_dsdt_index);
+                                      NULL, FALSE, TRUE, &acpi_gbl_dsdt_index);
 
        /* If Hardware Reduced flag is set, there is no FACS */
 
@@ -322,14 +322,14 @@ void acpi_tb_parse_fadt(void)
                        acpi_tb_install_standard_table((acpi_physical_address)
                                                       acpi_gbl_FADT.facs,
                                                       ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL,
-                                                      FALSE, TRUE,
+                                                      NULL, FALSE, TRUE,
                                                       &acpi_gbl_facs_index);
                }
                if (acpi_gbl_FADT.Xfacs) {
                        acpi_tb_install_standard_table((acpi_physical_address)
                                                       acpi_gbl_FADT.Xfacs,
                                                       ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL,
-                                                      FALSE, TRUE,
+                                                      NULL, FALSE, TRUE,
                                                       &acpi_gbl_xfacs_index);
                }
        }
index 8d1e5b572493f7c44745b864ed4b7f22208b1e6b..5649f493a1edd13ad6e89786d986605186195100 100644 (file)
@@ -79,6 +79,8 @@ acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc,
  * PARAMETERS:  address             - Address of the table (might be a virtual
  *                                    address depending on the table_flags)
  *              flags               - Flags for the table
+ *              table               - Pointer to the table (required for virtual
+ *                                    origins, optional for physical)
  *              reload              - Whether reload should be performed
  *              override            - Whether override should be performed
  *              table_index         - Where the table index is returned
@@ -96,6 +98,7 @@ acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc,
 acpi_status
 acpi_tb_install_standard_table(acpi_physical_address address,
                               u8 flags,
+                              struct acpi_table_header *table,
                               u8 reload, u8 override, u32 *table_index)
 {
        u32 i;
@@ -106,7 +109,8 @@ acpi_tb_install_standard_table(acpi_physical_address address,
 
        /* Acquire a temporary table descriptor for validation */
 
-       status = acpi_tb_acquire_temp_table(&new_table_desc, address, flags);
+       status =
+           acpi_tb_acquire_temp_table(&new_table_desc, address, flags, table);
        if (ACPI_FAILURE(status)) {
                ACPI_ERROR((AE_INFO,
                            "Could not acquire table length at %8.8X%8.8X",
@@ -209,7 +213,8 @@ void acpi_tb_override_table(struct acpi_table_desc *old_table_desc)
        if (ACPI_SUCCESS(status) && table) {
                acpi_tb_acquire_temp_table(&new_table_desc,
                                           ACPI_PTR_TO_PHYSADDR(table),
-                                          ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL);
+                                          ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL,
+                                          table);
                ACPI_ERROR_ONLY(override_type = "Logical");
                goto finish_override;
        }
@@ -220,7 +225,8 @@ void acpi_tb_override_table(struct acpi_table_desc *old_table_desc)
                                                 &address, &length);
        if (ACPI_SUCCESS(status) && address && length) {
                acpi_tb_acquire_temp_table(&new_table_desc, address,
-                                          ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL);
+                                          ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL,
+                                          NULL);
                ACPI_ERROR_ONLY(override_type = "Physical");
                goto finish_override;
        }
@@ -289,7 +295,8 @@ void acpi_tb_uninstall_table(struct acpi_table_desc *table_desc)
 
        if ((table_desc->flags & ACPI_TABLE_ORIGIN_MASK) ==
            ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL) {
-               ACPI_FREE(ACPI_PHYSADDR_TO_PTR(table_desc->address));
+               ACPI_FREE(table_desc->pointer);
+               table_desc->pointer = NULL;
        }
 
        table_desc->address = ACPI_PTR_TO_PHYSADDR(NULL);
index 254823d494a24667d121b0bbb6a2791728ad0182..4dac16bd63d3424c3148873c84f305f27b4a8ef5 100644 (file)
@@ -101,7 +101,8 @@ acpi_tb_print_table_header(acpi_physical_address address,
                ACPI_INFO(("%-4.4s 0x%8.8X%8.8X %06X",
                           header->signature, ACPI_FORMAT_UINT64(address),
                           header->length));
-       } else if (ACPI_VALIDATE_RSDP_SIG(header->signature)) {
+       } else if (ACPI_VALIDATE_RSDP_SIG(ACPI_CAST_PTR(struct acpi_table_rsdp,
+                                                       header)->signature)) {
 
                /* RSDP has no common fields */
 
index 4b9b329a5a922d94490f649fc5bcecf0d55480c6..5e8d50a4b6a9a1f4d1ae020039338547c7a5654b 100644 (file)
@@ -328,7 +328,7 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
 
                status = acpi_tb_install_standard_table(address,
                                                        ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL,
-                                                       FALSE, TRUE,
+                                                       NULL, FALSE, TRUE,
                                                        &table_index);
 
                if (ACPI_SUCCESS(status) &&
index 38623049b962de4b046be5869b7c8749f83b9602..87356d9ad613db918086bce50fe94842701c10c4 100644 (file)
@@ -227,9 +227,7 @@ unlock_and_exit:
  *
  * FUNCTION:    acpi_install_table
  *
- * PARAMETERS:  address             - Address of the ACPI table to be installed.
- *              physical            - Whether the address is a physical table
- *                                    address or not
+ * PARAMETERS:  table               - Pointer to the ACPI table to be installed.
  *
  * RETURN:      Status
  *
@@ -240,28 +238,54 @@ unlock_and_exit:
  ******************************************************************************/
 
 acpi_status ACPI_INIT_FUNCTION
-acpi_install_table(acpi_physical_address address, u8 physical)
+acpi_install_table(struct acpi_table_header *table)
 {
        acpi_status status;
-       u8 flags;
        u32 table_index;
 
        ACPI_FUNCTION_TRACE(acpi_install_table);
 
-       if (physical) {
-               flags = ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL;
-       } else {
-               flags = ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL;
-       }
-
-       status = acpi_tb_install_standard_table(address, flags,
-                                               FALSE, FALSE, &table_index);
+       status = acpi_tb_install_standard_table(ACPI_PTR_TO_PHYSADDR(table),
+                                               ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL,
+                                               table, FALSE, FALSE,
+                                               &table_index);
 
        return_ACPI_STATUS(status);
 }
 
 ACPI_EXPORT_SYMBOL_INIT(acpi_install_table)
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_install_physical_table
+ *
+ * PARAMETERS:  address             - Address of the ACPI table to be installed.
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Dynamically install an ACPI table.
+ *              Note: This function should only be invoked after
+ *                    acpi_initialize_tables() and before acpi_load_tables().
+ *
+ ******************************************************************************/
+acpi_status ACPI_INIT_FUNCTION
+acpi_install_physical_table(acpi_physical_address address)
+{
+       acpi_status status;
+       u32 table_index;
+
+       ACPI_FUNCTION_TRACE(acpi_install_physical_table);
+
+       status = acpi_tb_install_standard_table(address,
+                                               ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL,
+                                               NULL, FALSE, FALSE,
+                                               &table_index);
+
+       return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL_INIT(acpi_install_physical_table)
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_load_table
@@ -298,7 +322,7 @@ acpi_status acpi_load_table(struct acpi_table_header *table, u32 *table_idx)
        ACPI_INFO(("Host-directed Dynamic ACPI Table Load:"));
        status = acpi_tb_install_and_load_table(ACPI_PTR_TO_PHYSADDR(table),
                                                ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL,
-                                               FALSE, &table_index);
+                                               table, FALSE, &table_index);
        if (table_idx) {
                *table_idx = table_index;
        }
index e5ba9795ec696e62042222ceb7d3ee5537127aa2..8d7736d2d2699c8b1a80598b2294be73b29f4210 100644 (file)
@@ -422,6 +422,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
                        ACPI_WARNING((AE_INFO,
                                      "Obj %p, Reference Count is already zero, cannot decrement\n",
                                      object));
+                       return;
                }
 
                ACPI_DEBUG_PRINT_RAW((ACPI_DB_ALLOCATIONS,
index fa923a9292244b416d4d54baa346578d06e0a5db..b64014b4203e24d19b2d48690d961d3581af70cf 100644 (file)
@@ -1320,6 +1320,7 @@ static int __init acpi_init(void)
                pr_debug("%s: kset create error\n", __func__);
 
        init_prmt();
+       acpi_init_pcc();
        result = acpi_bus_init();
        if (result) {
                kobject_put(acpi_kobj);
index 19b33c028f356bb971cdf20ded6a2e7ff52f946a..cc6c97e7dcaeb42cb66f56f21852b74caa55ccd3 100644 (file)
@@ -285,14 +285,12 @@ EXPORT_SYMBOL(acpi_device_set_power);
 
 int acpi_bus_set_power(acpi_handle handle, int state)
 {
-       struct acpi_device *device;
-       int result;
+       struct acpi_device *device = acpi_fetch_acpi_dev(handle);
 
-       result = acpi_bus_get_device(handle, &device);
-       if (result)
-               return result;
+       if (device)
+               return acpi_device_set_power(device, state);
 
-       return acpi_device_set_power(device, state);
+       return -ENODEV;
 }
 EXPORT_SYMBOL(acpi_bus_set_power);
 
@@ -410,21 +408,20 @@ EXPORT_SYMBOL_GPL(acpi_device_update_power);
 
 int acpi_bus_update_power(acpi_handle handle, int *state_p)
 {
-       struct acpi_device *device;
-       int result;
+       struct acpi_device *device = acpi_fetch_acpi_dev(handle);
 
-       result = acpi_bus_get_device(handle, &device);
-       return result ? result : acpi_device_update_power(device, state_p);
+       if (device)
+               return acpi_device_update_power(device, state_p);
+
+       return -ENODEV;
 }
 EXPORT_SYMBOL_GPL(acpi_bus_update_power);
 
 bool acpi_bus_power_manageable(acpi_handle handle)
 {
-       struct acpi_device *device;
-       int result;
+       struct acpi_device *device = acpi_fetch_acpi_dev(handle);
 
-       result = acpi_bus_get_device(handle, &device);
-       return result ? false : device->flags.power_manageable;
+       return device && device->flags.power_manageable;
 }
 EXPORT_SYMBOL(acpi_bus_power_manageable);
 
@@ -543,11 +540,9 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev)
 
 bool acpi_bus_can_wakeup(acpi_handle handle)
 {
-       struct acpi_device *device;
-       int result;
+       struct acpi_device *device = acpi_fetch_acpi_dev(handle);
 
-       result = acpi_bus_get_device(handle, &device);
-       return result ? false : device->wakeup.flags.valid;
+       return device && device->wakeup.flags.valid;
 }
 EXPORT_SYMBOL(acpi_bus_can_wakeup);
 
index c8e9b962e18cefb2943b86a7cb5cec75f29e4452..a89bdbe0018442f0c7d57dc6d920f078c4078128 100644 (file)
@@ -489,9 +489,8 @@ static ssize_t docked_show(struct device *dev,
                           struct device_attribute *attr, char *buf)
 {
        struct dock_station *dock_station = dev->platform_data;
-       struct acpi_device *adev = NULL;
+       struct acpi_device *adev = acpi_fetch_acpi_dev(dock_station->handle);
 
-       acpi_bus_get_device(dock_station->handle, &adev);
        return sysfs_emit(buf, "%u\n", acpi_device_enumerated(adev));
 }
 static DEVICE_ATTR_RO(docked);
index cb7b900d9466c84fa01b7d680936f0233a4fde7e..d54fb8e54671d1d14cb3126c44fc2434369e828f 100644 (file)
@@ -606,12 +606,10 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
 int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
                               int *polarity, char **name)
 {
-       int result;
-       struct acpi_device *device;
+       struct acpi_device *device = acpi_fetch_acpi_dev(handle);
        struct acpi_pci_link *link;
 
-       result = acpi_bus_get_device(handle, &device);
-       if (result) {
+       if (!device) {
                acpi_handle_err(handle, "Invalid link device\n");
                return -1;
        }
@@ -658,12 +656,10 @@ int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
  */
 int acpi_pci_link_free_irq(acpi_handle handle)
 {
-       struct acpi_device *device;
+       struct acpi_device *device = acpi_fetch_acpi_dev(handle);
        struct acpi_pci_link *link;
-       acpi_status result;
 
-       result = acpi_bus_get_device(handle, &device);
-       if (result) {
+       if (!device) {
                acpi_handle_err(handle, "Invalid link device\n");
                return -1;
        }
index ab2f7dfb0c44429f2b4296c9435bc25edd97b86c..b76db99cced3bea6d97c54e3a705b048e1342e02 100644 (file)
@@ -67,11 +67,10 @@ static struct acpi_scan_handler pci_root_handler = {
  */
 int acpi_is_root_bridge(acpi_handle handle)
 {
+       struct acpi_device *device = acpi_fetch_acpi_dev(handle);
        int ret;
-       struct acpi_device *device;
 
-       ret = acpi_bus_get_device(handle, &device);
-       if (ret)
+       if (!device)
                return 0;
 
        ret = acpi_match_device_ids(device, root_device_ids);
@@ -215,11 +214,10 @@ static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
 
 struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle)
 {
+       struct acpi_device *device = acpi_fetch_acpi_dev(handle);
        struct acpi_pci_root *root;
-       struct acpi_device *device;
 
-       if (acpi_bus_get_device(handle, &device) ||
-           acpi_match_device_ids(device, root_device_ids))
+       if (!device || acpi_match_device_ids(device, root_device_ids))
                return NULL;
 
        root = acpi_driver_data(device);
@@ -324,7 +322,7 @@ EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
  * acpi_pci_osc_control_set - Request control of PCI root _OSC features.
  * @handle: ACPI handle of a PCI root bridge (or PCIe Root Complex).
  * @mask: Mask of _OSC bits to request control of, place to store control mask.
- * @req: Mask of _OSC bits the control of is essential to the caller.
+ * @support: _OSC supported capability.
  *
  * Run _OSC query for @mask and if that is successful, compare the returned
  * mask of control bits with @req.  If all of the @req bits are set in the
index 5dcb02ededbc591056a84f73439b2359073e70b6..8c4a73a1351e8db15b480eaa8ff15a566f465e95 100644 (file)
@@ -81,9 +81,9 @@ struct acpi_power_resource *to_power_resource(struct acpi_device *device)
 
 static struct acpi_power_resource *acpi_power_get_context(acpi_handle handle)
 {
-       struct acpi_device *device;
+       struct acpi_device *device = acpi_fetch_acpi_dev(handle);
 
-       if (acpi_bus_get_device(handle, &device))
+       if (!device)
                return NULL;
 
        return to_power_resource(device);
@@ -716,6 +716,9 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
 
        mutex_lock(&acpi_device_lock);
 
+       dev_dbg(&dev->dev, "Enabling wakeup power (count %d)\n",
+               dev->wakeup.prepare_count);
+
        if (dev->wakeup.prepare_count++)
                goto out;
 
@@ -734,8 +737,11 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
        if (err) {
                acpi_power_off_list(&dev->wakeup.resources);
                dev->wakeup.prepare_count = 0;
+               goto out;
        }
 
+       dev_dbg(&dev->dev, "Wakeup power enabled\n");
+
  out:
        mutex_unlock(&acpi_device_lock);
        return err;
@@ -757,6 +763,9 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
 
        mutex_lock(&acpi_device_lock);
 
+       dev_dbg(&dev->dev, "Disabling wakeup power (count %d)\n",
+               dev->wakeup.prepare_count);
+
        /* Do nothing if wakeup power has not been enabled for this device. */
        if (dev->wakeup.prepare_count <= 0)
                goto out;
@@ -782,8 +791,11 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
        if (err) {
                dev_err(&dev->dev, "Cannot turn off wakeup power resources\n");
                dev->wakeup.flags.valid = 0;
+               goto out;
        }
 
+       dev_dbg(&dev->dev, "Wakeup power disabled\n");
+
  out:
        mutex_unlock(&acpi_device_lock);
        return err;
@@ -916,15 +928,14 @@ static void acpi_power_add_resource_to_list(struct acpi_power_resource *resource
 
 struct acpi_device *acpi_add_power_resource(acpi_handle handle)
 {
+       struct acpi_device *device = acpi_fetch_acpi_dev(handle);
        struct acpi_power_resource *resource;
-       struct acpi_device *device = NULL;
        union acpi_object acpi_object;
        struct acpi_buffer buffer = { sizeof(acpi_object), &acpi_object };
        acpi_status status;
        u8 state_dummy;
        int result;
 
-       acpi_bus_get_device(handle, &device);
        if (device)
                return device;
 
index 77541f939be3e464838409ec7d18d4a43736cf63..368a9edefd0cbafcae1fdda5fbf568e3d45f254a 100644 (file)
@@ -98,8 +98,13 @@ static int acpi_soft_cpu_online(unsigned int cpu)
        struct acpi_processor *pr = per_cpu(processors, cpu);
        struct acpi_device *device;
 
-       if (!pr || acpi_bus_get_device(pr->handle, &device))
+       if (!pr)
+               return 0;
+
+       device = acpi_fetch_acpi_dev(pr->handle);
+       if (!device)
                return 0;
+
        /*
         * CPU got physically hotplugged and onlined for the first time:
         * Initialize missing things.
@@ -125,9 +130,8 @@ static int acpi_soft_cpu_online(unsigned int cpu)
 static int acpi_soft_cpu_dead(unsigned int cpu)
 {
        struct acpi_processor *pr = per_cpu(processors, cpu);
-       struct acpi_device *device;
 
-       if (!pr || acpi_bus_get_device(pr->handle, &device))
+       if (!pr || !acpi_fetch_acpi_dev(pr->handle))
                return 0;
 
        acpi_processor_reevaluate_tstate(pr, true);
index 76ef1bcc884809943b705c3d5ccd4554162bcfcd..86560a28751b2f7aec9158c04d908bf1adb6b95c 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/tick.h>
 #include <linux/cpuidle.h>
 #include <linux/cpu.h>
+#include <linux/minmax.h>
 #include <acpi/processor.h>
 
 /*
@@ -400,13 +401,10 @@ static int acpi_cst_latency_cmp(const void *a, const void *b)
 static void acpi_cst_latency_swap(void *a, void *b, int n)
 {
        struct acpi_processor_cx *x = a, *y = b;
-       u32 tmp;
 
        if (!(x->valid && y->valid))
                return;
-       tmp = x->latency;
-       x->latency = y->latency;
-       y->latency = tmp;
+       swap(x->latency, y->latency);
 }
 
 static int acpi_processor_power_verify(struct acpi_processor *pr)
@@ -567,7 +565,8 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
 {
        struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
 
-       ACPI_FLUSH_CPU_CACHE();
+       if (cx->type == ACPI_STATE_C3)
+               ACPI_FLUSH_CPU_CACHE();
 
        while (1) {
 
@@ -1101,7 +1100,7 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
 
        status = acpi_get_parent(handle, &pr_ahandle);
        while (ACPI_SUCCESS(status)) {
-               acpi_bus_get_device(pr_ahandle, &d);
+               d = acpi_fetch_acpi_dev(pr_ahandle);
                handle = pr_ahandle;
 
                if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID))
index a3d34e3f9f94bef6158e8057f3b8caaf457ab015..d8b2dfcd59b5ffa40e0d154a0dedbc3c8cf7f54f 100644 (file)
@@ -53,10 +53,17 @@ static int phys_package_first_cpu(int cpu)
 
 static int cpu_has_cpufreq(unsigned int cpu)
 {
-       struct cpufreq_policy policy;
-       if (!acpi_processor_cpufreq_init || cpufreq_get_policy(&policy, cpu))
+       struct cpufreq_policy *policy;
+
+       if (!acpi_processor_cpufreq_init)
                return 0;
-       return 1;
+
+       policy = cpufreq_cpu_get(cpu);
+       if (policy) {
+               cpufreq_cpu_put(policy);
+               return 1;
+       }
+       return 0;
 }
 
 static int cpufreq_get_max_state(unsigned int cpu)
index 2366f54d8e9cf8b263613839a3a9e39419508473..d0986bda29640be63d27e1dafca7460483c2009d 100644 (file)
@@ -687,9 +687,9 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
                if (index)
                        return -EINVAL;
 
-               ret = acpi_bus_get_device(obj->reference.handle, &device);
-               if (ret)
-                       return ret == -ENODEV ? -EINVAL : ret;
+               device = acpi_fetch_acpi_dev(obj->reference.handle);
+               if (!device)
+                       return -EINVAL;
 
                args->fwnode = acpi_fwnode_handle(device);
                args->nargs = 0;
@@ -719,9 +719,8 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
                if (element->type == ACPI_TYPE_LOCAL_REFERENCE) {
                        struct fwnode_handle *ref_fwnode;
 
-                       ret = acpi_bus_get_device(element->reference.handle,
-                                                 &device);
-                       if (ret)
+                       device = acpi_fetch_acpi_dev(element->reference.handle);
+                       if (!device)
                                return -EINVAL;
 
                        nargs = 0;
index 3c25ce8c95ba1b19a256166109a9f34c724c5d72..c2d4947844250cf003c315067b42686f390d9439 100644 (file)
@@ -791,9 +791,9 @@ static acpi_status acpi_res_consumer_cb(acpi_handle handle, u32 depth,
 {
        struct resource *res = context;
        struct acpi_device **consumer = (struct acpi_device **) ret;
-       struct acpi_device *adev;
+       struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
 
-       if (acpi_bus_get_device(handle, &adev))
+       if (!adev)
                return AE_OK;
 
        if (acpi_dev_consumes_res(adev, res)) {
index 2c80765670bc7fc35508ad91347f8343f5f1f1d6..4dd3a9efcd0fa728e1cb16d670ec9834321d1a33 100644 (file)
@@ -135,12 +135,12 @@ bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent)
 static acpi_status acpi_bus_offline(acpi_handle handle, u32 lvl, void *data,
                                    void **ret_p)
 {
-       struct acpi_device *device = NULL;
+       struct acpi_device *device = acpi_fetch_acpi_dev(handle);
        struct acpi_device_physical_node *pn;
        bool second_pass = (bool)data;
        acpi_status status = AE_OK;
 
-       if (acpi_bus_get_device(handle, &device))
+       if (!device)
                return AE_OK;
 
        if (device->handler && !device->handler->hotplug.enabled) {
@@ -180,10 +180,10 @@ static acpi_status acpi_bus_offline(acpi_handle handle, u32 lvl, void *data,
 static acpi_status acpi_bus_online(acpi_handle handle, u32 lvl, void *data,
                                   void **ret_p)
 {
-       struct acpi_device *device = NULL;
+       struct acpi_device *device = acpi_fetch_acpi_dev(handle);
        struct acpi_device_physical_node *pn;
 
-       if (acpi_bus_get_device(handle, &device))
+       if (!device)
                return AE_OK;
 
        mutex_lock(&device->physical_node_lock);
@@ -599,6 +599,19 @@ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
 }
 EXPORT_SYMBOL(acpi_bus_get_device);
 
+/**
+ * acpi_fetch_acpi_dev - Retrieve ACPI device object.
+ * @handle: ACPI handle associated with the requested ACPI device object.
+ *
+ * Return a pointer to the ACPI device object associated with @handle, if
+ * present, or NULL otherwise.
+ */
+struct acpi_device *acpi_fetch_acpi_dev(acpi_handle handle)
+{
+       return handle_to_device(handle, NULL);
+}
+EXPORT_SYMBOL_GPL(acpi_fetch_acpi_dev);
+
 static void get_acpi_device(void *dev)
 {
        acpi_dev_get(dev);
@@ -799,7 +812,7 @@ static const char * const acpi_ignore_dep_ids[] = {
 
 static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
 {
-       struct acpi_device *device = NULL;
+       struct acpi_device *device;
        acpi_status status;
 
        /*
@@ -814,7 +827,9 @@ static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
                status = acpi_get_parent(handle, &handle);
                if (ACPI_FAILURE(status))
                        return status == AE_NULL_ENTRY ? NULL : acpi_root;
-       } while (acpi_bus_get_device(handle, &device));
+
+               device = acpi_fetch_acpi_dev(handle);
+       } while (!device);
        return device;
 }
 
@@ -1340,11 +1355,11 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
                if (info->valid & ACPI_VALID_HID) {
                        acpi_add_id(pnp, info->hardware_id.string);
                        pnp->type.platform_id = 1;
-               }
-               if (info->valid & ACPI_VALID_CID) {
-                       cid_list = &info->compatible_id_list;
-                       for (i = 0; i < cid_list->count; i++)
-                               acpi_add_id(pnp, cid_list->ids[i].string);
+                       if (info->valid & ACPI_VALID_CID) {
+                               cid_list = &info->compatible_id_list;
+                               for (i = 0; i < cid_list->count; i++)
+                                       acpi_add_id(pnp, cid_list->ids[i].string);
+                       }
                }
                if (info->valid & ACPI_VALID_ADR) {
                        pnp->bus_address = info->address;
@@ -1695,6 +1710,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
 {
        struct list_head resource_list;
        bool is_serial_bus_slave = false;
+       static const struct acpi_device_id ignore_serial_bus_ids[] = {
        /*
         * These devices have multiple I2cSerialBus resources and an i2c-client
         * must be instantiated for each, each with its own i2c_device_id.
@@ -1703,11 +1719,18 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
         * drivers/platform/x86/i2c-multi-instantiate.c driver, which knows
         * which i2c_device_id to use for each resource.
         */
-       static const struct acpi_device_id i2c_multi_instantiate_ids[] = {
                {"BSG1160", },
                {"BSG2150", },
                {"INT33FE", },
                {"INT3515", },
+       /*
+        * HIDs of device with an UartSerialBusV2 resource for which userspace
+        * expects a regular tty cdev to be created (instead of the in kernel
+        * serdev) and which have a kernel driver which expects a platform_dev
+        * such as the rfkill-gpio driver.
+        */
+               {"BCM4752", },
+               {"LNV4752", },
                {}
        };
 
@@ -1721,8 +1744,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
             fwnode_property_present(&device->fwnode, "baud")))
                return true;
 
-       /* Instantiate a pdev for the i2c-multi-instantiate drv to bind to */
-       if (!acpi_match_device_ids(device, i2c_multi_instantiate_ids))
+       if (!acpi_match_device_ids(device, ignore_serial_bus_ids))
                return false;
 
        INIT_LIST_HEAD(&resource_list);
@@ -2003,11 +2025,10 @@ static bool acpi_bus_scan_second_pass;
 static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
                                      struct acpi_device **adev_p)
 {
-       struct acpi_device *device = NULL;
+       struct acpi_device *device = acpi_fetch_acpi_dev(handle);
        acpi_object_type acpi_type;
        int type;
 
-       acpi_bus_get_device(handle, &device);
        if (device)
                goto out;
 
@@ -2548,8 +2569,8 @@ int __init acpi_scan_init(void)
        if (result)
                goto out;
 
-       result = acpi_bus_get_device(ACPI_ROOT_OBJECT, &acpi_root);
-       if (result)
+       acpi_root = acpi_fetch_acpi_dev(ACPI_ROOT_OBJECT);
+       if (!acpi_root)
                goto out;
 
        /* Fixed feature devices do not exist on HW-reduced platform */
index eaa47753b75840b70661830216c85050793f2245..4b8454f26ca11863dd536e4afb2cdbb3e6da8220 100644 (file)
@@ -73,7 +73,6 @@ static int acpi_sleep_prepare(u32 acpi_state)
                acpi_set_waking_vector(acpi_wakeup_address);
 
        }
-       ACPI_FLUSH_CPU_CACHE();
 #endif
        pr_info("Preparing to enter system sleep state S%d\n", acpi_state);
        acpi_enable_wakeup_devices(acpi_state);
@@ -566,8 +565,6 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
        u32 acpi_state = acpi_target_sleep_state;
        int error;
 
-       ACPI_FLUSH_CPU_CACHE();
-
        trace_suspend_resume(TPS("acpi_suspend"), acpi_state, true);
        switch (acpi_state) {
        case ACPI_STATE_S1:
@@ -903,8 +900,6 @@ static int acpi_hibernation_enter(void)
 {
        acpi_status status = AE_OK;
 
-       ACPI_FLUSH_CPU_CACHE();
-
        /* This shouldn't return.  If it returns, we have a problem */
        status = acpi_enter_sleep_state(ACPI_STATE_S4);
        /* Reprogram control registers */
index 71419eb16e09fab1fe18e7cb6d46fc2930d0763d..2fa8f611d0a74db2b376e6dfc2a66848fe3c031d 100644 (file)
@@ -723,7 +723,7 @@ static void __init acpi_table_initrd_scan(void)
                /*
                 * Mark the table to avoid being used in
                 * acpi_table_initrd_override(). Though this is not possible
-                * because override is disabled in acpi_install_table().
+                * because override is disabled in acpi_install_physical_table().
                 */
                if (test_and_set_bit(table_index, acpi_initrd_installed)) {
                        acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
@@ -734,7 +734,7 @@ static void __init acpi_table_initrd_scan(void)
                        table->signature, table->oem_id,
                        table->oem_table_id);
                acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
-               acpi_install_table(acpi_tables_addr + table_offset, TRUE);
+               acpi_install_physical_table(acpi_tables_addr + table_offset);
 next_table:
                table_offset += table_length;
                table_index++;
index 95105db642b9823dbdcab603c516d7f6edf1df6e..75cda13152359ab6014fac41b64b10f75dc997e6 100644 (file)
@@ -697,7 +697,6 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
        struct acpi_device *device = cdev->devdata;
        struct acpi_thermal *tz = thermal->devdata;
        struct acpi_device *dev;
-       acpi_status status;
        acpi_handle handle;
        int i;
        int j;
@@ -715,8 +714,8 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
                for (i = 0; i < tz->trips.passive.devices.count;
                    i++) {
                        handle = tz->trips.passive.devices.handles[i];
-                       status = acpi_bus_get_device(handle, &dev);
-                       if (ACPI_FAILURE(status) || dev != device)
+                       dev = acpi_fetch_acpi_dev(handle);
+                       if (dev != device)
                                continue;
                        if (bind)
                                result =
@@ -741,8 +740,8 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
                    j < tz->trips.active[i].devices.count;
                    j++) {
                        handle = tz->trips.active[i].devices.handles[j];
-                       status = acpi_bus_get_device(handle, &dev);
-                       if (ACPI_FAILURE(status) || dev != device)
+                       dev = acpi_fetch_acpi_dev(handle);
+                       if (dev != device)
                                continue;
                        if (bind)
                                result = thermal_zone_bind_cooling_device
index 068e393ea0c678590d8cb24f9ef8864c2b7b8f89..4f64713e9917b6b01e601622c97c87eae1e61ce2 100644 (file)
@@ -59,18 +59,16 @@ static void acpi_video_parse_cmdline(void)
 static acpi_status
 find_video(acpi_handle handle, u32 lvl, void *context, void **rv)
 {
+       struct acpi_device *acpi_dev = acpi_fetch_acpi_dev(handle);
        long *cap = context;
        struct pci_dev *dev;
-       struct acpi_device *acpi_dev;
 
        static const struct acpi_device_id video_ids[] = {
                {ACPI_VIDEO_HID, 0},
                {"", 0},
        };
-       if (acpi_bus_get_device(handle, &acpi_dev))
-               return AE_OK;
 
-       if (!acpi_match_device_ids(acpi_dev, video_ids)) {
+       if (acpi_dev && !acpi_match_device_ids(acpi_dev, video_ids)) {
                dev = acpi_get_pci_dev(handle);
                if (!dev)
                        return AE_OK;
index 1c48358b43ba30306a84894bfadb4444e7896176..abc06e7f89d834eca0789c934ffe1bd915759bd7 100644 (file)
@@ -293,9 +293,9 @@ static void lpi_check_constraints(void)
 
        for (i = 0; i < lpi_constraints_table_size; ++i) {
                acpi_handle handle = lpi_constraints_table[i].handle;
-               struct acpi_device *adev;
+               struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
 
-               if (!handle || acpi_bus_get_device(handle, &adev))
+               if (!adev)
                        continue;
 
                acpi_handle_debug(handle,
index f22f23933063b7e57072bb8606b849be74c1639b..ef9ee8bbe4e6ca166b605654963f52b72c855c3c 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <linux/acpi.h>
 #include <linux/dmi.h>
+#include <linux/platform_device.h>
 #include <asm/cpu_device_id.h>
 #include <asm/intel-family.h>
 #include "../internal.h"
@@ -160,3 +161,113 @@ bool force_storage_d3(void)
 {
        return x86_match_cpu(storage_d3_cpu_ids);
 }
+
+#if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
+/*
+ * x86 ACPI boards which ship with only Android as their factory image usually
+ * declare a whole bunch of bogus I2C devices in their ACPI tables and sometimes
+ * there are issues with serdev devices on these boards too, e.g. the resource
+ * points to the wrong serdev_controller.
+ *
+ * Instantiating I2C / serdev devs for these bogus devs causes various issues,
+ * e.g. GPIO/IRQ resource conflicts because sometimes drivers do bind to them.
+ * The Android x86 kernel fork shipped on these devices has some special code
+ * to remove the bogus I2C clients (and AFAICT serdevs are ignored completely).
+ *
+ * The acpi_quirk_skip_*_enumeration() functions below are used by the I2C or
+ * serdev code to skip instantiating any I2C or serdev devs on broken boards.
+ *
+ * In case of I2C an exception is made for HIDs on the i2c_acpi_known_good_ids
+ * list. These are known to always be correct (and in case of the audio-codecs
+ * the drivers heavily rely on the codec being enumerated through ACPI).
+ *
+ * Note these boards typically do actually have I2C and serdev devices,
+ * just different ones then the ones described in their DSDT. The devices
+ * which are actually present are manually instantiated by the
+ * drivers/platform/x86/x86-android-tablets.c kernel module.
+ */
+#define ACPI_QUIRK_SKIP_I2C_CLIENTS                            BIT(0)
+#define ACPI_QUIRK_UART1_TTY_UART2_SKIP                                BIT(1)
+
+static const struct dmi_system_id acpi_skip_serial_bus_enumeration_ids[] = {
+       {
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ME176C"),
+               },
+               .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+                                       ACPI_QUIRK_UART1_TTY_UART2_SKIP),
+       },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
+               },
+               .driver_data = (void *)ACPI_QUIRK_SKIP_I2C_CLIENTS,
+       },
+       {
+               /* Whitelabel (sold as various brands) TM800A550L */
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+                       DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+                       /* Above strings are too generic, also match on BIOS version */
+                       DMI_MATCH(DMI_BIOS_VERSION, "ZY-8-BI-PX4S70VTR400-X423B-005-D"),
+               },
+               .driver_data = (void *)ACPI_QUIRK_SKIP_I2C_CLIENTS,
+       },
+       {}
+};
+
+static const struct acpi_device_id i2c_acpi_known_good_ids[] = {
+       { "10EC5640", 0 }, /* RealTek ALC5640 audio codec */
+       { "INT33F4", 0 },  /* X-Powers AXP288 PMIC */
+       { "INT33FD", 0 },  /* Intel Crystal Cove PMIC */
+       { "NPCE69A", 0 },  /* Asus Transformer keyboard dock */
+       {}
+};
+
+bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev)
+{
+       const struct dmi_system_id *dmi_id;
+       long quirks;
+
+       dmi_id = dmi_first_match(acpi_skip_serial_bus_enumeration_ids);
+       if (!dmi_id)
+               return false;
+
+       quirks = (unsigned long)dmi_id->driver_data;
+       if (!(quirks & ACPI_QUIRK_SKIP_I2C_CLIENTS))
+               return false;
+
+       return acpi_match_device_ids(adev, i2c_acpi_known_good_ids);
+}
+EXPORT_SYMBOL_GPL(acpi_quirk_skip_i2c_client_enumeration);
+
+int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip)
+{
+       struct acpi_device *adev = ACPI_COMPANION(controller_parent);
+       const struct dmi_system_id *dmi_id;
+       long quirks = 0;
+
+       *skip = false;
+
+       /* !dev_is_platform() to not match on PNP enumerated debug UARTs */
+       if (!adev || !adev->pnp.unique_id || !dev_is_platform(controller_parent))
+               return 0;
+
+       dmi_id = dmi_first_match(acpi_skip_serial_bus_enumeration_ids);
+       if (dmi_id)
+               quirks = (unsigned long)dmi_id->driver_data;
+
+       if (quirks & ACPI_QUIRK_UART1_TTY_UART2_SKIP) {
+               if (!strcmp(adev->pnp.unique_id, "1"))
+                       return -ENODEV; /* Create tty cdev instead of serdev */
+
+               if (!strcmp(adev->pnp.unique_id, "2"))
+                       *skip = true;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_quirk_skip_serdev_enumeration);
+#endif
index cffbe57a8e08637c32db0f8dede33a26c9b88277..c75fb600740cc1ee5c97598c090f658324d3e722 100644 (file)
@@ -4422,23 +4422,20 @@ static int binder_thread_release(struct binder_proc *proc,
        __release(&t->lock);
 
        /*
-        * If this thread used poll, make sure we remove the waitqueue
-        * from any epoll data structures holding it with POLLFREE.
-        * waitqueue_active() is safe to use here because we're holding
-        * the inner lock.
+        * If this thread used poll, make sure we remove the waitqueue from any
+        * poll data structures holding it.
         */
-       if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
-           waitqueue_active(&thread->wait)) {
-               wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
-       }
+       if (thread->looper & BINDER_LOOPER_STATE_POLL)
+               wake_up_pollfree(&thread->wait);
 
        binder_inner_proc_unlock(thread->proc);
 
        /*
-        * This is needed to avoid races between wake_up_poll() above and
-        * and ep_remove_waitqueue() called for other reasons (eg the epoll file
-        * descriptor being closed); ep_remove_waitqueue() holds an RCU read
-        * lock, so we can be sure it's done after calling synchronize_rcu().
+        * This is needed to avoid races between wake_up_pollfree() above and
+        * someone else removing the last entry from the queue for other reasons
+        * (e.g. ep_remove_wait_queue() being called due to an epoll file
+        * descriptor being closed).  Such other users hold an RCU read lock, so
+        * we can be sure they're done after we call synchronize_rcu().
         */
        if (thread->looper & BINDER_LOOPER_STATE_POLL)
                synchronize_rcu();
index 50b56cd0039d287267269250a04b184328e2159b..e9c7c07fd84c8f063d317aadd54e321afac327d3 100644 (file)
@@ -94,6 +94,7 @@ struct ceva_ahci_priv {
 static unsigned int ceva_ahci_read_id(struct ata_device *dev,
                                        struct ata_taskfile *tf, u16 *id)
 {
+       __le16 *__id = (__le16 *)id;
        u32 err_mask;
 
        err_mask = ata_do_dev_read_id(dev, tf, id);
@@ -103,7 +104,7 @@ static unsigned int ceva_ahci_read_id(struct ata_device *dev,
         * Since CEVA controller does not support device sleep feature, we
         * need to clear DEVSLP (bit 8) in word78 of the IDENTIFY DEVICE data.
         */
-       id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
+       __id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
 
        return 0;
 }
index 59ad8c979cb300af14a204e6560b8cdb2acc8751..aba0c67d1bd6563d34250e4b8b11114612e91dcf 100644 (file)
@@ -3920,6 +3920,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "VRFDFC22048UCHC-TE*", NULL,          ATA_HORKAGE_NODMA },
        /* Odd clown on sil3726/4726 PMPs */
        { "Config  Disk",       NULL,           ATA_HORKAGE_DISABLE },
+       /* Similar story with ASMedia 1092 */
+       { "ASMT109x- Config",   NULL,           ATA_HORKAGE_DISABLE },
 
        /* Weird ATAPI devices */
        { "TORiSAN DVD-ROM DRD-N216", NULL,     ATA_HORKAGE_MAX_SEC_128 },
index 5b78e86e345924f387ee0b0708819fb0ded4a813..b9c77885b8726ee0c43b5318602d95353397b176 100644 (file)
@@ -827,7 +827,7 @@ static ssize_t ata_scsi_lpm_show(struct device *dev,
        if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names))
                return -EINVAL;
 
-       return snprintf(buf, PAGE_SIZE, "%s\n",
+       return sysfs_emit(buf, "%s\n",
                        ata_lpm_policy_names[ap->target_lpm_policy]);
 }
 DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
index 1b84d5526d77a40ff5451e26c6d6b37cf26d94ef..313e9475507b5a7f1f7505331066e32ecf371f0c 100644 (file)
@@ -2859,8 +2859,19 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
                goto invalid_fld;
        }
 
-       if (ata_is_ncq(tf->protocol) && (cdb[2 + cdb_offset] & 0x3) == 0)
-               tf->protocol = ATA_PROT_NCQ_NODATA;
+       if ((cdb[2 + cdb_offset] & 0x3) == 0) {
+               /*
+                * When T_LENGTH is zero (No data is transferred), dir should
+                * be DMA_NONE.
+                */
+               if (scmd->sc_data_direction != DMA_NONE) {
+                       fp = 2 + cdb_offset;
+                       goto invalid_fld;
+               }
+
+               if (ata_is_ncq(tf->protocol))
+                       tf->protocol = ATA_PROT_NCQ_NODATA;
+       }
 
        /* enable LBA */
        tf->flags |= ATA_TFLAG_LBA;
index 121635aa8c00c10e8f5e42cc5855d173971cf4e7..823c88622e34a089e670a8bb9d7dc0838ced78d5 100644 (file)
@@ -55,14 +55,14 @@ static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc,
        /* Transfer multiple of 2 bytes */
        if (rw == READ) {
                if (swap)
-                       raw_insw_swapw((u16 *)data_addr, (u16 *)buf, words);
+                       raw_insw_swapw(data_addr, (u16 *)buf, words);
                else
-                       raw_insw((u16 *)data_addr, (u16 *)buf, words);
+                       raw_insw(data_addr, (u16 *)buf, words);
        } else {
                if (swap)
-                       raw_outsw_swapw((u16 *)data_addr, (u16 *)buf, words);
+                       raw_outsw_swapw(data_addr, (u16 *)buf, words);
                else
-                       raw_outsw((u16 *)data_addr, (u16 *)buf, words);
+                       raw_outsw(data_addr, (u16 *)buf, words);
        }
 
        /* Transfer trailing byte, if any. */
@@ -74,16 +74,16 @@ static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc,
 
                if (rw == READ) {
                        if (swap)
-                               raw_insw_swapw((u16 *)data_addr, (u16 *)pad, 1);
+                               raw_insw_swapw(data_addr, (u16 *)pad, 1);
                        else
-                               raw_insw((u16 *)data_addr, (u16 *)pad, 1);
+                               raw_insw(data_addr, (u16 *)pad, 1);
                        *buf = pad[0];
                } else {
                        pad[0] = *buf;
                        if (swap)
-                               raw_outsw_swapw((u16 *)data_addr, (u16 *)pad, 1);
+                               raw_outsw_swapw(data_addr, (u16 *)pad, 1);
                        else
-                               raw_outsw((u16 *)data_addr, (u16 *)pad, 1);
+                               raw_outsw(data_addr, (u16 *)pad, 1);
                }
                words++;
        }
index e5838b23c9e0a177712283e3c5df5087f4ff1da0..3b31a4f596d865f0a9a7ea9022729aa0a84f0ca9 100644 (file)
@@ -1394,6 +1394,14 @@ static int sata_fsl_init_controller(struct ata_host *host)
        return 0;
 }
 
+static void sata_fsl_host_stop(struct ata_host *host)
+{
+        struct sata_fsl_host_priv *host_priv = host->private_data;
+
+        iounmap(host_priv->hcr_base);
+        kfree(host_priv);
+}
+
 /*
  * scsi mid-layer and libata interface structures
  */
@@ -1426,6 +1434,8 @@ static struct ata_port_operations sata_fsl_ops = {
        .port_start = sata_fsl_port_start,
        .port_stop = sata_fsl_port_stop,
 
+       .host_stop      = sata_fsl_host_stop,
+
        .pmp_attach = sata_fsl_pmp_attach,
        .pmp_detach = sata_fsl_pmp_detach,
 };
@@ -1480,9 +1490,9 @@ static int sata_fsl_probe(struct platform_device *ofdev)
        host_priv->ssr_base = ssr_base;
        host_priv->csr_base = csr_base;
 
-       irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
-       if (!irq) {
-               dev_err(&ofdev->dev, "invalid irq from platform\n");
+       irq = platform_get_irq(ofdev, 0);
+       if (irq < 0) {
+               retval = irq;
                goto error_exit_with_cleanup;
        }
        host_priv->irq = irq;
@@ -1557,10 +1567,6 @@ static int sata_fsl_remove(struct platform_device *ofdev)
 
        ata_host_detach(host);
 
-       irq_dispose_mapping(host_priv->irq);
-       iounmap(host_priv->hcr_base);
-       kfree(host_priv);
-
        return 0;
 }
 
index f4d0c555de29b40b708cf1f80be19d8f4d832148..04ea92cbd9cfd47dcb1b72aa0d2cce52a63e5bc4 100644 (file)
@@ -1902,7 +1902,7 @@ int dpm_prepare(pm_message_t state)
        device_block_probing();
 
        mutex_lock(&dpm_list_mtx);
-       while (!list_empty(&dpm_list)) {
+       while (!list_empty(&dpm_list) && !error) {
                struct device *dev = to_device(dpm_list.next);
 
                get_device(dev);
index a154cab6cd989808b5cae51fd3d12506a6d52f9c..c3a36cfaa855a679f8f7d215852c8981aea3e2f7 100644 (file)
@@ -2103,7 +2103,7 @@ static int loop_control_remove(int idx)
        int ret;
 
        if (idx < 0) {
-               pr_warn("deleting an unspecified loop device is not supported.\n");
+               pr_warn_once("deleting an unspecified loop device is not supported.\n");
                return -EINVAL;
        }
                
index 8e3983e456f3cdb4c93883fe06a6f474e34d7b3b..286cf1afad7815d7e0b58815b58e7aa519d9d160 100644 (file)
@@ -1512,9 +1512,12 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
        unsigned long flags;
        struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
        struct blkfront_info *info = rinfo->dev_info;
+       unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
 
-       if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
+       if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
+               xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
                return IRQ_HANDLED;
+       }
 
        spin_lock_irqsave(&rinfo->ring_lock, flags);
  again:
@@ -1530,6 +1533,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                unsigned long id;
                unsigned int op;
 
+               eoiflag = 0;
+
                RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
                id = bret.id;
 
@@ -1646,6 +1651,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 
        spin_unlock_irqrestore(&rinfo->ring_lock, flags);
 
+       xen_irq_lateeoi(irq, eoiflag);
+
        return IRQ_HANDLED;
 
  err:
@@ -1653,6 +1660,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 
        spin_unlock_irqrestore(&rinfo->ring_lock, flags);
 
+       /* No EOI in order to avoid further interrupts. */
+
        pr_alert("%s disabled for further use\n", info->gd->disk_name);
        return IRQ_HANDLED;
 }
@@ -1692,8 +1701,8 @@ static int setup_blkring(struct xenbus_device *dev,
        if (err)
                goto fail;
 
-       err = bind_evtchn_to_irqhandler(rinfo->evtchn, blkif_interrupt, 0,
-                                       "blkif", rinfo);
+       err = bind_evtchn_to_irqhandler_lateeoi(rinfo->evtchn, blkif_interrupt,
+                                               0, "blkif", rinfo);
        if (err <= 0) {
                xenbus_dev_fatal(dev, err,
                                 "bind_evtchn_to_irqhandler failed");
index fb99e3727155b6059fe4629ffa812e2ab7a1c4a3..547e6e769546a45d3d62ab8b6de97ba0c82e7493 100644 (file)
@@ -881,7 +881,7 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
 }
 EXPORT_SYMBOL_GPL(mhi_pm_suspend);
 
-int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
+static int __mhi_pm_resume(struct mhi_controller *mhi_cntrl, bool force)
 {
        struct mhi_chan *itr, *tmp;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
@@ -898,8 +898,12 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
        if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
                return -EIO;
 
-       if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3)
-               return -EINVAL;
+       if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3) {
+               dev_warn(dev, "Resuming from non M3 state (%s)\n",
+                        TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)));
+               if (!force)
+                       return -EINVAL;
+       }
 
        /* Notify clients about exiting LPM */
        list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
@@ -940,8 +944,19 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
 
        return 0;
 }
+
+int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
+{
+       return __mhi_pm_resume(mhi_cntrl, false);
+}
 EXPORT_SYMBOL_GPL(mhi_pm_resume);
 
+int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl)
+{
+       return __mhi_pm_resume(mhi_cntrl, true);
+}
+EXPORT_SYMBOL_GPL(mhi_pm_resume_force);
+
 int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
 {
        int ret;
index 59a4896a803096bce6634d4c1a0825fb30f72a54..4c577a73170910566668c4db85a30267f5d55fff 100644 (file)
@@ -20,7 +20,7 @@
 
 #define MHI_PCI_DEFAULT_BAR_NUM 0
 
-#define MHI_POST_RESET_DELAY_MS 500
+#define MHI_POST_RESET_DELAY_MS 2000
 
 #define HEALTH_CHECK_PERIOD (HZ * 2)
 
index 6f225dddc74f4becf1054e711fac9c94410a9836..4566e730ef2b81c6dc88f623644ebf657819acde 100644 (file)
@@ -687,11 +687,11 @@ err_clk_disable:
 
 static void sunxi_rsb_hw_exit(struct sunxi_rsb *rsb)
 {
-       /* Keep the clock and PM reference counts consistent. */
-       if (pm_runtime_status_suspended(rsb->dev))
-               pm_runtime_resume(rsb->dev);
        reset_control_assert(rsb->rstc);
-       clk_disable_unprepare(rsb->clk);
+
+       /* Keep the clock and PM reference counts consistent. */
+       if (!pm_runtime_status_suspended(rsb->dev))
+               clk_disable_unprepare(rsb->clk);
 }
 
 static int __maybe_unused sunxi_rsb_runtime_suspend(struct device *dev)
index ed3c4c42fc23b7f8280394060a44006b9d5adcf4..d68d05d5d38388523b0fecfdd45cb6e32488c784 100644 (file)
@@ -281,7 +281,7 @@ agp_ioc_init(void __iomem *ioc_regs)
         return 0;
 }
 
-static int
+static int __init
 lba_find_capability(int cap)
 {
        struct _parisc_agp_info *info = &parisc_agp_info;
@@ -366,7 +366,7 @@ fail:
        return error;
 }
 
-static int
+static int __init
 find_quicksilver(struct device *dev, void *data)
 {
        struct parisc_device **lba = data;
@@ -378,7 +378,7 @@ find_quicksilver(struct device *dev, void *data)
        return 0;
 }
 
-static int
+static int __init
 parisc_agp_init(void)
 {
        extern struct sba_device *sba_list;
index deed355422f4e9561e10ac0a1d62cc42649bffde..c59265146e9c87b0bbd8e222fc557515f4b48d6a 100644 (file)
@@ -191,6 +191,8 @@ struct ipmi_user {
        struct work_struct remove_work;
 };
 
+static struct workqueue_struct *remove_work_wq;
+
 static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
        __acquires(user->release_barrier)
 {
@@ -1297,7 +1299,7 @@ static void free_user(struct kref *ref)
        struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
 
        /* SRCU cleanup must happen in task context. */
-       schedule_work(&user->remove_work);
+       queue_work(remove_work_wq, &user->remove_work);
 }
 
 static void _ipmi_destroy_user(struct ipmi_user *user)
@@ -3029,7 +3031,7 @@ cleanup_bmc_device(struct kref *ref)
         * with removing the device attributes while reading a device
         * attribute.
         */
-       schedule_work(&bmc->remove_work);
+       queue_work(remove_work_wq, &bmc->remove_work);
 }
 
 /*
@@ -3918,9 +3920,11 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
                /* We didn't find a user, deliver an error response. */
                ipmi_inc_stat(intf, unhandled_commands);
 
-               msg->data[0] = ((netfn + 1) << 2) | (msg->rsp[4] & 0x3);
-               msg->data[1] = msg->rsp[2];
-               msg->data[2] = msg->rsp[4] & ~0x3;
+               msg->data[0] = (netfn + 1) << 2;
+               msg->data[0] |= msg->rsp[2] & 0x3; /* rqLUN */
+               msg->data[1] = msg->rsp[1]; /* Addr */
+               msg->data[2] = msg->rsp[2] & ~0x3; /* rqSeq */
+               msg->data[2] |= msg->rsp[0] & 0x3; /* rsLUN */
                msg->data[3] = cmd;
                msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE;
                msg->data_size = 5;
@@ -4455,13 +4459,24 @@ return_unspecified:
                msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
                msg->rsp_size = 3;
        } else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
-               /* commands must have at least 3 bytes, responses 4. */
-               if (is_cmd && (msg->rsp_size < 3)) {
+               /* commands must have at least 4 bytes, responses 5. */
+               if (is_cmd && (msg->rsp_size < 4)) {
                        ipmi_inc_stat(intf, invalid_commands);
                        goto out;
                }
-               if (!is_cmd && (msg->rsp_size < 4))
-                       goto return_unspecified;
+               if (!is_cmd && (msg->rsp_size < 5)) {
+                       ipmi_inc_stat(intf, invalid_ipmb_responses);
+                       /* Construct a valid error response. */
+                       msg->rsp[0] = msg->data[0] & 0xfc; /* NetFN */
+                       msg->rsp[0] |= (1 << 2); /* Make it a response */
+                       msg->rsp[0] |= msg->data[2] & 3; /* rqLUN */
+                       msg->rsp[1] = msg->data[1]; /* Addr */
+                       msg->rsp[2] = msg->data[2] & 0xfc; /* rqSeq */
+                       msg->rsp[2] |= msg->data[0] & 0x3; /* rsLUN */
+                       msg->rsp[3] = msg->data[3]; /* Cmd */
+                       msg->rsp[4] = IPMI_ERR_UNSPECIFIED;
+                       msg->rsp_size = 5;
+               }
        } else if ((msg->data_size >= 2)
            && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
            && (msg->data[1] == IPMI_SEND_MSG_CMD)
@@ -5031,6 +5046,7 @@ struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
        if (rv) {
                rv->done = free_smi_msg;
                rv->user_data = NULL;
+               rv->type = IPMI_SMI_MSG_TYPE_NORMAL;
                atomic_inc(&smi_msg_inuse_count);
        }
        return rv;
@@ -5376,7 +5392,16 @@ static int ipmi_init_msghandler(void)
        if (initialized)
                goto out;
 
-       init_srcu_struct(&ipmi_interfaces_srcu);
+       rv = init_srcu_struct(&ipmi_interfaces_srcu);
+       if (rv)
+               goto out;
+
+       remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
+       if (!remove_work_wq) {
+               pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
+               rv = -ENOMEM;
+               goto out_wq;
+       }
 
        timer_setup(&ipmi_timer, ipmi_timeout, 0);
        mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
@@ -5385,6 +5410,9 @@ static int ipmi_init_msghandler(void)
 
        initialized = true;
 
+out_wq:
+       if (rv)
+               cleanup_srcu_struct(&ipmi_interfaces_srcu);
 out:
        mutex_unlock(&ipmi_interfaces_mutex);
        return rv;
@@ -5408,6 +5436,8 @@ static void __exit cleanup_ipmi(void)
        int count;
 
        if (initialized) {
+               destroy_workqueue(remove_work_wq);
+
                atomic_notifier_chain_unregister(&panic_notifier_list,
                                                 &panic_block);
 
index 0c62e578749ef9349cc0196b951ed3f443ad587b..48aab77abebf1f0d55236ec6fc223d004ec944d7 100644 (file)
@@ -1659,6 +1659,9 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
                }
        }
 
+       ssif_info->client = client;
+       i2c_set_clientdata(client, ssif_info);
+
        rv = ssif_check_and_remove(client, ssif_info);
        /* If rv is 0 and addr source is not SI_ACPI, continue probing */
        if (!rv && ssif_info->addr_source == SI_ACPI) {
@@ -1679,9 +1682,6 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
                ipmi_addr_src_to_str(ssif_info->addr_source),
                client->addr, client->adapter->name, slave_addr);
 
-       ssif_info->client = client;
-       i2c_set_clientdata(client, ssif_info);
-
        /* Now check for system interface capabilities */
        msg[0] = IPMI_NETFN_APP_REQUEST << 2;
        msg[1] = IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD;
@@ -1881,6 +1881,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
                dev_err(&ssif_info->client->dev,
                        "Unable to start IPMI SSIF: %d\n", rv);
+               i2c_set_clientdata(client, NULL);
                kfree(ssif_info);
        }
        kfree(resp);
index f467d63bbf1eefdbc937a324eb477547d6deb5c6..566ee2c78709e344f7361ce5108f4f6d1d7b7f71 100644 (file)
@@ -3418,6 +3418,14 @@ static int __clk_core_init(struct clk_core *core)
 
        clk_prepare_lock();
 
+       /*
+        * Set hw->core after grabbing the prepare_lock to synchronize with
+        * callers of clk_core_fill_parent_index() where we treat hw->core
+        * being NULL as the clk not being registered yet. This is crucial so
+        * that clks aren't parented until their parent is fully registered.
+        */
+       core->hw->core = core;
+
        ret = clk_pm_runtime_get(core);
        if (ret)
                goto unlock;
@@ -3582,8 +3590,10 @@ static int __clk_core_init(struct clk_core *core)
 out:
        clk_pm_runtime_put(core);
 unlock:
-       if (ret)
+       if (ret) {
                hlist_del_init(&core->child_node);
+               core->hw->core = NULL;
+       }
 
        clk_prepare_unlock();
 
@@ -3847,7 +3857,6 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
        core->num_parents = init->num_parents;
        core->min_rate = 0;
        core->max_rate = ULONG_MAX;
-       hw->core = core;
 
        ret = clk_core_populate_parent_map(core, init);
        if (ret)
@@ -3865,7 +3874,7 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
                goto fail_create_clk;
        }
 
-       clk_core_link_consumer(hw->core, hw->clk);
+       clk_core_link_consumer(core, hw->clk);
 
        ret = __clk_core_init(core);
        if (!ret)
index d3e905cf867d7eb9c553b772c321202cc1f1ab40..b23758083ce52d37d74393a24bc213a4e5ac23de 100644 (file)
@@ -370,7 +370,7 @@ static struct platform_driver imx8qxp_lpcg_clk_driver = {
        .probe = imx8qxp_lpcg_clk_probe,
 };
 
-builtin_platform_driver(imx8qxp_lpcg_clk_driver);
+module_platform_driver(imx8qxp_lpcg_clk_driver);
 
 MODULE_AUTHOR("Aisheng Dong <aisheng.dong@nxp.com>");
 MODULE_DESCRIPTION("NXP i.MX8QXP LPCG clock driver");
index c53a688d8ccca0bc54b6561d27816277a1273bf0..40a2efb1329be4ae998d53b38cc60f994d02bac4 100644 (file)
@@ -308,7 +308,7 @@ static struct platform_driver imx8qxp_clk_driver = {
        },
        .probe = imx8qxp_clk_probe,
 };
-builtin_platform_driver(imx8qxp_clk_driver);
+module_platform_driver(imx8qxp_clk_driver);
 
 MODULE_AUTHOR("Aisheng Dong <aisheng.dong@nxp.com>");
 MODULE_DESCRIPTION("NXP i.MX8QXP clock driver");
index eaedcceb766f91e9ba9ad5d61b23088f15cacaf5..8f65b9bdafce4f38dc2c58f681bd1e8c6d92de8f 100644 (file)
@@ -1429,6 +1429,15 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops);
 void clk_trion_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
                             const struct alpha_pll_config *config)
 {
+       /*
+        * If the bootloader left the PLL enabled it's likely that there are
+        * RCGs that will lock up if we disable the PLL below.
+        */
+       if (trion_pll_is_enabled(pll, regmap)) {
+               pr_debug("Trion PLL is already enabled, skipping configuration\n");
+               return;
+       }
+
        clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l);
        regmap_write(regmap, PLL_CAL_L_VAL(pll), TRION_PLL_CAL_VAL);
        clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha);
index b2d00b4519634614d2b6e1dfd6e1f648440e0321..45d9cca28064fb89465ceaa453ca31cac911c46b 100644 (file)
@@ -28,7 +28,7 @@ static u8 mux_get_parent(struct clk_hw *hw)
        val &= mask;
 
        if (mux->parent_map)
-               return qcom_find_src_index(hw, mux->parent_map, val);
+               return qcom_find_cfg_index(hw, mux->parent_map, val);
 
        return val;
 }
index 0932e019dd12ee9c77d5ce7f4a8b6efb92b9aef9..75f09e6e057e1a9413b63f37230d47ab348b8c01 100644 (file)
@@ -69,6 +69,18 @@ int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, u8 src)
 }
 EXPORT_SYMBOL_GPL(qcom_find_src_index);
 
+int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map, u8 cfg)
+{
+       int i, num_parents = clk_hw_get_num_parents(hw);
+
+       for (i = 0; i < num_parents; i++)
+               if (cfg == map[i].cfg)
+                       return i;
+
+       return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(qcom_find_cfg_index);
+
 struct regmap *
 qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc)
 {
index bb39a7e106d8a94950aabb25ab3885f16a421ddf..9c8f7b798d9fc92ceea235e93a68bb3e08d138de 100644 (file)
@@ -49,6 +49,8 @@ extern void
 qcom_pll_set_fsm_mode(struct regmap *m, u32 reg, u8 bias_count, u8 lock_count);
 extern int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map,
                               u8 src);
+extern int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map,
+                              u8 cfg);
 
 extern int qcom_cc_register_board_clk(struct device *dev, const char *path,
                                      const char *name, unsigned long rate);
index 543cfab7561f9248555ec8e1021b5100c23c94ec..431b55bb0d2f796544ab3245d2dde822b2ea0b1d 100644 (file)
@@ -1121,7 +1121,7 @@ static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
                .name = "gcc_sdcc1_apps_clk_src",
                .parent_data = gcc_parent_data_1,
                .num_parents = ARRAY_SIZE(gcc_parent_data_1),
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_floor_ops,
        },
 };
 
@@ -1143,7 +1143,7 @@ static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
                .name = "gcc_sdcc1_ice_core_clk_src",
                .parent_data = gcc_parent_data_0,
                .num_parents = ARRAY_SIZE(gcc_parent_data_0),
-               .ops = &clk_rcg2_floor_ops,
+               .ops = &clk_rcg2_ops,
        },
 };
 
index d52f976dc875f2986556fa493a1a8c433450110f..d5cb372f0901c0ae637de88e0025560db8db96e8 100644 (file)
@@ -543,8 +543,8 @@ static void __init of_syscon_icst_setup(struct device_node *np)
 
        regclk = icst_clk_setup(NULL, &icst_desc, name, parent_name, map, ctype);
        if (IS_ERR(regclk)) {
-               kfree(name);
                pr_err("error setting up syscon ICST clock %s\n", name);
+               kfree(name);
                return;
        }
        of_clk_add_provider(np, of_clk_src_simple_get, regclk);
index 9a04eacc4412ba169610ed496a30a9b7f0c5b9ab..1ecd52f903b8ddab27275f4ee9b60e05ebbd1787 100644 (file)
@@ -394,8 +394,13 @@ EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
 
 static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);
 
-static void erratum_set_next_event_generic(const int access, unsigned long evt,
-                                               struct clock_event_device *clk)
+/*
+ * Force the inlining of this function so that the register accesses
+ * can be themselves correctly inlined.
+ */
+static __always_inline
+void erratum_set_next_event_generic(const int access, unsigned long evt,
+                                   struct clock_event_device *clk)
 {
        unsigned long ctrl;
        u64 cval;
index 3819ef5b709894621d4f18c7ecd7145595d4d46b..3245eb0c602d24c7aaccbe915a1da518ab58b98b 100644 (file)
@@ -47,7 +47,7 @@ static int __init timer_get_base_and_rate(struct device_node *np,
                        pr_warn("pclk for %pOFn is present, but could not be activated\n",
                                np);
 
-       if (!of_property_read_u32(np, "clock-freq", rate) &&
+       if (!of_property_read_u32(np, "clock-freq", rate) ||
            !of_property_read_u32(np, "clock-frequency", rate))
                return 0;
 
index e338d2f010feb2a8978fc3ddfa22970d28aa15e5..096c3848fa415a5c6d22a14a85aa636926d3f226 100644 (file)
@@ -1004,10 +1004,9 @@ static struct kobj_type ktype_cpufreq = {
        .release        = cpufreq_sysfs_release,
 };
 
-static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
+static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
+                               struct device *dev)
 {
-       struct device *dev = get_cpu_device(cpu);
-
        if (unlikely(!dev))
                return;
 
@@ -1296,8 +1295,9 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
 
        if (policy->max_freq_req) {
                /*
-                * CPUFREQ_CREATE_POLICY notification is sent only after
-                * successfully adding max_freq_req request.
+                * Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY
+                * notification, since CPUFREQ_CREATE_POLICY notification was
+                * sent after adding max_freq_req earlier.
                 */
                blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                             CPUFREQ_REMOVE_POLICY, policy);
@@ -1391,7 +1391,7 @@ static int cpufreq_online(unsigned int cpu)
        if (new_policy) {
                for_each_cpu(j, policy->related_cpus) {
                        per_cpu(cpufreq_cpu_data, j) = policy;
-                       add_cpu_dev_symlink(policy, j);
+                       add_cpu_dev_symlink(policy, j, get_cpu_device(j));
                }
 
                policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
@@ -1565,7 +1565,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
        /* Create sysfs link on CPU registration */
        policy = per_cpu(cpufreq_cpu_data, cpu);
        if (policy)
-               add_cpu_dev_symlink(policy, cpu);
+               add_cpu_dev_symlink(policy, cpu, dev);
 
        return 0;
 }
index fa768f10635fdc8f8461ffa800556f38186fbdc0..fd29861526d6bebea82826f3b82b904bf0718bee 100644 (file)
@@ -211,6 +211,12 @@ static u32 uof_get_ae_mask(u32 obj_num)
        return adf_4xxx_fw_config[obj_num].ae_mask;
 }
 
+static u32 get_vf2pf_sources(void __iomem *pmisc_addr)
+{
+       /* For the moment do not report vf2pf sources */
+       return 0;
+}
+
 void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
 {
        hw_data->dev_class = &adf_4xxx_class;
@@ -254,6 +260,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
        hw_data->set_msix_rttable = set_msix_default_rttable;
        hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
        hw_data->enable_pfvf_comms = pfvf_comms_disabled;
+       hw_data->get_vf2pf_sources = get_vf2pf_sources;
        hw_data->disable_iov = adf_disable_sriov;
        hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
 
index f57a39ddd0635e8b830d8b51a6e0c3b69933024c..ab7fd896d2c43dd0d8635d0d2cbc544d6cfd13ab 100644 (file)
@@ -290,7 +290,7 @@ static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
        int i;
 
        table = &buffer->sg_table;
-       for_each_sg(table->sgl, sg, table->nents, i) {
+       for_each_sgtable_sg(table, sg, i) {
                struct page *page = sg_page(sg);
 
                __free_pages(page, compound_order(page));
index cd0d745eb0714e6f1c756bc2cbb09c775e7023ba..33baf1591a490590da48dfafc02a7f1b684e0818 100644 (file)
@@ -373,7 +373,7 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
                                      struct axi_dma_desc *first)
 {
        u32 priority = chan->chip->dw->hdata->priority[chan->id];
-       struct axi_dma_chan_config config;
+       struct axi_dma_chan_config config = {};
        u32 irq_mask;
        u8 lms = 0; /* Select AXI0 master for LLI fetching */
 
@@ -391,7 +391,7 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
        config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC;
        config.prior = priority;
        config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;
-       config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;
+       config.hs_sel_src = DWAXIDMAC_HS_SEL_HW;
        switch (chan->direction) {
        case DMA_MEM_TO_DEV:
                dw_axi_dma_set_byte_halfword(chan, true);
index 198f6cd8ac1be18bd35a6ef534d3e34d90336bd0..cee7aa231d7b19e4dbe0f0af91eb28a2f0989318 100644 (file)
@@ -187,17 +187,9 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
 
        /* DMA configuration */
        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
-       if (!err) {
+       if (err) {
                pci_err(pdev, "DMA mask 64 set failed\n");
                return err;
-       } else {
-               pci_err(pdev, "DMA mask 64 set failed\n");
-
-               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
-               if (err) {
-                       pci_err(pdev, "DMA mask 32 set failed\n");
-                       return err;
-               }
        }
 
        /* Data structure allocation */
index 17f2f8a31b6303638f0e29db18b980b3f8b961e4..cf2c8bc4f147aaacb6c0bd7e0f4f7c895f49fd79 100644 (file)
@@ -137,10 +137,10 @@ halt:
                        INIT_WORK(&idxd->work, idxd_device_reinit);
                        queue_work(idxd->wq, &idxd->work);
                } else {
-                       spin_lock(&idxd->dev_lock);
                        idxd->state = IDXD_DEV_HALTED;
                        idxd_wqs_quiesce(idxd);
                        idxd_wqs_unmap_portal(idxd);
+                       spin_lock(&idxd->dev_lock);
                        idxd_device_clear_state(idxd);
                        dev_err(&idxd->pdev->dev,
                                "idxd halted, need %s.\n",
index de76fb4abac24af94f143f5255ff51aa8fe93c37..83452fbbb168b156ff75f481124198564c3c21d3 100644 (file)
@@ -106,6 +106,7 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
 {
        struct idxd_desc *d, *t, *found = NULL;
        struct llist_node *head;
+       LIST_HEAD(flist);
 
        desc->completion->status = IDXD_COMP_DESC_ABORT;
        /*
@@ -120,7 +121,11 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
                                found = desc;
                                continue;
                        }
-                       list_add_tail(&desc->list, &ie->work_list);
+
+                       if (d->completion->status)
+                               list_add_tail(&d->list, &flist);
+                       else
+                               list_add_tail(&d->list, &ie->work_list);
                }
        }
 
@@ -130,6 +135,17 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
 
        if (found)
                complete_desc(found, IDXD_COMPLETE_ABORT);
+
+       /*
+        * complete_desc() will return desc to allocator and the desc can be
+        * acquired by a different process and the desc->list can be modified.
+        * Delete desc from list so the list traversing does not get corrupted
+        * by the other process.
+        */
+       list_for_each_entry_safe(d, t, &flist, list) {
+               list_del_init(&d->list);
+               complete_desc(d, IDXD_COMPLETE_NORMAL);
+       }
 }
 
 int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
index 962b6e05287b5f46f03dcb7df7a0f56cf9c7c80c..d95c421877fb7361b1da14c8ed8a93e34faea985 100644 (file)
@@ -874,4 +874,4 @@ MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver");
 MODULE_AUTHOR("Ludovic.barre <Ludovic.barre@st.com>");
 MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
-MODULE_ALIAS("platform: " DRIVER_NAME);
+MODULE_ALIAS("platform:" DRIVER_NAME);
index 041d8e32d6300551210769a339d1acafd4ad5a8d..6e56d1cef5eeec0343a65dc9688935cb691647d5 100644 (file)
@@ -4534,45 +4534,60 @@ static int udma_setup_resources(struct udma_dev *ud)
        rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
        if (IS_ERR(rm_res)) {
                bitmap_zero(ud->tchan_map, ud->tchan_cnt);
+               irq_res.sets = 1;
        } else {
                bitmap_fill(ud->tchan_map, ud->tchan_cnt);
                for (i = 0; i < rm_res->sets; i++)
                        udma_mark_resource_ranges(ud, ud->tchan_map,
                                                  &rm_res->desc[i], "tchan");
+               irq_res.sets = rm_res->sets;
        }
-       irq_res.sets = rm_res->sets;
 
        /* rchan and matching default flow ranges */
        rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
        if (IS_ERR(rm_res)) {
                bitmap_zero(ud->rchan_map, ud->rchan_cnt);
+               irq_res.sets++;
        } else {
                bitmap_fill(ud->rchan_map, ud->rchan_cnt);
                for (i = 0; i < rm_res->sets; i++)
                        udma_mark_resource_ranges(ud, ud->rchan_map,
                                                  &rm_res->desc[i], "rchan");
+               irq_res.sets += rm_res->sets;
        }
 
-       irq_res.sets += rm_res->sets;
        irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
+       if (!irq_res.desc)
+               return -ENOMEM;
        rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
-       for (i = 0; i < rm_res->sets; i++) {
-               irq_res.desc[i].start = rm_res->desc[i].start;
-               irq_res.desc[i].num = rm_res->desc[i].num;
-               irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
-               irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
+       if (IS_ERR(rm_res)) {
+               irq_res.desc[0].start = 0;
+               irq_res.desc[0].num = ud->tchan_cnt;
+               i = 1;
+       } else {
+               for (i = 0; i < rm_res->sets; i++) {
+                       irq_res.desc[i].start = rm_res->desc[i].start;
+                       irq_res.desc[i].num = rm_res->desc[i].num;
+                       irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
+                       irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
+               }
        }
        rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
-       for (j = 0; j < rm_res->sets; j++, i++) {
-               if (rm_res->desc[j].num) {
-                       irq_res.desc[i].start = rm_res->desc[j].start +
-                                       ud->soc_data->oes.udma_rchan;
-                       irq_res.desc[i].num = rm_res->desc[j].num;
-               }
-               if (rm_res->desc[j].num_sec) {
-                       irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
-                                       ud->soc_data->oes.udma_rchan;
-                       irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+       if (IS_ERR(rm_res)) {
+               irq_res.desc[i].start = 0;
+               irq_res.desc[i].num = ud->rchan_cnt;
+       } else {
+               for (j = 0; j < rm_res->sets; j++, i++) {
+                       if (rm_res->desc[j].num) {
+                               irq_res.desc[i].start = rm_res->desc[j].start +
+                                               ud->soc_data->oes.udma_rchan;
+                               irq_res.desc[i].num = rm_res->desc[j].num;
+                       }
+                       if (rm_res->desc[j].num_sec) {
+                               irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
+                                               ud->soc_data->oes.udma_rchan;
+                               irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+                       }
                }
        }
        ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
@@ -4690,14 +4705,15 @@ static int bcdma_setup_resources(struct udma_dev *ud)
                rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
                if (IS_ERR(rm_res)) {
                        bitmap_zero(ud->bchan_map, ud->bchan_cnt);
+                       irq_res.sets++;
                } else {
                        bitmap_fill(ud->bchan_map, ud->bchan_cnt);
                        for (i = 0; i < rm_res->sets; i++)
                                udma_mark_resource_ranges(ud, ud->bchan_map,
                                                          &rm_res->desc[i],
                                                          "bchan");
+                       irq_res.sets += rm_res->sets;
                }
-               irq_res.sets += rm_res->sets;
        }
 
        /* tchan ranges */
@@ -4705,14 +4721,15 @@ static int bcdma_setup_resources(struct udma_dev *ud)
                rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
                if (IS_ERR(rm_res)) {
                        bitmap_zero(ud->tchan_map, ud->tchan_cnt);
+                       irq_res.sets += 2;
                } else {
                        bitmap_fill(ud->tchan_map, ud->tchan_cnt);
                        for (i = 0; i < rm_res->sets; i++)
                                udma_mark_resource_ranges(ud, ud->tchan_map,
                                                          &rm_res->desc[i],
                                                          "tchan");
+                       irq_res.sets += rm_res->sets * 2;
                }
-               irq_res.sets += rm_res->sets * 2;
        }
 
        /* rchan ranges */
@@ -4720,47 +4737,72 @@ static int bcdma_setup_resources(struct udma_dev *ud)
                rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
                if (IS_ERR(rm_res)) {
                        bitmap_zero(ud->rchan_map, ud->rchan_cnt);
+                       irq_res.sets += 2;
                } else {
                        bitmap_fill(ud->rchan_map, ud->rchan_cnt);
                        for (i = 0; i < rm_res->sets; i++)
                                udma_mark_resource_ranges(ud, ud->rchan_map,
                                                          &rm_res->desc[i],
                                                          "rchan");
+                       irq_res.sets += rm_res->sets * 2;
                }
-               irq_res.sets += rm_res->sets * 2;
        }
 
        irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
+       if (!irq_res.desc)
+               return -ENOMEM;
        if (ud->bchan_cnt) {
                rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
-               for (i = 0; i < rm_res->sets; i++) {
-                       irq_res.desc[i].start = rm_res->desc[i].start +
-                                               oes->bcdma_bchan_ring;
-                       irq_res.desc[i].num = rm_res->desc[i].num;
+               if (IS_ERR(rm_res)) {
+                       irq_res.desc[0].start = oes->bcdma_bchan_ring;
+                       irq_res.desc[0].num = ud->bchan_cnt;
+                       i = 1;
+               } else {
+                       for (i = 0; i < rm_res->sets; i++) {
+                               irq_res.desc[i].start = rm_res->desc[i].start +
+                                                       oes->bcdma_bchan_ring;
+                               irq_res.desc[i].num = rm_res->desc[i].num;
+                       }
                }
        }
        if (ud->tchan_cnt) {
                rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
-               for (j = 0; j < rm_res->sets; j++, i += 2) {
-                       irq_res.desc[i].start = rm_res->desc[j].start +
-                                               oes->bcdma_tchan_data;
-                       irq_res.desc[i].num = rm_res->desc[j].num;
-
-                       irq_res.desc[i + 1].start = rm_res->desc[j].start +
-                                               oes->bcdma_tchan_ring;
-                       irq_res.desc[i + 1].num = rm_res->desc[j].num;
+               if (IS_ERR(rm_res)) {
+                       irq_res.desc[i].start = oes->bcdma_tchan_data;
+                       irq_res.desc[i].num = ud->tchan_cnt;
+                       irq_res.desc[i + 1].start = oes->bcdma_tchan_ring;
+                       irq_res.desc[i + 1].num = ud->tchan_cnt;
+                       i += 2;
+               } else {
+                       for (j = 0; j < rm_res->sets; j++, i += 2) {
+                               irq_res.desc[i].start = rm_res->desc[j].start +
+                                                       oes->bcdma_tchan_data;
+                               irq_res.desc[i].num = rm_res->desc[j].num;
+
+                               irq_res.desc[i + 1].start = rm_res->desc[j].start +
+                                                       oes->bcdma_tchan_ring;
+                               irq_res.desc[i + 1].num = rm_res->desc[j].num;
+                       }
                }
        }
        if (ud->rchan_cnt) {
                rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
-               for (j = 0; j < rm_res->sets; j++, i += 2) {
-                       irq_res.desc[i].start = rm_res->desc[j].start +
-                                               oes->bcdma_rchan_data;
-                       irq_res.desc[i].num = rm_res->desc[j].num;
-
-                       irq_res.desc[i + 1].start = rm_res->desc[j].start +
-                                               oes->bcdma_rchan_ring;
-                       irq_res.desc[i + 1].num = rm_res->desc[j].num;
+               if (IS_ERR(rm_res)) {
+                       irq_res.desc[i].start = oes->bcdma_rchan_data;
+                       irq_res.desc[i].num = ud->rchan_cnt;
+                       irq_res.desc[i + 1].start = oes->bcdma_rchan_ring;
+                       irq_res.desc[i + 1].num = ud->rchan_cnt;
+                       i += 2;
+               } else {
+                       for (j = 0; j < rm_res->sets; j++, i += 2) {
+                               irq_res.desc[i].start = rm_res->desc[j].start +
+                                                       oes->bcdma_rchan_data;
+                               irq_res.desc[i].num = rm_res->desc[j].num;
+
+                               irq_res.desc[i + 1].start = rm_res->desc[j].start +
+                                                       oes->bcdma_rchan_ring;
+                               irq_res.desc[i + 1].num = rm_res->desc[j].num;
+                       }
                }
        }
 
@@ -4858,39 +4900,54 @@ static int pktdma_setup_resources(struct udma_dev *ud)
        if (IS_ERR(rm_res)) {
                /* all rflows are assigned exclusively to Linux */
                bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
+               irq_res.sets = 1;
        } else {
                bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
                for (i = 0; i < rm_res->sets; i++)
                        udma_mark_resource_ranges(ud, ud->rflow_in_use,
                                                  &rm_res->desc[i], "rflow");
+               irq_res.sets = rm_res->sets;
        }
-       irq_res.sets = rm_res->sets;
 
        /* tflow ranges */
        rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
        if (IS_ERR(rm_res)) {
                /* all tflows are assigned exclusively to Linux */
                bitmap_zero(ud->tflow_map, ud->tflow_cnt);
+               irq_res.sets++;
        } else {
                bitmap_fill(ud->tflow_map, ud->tflow_cnt);
                for (i = 0; i < rm_res->sets; i++)
                        udma_mark_resource_ranges(ud, ud->tflow_map,
                                                  &rm_res->desc[i], "tflow");
+               irq_res.sets += rm_res->sets;
        }
-       irq_res.sets += rm_res->sets;
 
        irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
+       if (!irq_res.desc)
+               return -ENOMEM;
        rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
-       for (i = 0; i < rm_res->sets; i++) {
-               irq_res.desc[i].start = rm_res->desc[i].start +
-                                       oes->pktdma_tchan_flow;
-               irq_res.desc[i].num = rm_res->desc[i].num;
+       if (IS_ERR(rm_res)) {
+               irq_res.desc[0].start = oes->pktdma_tchan_flow;
+               irq_res.desc[0].num = ud->tflow_cnt;
+               i = 1;
+       } else {
+               for (i = 0; i < rm_res->sets; i++) {
+                       irq_res.desc[i].start = rm_res->desc[i].start +
+                                               oes->pktdma_tchan_flow;
+                       irq_res.desc[i].num = rm_res->desc[i].num;
+               }
        }
        rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
-       for (j = 0; j < rm_res->sets; j++, i++) {
-               irq_res.desc[i].start = rm_res->desc[j].start +
-                                       oes->pktdma_rchan_flow;
-               irq_res.desc[i].num = rm_res->desc[j].num;
+       if (IS_ERR(rm_res)) {
+               irq_res.desc[i].start = oes->pktdma_rchan_flow;
+               irq_res.desc[i].num = ud->rflow_cnt;
+       } else {
+               for (j = 0; j < rm_res->sets; j++, i++) {
+                       irq_res.desc[i].start = rm_res->desc[j].start +
+                                               oes->pktdma_rchan_flow;
+                       irq_res.desc[i].num = rm_res->desc[j].num;
+               }
        }
        ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
        kfree(irq_res.desc);
index 51201600d789b9cf6dd4e6c72b59597625b8550d..800673910b5111c837e5aa98dec0096f8848dfc9 100644 (file)
@@ -16,7 +16,6 @@ struct scpi_pm_domain {
        struct generic_pm_domain genpd;
        struct scpi_ops *ops;
        u32 domain;
-       char name[30];
 };
 
 /*
@@ -110,8 +109,13 @@ static int scpi_pm_domain_probe(struct platform_device *pdev)
 
                scpi_pd->domain = i;
                scpi_pd->ops = scpi_ops;
-               sprintf(scpi_pd->name, "%pOFn.%d", np, i);
-               scpi_pd->genpd.name = scpi_pd->name;
+               scpi_pd->genpd.name = devm_kasprintf(dev, GFP_KERNEL,
+                                                    "%pOFn.%d", np, i);
+               if (!scpi_pd->genpd.name) {
+                       dev_err(dev, "Failed to allocate genpd name:%pOFn.%d\n",
+                               np, i);
+                       continue;
+               }
                scpi_pd->genpd.power_off = scpi_pd_power_off;
                scpi_pd->genpd.power_on = scpi_pd_power_on;
 
index 6d66fe03fb6afbbd3c6dd74236f24abee92eea8e..fd89899aeeed9ccbbba6ab5e96e202af55f65077 100644 (file)
@@ -77,13 +77,14 @@ static const char *get_filename(struct tegra_bpmp *bpmp,
        const char *root_path, *filename = NULL;
        char *root_path_buf;
        size_t root_len;
+       size_t root_path_buf_len = 512;
 
-       root_path_buf = kzalloc(512, GFP_KERNEL);
+       root_path_buf = kzalloc(root_path_buf_len, GFP_KERNEL);
        if (!root_path_buf)
                goto out;
 
        root_path = dentry_path(bpmp->debugfs_mirror, root_path_buf,
-                               sizeof(root_path_buf));
+                               root_path_buf_len);
        if (IS_ERR(root_path))
                goto out;
 
index 026903e3ef54357914feced67373c86687c44a82..08b9e2cf4f2d629034c39ee466df34901666f302 100644 (file)
@@ -46,6 +46,7 @@
 struct dln2_gpio {
        struct platform_device *pdev;
        struct gpio_chip gpio;
+       struct irq_chip irqchip;
 
        /*
         * Cache pin direction to save us one transfer, since the hardware has
@@ -383,15 +384,6 @@ static void dln2_irq_bus_unlock(struct irq_data *irqd)
        mutex_unlock(&dln2->irq_lock);
 }
 
-static struct irq_chip dln2_gpio_irqchip = {
-       .name = "dln2-irq",
-       .irq_mask = dln2_irq_mask,
-       .irq_unmask = dln2_irq_unmask,
-       .irq_set_type = dln2_irq_set_type,
-       .irq_bus_lock = dln2_irq_bus_lock,
-       .irq_bus_sync_unlock = dln2_irq_bus_unlock,
-};
-
 static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
                            const void *data, int len)
 {
@@ -473,8 +465,15 @@ static int dln2_gpio_probe(struct platform_device *pdev)
        dln2->gpio.direction_output = dln2_gpio_direction_output;
        dln2->gpio.set_config = dln2_gpio_set_config;
 
+       dln2->irqchip.name = "dln2-irq",
+       dln2->irqchip.irq_mask = dln2_irq_mask,
+       dln2->irqchip.irq_unmask = dln2_irq_unmask,
+       dln2->irqchip.irq_set_type = dln2_irq_set_type,
+       dln2->irqchip.irq_bus_lock = dln2_irq_bus_lock,
+       dln2->irqchip.irq_bus_sync_unlock = dln2_irq_bus_unlock,
+
        girq = &dln2->gpio.irq;
-       girq->chip = &dln2_gpio_irqchip;
+       girq->chip = &dln2->irqchip;
        /* The event comes from the outside so no parent handler */
        girq->parent_handler = NULL;
        girq->num_parents = 0;
index 84f96b78f32af34d9ce45a2172a1953887b9c65c..9f4941bc57604bdacfd4e2605ec760965e2589e0 100644 (file)
@@ -100,11 +100,7 @@ static int _virtio_gpio_req(struct virtio_gpio *vgpio, u16 type, u16 gpio,
        virtqueue_kick(vgpio->request_vq);
        mutex_unlock(&vgpio->lock);
 
-       if (!wait_for_completion_timeout(&line->completion, HZ)) {
-               dev_err(dev, "GPIO operation timed out\n");
-               ret = -ETIMEDOUT;
-               goto out;
-       }
+       wait_for_completion(&line->completion);
 
        if (unlikely(res->status != VIRTIO_GPIO_STATUS_OK)) {
                dev_err(dev, "GPIO request failed: %d\n", gpio);
index 71a6a9ef54ac79938c85db54a92efc287bf1207f..6348559608ce78bb5febda33b4cc69f9b99d9b12 100644 (file)
@@ -1396,7 +1396,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
        struct sg_table *sg = NULL;
        uint64_t user_addr = 0;
        struct amdgpu_bo *bo;
-       struct drm_gem_object *gobj;
+       struct drm_gem_object *gobj = NULL;
        u32 domain, alloc_domain;
        u64 alloc_flags;
        int ret;
@@ -1506,14 +1506,16 @@ allocate_init_user_pages_failed:
        remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
        drm_vma_node_revoke(&gobj->vma_node, drm_priv);
 err_node_allow:
-       drm_gem_object_put(gobj);
        /* Don't unreserve system mem limit twice */
        goto err_reserve_limit;
 err_bo_create:
        unreserve_mem_limit(adev, size, alloc_domain, !!sg);
 err_reserve_limit:
        mutex_destroy(&(*mem)->lock);
-       kfree(*mem);
+       if (gobj)
+               drm_gem_object_put(gobj);
+       else
+               kfree(*mem);
 err:
        if (sg) {
                sg_free_table(sg);
index d94fa748e6bbe6967f7452a999cefb2d70336bd4..1e651b9591419e340031ae52c20737de79905ed7 100644 (file)
@@ -3833,7 +3833,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
        /* disable all interrupts */
        amdgpu_irq_disable_all(adev);
        if (adev->mode_info.mode_config_initialized){
-               if (!amdgpu_device_has_dc_support(adev))
+               if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
                        drm_helper_force_disable_all(adev_to_drm(adev));
                else
                        drm_atomic_helper_shutdown(adev_to_drm(adev));
@@ -4289,6 +4289,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 {
        int r;
 
+       amdgpu_amdkfd_pre_reset(adev);
+
        if (from_hypervisor)
                r = amdgpu_virt_request_full_gpu(adev, true);
        else
@@ -4316,6 +4318,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 
        amdgpu_irq_gpu_reset_resume_helper(adev);
        r = amdgpu_ib_ring_tests(adev);
+       amdgpu_amdkfd_post_reset(adev);
 
 error:
        if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
@@ -5030,7 +5033,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
                cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
 
-               amdgpu_amdkfd_pre_reset(tmp_adev);
+               if (!amdgpu_sriov_vf(tmp_adev))
+                       amdgpu_amdkfd_pre_reset(tmp_adev);
 
                /*
                 * Mark these ASICs to be reseted as untracked first
@@ -5129,7 +5133,7 @@ skip_hw_reset:
                        drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
                }
 
-               if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
+               if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
                        drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
                }
 
@@ -5148,9 +5152,9 @@ skip_hw_reset:
 
 skip_sched_resume:
        list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
-               /* unlock kfd */
-               if (!need_emergency_restart)
-                       amdgpu_amdkfd_post_reset(tmp_adev);
+               /* unlock kfd: SRIOV would do it separately */
+               if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
+                       amdgpu_amdkfd_post_reset(tmp_adev);
 
                /* kfd_post_reset will do nothing if kfd device is not initialized,
                 * need to bring up kfd here if it's not be initialized before
index 503995c7ff6c1e6acd85eef8f3d298918e70c490..ea00090b3fb36f93e65c3c1d60c93a591c328eaa 100644 (file)
@@ -157,6 +157,8 @@ static int hw_id_map[MAX_HWIP] = {
        [HDP_HWIP]      = HDP_HWID,
        [SDMA0_HWIP]    = SDMA0_HWID,
        [SDMA1_HWIP]    = SDMA1_HWID,
+       [SDMA2_HWIP]    = SDMA2_HWID,
+       [SDMA3_HWIP]    = SDMA3_HWID,
        [MMHUB_HWIP]    = MMHUB_HWID,
        [ATHUB_HWIP]    = ATHUB_HWID,
        [NBIO_HWIP]     = NBIF_HWID,
@@ -918,6 +920,7 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
                case IP_VERSION(3, 0, 64):
                case IP_VERSION(3, 1, 1):
                case IP_VERSION(3, 0, 2):
+               case IP_VERSION(3, 0, 192):
                        amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
                        if (!amdgpu_sriov_vf(adev))
                                amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
index 4f7c70845785a9ed20efd1f5f78ab85e6a63d043..585961c2f5f27c34ccaf15d68a10a57367c1265a 100644 (file)
@@ -135,6 +135,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
                break;
        case IP_VERSION(3, 0, 0):
        case IP_VERSION(3, 0, 64):
+       case IP_VERSION(3, 0, 192):
                if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
                        fw_name = FIRMWARE_SIENNA_CICHLID;
                else
index ce982afeff913e7ac064954a0a384eb06a5512f7..ac9a8cd21c4b64b2c90d51c830f3f21d9589d2f3 100644 (file)
@@ -504,8 +504,8 @@ static int amdgpu_vkms_sw_fini(void *handle)
        int i = 0;
 
        for (i = 0; i < adev->mode_info.num_crtc; i++)
-               if (adev->mode_info.crtcs[i])
-                       hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer);
+               if (adev->amdgpu_vkms_output[i].vblank_hrtimer.function)
+                       hrtimer_cancel(&adev->amdgpu_vkms_output[i].vblank_hrtimer);
 
        kfree(adev->mode_info.bios_hardcoded_edid);
        kfree(adev->amdgpu_vkms_output);
index 34478bcc4d095cd94bd6e23d1e5fbab14cb799dd..edb3e3b08eed8fc6f241d7934d8db133d082ab5c 100644 (file)
@@ -3070,8 +3070,8 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
                              AMD_PG_SUPPORT_CP |
                              AMD_PG_SUPPORT_GDS |
                              AMD_PG_SUPPORT_RLC_SMU_HS)) {
-               WREG32(mmRLC_JUMP_TABLE_RESTORE,
-                      adev->gfx.rlc.cp_table_gpu_addr >> 8);
+               WREG32_SOC15(GC, 0, mmRLC_JUMP_TABLE_RESTORE,
+                            adev->gfx.rlc.cp_table_gpu_addr >> 8);
                gfx_v9_0_init_gfx_power_gating(adev);
        }
 }
@@ -4060,9 +4060,10 @@ static int gfx_v9_0_hw_fini(void *handle)
 
        gfx_v9_0_cp_enable(adev, false);
 
-       /* Skip suspend with A+A reset */
-       if (adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) {
-               dev_dbg(adev->dev, "Device in reset. Skipping RLC halt\n");
+       /* Skip stopping RLC with A+A reset or when RLC controls GFX clock */
+       if ((adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) ||
+           (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2))) {
+               dev_dbg(adev->dev, "Skipping RLC halt\n");
                return 0;
        }
 
index 480e41847d7c0b199b1cbf30f6f7c9978dd26bc0..ec4d5e15b766a3effca3e93d4109803b0ba948b0 100644 (file)
@@ -162,7 +162,6 @@ static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
                            ENABLE_ADVANCED_DRIVER_MODEL, 1);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
                            SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
-       tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
                            MTYPE, MTYPE_UC);/* XXX for emulation. */
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
index 14c1c1a297dd3d75c3f585d65e0afa497087e849..6e0ace2fbfab14092d7fa7cd413bb6580e562f4c 100644 (file)
@@ -196,7 +196,6 @@ static void gfxhub_v2_0_init_tlb_regs(struct amdgpu_device *adev)
                            ENABLE_ADVANCED_DRIVER_MODEL, 1);
        tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
                            SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
-       tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
        tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
                            MTYPE, MTYPE_UC); /* UC, uncached */
 
index e80d1dc4307909309a54c429548e8f3ca211f670..b4eddf6e98a6a23c861af2d4999d0c220d4a1e06 100644 (file)
@@ -197,7 +197,6 @@ static void gfxhub_v2_1_init_tlb_regs(struct amdgpu_device *adev)
                            ENABLE_ADVANCED_DRIVER_MODEL, 1);
        tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
                            SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
-       tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
        tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
                            MTYPE, MTYPE_UC); /* UC, uncached */
 
index cb82404df5342a640d0124271e578a878dfa1368..d84523cf5f75966276f318748b69c426ea827912 100644 (file)
@@ -1808,6 +1808,14 @@ static int gmc_v9_0_hw_fini(void *handle)
                return 0;
        }
 
+       /*
+        * Pair the operations did in gmc_v9_0_hw_init and thus maintain
+        * a correct cached state for GMC. Otherwise, the "gate" again
+        * operation on S3 resuming will fail due to wrong cached state.
+        */
+       if (adev->mmhub.funcs->update_power_gating)
+               adev->mmhub.funcs->update_power_gating(adev, false);
+
        amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
        amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
 
index a99953833820ea4c48e21a163794ad0131de5a35..1da2ec692057ee98445620a24b1c9953604fbe2c 100644 (file)
@@ -145,7 +145,6 @@ static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
                            ENABLE_ADVANCED_DRIVER_MODEL, 1);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
                            SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
-       tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
                            MTYPE, MTYPE_UC);/* XXX for emulation. */
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
@@ -302,10 +301,10 @@ static void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
        if (amdgpu_sriov_vf(adev))
                return;
 
-       if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB) {
-               amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true);
-
-       }
+       if (adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
+               amdgpu_dpm_set_powergating_by_smu(adev,
+                                                 AMD_IP_BLOCK_TYPE_GMC,
+                                                 enable);
 }
 
 static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
index f80a14a1b82dc274549eabef38942cf107bf91d5..f5f7181f9af5fd1302365a4522b573576b701b13 100644 (file)
@@ -165,7 +165,6 @@ static void mmhub_v1_7_init_tlb_regs(struct amdgpu_device *adev)
                            ENABLE_ADVANCED_DRIVER_MODEL, 1);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
                            SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
-       tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
                            MTYPE, MTYPE_UC);/* XXX for emulation. */
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
index 25f8e93e5ec3757828ffa1629c5bea813e7b63d3..3718ff610ab286e97045b42407aaf519258be3ea 100644 (file)
@@ -267,7 +267,6 @@ static void mmhub_v2_0_init_tlb_regs(struct amdgpu_device *adev)
                            ENABLE_ADVANCED_DRIVER_MODEL, 1);
        tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
                            SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
-       tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
        tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
                            MTYPE, MTYPE_UC); /* UC, uncached */
 
index a11d60ec63215f13af9975fe879a818bf45d3059..9e16da28505afa5478b40793cfc6de049bd0e74f 100644 (file)
@@ -194,7 +194,6 @@ static void mmhub_v2_3_init_tlb_regs(struct amdgpu_device *adev)
                            ENABLE_ADVANCED_DRIVER_MODEL, 1);
        tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
                            SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
-       tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
        tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
                            MTYPE, MTYPE_UC); /* UC, uncached */
 
index c4ef822bbe8c56dcc94670aab5942a768b1579ce..ff49eeaf78824534c5593614ed1e93cafe8c4fa3 100644 (file)
@@ -189,8 +189,6 @@ static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid)
                            ENABLE_ADVANCED_DRIVER_MODEL, 1);
        tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
                            SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
-       tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
-                           ECO_BITS, 0);
        tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
                            MTYPE, MTYPE_UC);/* XXX for emulation. */
        tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
index a6659d9ecdd220a212ebb80ee7203c214cf28548..2ec1ffb36b1fc54db2b840b2498a6963d1a36a69 100644 (file)
@@ -183,6 +183,7 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
        switch (adev->ip_versions[UVD_HWIP][0]) {
        case IP_VERSION(3, 0, 0):
        case IP_VERSION(3, 0, 64):
+       case IP_VERSION(3, 0, 192):
                if (amdgpu_sriov_vf(adev)) {
                        if (encode)
                                *codecs = &sriov_sc_video_codecs_encode;
index 58b89b53ebe617c8dbab7dd95cac550cb82a8d5f..3cb4681c5f539abe9075df9a3c237dc62a3ac313 100644 (file)
@@ -1574,7 +1574,6 @@ retry_flush_work:
 static void svm_range_restore_work(struct work_struct *work)
 {
        struct delayed_work *dwork = to_delayed_work(work);
-       struct amdkfd_process_info *process_info;
        struct svm_range_list *svms;
        struct svm_range *prange;
        struct kfd_process *p;
@@ -1594,12 +1593,10 @@ static void svm_range_restore_work(struct work_struct *work)
         * the lifetime of this thread, kfd_process and mm will be valid.
         */
        p = container_of(svms, struct kfd_process, svms);
-       process_info = p->kgd_process_info;
        mm = p->mm;
        if (!mm)
                return;
 
-       mutex_lock(&process_info->lock);
        svm_range_list_lock_and_flush_work(svms, mm);
        mutex_lock(&svms->lock);
 
@@ -1652,7 +1649,6 @@ static void svm_range_restore_work(struct work_struct *work)
 out_reschedule:
        mutex_unlock(&svms->lock);
        mmap_write_unlock(mm);
-       mutex_unlock(&process_info->lock);
 
        /* If validation failed, reschedule another attempt */
        if (evicted_ranges) {
@@ -2614,6 +2610,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
 
        if (atomic_read(&svms->drain_pagefaults)) {
                pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
+               r = 0;
                goto out;
        }
 
@@ -2623,6 +2620,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
        mm = get_task_mm(p->lead_thread);
        if (!mm) {
                pr_debug("svms 0x%p failed to get mm\n", svms);
+               r = 0;
                goto out;
        }
 
@@ -2660,6 +2658,7 @@ retry_write_locked:
 
        if (svm_range_skip_recover(prange)) {
                amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
+               r = 0;
                goto out_unlock_range;
        }
 
@@ -2668,6 +2667,7 @@ retry_write_locked:
        if (timestamp < AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING) {
                pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
                         svms, prange->start, prange->last);
+               r = 0;
                goto out_unlock_range;
        }
 
@@ -3177,7 +3177,6 @@ static int
 svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
                   uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
 {
-       struct amdkfd_process_info *process_info = p->kgd_process_info;
        struct mm_struct *mm = current->mm;
        struct list_head update_list;
        struct list_head insert_list;
@@ -3196,8 +3195,6 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
 
        svms = &p->svms;
 
-       mutex_lock(&process_info->lock);
-
        svm_range_list_lock_and_flush_work(svms, mm);
 
        r = svm_range_is_valid(p, start, size);
@@ -3273,8 +3270,6 @@ out_unlock_range:
        mutex_unlock(&svms->lock);
        mmap_read_unlock(mm);
 out:
-       mutex_unlock(&process_info->lock);
-
        pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
                 &p->svms, start, start + size - 1, r);
 
index 1cd6b9f4a568c889d564cb43f4041fb8e0884147..e727f1dd2a9a7ca8840d4c427e31aab299c244bd 100644 (file)
@@ -1051,6 +1051,11 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
                return 0;
        }
 
+       /* Reset DMCUB if it was previously running - before we overwrite its memory. */
+       status = dmub_srv_hw_reset(dmub_srv);
+       if (status != DMUB_STATUS_OK)
+               DRM_WARN("Error resetting DMUB HW: %d\n", status);
+
        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
 
        fw_inst_const = dmub_fw->data +
@@ -2576,7 +2581,8 @@ static int dm_resume(void *handle)
                 */
                link_enc_cfg_init(dm->dc, dc_state);
 
-               amdgpu_dm_outbox_init(adev);
+               if (dc_enable_dmub_notifications(adev->dm.dc))
+                       amdgpu_dm_outbox_init(adev);
 
                r = dm_dmub_hw_init(adev);
                if (r)
@@ -2625,6 +2631,10 @@ static int dm_resume(void *handle)
        /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
        dc_resource_state_construct(dm->dc, dm_state->context);
 
+       /* Re-enable outbox interrupts for DPIA. */
+       if (dc_enable_dmub_notifications(adev->dm.dc))
+               amdgpu_dm_outbox_init(adev);
+
        /* Before powering on DC we need to re-initialize DMUB. */
        r = dm_dmub_hw_init(adev);
        if (r)
index cce062adc439149e3a808c920110f8409ab549f2..8a441a22c46ec7493910f7f6ead1cad05b14c778 100644 (file)
@@ -314,6 +314,14 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
                        ret = -EINVAL;
                        goto cleanup;
                }
+
+               if ((aconn->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
+                               (aconn->base.connector_type != DRM_MODE_CONNECTOR_eDP)) {
+                       DRM_DEBUG_DRIVER("No DP connector available for CRC source\n");
+                       ret = -EINVAL;
+                       goto cleanup;
+               }
+
        }
 
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
index 32a5ce09a62a9bb373b318f881e649250b951e99..cc34a35d0bcbfe2d9c6db547a160ffa7c0bba5e9 100644 (file)
@@ -36,6 +36,8 @@
 #include "dm_helpers.h"
 
 #include "dc_link_ddc.h"
+#include "ddc_service_types.h"
+#include "dpcd_defs.h"
 
 #include "i2caux_interface.h"
 #include "dmub_cmd.h"
@@ -157,6 +159,16 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
 };
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
+static bool needs_dsc_aux_workaround(struct dc_link *link)
+{
+       if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+           (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) &&
+           link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2)
+               return true;
+
+       return false;
+}
+
 static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
 {
        struct dc_sink *dc_sink = aconnector->dc_sink;
@@ -166,7 +178,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
        u8 *dsc_branch_dec_caps = NULL;
 
        aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
-#if defined(CONFIG_HP_HOOK_WORKAROUND)
+
        /*
         * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs
         * because it only check the dsc/fec caps of the "port variable" and not the dock
@@ -176,10 +188,10 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
         * Workaround: explicitly check the use case above and use the mst dock's aux as dsc_aux
         *
         */
-
-       if (!aconnector->dsc_aux && !port->parent->port_parent)
+       if (!aconnector->dsc_aux && !port->parent->port_parent &&
+           needs_dsc_aux_workaround(aconnector->dc_link))
                aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux;
-#endif
+
        if (!aconnector->dsc_aux)
                return false;
 
index 60544788e911ee15969e0cefa1aeb630cf1f3f44..c8457babfdea428b57a6bd5084f61cd0fa5208a4 100644 (file)
@@ -758,6 +758,18 @@ static bool detect_dp(struct dc_link *link,
                        dal_ddc_service_set_transaction_type(link->ddc,
                                                             sink_caps->transaction_type);
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+                       /* Apply work around for tunneled MST on certain USB4 docks. Always use DSC if dock
+                        * reports DSC support.
+                        */
+                       if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+                                       link->type == dc_connection_mst_branch &&
+                                       link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+                                       link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT &&
+                                       !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around)
+                               link->wa_flags.dpia_mst_dsc_always_on = true;
+#endif
+
 #if defined(CONFIG_DRM_AMD_DC_HDCP)
                        /* In case of fallback to SST when topology discovery below fails
                         * HDCP caps will be querried again later by the upper layer (caller
@@ -1203,6 +1215,10 @@ static bool dc_link_detect_helper(struct dc_link *link,
                        LINK_INFO("link=%d, mst branch is now Disconnected\n",
                                  link->link_index);
 
+                       /* Disable work around which keeps DSC on for tunneled MST on certain USB4 docks. */
+                       if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+                               link->wa_flags.dpia_mst_dsc_always_on = false;
+
                        dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
 
                        link->mst_stream_alloc_table.stream_count = 0;
index cb7bf9148904edb02534b01db8ca1e8f90b966c8..13bc69d6b6791c4616131467f940c5c0e6272eaa 100644 (file)
@@ -2138,7 +2138,7 @@ static enum link_training_result dp_perform_8b_10b_link_training(
                }
 
                for (lane = 0; lane < (uint8_t)lt_settings->link_settings.lane_count; lane++)
-                       lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET = VOLTAGE_SWING_LEVEL0;
+                       lt_settings->dpcd_lane_settings[lane].raw = 0;
        }
 
        if (status == LINK_TRAINING_SUCCESS) {
index c32fdccd4d925c96b4f1bda165453d29a0340feb..e2d9a46d0e1ad4ccf10cf5160fb070e6fffd170a 100644 (file)
@@ -1664,6 +1664,10 @@ bool dc_is_stream_unchanged(
        if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
                return false;
 
+       // Only Have Audio left to check whether it is same or not. This is a corner case for Tiled sinks
+       if (old_stream->audio_info.mode_count != stream->audio_info.mode_count)
+               return false;
+
        return true;
 }
 
@@ -2252,16 +2256,6 @@ enum dc_status dc_validate_global_state(
 
        if (!new_ctx)
                return DC_ERROR_UNEXPECTED;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-
-       /*
-        * Update link encoder to stream assignment.
-        * TODO: Split out reason allocation from validation.
-        */
-       if (dc->res_pool->funcs->link_encs_assign && fast_validate == false)
-               dc->res_pool->funcs->link_encs_assign(
-                       dc, new_ctx, new_ctx->streams, new_ctx->stream_count);
-#endif
 
        if (dc->res_pool->funcs->validate_global) {
                result = dc->res_pool->funcs->validate_global(dc, new_ctx);
@@ -2313,6 +2307,16 @@ enum dc_status dc_validate_global_state(
                if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate))
                        result = DC_FAIL_BANDWIDTH_VALIDATE;
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+       /*
+        * Only update link encoder to stream assignment after bandwidth validation passed.
+        * TODO: Split out assignment and validation.
+        */
+       if (result == DC_OK && dc->res_pool->funcs->link_encs_assign && fast_validate == false)
+               dc->res_pool->funcs->link_encs_assign(
+                       dc, new_ctx, new_ctx->streams, new_ctx->stream_count);
+#endif
+
        return result;
 }
 
index 3aac3f4a28525623382f21dd934dd39851baf1af..618e7989176fc86c964aa35a8df5d926b13b6c2f 100644 (file)
@@ -508,7 +508,8 @@ union dpia_debug_options {
                uint32_t disable_dpia:1;
                uint32_t force_non_lttpr:1;
                uint32_t extend_aux_rd_interval:1;
-               uint32_t reserved:29;
+               uint32_t disable_mst_dsc_work_around:1;
+               uint32_t reserved:28;
        } bits;
        uint32_t raw;
 };
index 180ecd860296b250fe40f6d040d6a2a33bb88817..fad3d883ed891c14e50f110f28893fd70dec9902 100644 (file)
@@ -191,6 +191,8 @@ struct dc_link {
                bool dp_skip_DID2;
                bool dp_skip_reset_segment;
                bool dp_mot_reset_segment;
+               /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */
+               bool dpia_mst_dsc_always_on;
        } wa_flags;
        struct link_mst_stream_allocation_table mst_stream_alloc_table;
 
@@ -224,6 +226,8 @@ static inline void get_edp_links(const struct dc *dc,
        *edp_num = 0;
        for (i = 0; i < dc->link_count; i++) {
                // report any eDP links, even unconnected DDI's
+               if (!dc->links[i])
+                       continue;
                if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) {
                        edp_links[*edp_num] = dc->links[i];
                        if (++(*edp_num) == MAX_NUM_EDP)
index 05335a8c3c2dcffe54bb55bfebd9ba2a126e01a9..4f6e639e9353619e837aaf86d0dd36fc3461a38b 100644 (file)
@@ -101,6 +101,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
        .z10_restore = dcn31_z10_restore,
        .z10_save_init = dcn31_z10_save_init,
        .set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
+       .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
        .update_visual_confirm_color = dcn20_update_visual_confirm_color,
 };
 
index 8d796ed3b7d16f40369363969499caf7bbb1bf94..619f8d305292045ecd41ee6f0e04e81e4d90942c 100644 (file)
@@ -1328,7 +1328,12 @@ static int pp_set_powergating_by_smu(void *handle,
                pp_dpm_powergate_vce(handle, gate);
                break;
        case AMD_IP_BLOCK_TYPE_GMC:
-               pp_dpm_powergate_mmhub(handle);
+               /*
+                * For now, this is only used on PICASSO.
+                * And only "gate" operation is supported.
+                */
+               if (gate)
+                       pp_dpm_powergate_mmhub(handle);
                break;
        case AMD_IP_BLOCK_TYPE_GFX:
                ret = pp_dpm_powergate_gfx(handle, gate);
index 01168b8955bff3ce80b1c7a6e6df04b050a9bcab..8a3244585d809372e2b179ae1d906a2a55005740 100644 (file)
@@ -1468,7 +1468,7 @@ static int smu_disable_dpms(struct smu_context *smu)
                        dev_err(adev->dev, "Failed to disable smu features.\n");
        }
 
-       if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0) &&
+       if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) &&
            adev->gfx.rlc.funcs->stop)
                adev->gfx.rlc.funcs->stop(adev);
 
index d60b8c5e871575b2ea617e1c7c471f12b9020f38..43028f2cd28b5e57cda8d2a78294022c025b5318 100644 (file)
@@ -191,6 +191,9 @@ int smu_v12_0_fini_smc_tables(struct smu_context *smu)
        kfree(smu_table->watermarks_table);
        smu_table->watermarks_table = NULL;
 
+       kfree(smu_table->gpu_metrics_table);
+       smu_table->gpu_metrics_table = NULL;
+
        return 0;
 }
 
index 35145db6eedfc9e23df68d2ff71c11bd5a7e04d7..19a5d2c39c8d8ac170567b4a323db36ccc6f9e28 100644 (file)
@@ -198,6 +198,7 @@ int smu_v13_0_check_fw_status(struct smu_context *smu)
 
 int smu_v13_0_check_fw_version(struct smu_context *smu)
 {
+       struct amdgpu_device *adev = smu->adev;
        uint32_t if_version = 0xff, smu_version = 0xff;
        uint16_t smu_major;
        uint8_t smu_minor, smu_debug;
@@ -210,6 +211,8 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
        smu_major = (smu_version >> 16) & 0xffff;
        smu_minor = (smu_version >> 8) & 0xff;
        smu_debug = (smu_version >> 0) & 0xff;
+       if (smu->is_apu)
+               adev->pm.fw_version = smu_version;
 
        switch (smu->adev->ip_versions[MP1_HWIP][0]) {
        case IP_VERSION(13, 0, 2):
index 1e30eaeb0e1b3a75d33eea41ad508d686284c4d1..d5c98f79d58d337f454bdf566e7d5738885554ca 100644 (file)
@@ -1121,7 +1121,10 @@ static void ast_crtc_reset(struct drm_crtc *crtc)
        if (crtc->state)
                crtc->funcs->atomic_destroy_state(crtc, crtc->state);
 
-       __drm_atomic_helper_crtc_reset(crtc, &ast_state->base);
+       if (ast_state)
+               __drm_atomic_helper_crtc_reset(crtc, &ast_state->base);
+       else
+               __drm_atomic_helper_crtc_reset(crtc, NULL);
 }
 
 static struct drm_crtc_state *
index 8e7a124d6c5a3ae27f18c268af7d02e4fa952b77..22bf690910b253be6322ae506bbaa4872d534735 100644 (file)
@@ -1743,7 +1743,13 @@ void drm_fb_helper_fill_info(struct fb_info *info,
                               sizes->fb_width, sizes->fb_height);
 
        info->par = fb_helper;
-       snprintf(info->fix.id, sizeof(info->fix.id), "%s",
+       /*
+        * The DRM drivers fbdev emulation device name can be confusing if the
+        * driver name also has a "drm" suffix on it. Leading to names such as
+        * "simpledrmdrmfb" in /proc/fb. Unfortunately, it's an uAPI and can't
+        * be changed due user-space tools (e.g: pm-utils) matching against it.
+        */
+       snprintf(info->fix.id, sizeof(info->fix.id), "%sdrmfb",
                 fb_helper->dev->driver->name);
 
 }
index 7b9f69f21f1eda7491f230a3be176aacce3d34e9..bca0de92802efa29892a631026589043dbf88ff1 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/module.h>
 
 #ifdef CONFIG_X86
 #include <asm/set_memory.h>
index c9a9d74f338c1ca676bc7aba73e2f5f52dbfe8c2..c313a5b4549c4ea00801918a0f0e08be392c5ad8 100644 (file)
@@ -404,8 +404,17 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
 
        if (*fence) {
                ret = dma_fence_chain_find_seqno(fence, point);
-               if (!ret)
+               if (!ret) {
+                       /* If the requested seqno is already signaled
+                        * drm_syncobj_find_fence may return a NULL
+                        * fence. To make sure the recipient gets
+                        * signalled, use a new fence instead.
+                        */
+                       if (!*fence)
+                               *fence = dma_fence_get_stub();
+
                        goto out;
+               }
                dma_fence_put(*fence);
        } else {
                ret = -EINVAL;
index 39e11eaec1a3f1cccade492af13a57abcea826a6..aa7238245b0ea108a0d4bacf357b189af672e161 100644 (file)
@@ -1640,6 +1640,9 @@ struct intel_dp {
        struct intel_dp_pcon_frl frl;
 
        struct intel_psr psr;
+
+       /* When we last wrote the OUI for eDP */
+       unsigned long last_oui_write;
 };
 
 enum lspcon_vendor {
index 2dc9d632969dbca821d4ec7d2f2d5f91aa6df422..aef69522f0be3db0cc626078c6ff9e387a7cbeb8 100644 (file)
@@ -596,7 +596,7 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv,
                        continue;
 
                offset = readcount + dmc->dmc_info[id].dmc_offset * 4;
-               if (fw->size - offset < 0) {
+               if (offset > fw->size) {
                        drm_err(&dev_priv->drm, "Reading beyond the fw_size\n");
                        continue;
                }
index be883469d2fcc30299a211dafddc2f5c6c489c84..a552f05a67e58b157de60bc79110e92a2d393bf1 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/i2c.h>
 #include <linux/notifier.h>
 #include <linux/slab.h>
+#include <linux/timekeeping.h>
 #include <linux/types.h>
 
 #include <asm/byteorder.h>
@@ -1955,6 +1956,16 @@ intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
 
        if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
                drm_err(&i915->drm, "Failed to write source OUI\n");
+
+       intel_dp->last_oui_write = jiffies;
+}
+
+void intel_dp_wait_source_oui(struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+       drm_dbg_kms(&i915->drm, "Performing OUI wait\n");
+       wait_remaining_ms_from_jiffies(intel_dp->last_oui_write, 30);
 }
 
 /* If the device supports it, try to set the power state appropriately */
index ce229026dc91dccd795292de944b98ab3ea55ccc..b64145a3869a9eb5d14edbc20721415f8eb32638 100644 (file)
@@ -119,4 +119,6 @@ void intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
                                 const struct intel_crtc_state *crtc_state);
 void intel_dp_phy_test(struct intel_encoder *encoder);
 
+void intel_dp_wait_source_oui(struct intel_dp *intel_dp);
+
 #endif /* __INTEL_DP_H__ */
index 569d17b4d00f0bd4136604cd3dd7439c8d99137e..3897468140e02885ee12d6e98749984f1d0c1891 100644 (file)
@@ -36,6 +36,7 @@
 
 #include "intel_backlight.h"
 #include "intel_display_types.h"
+#include "intel_dp.h"
 #include "intel_dp_aux_backlight.h"
 
 /* TODO:
@@ -106,6 +107,8 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
        int ret;
        u8 tcon_cap[4];
 
+       intel_dp_wait_source_oui(intel_dp);
+
        ret = drm_dp_dpcd_read(aux, INTEL_EDP_HDR_TCON_CAP0, tcon_cap, sizeof(tcon_cap));
        if (ret != sizeof(tcon_cap))
                return false;
@@ -204,6 +207,8 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
        int ret;
        u8 old_ctrl, ctrl;
 
+       intel_dp_wait_source_oui(intel_dp);
+
        ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl);
        if (ret != 1) {
                drm_err(&i915->drm, "Failed to read current backlight control mode: %d\n", ret);
@@ -293,6 +298,13 @@ intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state,
        struct intel_panel *panel = &connector->panel;
        struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
 
+       if (!panel->backlight.edp.vesa.info.aux_enable) {
+               u32 pwm_level = intel_backlight_invert_pwm_level(connector,
+                                                                panel->backlight.pwm_level_max);
+
+               panel->backlight.pwm_funcs->enable(crtc_state, conn_state, pwm_level);
+       }
+
        drm_edp_backlight_enable(&intel_dp->aux, &panel->backlight.edp.vesa.info, level);
 }
 
@@ -304,6 +316,10 @@ static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state
        struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
 
        drm_edp_backlight_disable(&intel_dp->aux, &panel->backlight.edp.vesa.info);
+
+       if (!panel->backlight.edp.vesa.info.aux_enable)
+               panel->backlight.pwm_funcs->disable(old_conn_state,
+                                                   intel_backlight_invert_pwm_level(connector, 0));
 }
 
 static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, enum pipe pipe)
@@ -321,6 +337,15 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
        if (ret < 0)
                return ret;
 
+       if (!panel->backlight.edp.vesa.info.aux_enable) {
+               ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+               if (ret < 0) {
+                       drm_err(&i915->drm,
+                               "Failed to setup PWM backlight controls for eDP backlight: %d\n",
+                               ret);
+                       return ret;
+               }
+       }
        panel->backlight.max = panel->backlight.edp.vesa.info.max;
        panel->backlight.min = 0;
        if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
@@ -340,12 +365,7 @@ intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector)
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 
-       /* TODO: We currently only support AUX only backlight configurations, not backlights which
-        * require a mix of PWM and AUX controls to work. In the mean time, these machines typically
-        * work just fine using normal PWM controls anyway.
-        */
-       if ((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
-           drm_edp_backlight_supported(intel_dp->edp_dpcd)) {
+       if (drm_edp_backlight_supported(intel_dp->edp_dpcd)) {
                drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n");
                return true;
        }
index 4d7da07442f2a274620bf2b7bd4915e524918f25..9b24d9b5ade1f750d8e87c72e7f592c610f531c6 100644 (file)
@@ -3277,6 +3277,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
        out_fence = eb_requests_create(&eb, in_fence, out_fence_fd);
        if (IS_ERR(out_fence)) {
                err = PTR_ERR(out_fence);
+               out_fence = NULL;
                if (eb.requests[0])
                        goto err_request;
                else
index 67d14afa66237a26f553c8d3265074fe63387812..b67f620c3d93cd1b6e85dacee3216a869f3e4ed1 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/slab.h> /* fault-inject.h is not standalone! */
 
 #include <linux/fault-inject.h>
+#include <linux/sched/mm.h>
 
 #include "gem/i915_gem_lmem.h"
 #include "i915_trace.h"
index e1f36253088918c9bedc0805e3bb9a7f80235c7b..2400d6423ba5eda77c4cada5be72fd1204b7bc90 100644 (file)
@@ -621,13 +621,6 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
               FF_MODE2_GS_TIMER_MASK,
               FF_MODE2_GS_TIMER_224,
               0, false);
-
-       /*
-        * Wa_14012131227:dg1
-        * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p
-        */
-       wa_masked_en(wal, GEN7_COMMON_SLICE_CHICKEN1,
-                    GEN9_RHWO_OPTIMIZATION_DISABLE);
 }
 
 static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -1134,6 +1127,15 @@ icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
                    GAMT_CHKN_BIT_REG,
                    GAMT_CHKN_DISABLE_L3_COH_PIPE);
 
+       /* Wa_1407352427:icl,ehl */
+       wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+                   PSDUNIT_CLKGATE_DIS);
+
+       /* Wa_1406680159:icl,ehl */
+       wa_write_or(wal,
+                   SUBSLICE_UNIT_LEVEL_CLKGATE,
+                   GWUNIT_CLKGATE_DIS);
+
        /* Wa_1607087056:icl,ehl,jsl */
        if (IS_ICELAKE(i915) ||
            IS_JSL_EHL_GT_STEP(i915, STEP_A0, STEP_B0))
@@ -1859,15 +1861,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
                wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
                            VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
 
-               /* Wa_1407352427:icl,ehl */
-               wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
-                           PSDUNIT_CLKGATE_DIS);
-
-               /* Wa_1406680159:icl,ehl */
-               wa_write_or(wal,
-                           SUBSLICE_UNIT_LEVEL_CLKGATE,
-                           GWUNIT_CLKGATE_DIS);
-
                /*
                 * Wa_1408767742:icl[a2..forever],ehl[all]
                 * Wa_1605460711:icl[a0..c0]
index c48557dfa04c4a73ef2dbdb51134722fe0b40142..302e9ff0602cc11442ee7d55bc6e39108d999e23 100644 (file)
@@ -1662,11 +1662,11 @@ static int steal_guc_id(struct intel_guc *guc, struct intel_context *ce)
                GEM_BUG_ON(intel_context_is_parent(cn));
 
                list_del_init(&cn->guc_id.link);
-               ce->guc_id = cn->guc_id;
+               ce->guc_id.id = cn->guc_id.id;
 
-               spin_lock(&ce->guc_state.lock);
+               spin_lock(&cn->guc_state.lock);
                clr_context_registered(cn);
-               spin_unlock(&ce->guc_state.lock);
+               spin_unlock(&cn->guc_state.lock);
 
                set_context_guc_id_invalid(cn);
 
index 820a1f38b271e095ffbe17bf1294582110c1312b..89cccefeea635344e7ae045a87d3d7f03efbd9d2 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/sched.h>
 #include <linux/sched/clock.h>
 #include <linux/sched/signal.h>
+#include <linux/sched/mm.h>
 
 #include "gem/i915_gem_context.h"
 #include "gt/intel_breadcrumbs.h"
index 65fdca366e41f00d940f13a0865234fc47df5595..f74f8048af8f2c5551ae384871a9418d1347929b 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/reset.h>
 #include <linux/clk.h>
+#include <linux/slab.h>
 #include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
 
index 5838c44cbf6f0dd221a9896c68beefc03ce48359..3196189429bcf895138d2160843b2e5dc2f2bc56 100644 (file)
@@ -1224,12 +1224,14 @@ static int mtk_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
                        return MODE_BAD;
        }
 
-       if (hdmi->conf->cea_modes_only && !drm_match_cea_mode(mode))
-               return MODE_BAD;
+       if (hdmi->conf) {
+               if (hdmi->conf->cea_modes_only && !drm_match_cea_mode(mode))
+                       return MODE_BAD;
 
-       if (hdmi->conf->max_mode_clock &&
-           mode->clock > hdmi->conf->max_mode_clock)
-               return MODE_CLOCK_HIGH;
+               if (hdmi->conf->max_mode_clock &&
+                   mode->clock > hdmi->conf->max_mode_clock)
+                       return MODE_CLOCK_HIGH;
+       }
 
        if (mode->clock < 27000)
                return MODE_CLOCK_LOW;
index ae11061727ff80d7141609909552bee59f444f1d..39197b4beea78b8ed7b93c5a1a98c4d524376cd6 100644 (file)
@@ -4,8 +4,8 @@ config DRM_MSM
        tristate "MSM DRM"
        depends on DRM
        depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST
+       depends on COMMON_CLK
        depends on IOMMU_SUPPORT
-       depends on (OF && COMMON_CLK) || COMPILE_TEST
        depends on QCOM_OCMEM || QCOM_OCMEM=n
        depends on QCOM_LLCC || QCOM_LLCC=n
        depends on QCOM_COMMAND_DB || QCOM_COMMAND_DB=n
index 40577f8856d8f10cf1b1c45420df80d34321ef34..093454457545f14424408c8ea7393e332b9f1c4d 100644 (file)
@@ -23,8 +23,10 @@ msm-y := \
        hdmi/hdmi_i2c.o \
        hdmi/hdmi_phy.o \
        hdmi/hdmi_phy_8960.o \
+       hdmi/hdmi_phy_8996.o \
        hdmi/hdmi_phy_8x60.o \
        hdmi/hdmi_phy_8x74.o \
+       hdmi/hdmi_pll_8960.o \
        edp/edp.o \
        edp/edp_aux.o \
        edp/edp_bridge.o \
@@ -37,6 +39,7 @@ msm-y := \
        disp/mdp4/mdp4_dtv_encoder.o \
        disp/mdp4/mdp4_lcdc_encoder.o \
        disp/mdp4/mdp4_lvds_connector.o \
+       disp/mdp4/mdp4_lvds_pll.o \
        disp/mdp4/mdp4_irq.o \
        disp/mdp4/mdp4_kms.o \
        disp/mdp4/mdp4_plane.o \
@@ -116,9 +119,6 @@ msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \
        dp/dp_audio.o
 
 msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
-msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
-msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
-msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o
 
 msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o
 
index 267a880811d654c78ba89de035fd660026afe898..78aad5216a613041fa210acb021208c42df918b9 100644 (file)
@@ -1424,17 +1424,24 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
 {
        struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
        struct msm_gpu *gpu = &adreno_gpu->base;
-       u32 gpu_scid, cntl1_regval = 0;
+       u32 cntl1_regval = 0;
 
        if (IS_ERR(a6xx_gpu->llc_mmio))
                return;
 
        if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
-               gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
+               u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
 
                gpu_scid &= 0x1f;
                cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) |
                               (gpu_scid << 15) | (gpu_scid << 20);
+
+               /* On A660, the SCID programming for UCHE traffic is done in
+                * A6XX_GBIF_SCACHE_CNTL0[14:10]
+                */
+               if (adreno_is_a660_family(adreno_gpu))
+                       gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
+                               (1 << 8), (gpu_scid << 10) | (1 << 8));
        }
 
        /*
@@ -1471,13 +1478,6 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
        }
 
        gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval);
-
-       /* On A660, the SCID programming for UCHE traffic is done in
-        * A6XX_GBIF_SCACHE_CNTL0[14:10]
-        */
-       if (adreno_is_a660_family(adreno_gpu))
-               gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
-                       (1 << 8), (gpu_scid << 10) | (1 << 8));
 }
 
 static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
@@ -1640,7 +1640,7 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
        return (unsigned long)busy_time;
 }
 
-void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
+static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
index 7501849ed15d93c4871348421dd6c7f3c88254e7..6e90209cd543bf2819f5c8bfff8e7de26e8f66b4 100644 (file)
@@ -777,12 +777,12 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
 
        a6xx_state->gmu_registers = state_kcalloc(a6xx_state,
-               2, sizeof(*a6xx_state->gmu_registers));
+               3, sizeof(*a6xx_state->gmu_registers));
 
        if (!a6xx_state->gmu_registers)
                return;
 
-       a6xx_state->nr_gmu_registers = 2;
+       a6xx_state->nr_gmu_registers = 3;
 
        /* Get the CX GMU registers from AHB */
        _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0],
index eb40d8413bca937e8c87ccf457e9cf88723801f7..6d36f63c333881c01f2ecc0cfc1a504537dbc4f7 100644 (file)
@@ -33,6 +33,7 @@ struct dp_aux_private {
        bool read;
        bool no_send_addr;
        bool no_send_stop;
+       bool initted;
        u32 offset;
        u32 segment;
 
@@ -331,6 +332,10 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
        }
 
        mutex_lock(&aux->mutex);
+       if (!aux->initted) {
+               ret = -EIO;
+               goto exit;
+       }
 
        dp_aux_update_offset_and_segment(aux, msg);
        dp_aux_transfer_helper(aux, msg, true);
@@ -380,6 +385,8 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
        }
 
        aux->cmd_busy = false;
+
+exit:
        mutex_unlock(&aux->mutex);
 
        return ret;
@@ -431,8 +438,13 @@ void dp_aux_init(struct drm_dp_aux *dp_aux)
 
        aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
 
+       mutex_lock(&aux->mutex);
+
        dp_catalog_aux_enable(aux->catalog, true);
        aux->retry_cnt = 0;
+       aux->initted = true;
+
+       mutex_unlock(&aux->mutex);
 }
 
 void dp_aux_deinit(struct drm_dp_aux *dp_aux)
@@ -441,7 +453,12 @@ void dp_aux_deinit(struct drm_dp_aux *dp_aux)
 
        aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
 
+       mutex_lock(&aux->mutex);
+
+       aux->initted = false;
        dp_catalog_aux_enable(aux->catalog, false);
+
+       mutex_unlock(&aux->mutex);
 }
 
 int dp_aux_register(struct drm_dp_aux *dp_aux)
index f69a125f955958ae41d89cf9214c1cb963df30d6..0afc3b756f92ddd62f8207ad8dfc6b211b715d0e 100644 (file)
@@ -1658,6 +1658,8 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
        if (!prop) {
                DRM_DEV_DEBUG(dev,
                        "failed to find data lane mapping, using default\n");
+               /* Set the number of date lanes to 4 by default. */
+               msm_host->num_data_lanes = 4;
                return 0;
        }
 
index 09d2d279c30ae7bc92ae35c9921a9425ea51c693..dee13fedee3b5cc925d4e7352c0906ebd8123521 100644 (file)
@@ -77,6 +77,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
                goto free_priv;
 
        pm_runtime_get_sync(&gpu->pdev->dev);
+       msm_gpu_hw_init(gpu);
        show_priv->state = gpu->funcs->gpu_state_get(gpu);
        pm_runtime_put_sync(&gpu->pdev->dev);
 
index 7936e8d498dda30e900d5a76c8ab57f11ad94994..892c04365239bb4397a3f5aa8b5debdf9a1aee56 100644 (file)
@@ -967,29 +967,18 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
        return ret;
 }
 
-static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
-               struct drm_file *file)
+static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
+                     ktime_t timeout)
 {
-       struct msm_drm_private *priv = dev->dev_private;
-       struct drm_msm_wait_fence *args = data;
-       ktime_t timeout = to_ktime(args->timeout);
-       struct msm_gpu_submitqueue *queue;
-       struct msm_gpu *gpu = priv->gpu;
        struct dma_fence *fence;
        int ret;
 
-       if (args->pad) {
-               DRM_ERROR("invalid pad: %08x\n", args->pad);
+       if (fence_id > queue->last_fence) {
+               DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n",
+                                     fence_id, queue->last_fence);
                return -EINVAL;
        }
 
-       if (!gpu)
-               return 0;
-
-       queue = msm_submitqueue_get(file->driver_priv, args->queueid);
-       if (!queue)
-               return -ENOENT;
-
        /*
         * Map submitqueue scoped "seqno" (which is actually an idr key)
         * back to underlying dma-fence
@@ -1001,7 +990,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
        ret = mutex_lock_interruptible(&queue->lock);
        if (ret)
                return ret;
-       fence = idr_find(&queue->fence_idr, args->fence);
+       fence = idr_find(&queue->fence_idr, fence_id);
        if (fence)
                fence = dma_fence_get_rcu(fence);
        mutex_unlock(&queue->lock);
@@ -1017,6 +1006,32 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
        }
 
        dma_fence_put(fence);
+
+       return ret;
+}
+
+static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
+               struct drm_file *file)
+{
+       struct msm_drm_private *priv = dev->dev_private;
+       struct drm_msm_wait_fence *args = data;
+       struct msm_gpu_submitqueue *queue;
+       int ret;
+
+       if (args->pad) {
+               DRM_ERROR("invalid pad: %08x\n", args->pad);
+               return -EINVAL;
+       }
+
+       if (!priv->gpu)
+               return 0;
+
+       queue = msm_submitqueue_get(file->driver_priv, args->queueid);
+       if (!queue)
+               return -ENOENT;
+
+       ret = wait_fence(queue, args->fence, to_ktime(args->timeout));
+
        msm_submitqueue_put(queue);
 
        return ret;
index 104fdfc140278863c62a1dc1225c253d863e727a..512d55eecbaf15e225e3ba5f84f4107ae34962bb 100644 (file)
@@ -1056,8 +1056,7 @@ static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
-       vma->vm_flags &= ~VM_PFNMAP;
-       vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+       vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
 
        return 0;
@@ -1121,7 +1120,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
                        break;
                fallthrough;
        default:
-               DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
+               DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
                                (flags & MSM_BO_CACHE_MASK));
                return -EINVAL;
        }
index 4a1420b05e978fb2bcd618e0bde05171b080655d..086dacf2f26a74120f015a4d5f1ed93b9e0fdc1b 100644 (file)
@@ -5,6 +5,7 @@
  */
 
 #include <linux/vmalloc.h>
+#include <linux/sched/mm.h>
 
 #include "msm_drv.h"
 #include "msm_gem.h"
index 3cb029f1092555bfa4aa3ef1b0bd99d897dad23c..282628d6b72c09f8ed5bbf141e0330e417d463f4 100644 (file)
@@ -772,6 +772,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                args->nr_cmds);
        if (IS_ERR(submit)) {
                ret = PTR_ERR(submit);
+               submit = NULL;
                goto out_unlock;
        }
 
@@ -904,6 +905,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        drm_sched_entity_push_job(&submit->base);
 
        args->fence = submit->fence_id;
+       queue->last_fence = submit->fence_id;
 
        msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
        msm_process_post_deps(post_deps, args->nr_out_syncobjs,
index 59cdd00b69d0401e2720b2e37b8ff8d546c0c7ac..48ea2de911f1357bf69c7169dc0fce8f522e4c56 100644 (file)
@@ -359,6 +359,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
  * @ring_nr:   the ringbuffer used by this submitqueue, which is determined
  *             by the submitqueue's priority
  * @faults:    the number of GPU hangs associated with this submitqueue
+ * @last_fence: the sequence number of the last allocated fence (for error
+ *             checking)
  * @ctx:       the per-drm_file context associated with the submitqueue (ie.
  *             which set of pgtables do submits jobs associated with the
  *             submitqueue use)
@@ -374,6 +376,7 @@ struct msm_gpu_submitqueue {
        u32 flags;
        u32 ring_nr;
        int faults;
+       uint32_t last_fence;
        struct msm_file_private *ctx;
        struct list_head node;
        struct idr fence_idr;
index 8b7473f69cb8fe6bf86a86a097dfd0396e5b4207..384e90c4b2a7999bb1310e91d0253afd3f8558be 100644 (file)
@@ -20,6 +20,10 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
        struct msm_gpu *gpu = dev_to_gpu(dev);
        struct dev_pm_opp *opp;
 
+       /*
+        * Note that devfreq_recommended_opp() can modify the freq
+        * to something that actually is in the opp table:
+        */
        opp = devfreq_recommended_opp(dev, freq, flags);
 
        /*
@@ -28,6 +32,7 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
         */
        if (gpu->devfreq.idle_freq) {
                gpu->devfreq.idle_freq = *freq;
+               dev_pm_opp_put(opp);
                return 0;
        }
 
@@ -203,9 +208,6 @@ static void msm_devfreq_idle_work(struct kthread_work *work)
        struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq);
        unsigned long idle_freq, target_freq = 0;
 
-       if (!df->devfreq)
-               return;
-
        /*
         * Hold devfreq lock to synchronize with get_dev_status()/
         * target() callbacks
@@ -227,6 +229,9 @@ void msm_devfreq_idle(struct msm_gpu *gpu)
 {
        struct msm_gpu_devfreq *df = &gpu->devfreq;
 
+       if (!df->devfreq)
+               return;
+
        msm_hrtimer_queue_work(&df->idle_work, ms_to_ktime(1),
-                              HRTIMER_MODE_ABS);
+                              HRTIMER_MODE_REL);
 }
index 481b48bde0473fe56a9f56e970ce4ba130111c2b..5a6e89825bc2fc54ec02dcda1da5a47cc2212f02 100644 (file)
@@ -458,7 +458,7 @@ static struct drm_display_mode simpledrm_mode(unsigned int width,
 {
        struct drm_display_mode mode = { SIMPLEDRM_MODE(width, height) };
 
-       mode.clock = 60 /* Hz */ * mode.hdisplay * mode.vdisplay;
+       mode.clock = mode.hdisplay * mode.vdisplay * 60 / 1000 /* kHz */;
        drm_mode_set_name(&mode);
 
        return mode;
index 739f11c0109cbea1972cbe92f9bfcb93f2271e0c..047adc42d9a0dc6afd807b9ddfa79575ab937a78 100644 (file)
@@ -1103,7 +1103,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
         * as an indication that we're about to swap out.
         */
        memset(&place, 0, sizeof(place));
-       place.mem_type = TTM_PL_SYSTEM;
+       place.mem_type = bo->resource->mem_type;
        if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL))
                return -EBUSY;
 
@@ -1135,6 +1135,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
                struct ttm_place hop;
 
                memset(&hop, 0, sizeof(hop));
+               place.mem_type = TTM_PL_SYSTEM;
                ret = ttm_resource_alloc(bo, &place, &evict_mem);
                if (unlikely(ret))
                        goto out;
index 7e83c00a3f48926b56519902599ed966e4c379d4..79c870a3bef8d5399d000aca435b32d55ebd56bc 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/sched.h>
 #include <linux/shmem_fs.h>
 #include <linux/file.h>
+#include <linux/module.h>
 #include <drm/drm_cache.h>
 #include <drm/ttm/ttm_bo_driver.h>
 
index f0b3e4cf5bceb6c33f41663cbc10bd904d18e630..b61792d2aa65740db39b49f957dc41eda99c5824 100644 (file)
@@ -337,10 +337,10 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
        struct drm_device *dev = state->dev;
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_hvs *hvs = vc4->hvs;
-       struct drm_crtc_state *old_crtc_state;
        struct drm_crtc_state *new_crtc_state;
        struct drm_crtc *crtc;
        struct vc4_hvs_state *old_hvs_state;
+       unsigned int channel;
        int i;
 
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -353,30 +353,32 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
                vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
        }
 
-       if (vc4->hvs->hvs5)
-               clk_set_min_rate(hvs->core_clk, 500000000);
-
        old_hvs_state = vc4_hvs_get_old_global_state(state);
-       if (!old_hvs_state)
+       if (IS_ERR(old_hvs_state))
                return;
 
-       for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
-               struct vc4_crtc_state *vc4_crtc_state =
-                       to_vc4_crtc_state(old_crtc_state);
-               unsigned int channel = vc4_crtc_state->assigned_channel;
+       for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
+               struct drm_crtc_commit *commit;
                int ret;
 
-               if (channel == VC4_HVS_CHANNEL_DISABLED)
+               if (!old_hvs_state->fifo_state[channel].in_use)
                        continue;
 
-               if (!old_hvs_state->fifo_state[channel].in_use)
+               commit = old_hvs_state->fifo_state[channel].pending_commit;
+               if (!commit)
                        continue;
 
-               ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[channel].pending_commit);
+               ret = drm_crtc_commit_wait(commit);
                if (ret)
                        drm_err(dev, "Timed out waiting for commit\n");
+
+               drm_crtc_commit_put(commit);
+               old_hvs_state->fifo_state[channel].pending_commit = NULL;
        }
 
+       if (vc4->hvs->hvs5)
+               clk_set_min_rate(hvs->core_clk, 500000000);
+
        drm_atomic_helper_commit_modeset_disables(dev, state);
 
        vc4_ctm_commit(vc4, state);
@@ -410,8 +412,8 @@ static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
        unsigned int i;
 
        hvs_state = vc4_hvs_get_new_global_state(state);
-       if (!hvs_state)
-               return -EINVAL;
+       if (WARN_ON(IS_ERR(hvs_state)))
+               return PTR_ERR(hvs_state);
 
        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                struct vc4_crtc_state *vc4_crtc_state =
@@ -668,12 +670,6 @@ vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
 
        for (i = 0; i < HVS_NUM_CHANNELS; i++) {
                state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
-
-               if (!old_state->fifo_state[i].pending_commit)
-                       continue;
-
-               state->fifo_state[i].pending_commit =
-                       drm_crtc_commit_get(old_state->fifo_state[i].pending_commit);
        }
 
        return &state->base;
@@ -762,8 +758,8 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
        unsigned int i;
 
        hvs_new_state = vc4_hvs_get_global_state(state);
-       if (!hvs_new_state)
-               return -EINVAL;
+       if (IS_ERR(hvs_new_state))
+               return PTR_ERR(hvs_new_state);
 
        for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
                if (!hvs_new_state->fifo_state[i].in_use)
index d86e1ad4a97260b82895d98bbb2feef93ef2ca7f..5072dbb0669a333fb4b722fa6b26167e20843c65 100644 (file)
@@ -157,36 +157,6 @@ static void virtio_gpu_config_changed(struct virtio_device *vdev)
        schedule_work(&vgdev->config_changed_work);
 }
 
-static __poll_t virtio_gpu_poll(struct file *filp,
-                               struct poll_table_struct *wait)
-{
-       struct drm_file *drm_file = filp->private_data;
-       struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
-       struct drm_device *dev = drm_file->minor->dev;
-       struct virtio_gpu_device *vgdev = dev->dev_private;
-       struct drm_pending_event *e = NULL;
-       __poll_t mask = 0;
-
-       if (!vgdev->has_virgl_3d || !vfpriv || !vfpriv->ring_idx_mask)
-               return drm_poll(filp, wait);
-
-       poll_wait(filp, &drm_file->event_wait, wait);
-
-       if (!list_empty(&drm_file->event_list)) {
-               spin_lock_irq(&dev->event_lock);
-               e = list_first_entry(&drm_file->event_list,
-                                    struct drm_pending_event, link);
-               drm_file->event_space += e->event->length;
-               list_del(&e->link);
-               spin_unlock_irq(&dev->event_lock);
-
-               kfree(e);
-               mask |= EPOLLIN | EPOLLRDNORM;
-       }
-
-       return mask;
-}
-
 static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_GPU, VIRTIO_DEV_ANY_ID },
        { 0 },
@@ -226,17 +196,7 @@ MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>");
 MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
 MODULE_AUTHOR("Alon Levy");
 
-static const struct file_operations virtio_gpu_driver_fops = {
-       .owner          = THIS_MODULE,
-       .open           = drm_open,
-       .release        = drm_release,
-       .unlocked_ioctl = drm_ioctl,
-       .compat_ioctl   = drm_compat_ioctl,
-       .poll           = virtio_gpu_poll,
-       .read           = drm_read,
-       .llseek         = noop_llseek,
-       .mmap           = drm_gem_mmap
-};
+DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);
 
 static const struct drm_driver driver = {
        .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
index e0265fe74aa565f639127042f7071faf80c5b50c..0a194aaad4192b7b5dff1b8e2762d0461fee9631 100644 (file)
@@ -138,7 +138,6 @@ struct virtio_gpu_fence_driver {
        spinlock_t       lock;
 };
 
-#define VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL 0x10000000
 struct virtio_gpu_fence_event {
        struct drm_pending_event base;
        struct drm_event event;
index 5618a1d5879c56382ed23d6aa046f58a88524c25..3607646d322954c9d07a3245e052cf7857fae8c5 100644 (file)
@@ -54,7 +54,7 @@ static int virtio_gpu_fence_event_create(struct drm_device *dev,
        if (!e)
                return -ENOMEM;
 
-       e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL;
+       e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED;
        e->event.length = sizeof(e->event);
 
        ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
index 9f5435b55949b3786b3be4fc76fd5444f7428658..a7c78ac96270d2eb15e585bbd6d31087aee0fec9 100644 (file)
@@ -207,14 +207,14 @@ config HID_CHERRY
 
 config HID_CHICONY
        tristate "Chicony devices"
-       depends on HID
+       depends on USB_HID
        default !EXPERT
        help
        Support for Chicony Tactical pad and special keys on Chicony keyboards.
 
 config HID_CORSAIR
        tristate "Corsair devices"
-       depends on HID && USB && LEDS_CLASS
+       depends on USB_HID && LEDS_CLASS
        help
        Support for Corsair devices that are not fully compliant with the
        HID standard.
@@ -245,7 +245,7 @@ config HID_MACALLY
 
 config HID_PRODIKEYS
        tristate "Prodikeys PC-MIDI Keyboard support"
-       depends on HID && SND
+       depends on USB_HID && SND
        select SND_RAWMIDI
        help
        Support for Prodikeys PC-MIDI Keyboard device support.
@@ -560,7 +560,7 @@ config HID_LENOVO
 
 config HID_LOGITECH
        tristate "Logitech devices"
-       depends on HID
+       depends on USB_HID
        depends on LEDS_CLASS
        default !EXPERT
        help
@@ -951,7 +951,7 @@ config HID_SAITEK
 
 config HID_SAMSUNG
        tristate "Samsung InfraRed remote control or keyboards"
-       depends on HID
+       depends on USB_HID
        help
        Support for Samsung InfraRed remote control or keyboards.
 
index f3ecddc519ee8c220f45f4315ec984677688cc26..08c9a9a60ae47a54045aa834aeb92b431205550b 100644 (file)
@@ -1028,8 +1028,7 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
        if (drvdata->quirks & QUIRK_IS_MULTITOUCH)
                drvdata->tp = &asus_i2c_tp;
 
-       if ((drvdata->quirks & QUIRK_T100_KEYBOARD) &&
-           hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+       if ((drvdata->quirks & QUIRK_T100_KEYBOARD) && hid_is_usb(hdev)) {
                struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
 
                if (intf->altsetting->desc.bInterfaceNumber == T100_TPAD_INTF) {
@@ -1057,8 +1056,7 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
                drvdata->tp = &asus_t100chi_tp;
        }
 
-       if ((drvdata->quirks & QUIRK_MEDION_E1239T) &&
-           hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+       if ((drvdata->quirks & QUIRK_MEDION_E1239T) && hid_is_usb(hdev)) {
                struct usb_host_interface *alt =
                        to_usb_interface(hdev->dev.parent)->altsetting;
 
index db6da21ade06315c457d087447cecf563eac8c55..74ad8bf98bfd5acea3d24ecff58300bdab434a26 100644 (file)
@@ -191,7 +191,7 @@ static void bigben_worker(struct work_struct *work)
                struct bigben_device, worker);
        struct hid_field *report_field = bigben->report->field[0];
 
-       if (bigben->removed)
+       if (bigben->removed || !report_field)
                return;
 
        if (bigben->work_led) {
index ca556d39da2aedb666733642194e2dced5e8a40a..f04d2aa23efe4647332f452f1f901a4a9cce5d93 100644 (file)
@@ -114,6 +114,9 @@ static int ch_probe(struct hid_device *hdev, const struct hid_device_id *id)
 {
        int ret;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
        ret = hid_parse(hdev);
        if (ret) {
index 902a60e249ed268489b22809d6c1340d638ed954..8c895c820b672772340ec17d34dc60e198eb56a5 100644 (file)
@@ -553,7 +553,12 @@ static int corsair_probe(struct hid_device *dev, const struct hid_device_id *id)
        int ret;
        unsigned long quirks = id->driver_data;
        struct corsair_drvdata *drvdata;
-       struct usb_interface *usbif = to_usb_interface(dev->dev.parent);
+       struct usb_interface *usbif;
+
+       if (!hid_is_usb(dev))
+               return -EINVAL;
+
+       usbif = to_usb_interface(dev->dev.parent);
 
        drvdata = devm_kzalloc(&dev->dev, sizeof(struct corsair_drvdata),
                               GFP_KERNEL);
index 021049805bb71f50be232eb2160d32100b2a2b03..3091355d48df64a28ae1bc704eae4185274975f5 100644 (file)
@@ -50,7 +50,7 @@ struct elan_drvdata {
 
 static int is_not_elan_touchpad(struct hid_device *hdev)
 {
-       if (hdev->bus == BUS_USB) {
+       if (hid_is_usb(hdev)) {
                struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
 
                return (intf->altsetting->desc.bInterfaceNumber !=
index 383dfda8c12fcede9f98bce23a21f55dfab22118..8e960d7b233b3aad3eca844166c7299cb918cd12 100644 (file)
@@ -230,6 +230,9 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
        int ret;
        struct usb_device *udev;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
index 8ee77f4afe9ff6306e1d0928e1b175a0fa77a757..79505c64dbfe72ebb00c518fde12ec0337d0d39c 100644 (file)
@@ -915,6 +915,9 @@ static int ft260_probe(struct hid_device *hdev, const struct hid_device_id *id)
        struct ft260_get_chip_version_report version;
        int ret;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;
index 8123b871a3ebf7f200ce6017b60e31a2f5431700..0403beb3104b9e47af2c7cb79acf734879e05b24 100644 (file)
@@ -585,6 +585,8 @@ static void hammer_remove(struct hid_device *hdev)
 static const struct hid_device_id hammer_devices[] = {
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) },
+       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+                    USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_EEL) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
index 0a38e8e9bc783051f8bc634cff597885fdcb6161..403506b9697e75a3c2ac9042299bac8e1e2d342e 100644 (file)
@@ -140,12 +140,17 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
 static int holtek_kbd_probe(struct hid_device *hdev,
                const struct hid_device_id *id)
 {
-       struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
-       int ret = hid_parse(hdev);
+       struct usb_interface *intf;
+       int ret;
+
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
 
+       ret = hid_parse(hdev);
        if (!ret)
                ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 
+       intf = to_usb_interface(hdev->dev.parent);
        if (!ret && intf->cur_altsetting->desc.bInterfaceNumber == 1) {
                struct hid_input *hidinput;
                list_for_each_entry(hidinput, &hdev->inputs, list) {
index 195b735b001d03d56f366b63b0ceb14b2d5d7552..7c907939bfae1d9a5398f574a284b66190fcbfd5 100644 (file)
@@ -62,6 +62,29 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
        return rdesc;
 }
 
+static int holtek_mouse_probe(struct hid_device *hdev,
+                             const struct hid_device_id *id)
+{
+       int ret;
+
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
+       ret = hid_parse(hdev);
+       if (ret) {
+               hid_err(hdev, "hid parse failed: %d\n", ret);
+               return ret;
+       }
+
+       ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+       if (ret) {
+               hid_err(hdev, "hw start failed: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
 static const struct hid_device_id holtek_mouse_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
                        USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
@@ -83,6 +106,7 @@ static struct hid_driver holtek_mouse_driver = {
        .name = "holtek_mouse",
        .id_table = holtek_mouse_devices,
        .report_fixup = holtek_mouse_report_fixup,
+       .probe = holtek_mouse_probe,
 };
 
 module_hid_driver(holtek_mouse_driver);
index 96a455921c67ae991621ddc77f73523fbc2f9645..19da07777d6283f589061d5309177e3b5640145d 100644 (file)
 #define USB_DEVICE_ID_HP_X2_10_COVER   0x0755
 #define I2C_DEVICE_ID_HP_ENVY_X360_15  0x2d05
 #define I2C_DEVICE_ID_HP_SPECTRE_X360_15       0x2817
+#define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544
 #define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN   0x2706
 #define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN   0x261A
 
 #define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
 #define USB_DEVICE_ID_GOOGLE_MOONBALL  0x5044
 #define USB_DEVICE_ID_GOOGLE_DON       0x5050
+#define USB_DEVICE_ID_GOOGLE_EEL       0x5057
 
 #define USB_VENDOR_ID_GOTOP            0x08f2
 #define USB_DEVICE_ID_SUPER_Q2         0x007f
 #define USB_DEVICE_ID_MS_TOUCH_COVER_2   0x07a7
 #define USB_DEVICE_ID_MS_TYPE_COVER_2    0x07a9
 #define USB_DEVICE_ID_MS_POWER_COVER     0x07da
+#define USB_DEVICE_ID_MS_SURFACE3_COVER                0x07de
 #define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd
 #define USB_DEVICE_ID_MS_PIXART_MOUSE    0x00cb
 #define USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS      0x02e0
index 217f2d1b91c56edde63f4457d2344221e6d1cb6c..03f994541981c1cf245a3eea0a38e3dedbdcf99f 100644 (file)
@@ -325,6 +325,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
          HID_BATTERY_QUIRK_IGNORE },
        { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
          HID_BATTERY_QUIRK_IGNORE },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN),
+         HID_BATTERY_QUIRK_IGNORE },
        { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15),
          HID_BATTERY_QUIRK_IGNORE },
        { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15),
index d40af911df635a42f763459d9c3792bbe59847b3..fb3f7258009c26a4c86b5a7cfc679976ed43bb01 100644 (file)
@@ -749,12 +749,18 @@ static int lg_raw_event(struct hid_device *hdev, struct hid_report *report,
 
 static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
 {
-       struct usb_interface *iface = to_usb_interface(hdev->dev.parent);
-       __u8 iface_num = iface->cur_altsetting->desc.bInterfaceNumber;
+       struct usb_interface *iface;
+       __u8 iface_num;
        unsigned int connect_mask = HID_CONNECT_DEFAULT;
        struct lg_drv_data *drv_data;
        int ret;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
+       iface = to_usb_interface(hdev->dev.parent);
+       iface_num = iface->cur_altsetting->desc.bInterfaceNumber;
+
        /* G29 only work with the 1st interface */
        if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) &&
            (iface_num != 0)) {
index a0017b010c3421bef0eb09c0606fdf5b3597c86e..7106b921b53cf5038137fef8aeaead8b91affa00 100644 (file)
@@ -1777,7 +1777,7 @@ static int logi_dj_probe(struct hid_device *hdev,
        case recvr_type_bluetooth:      no_dj_interfaces = 2; break;
        case recvr_type_dinovo:         no_dj_interfaces = 2; break;
        }
-       if (hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+       if (hid_is_usb(hdev)) {
                intf = to_usb_interface(hdev->dev.parent);
                if (intf && intf->altsetting->desc.bInterfaceNumber >=
                                                        no_dj_interfaces) {
index 2666af02d5c1a11f1a32f098eecb61ddc458cf0a..e4e9471d0f1e92d9b17a3c0f90d1b7dc0ceeb7de 100644 (file)
@@ -798,12 +798,18 @@ static int pk_raw_event(struct hid_device *hdev, struct hid_report *report,
 static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)
 {
        int ret;
-       struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
-       unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
+       struct usb_interface *intf;
+       unsigned short ifnum;
        unsigned long quirks = id->driver_data;
        struct pk_device *pk;
        struct pcmidi_snd *pm = NULL;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
+       intf = to_usb_interface(hdev->dev.parent);
+       ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
+
        pk = kzalloc(sizeof(*pk), GFP_KERNEL);
        if (pk == NULL) {
                hid_err(hdev, "can't alloc descriptor\n");
index 06b7908c874c18acb23f83aa86fd366f39c2b36b..ee7e504e7279f3522f9fdafd73dc5c809a6a4378 100644 (file)
@@ -124,6 +124,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PIXART_MOUSE), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE3_COVER), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
index 4556d2a50f754ebd56feea419e3aafb950627eea..d94ee0539421e7f9a742c6622f65059649440ad9 100644 (file)
@@ -344,6 +344,9 @@ static int arvo_probe(struct hid_device *hdev,
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index ce5f22519956acf91527d390d99f3ff35a424420..e95d59cd8d075dc82716fb47a4193286bd0c2b09 100644 (file)
@@ -324,6 +324,9 @@ static int isku_probe(struct hid_device *hdev,
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index ea17abc7ad5212d842e6276413490f6fe19825ec..76da04801ca9bde5c882c89444529eb7cb8c6578 100644 (file)
@@ -749,6 +749,9 @@ static int kone_probe(struct hid_device *hdev, const struct hid_device_id *id)
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index 0316edf8c5bb44e4d72e5a9a6ab858b53d0eefbb..1896c69ea512f787b264ed27f82f13d813fd82ea 100644 (file)
@@ -431,6 +431,9 @@ static int koneplus_probe(struct hid_device *hdev,
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index 5248b3c7cf7859abbd62dab6fed53e88d1b37773..cf8eeb33a12571bf47c2c57118ff7935909cd865 100644 (file)
@@ -133,6 +133,9 @@ static int konepure_probe(struct hid_device *hdev,
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index 960012881570569df2d31c368b33c06f4f67ba4d..6fb9b9563769dfbcc1cfc521c1f113feb442d411 100644 (file)
@@ -501,6 +501,9 @@ static int kovaplus_probe(struct hid_device *hdev,
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index 4a88a76d5c62293b890c56093d469cf6687a7baf..d5ddf0d68346b2c147f3c888badb77c06d77f46c 100644 (file)
@@ -160,6 +160,9 @@ static int lua_probe(struct hid_device *hdev,
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index 989927defe8db84d6d1916424a9c1d50fc5c3911..4fcc8e7d276f228cb2c29f99df8e6d5db9eef119 100644 (file)
@@ -449,6 +449,9 @@ static int pyra_probe(struct hid_device *hdev, const struct hid_device_id *id)
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index 3956a6c9c5217efdb88e25c918f3598a9e5e61fb..5bf1971a2b14d83c6388d3d1e9541617ff415fa6 100644 (file)
@@ -141,6 +141,9 @@ static int ryos_probe(struct hid_device *hdev,
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index 818701f7a028178b2c565e1506a28360c1deee08..a784bb4ee6512d8b39a0ede9a653b29cf901bf5d 100644 (file)
@@ -113,6 +113,9 @@ static int savu_probe(struct hid_device *hdev,
 {
        int retval;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        retval = hid_parse(hdev);
        if (retval) {
                hid_err(hdev, "parse failed\n");
index 2e1c31156eca046417d65d76d0e8aa847719ecb3..cf5992e97094003628a4aaacbded99a9f3603a10 100644 (file)
@@ -152,6 +152,9 @@ static int samsung_probe(struct hid_device *hdev,
        int ret;
        unsigned int cmask = HID_CONNECT_DEFAULT;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        ret = hid_parse(hdev);
        if (ret) {
                hid_err(hdev, "parse failed\n");
index d1b107d547f546719b9b449b8e420feb535bcd70..60ec2b29d54de49e0b19c57cc57288efa47af7a3 100644 (file)
@@ -3000,7 +3000,6 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
        sc->quirks = quirks;
        hid_set_drvdata(hdev, sc);
        sc->hdev = hdev;
-       usbdev = to_usb_device(sc->hdev->dev.parent->parent);
 
        ret = hid_parse(hdev);
        if (ret) {
@@ -3038,14 +3037,23 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
         */
        if (!(hdev->claimed & HID_CLAIMED_INPUT)) {
                hid_err(hdev, "failed to claim input\n");
-               hid_hw_stop(hdev);
-               return -ENODEV;
+               ret = -ENODEV;
+               goto err;
        }
 
        if (sc->quirks & (GHL_GUITAR_PS3WIIU | GHL_GUITAR_PS4)) {
+               if (!hid_is_usb(hdev)) {
+                       ret = -EINVAL;
+                       goto err;
+               }
+
+               usbdev = to_usb_device(sc->hdev->dev.parent->parent);
+
                sc->ghl_urb = usb_alloc_urb(0, GFP_ATOMIC);
-               if (!sc->ghl_urb)
-                       return -ENOMEM;
+               if (!sc->ghl_urb) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
 
                if (sc->quirks & GHL_GUITAR_PS3WIIU)
                        ret = ghl_init_urb(sc, usbdev, ghl_ps3wiiu_magic_data,
@@ -3055,7 +3063,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
                                                           ARRAY_SIZE(ghl_ps4_magic_data));
                if (ret) {
                        hid_err(hdev, "error preparing URB\n");
-                       return ret;
+                       goto err;
                }
 
                timer_setup(&sc->ghl_poke_timer, ghl_magic_poke, 0);
@@ -3064,6 +3072,10 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
        }
 
        return ret;
+
+err:
+       hid_hw_stop(hdev);
+       return ret;
 }
 
 static void sony_remove(struct hid_device *hdev)
index 3a5333424aa32776bd33a87bf8488f4b8f01d4d9..03b935ff02d5616f67e3287ceeb0dc9926703429 100644 (file)
@@ -274,6 +274,9 @@ static int thrustmaster_probe(struct hid_device *hdev, const struct hid_device_i
        int ret = 0;
        struct tm_wheel *tm_wheel = NULL;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        ret = hid_parse(hdev);
        if (ret) {
                hid_err(hdev, "parse failed with error %d\n", ret);
index 31ea7fc69916b1f95b57645dac7bc1ccaa3152ab..ad489caf53ad80e60d2a9f2f64d47143816d139c 100644 (file)
@@ -311,7 +311,7 @@ static int u2fzero_probe(struct hid_device *hdev,
        unsigned int minor;
        int ret;
 
-       if (!hid_is_using_ll_driver(hdev, &usb_hid_driver))
+       if (!hid_is_usb(hdev))
                return -EINVAL;
 
        dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
index 6a9865dd703c027be18d1420c5fec1d764b61631..d8ab0139e5cdae30d70191ae08d7a479849b04d2 100644 (file)
@@ -164,6 +164,9 @@ static int uclogic_probe(struct hid_device *hdev,
        struct uclogic_drvdata *drvdata = NULL;
        bool params_initialized = false;
 
+       if (!hid_is_usb(hdev))
+               return -EINVAL;
+
        /*
         * libinput requires the pad interface to be on a different node
         * than the pen, so use QUIRK_MULTI_INPUT for all tablets.
index 3d67b748a3b959f4febddc2db3f7a3f408924256..adff1bd68d9f84819a15043e15c786acfd934579 100644 (file)
@@ -843,8 +843,7 @@ int uclogic_params_init(struct uclogic_params *params,
        struct uclogic_params p = {0, };
 
        /* Check arguments */
-       if (params == NULL || hdev == NULL ||
-           !hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+       if (params == NULL || hdev == NULL || !hid_is_usb(hdev)) {
                rc = -EINVAL;
                goto cleanup;
        }
index cd7ada48b1d9fffb2bca0f026bed8842e497f9bf..72957a9f7117052fb7b7e9bdf0fd87d14018400e 100644 (file)
@@ -57,6 +57,9 @@ static int vivaldi_probe(struct hid_device *hdev,
        int ret;
 
        drvdata = devm_kzalloc(&hdev->dev, sizeof(*drvdata), GFP_KERNEL);
+       if (!drvdata)
+               return -ENOMEM;
+
        hid_set_drvdata(hdev, drvdata);
 
        ret = hid_parse(hdev);
index 1c5039081db2786474d8cdc9846f0f18bba58090..8e9d9450cb835df16dbc556dd0043713dd7d986a 100644 (file)
@@ -266,7 +266,8 @@ static void __maybe_unused ish_resume_handler(struct work_struct *work)
 
        if (ish_should_leave_d0i3(pdev) && !dev->suspend_flag
                        && IPC_IS_ISH_ILUP(fwsts)) {
-               disable_irq_wake(pdev->irq);
+               if (device_may_wakeup(&pdev->dev))
+                       disable_irq_wake(pdev->irq);
 
                ish_set_host_ready(dev);
 
@@ -337,7 +338,8 @@ static int __maybe_unused ish_suspend(struct device *device)
                         */
                        pci_save_state(pdev);
 
-                       enable_irq_wake(pdev->irq);
+                       if (device_may_wakeup(&pdev->dev))
+                               enable_irq_wake(pdev->irq);
                }
        } else {
                /*
index 2717d39600b402af0b8b7a1498926468a38239b2..066c567dbaa229f3fa5948cbc3cc1484e8bf5e2b 100644 (file)
@@ -726,7 +726,7 @@ static void wacom_retrieve_hid_descriptor(struct hid_device *hdev,
         * Skip the query for this type and modify defaults based on
         * interface number.
         */
-       if (features->type == WIRELESS) {
+       if (features->type == WIRELESS && intf) {
                if (intf->cur_altsetting->desc.bInterfaceNumber == 0)
                        features->device_type = WACOM_DEVICETYPE_WL_MONITOR;
                else
@@ -2214,7 +2214,7 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
        if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) {
                char *product_name = wacom->hdev->name;
 
-               if (hid_is_using_ll_driver(wacom->hdev, &usb_hid_driver)) {
+               if (hid_is_usb(wacom->hdev)) {
                        struct usb_interface *intf = to_usb_interface(wacom->hdev->dev.parent);
                        struct usb_device *dev = interface_to_usbdev(intf);
                        product_name = dev->product;
@@ -2451,6 +2451,9 @@ static void wacom_wireless_work(struct work_struct *work)
 
        wacom_destroy_battery(wacom);
 
+       if (!usbdev)
+               return;
+
        /* Stylus interface */
        hdev1 = usb_get_intfdata(usbdev->config->interface[1]);
        wacom1 = hid_get_drvdata(hdev1);
@@ -2730,8 +2733,6 @@ static void wacom_mode_change_work(struct work_struct *work)
 static int wacom_probe(struct hid_device *hdev,
                const struct hid_device_id *id)
 {
-       struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
-       struct usb_device *dev = interface_to_usbdev(intf);
        struct wacom *wacom;
        struct wacom_wac *wacom_wac;
        struct wacom_features *features;
@@ -2766,8 +2767,14 @@ static int wacom_probe(struct hid_device *hdev,
        wacom_wac->hid_data.inputmode = -1;
        wacom_wac->mode_report = -1;
 
-       wacom->usbdev = dev;
-       wacom->intf = intf;
+       if (hid_is_usb(hdev)) {
+               struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+               struct usb_device *dev = interface_to_usbdev(intf);
+
+               wacom->usbdev = dev;
+               wacom->intf = intf;
+       }
+
        mutex_init(&wacom->lock);
        INIT_DELAYED_WORK(&wacom->init_work, wacom_init_work);
        INIT_WORK(&wacom->wireless_work, wacom_wireless_work);
index dd12af20e467edab92ad26ee3a51e6c627468185..0747a8f1fceec956ee8174c738f8bf7bcadc1a16 100644 (file)
@@ -19,6 +19,7 @@ config HYPERV_TIMER
 config HYPERV_UTILS
        tristate "Microsoft Hyper-V Utilities driver"
        depends on HYPERV && CONNECTOR && NLS
+       depends on PTP_1588_CLOCK_OPTIONAL
        help
          Select this option to enable the Hyper-V Utilities.
 
index 731d5117f9f10879506a9f123f46e4f1b7513de2..14389fd7afb896a9a4032d640b3418e810880d9e 100644 (file)
@@ -729,7 +729,7 @@ static int corsairpsu_probe(struct hid_device *hdev, const struct hid_device_id
        corsairpsu_check_cmd_support(priv);
 
        priv->hwmon_dev = hwmon_device_register_with_info(&hdev->dev, "corsairpsu", priv,
-                                                         &corsairpsu_chip_info, 0);
+                                                         &corsairpsu_chip_info, NULL);
 
        if (IS_ERR(priv->hwmon_dev)) {
                ret = PTR_ERR(priv->hwmon_dev);
index eaace478f50878f6ffb4a8b26caee1f190a1c15c..5596c211f38d92f64a351ff3add5459dba9a3edd 100644 (file)
@@ -627,10 +627,9 @@ static void __init i8k_init_procfs(struct device *dev)
 {
        struct dell_smm_data *data = dev_get_drvdata(dev);
 
-       /* Register the proc entry */
-       proc_create_data("i8k", 0, NULL, &i8k_proc_ops, data);
-
-       devm_add_action_or_reset(dev, i8k_exit_procfs, NULL);
+       /* Only register exit function if creation was successful */
+       if (proc_create_data("i8k", 0, NULL, &i8k_proc_ops, data))
+               devm_add_action_or_reset(dev, i8k_exit_procfs, NULL);
 }
 
 #else
index 618052c6cdb6458f85fdd7ae91b95f72a3c19512..74019dff2550efef903814d3ffaaaee4e168ad91 100644 (file)
  * explicitly as max6659, or if its address is not 0x4c.
  * These chips lack the remote temperature offset feature.
  *
- * This driver also supports the MAX6654 chip made by Maxim. This chip can
- * be at 9 different addresses, similar to MAX6680/MAX6681. The MAX6654 is
- * otherwise similar to MAX6657/MAX6658/MAX6659. Extended range is available
- * by setting the configuration register accordingly, and is done during
- * initialization. Extended precision is only available at conversion rates
- * of 1 Hz and slower. Note that extended precision is not enabled by
- * default, as this driver initializes all chips to 2 Hz by design.
+ * This driver also supports the MAX6654 chip made by Maxim. This chip can be
+ * at 9 different addresses, similar to MAX6680/MAX6681. The MAX6654 is similar
+ * to MAX6657/MAX6658/MAX6659, but does not support critical temperature
+ * limits. Extended range is available by setting the configuration register
+ * accordingly, and is done during initialization. Extended precision is only
+ * available at conversion rates of 1 Hz and slower. Note that extended
+ * precision is not enabled by default, as this driver initializes all chips
+ * to 2 Hz by design.
  *
  * This driver also supports the MAX6646, MAX6647, MAX6648, MAX6649 and
  * MAX6692 chips made by Maxim.  These are again similar to the LM86,
@@ -188,6 +189,8 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
 #define LM90_HAVE_BROKEN_ALERT (1 << 7) /* Broken alert                */
 #define LM90_HAVE_EXTENDED_TEMP        (1 << 8) /* extended temperature support*/
 #define LM90_PAUSE_FOR_CONFIG  (1 << 9) /* Pause conversion for config */
+#define LM90_HAVE_CRIT         (1 << 10)/* Chip supports CRIT/OVERT register   */
+#define LM90_HAVE_CRIT_ALRM_SWP        (1 << 11)/* critical alarm bits swapped */
 
 /* LM90 status */
 #define LM90_STATUS_LTHRM      (1 << 0) /* local THERM limit tripped */
@@ -197,6 +200,7 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
 #define LM90_STATUS_RHIGH      (1 << 4) /* remote high temp limit tripped */
 #define LM90_STATUS_LLOW       (1 << 5) /* local low temp limit tripped */
 #define LM90_STATUS_LHIGH      (1 << 6) /* local high temp limit tripped */
+#define LM90_STATUS_BUSY       (1 << 7) /* conversion is ongoing */
 
 #define MAX6696_STATUS2_R2THRM (1 << 1) /* remote2 THERM limit tripped */
 #define MAX6696_STATUS2_R2OPEN (1 << 2) /* remote2 is an open circuit */
@@ -354,38 +358,43 @@ struct lm90_params {
 static const struct lm90_params lm90_params[] = {
        [adm1032] = {
                .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
-                 | LM90_HAVE_BROKEN_ALERT,
+                 | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT,
                .alert_alarms = 0x7c,
                .max_convrate = 10,
        },
        [adt7461] = {
                .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
-                 | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP,
+                 | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP
+                 | LM90_HAVE_CRIT,
                .alert_alarms = 0x7c,
                .max_convrate = 10,
        },
        [g781] = {
                .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
-                 | LM90_HAVE_BROKEN_ALERT,
+                 | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT,
                .alert_alarms = 0x7c,
                .max_convrate = 8,
        },
        [lm86] = {
-               .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
+               .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
+                 | LM90_HAVE_CRIT,
                .alert_alarms = 0x7b,
                .max_convrate = 9,
        },
        [lm90] = {
-               .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
+               .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
+                 | LM90_HAVE_CRIT,
                .alert_alarms = 0x7b,
                .max_convrate = 9,
        },
        [lm99] = {
-               .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
+               .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
+                 | LM90_HAVE_CRIT,
                .alert_alarms = 0x7b,
                .max_convrate = 9,
        },
        [max6646] = {
+               .flags = LM90_HAVE_CRIT,
                .alert_alarms = 0x7c,
                .max_convrate = 6,
                .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
@@ -396,50 +405,51 @@ static const struct lm90_params lm90_params[] = {
                .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
        },
        [max6657] = {
-               .flags = LM90_PAUSE_FOR_CONFIG,
+               .flags = LM90_PAUSE_FOR_CONFIG | LM90_HAVE_CRIT,
                .alert_alarms = 0x7c,
                .max_convrate = 8,
                .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
        },
        [max6659] = {
-               .flags = LM90_HAVE_EMERGENCY,
+               .flags = LM90_HAVE_EMERGENCY | LM90_HAVE_CRIT,
                .alert_alarms = 0x7c,
                .max_convrate = 8,
                .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
        },
        [max6680] = {
-               .flags = LM90_HAVE_OFFSET,
+               .flags = LM90_HAVE_OFFSET | LM90_HAVE_CRIT
+                 | LM90_HAVE_CRIT_ALRM_SWP,
                .alert_alarms = 0x7c,
                .max_convrate = 7,
        },
        [max6696] = {
                .flags = LM90_HAVE_EMERGENCY
-                 | LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3,
+                 | LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3 | LM90_HAVE_CRIT,
                .alert_alarms = 0x1c7c,
                .max_convrate = 6,
                .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
        },
        [w83l771] = {
-               .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
+               .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_CRIT,
                .alert_alarms = 0x7c,
                .max_convrate = 8,
        },
        [sa56004] = {
-               .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
+               .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_CRIT,
                .alert_alarms = 0x7b,
                .max_convrate = 9,
                .reg_local_ext = SA56004_REG_R_LOCAL_TEMPL,
        },
        [tmp451] = {
                .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
-                 | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP,
+                 | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP | LM90_HAVE_CRIT,
                .alert_alarms = 0x7c,
                .max_convrate = 9,
                .reg_local_ext = TMP451_REG_R_LOCAL_TEMPL,
        },
        [tmp461] = {
                .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
-                 | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP,
+                 | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP | LM90_HAVE_CRIT,
                .alert_alarms = 0x7c,
                .max_convrate = 9,
                .reg_local_ext = TMP451_REG_R_LOCAL_TEMPL,
@@ -668,20 +678,22 @@ static int lm90_update_limits(struct device *dev)
        struct i2c_client *client = data->client;
        int val;
 
-       val = lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT);
-       if (val < 0)
-               return val;
-       data->temp8[LOCAL_CRIT] = val;
+       if (data->flags & LM90_HAVE_CRIT) {
+               val = lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT);
+               if (val < 0)
+                       return val;
+               data->temp8[LOCAL_CRIT] = val;
 
-       val = lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT);
-       if (val < 0)
-               return val;
-       data->temp8[REMOTE_CRIT] = val;
+               val = lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT);
+               if (val < 0)
+                       return val;
+               data->temp8[REMOTE_CRIT] = val;
 
-       val = lm90_read_reg(client, LM90_REG_R_TCRIT_HYST);
-       if (val < 0)
-               return val;
-       data->temp_hyst = val;
+               val = lm90_read_reg(client, LM90_REG_R_TCRIT_HYST);
+               if (val < 0)
+                       return val;
+               data->temp_hyst = val;
+       }
 
        val = lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH);
        if (val < 0)
@@ -809,7 +821,7 @@ static int lm90_update_device(struct device *dev)
                val = lm90_read_reg(client, LM90_REG_R_STATUS);
                if (val < 0)
                        return val;
-               data->alarms = val;     /* lower 8 bit of alarms */
+               data->alarms = val & ~LM90_STATUS_BUSY;
 
                if (data->kind == max6696) {
                        val = lm90_select_remote_channel(data, 1);
@@ -1160,8 +1172,8 @@ static int lm90_set_temphyst(struct lm90_data *data, long val)
        else
                temp = temp_from_s8(data->temp8[LOCAL_CRIT]);
 
-       /* prevent integer underflow */
-       val = max(val, -128000l);
+       /* prevent integer overflow/underflow */
+       val = clamp_val(val, -128000l, 255000l);
 
        data->temp_hyst = hyst_to_reg(temp - val);
        err = i2c_smbus_write_byte_data(client, LM90_REG_W_TCRIT_HYST,
@@ -1192,6 +1204,7 @@ static const u8 lm90_temp_emerg_index[3] = {
 static const u8 lm90_min_alarm_bits[3] = { 5, 3, 11 };
 static const u8 lm90_max_alarm_bits[3] = { 6, 4, 12 };
 static const u8 lm90_crit_alarm_bits[3] = { 0, 1, 9 };
+static const u8 lm90_crit_alarm_bits_swapped[3] = { 1, 0, 9 };
 static const u8 lm90_emergency_alarm_bits[3] = { 15, 13, 14 };
 static const u8 lm90_fault_bits[3] = { 0, 2, 10 };
 
@@ -1217,7 +1230,10 @@ static int lm90_temp_read(struct device *dev, u32 attr, int channel, long *val)
                *val = (data->alarms >> lm90_max_alarm_bits[channel]) & 1;
                break;
        case hwmon_temp_crit_alarm:
-               *val = (data->alarms >> lm90_crit_alarm_bits[channel]) & 1;
+               if (data->flags & LM90_HAVE_CRIT_ALRM_SWP)
+                       *val = (data->alarms >> lm90_crit_alarm_bits_swapped[channel]) & 1;
+               else
+                       *val = (data->alarms >> lm90_crit_alarm_bits[channel]) & 1;
                break;
        case hwmon_temp_emergency_alarm:
                *val = (data->alarms >> lm90_emergency_alarm_bits[channel]) & 1;
@@ -1465,12 +1481,11 @@ static int lm90_detect(struct i2c_client *client,
        if (man_id < 0 || chip_id < 0 || config1 < 0 || convrate < 0)
                return -ENODEV;
 
-       if (man_id == 0x01 || man_id == 0x5C || man_id == 0x41) {
+       if (man_id == 0x01 || man_id == 0x5C || man_id == 0xA1) {
                config2 = i2c_smbus_read_byte_data(client, LM90_REG_R_CONFIG2);
                if (config2 < 0)
                        return -ENODEV;
-       } else
-               config2 = 0;            /* Make compiler happy */
+       }
 
        if ((address == 0x4C || address == 0x4D)
         && man_id == 0x01) { /* National Semiconductor */
@@ -1903,11 +1918,14 @@ static int lm90_probe(struct i2c_client *client)
        info->config = data->channel_config;
 
        data->channel_config[0] = HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
-               HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM |
-               HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM;
+               HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM;
        data->channel_config[1] = HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
-               HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM |
-               HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | HWMON_T_FAULT;
+               HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | HWMON_T_FAULT;
+
+       if (data->flags & LM90_HAVE_CRIT) {
+               data->channel_config[0] |= HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_CRIT_HYST;
+               data->channel_config[1] |= HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_CRIT_HYST;
+       }
 
        if (data->flags & LM90_HAVE_OFFSET)
                data->channel_config[1] |= HWMON_T_OFFSET;
index 93dca471972ea352b1129e543e084b25acbdd1e8..57ce8633a72561d2e126e868c3aede44e6f7478e 100644 (file)
@@ -1527,7 +1527,7 @@ static u16 nct6775_wmi_read_value(struct nct6775_data *data, u16 reg)
 
        nct6775_wmi_set_bank(data, reg);
 
-       err = nct6775_asuswmi_read(data->bank, reg, &tmp);
+       err = nct6775_asuswmi_read(data->bank, reg & 0xff, &tmp);
        if (err)
                return 0;
 
index 17518b4cab1b007092dd4dd3973ebabe2e6b6e43..f12b9a28a232d498a68dcfe025998342e2137783 100644 (file)
@@ -336,8 +336,6 @@ static int pwm_fan_probe(struct platform_device *pdev)
                        return ret;
        }
 
-       ctx->pwm_value = MAX_PWM;
-
        pwm_init_state(ctx->pwm, &ctx->pwm_state);
 
        /*
index 09c2a0b0644440d0cd27e29236c3678621593780..3415d7a0e0fc78a4acd3a5e3a9283b98eea78e20 100644 (file)
@@ -23,7 +23,7 @@
 /*
  * I2C command delays (in microseconds)
  */
-#define SHT4X_MEAS_DELAY       1000
+#define SHT4X_MEAS_DELAY_HPM   8200    /* see t_MEAS,h in datasheet */
 #define SHT4X_DELAY_EXTRA      10000
 
 /*
@@ -90,7 +90,7 @@ static int sht4x_read_values(struct sht4x_data *data)
        if (ret < 0)
                goto unlock;
 
-       usleep_range(SHT4X_MEAS_DELAY, SHT4X_MEAS_DELAY + SHT4X_DELAY_EXTRA);
+       usleep_range(SHT4X_MEAS_DELAY_HPM, SHT4X_MEAS_DELAY_HPM + SHT4X_DELAY_EXTRA);
 
        ret = i2c_master_recv(client, raw_data, SHT4X_RESPONSE_LENGTH);
        if (ret != SHT4X_RESPONSE_LENGTH) {
index 72df563477b1c3e3d99feea2126bdc03fd408cd2..f8639a4457d23ae55eece7874c10e3a05562b58d 100644 (file)
@@ -195,8 +195,9 @@ static u32 cbus_i2c_func(struct i2c_adapter *adapter)
 }
 
 static const struct i2c_algorithm cbus_i2c_algo = {
-       .smbus_xfer     = cbus_i2c_smbus_xfer,
-       .functionality  = cbus_i2c_func,
+       .smbus_xfer             = cbus_i2c_smbus_xfer,
+       .smbus_xfer_atomic      = cbus_i2c_smbus_xfer,
+       .functionality          = cbus_i2c_func,
 };
 
 static int cbus_i2c_remove(struct platform_device *pdev)
index a6ea1eb1394e1cd78b4c4e7711a2b6d15ff913cb..53b8da6dbb23f0ec8768b192d537f732d6a46dce 100644 (file)
@@ -636,7 +636,7 @@ static irqreturn_t mpc_i2c_isr(int irq, void *dev_id)
        status = readb(i2c->base + MPC_I2C_SR);
        if (status & CSR_MIF) {
                /* Wait up to 100us for transfer to properly complete */
-               readb_poll_timeout(i2c->base + MPC_I2C_SR, status, !(status & CSR_MCF), 0, 100);
+               readb_poll_timeout_atomic(i2c->base + MPC_I2C_SR, status, status & CSR_MCF, 0, 100);
                writeb(0, i2c->base + MPC_I2C_SR);
                mpc_i2c_do_intr(i2c, status);
                return IRQ_HANDLED;
index 819ab4ee517e13cf1367cbfb157a72add5129d6b..02ddb237f69afdfcc766f1703e9b81297882534a 100644 (file)
@@ -423,8 +423,8 @@ static void rk3x_i2c_handle_read(struct rk3x_i2c *i2c, unsigned int ipd)
        if (!(ipd & REG_INT_MBRF))
                return;
 
-       /* ack interrupt */
-       i2c_writel(i2c, REG_INT_MBRF, REG_IPD);
+       /* ack interrupt (read also produces a spurious START flag, clear it too) */
+       i2c_writel(i2c, REG_INT_MBRF | REG_INT_START, REG_IPD);
 
        /* Can only handle a maximum of 32 bytes at a time */
        if (len > 32)
index b9b19a2a2ffa0b83957cdec0314b636db968b168..66145d2b9b55867e16dce3a756877fc145a4e323 100644 (file)
@@ -1493,6 +1493,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
 {
        struct stm32f7_i2c_dev *i2c_dev = data;
        struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
+       struct stm32_i2c_dma *dma = i2c_dev->dma;
        void __iomem *base = i2c_dev->base;
        u32 status, mask;
        int ret = IRQ_HANDLED;
@@ -1518,6 +1519,10 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
                dev_dbg(i2c_dev->dev, "<%s>: Receive NACK (addr %x)\n",
                        __func__, f7_msg->addr);
                writel_relaxed(STM32F7_I2C_ICR_NACKCF, base + STM32F7_I2C_ICR);
+               if (i2c_dev->use_dma) {
+                       stm32f7_i2c_disable_dma_req(i2c_dev);
+                       dmaengine_terminate_async(dma->chan_using);
+               }
                f7_msg->result = -ENXIO;
        }
 
@@ -1533,7 +1538,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
                /* Clear STOP flag */
                writel_relaxed(STM32F7_I2C_ICR_STOPCF, base + STM32F7_I2C_ICR);
 
-               if (i2c_dev->use_dma) {
+               if (i2c_dev->use_dma && !f7_msg->result) {
                        ret = IRQ_WAKE_THREAD;
                } else {
                        i2c_dev->master_mode = false;
@@ -1546,7 +1551,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
                if (f7_msg->stop) {
                        mask = STM32F7_I2C_CR2_STOP;
                        stm32f7_i2c_set_bits(base + STM32F7_I2C_CR2, mask);
-               } else if (i2c_dev->use_dma) {
+               } else if (i2c_dev->use_dma && !f7_msg->result) {
                        ret = IRQ_WAKE_THREAD;
                } else if (f7_msg->smbus) {
                        stm32f7_i2c_smbus_rep_start(i2c_dev);
@@ -1583,7 +1588,7 @@ static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data)
        if (!ret) {
                dev_dbg(i2c_dev->dev, "<%s>: Timed out\n", __func__);
                stm32f7_i2c_disable_dma_req(i2c_dev);
-               dmaengine_terminate_all(dma->chan_using);
+               dmaengine_terminate_async(dma->chan_using);
                f7_msg->result = -ETIMEDOUT;
        }
 
@@ -1660,7 +1665,7 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
        /* Disable dma */
        if (i2c_dev->use_dma) {
                stm32f7_i2c_disable_dma_req(i2c_dev);
-               dmaengine_terminate_all(dma->chan_using);
+               dmaengine_terminate_async(dma->chan_using);
        }
 
        i2c_dev->master_mode = false;
@@ -1696,12 +1701,26 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
        time_left = wait_for_completion_timeout(&i2c_dev->complete,
                                                i2c_dev->adap.timeout);
        ret = f7_msg->result;
+       if (ret) {
+               if (i2c_dev->use_dma)
+                       dmaengine_synchronize(dma->chan_using);
+
+               /*
+                * It is possible that some unsent data have already been
+                * written into TXDR. To avoid sending old data in a
+                * further transfer, flush TXDR in case of any error
+                */
+               writel_relaxed(STM32F7_I2C_ISR_TXE,
+                              i2c_dev->base + STM32F7_I2C_ISR);
+               goto pm_free;
+       }
 
        if (!time_left) {
                dev_dbg(i2c_dev->dev, "Access to slave 0x%x timed out\n",
                        i2c_dev->msg->addr);
                if (i2c_dev->use_dma)
-                       dmaengine_terminate_all(dma->chan_using);
+                       dmaengine_terminate_sync(dma->chan_using);
+               stm32f7_i2c_wait_free_bus(i2c_dev);
                ret = -ETIMEDOUT;
        }
 
@@ -1744,13 +1763,25 @@ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
        timeout = wait_for_completion_timeout(&i2c_dev->complete,
                                              i2c_dev->adap.timeout);
        ret = f7_msg->result;
-       if (ret)
+       if (ret) {
+               if (i2c_dev->use_dma)
+                       dmaengine_synchronize(dma->chan_using);
+
+               /*
+                * It is possible that some unsent data have already been
+                * written into TXDR. To avoid sending old data in a
+                * further transfer, flush TXDR in case of any error
+                */
+               writel_relaxed(STM32F7_I2C_ISR_TXE,
+                              i2c_dev->base + STM32F7_I2C_ISR);
                goto pm_free;
+       }
 
        if (!timeout) {
                dev_dbg(dev, "Access to slave 0x%x timed out\n", f7_msg->addr);
                if (i2c_dev->use_dma)
-                       dmaengine_terminate_all(dma->chan_using);
+                       dmaengine_terminate_sync(dma->chan_using);
+               stm32f7_i2c_wait_free_bus(i2c_dev);
                ret = -ETIMEDOUT;
                goto pm_free;
        }
index 95378780da6d6df0168080488b5ac451719c6d97..41eb0dcc3204fe86ddcc7570730ab5bf509850e3 100644 (file)
 /**
  * struct virtio_i2c - virtio I2C data
  * @vdev: virtio device for this controller
- * @completion: completion of virtio I2C message
  * @adap: I2C adapter for this controller
  * @vq: the virtio virtqueue for communication
  */
 struct virtio_i2c {
        struct virtio_device *vdev;
-       struct completion completion;
        struct i2c_adapter adap;
        struct virtqueue *vq;
 };
 
 /**
  * struct virtio_i2c_req - the virtio I2C request structure
+ * @completion: completion of virtio I2C message
  * @out_hdr: the OUT header of the virtio I2C message
  * @buf: the buffer into which data is read, or from which it's written
  * @in_hdr: the IN header of the virtio I2C message
  */
 struct virtio_i2c_req {
+       struct completion completion;
        struct virtio_i2c_out_hdr out_hdr       ____cacheline_aligned;
        uint8_t *buf                            ____cacheline_aligned;
        struct virtio_i2c_in_hdr in_hdr         ____cacheline_aligned;
@@ -47,9 +47,11 @@ struct virtio_i2c_req {
 
 static void virtio_i2c_msg_done(struct virtqueue *vq)
 {
-       struct virtio_i2c *vi = vq->vdev->priv;
+       struct virtio_i2c_req *req;
+       unsigned int len;
 
-       complete(&vi->completion);
+       while ((req = virtqueue_get_buf(vq, &len)))
+               complete(&req->completion);
 }
 
 static int virtio_i2c_prepare_reqs(struct virtqueue *vq,
@@ -62,6 +64,8 @@ static int virtio_i2c_prepare_reqs(struct virtqueue *vq,
        for (i = 0; i < num; i++) {
                int outcnt = 0, incnt = 0;
 
+               init_completion(&reqs[i].completion);
+
                /*
                 * Only 7-bit mode supported for this moment. For the address
                 * format, Please check the Virtio I2C Specification.
@@ -106,21 +110,15 @@ static int virtio_i2c_complete_reqs(struct virtqueue *vq,
                                    struct virtio_i2c_req *reqs,
                                    struct i2c_msg *msgs, int num)
 {
-       struct virtio_i2c_req *req;
        bool failed = false;
-       unsigned int len;
        int i, j = 0;
 
        for (i = 0; i < num; i++) {
-               /* Detach the ith request from the vq */
-               req = virtqueue_get_buf(vq, &len);
+               struct virtio_i2c_req *req = &reqs[i];
 
-               /*
-                * Condition req == &reqs[i] should always meet since we have
-                * total num requests in the vq. reqs[i] can never be NULL here.
-                */
-               if (!failed && (WARN_ON(req != &reqs[i]) ||
-                               req->in_hdr.status != VIRTIO_I2C_MSG_OK))
+               wait_for_completion(&req->completion);
+
+               if (!failed && req->in_hdr.status != VIRTIO_I2C_MSG_OK)
                        failed = true;
 
                i2c_put_dma_safe_msg_buf(reqs[i].buf, &msgs[i], !failed);
@@ -156,12 +154,8 @@ static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
         * remote here to clear the virtqueue, so we can try another set of
         * messages later on.
         */
-
-       reinit_completion(&vi->completion);
        virtqueue_kick(vq);
 
-       wait_for_completion(&vi->completion);
-
        count = virtio_i2c_complete_reqs(vq, reqs, msgs, count);
 
 err_free:
@@ -210,8 +204,6 @@ static int virtio_i2c_probe(struct virtio_device *vdev)
        vdev->priv = vi;
        vi->vdev = vdev;
 
-       init_completion(&vi->completion);
-
        ret = virtio_i2c_setup_vqs(vi);
        if (ret)
                return ret;
index 92c1cc07ed468fd727b16a01c0bdfb965f4425e4..15332baa9e8588e8c27c6c9406b38aee43cb5cb6 100644 (file)
@@ -254,6 +254,13 @@ static void i2c_acpi_register_device(struct i2c_adapter *adapter,
                                     struct acpi_device *adev,
                                     struct i2c_board_info *info)
 {
+       /*
+        * Skip registration on boards where the ACPI tables are
+        * known to contain bogus I2C devices.
+        */
+       if (acpi_quirk_skip_i2c_client_enumeration(adev))
+               return;
+
        adev->power.flags.ignore_parent = true;
        acpi_device_set_enumerated(adev);
 
index a51fdd3c9b5b5b6bdfa059cceb3086fa0adfa62f..24c9387c296879cffe851bfcbac7dcaea343634a 100644 (file)
@@ -1595,8 +1595,7 @@ static int kxcjk1013_probe(struct i2c_client *client,
        return 0;
 
 err_buffer_cleanup:
-       if (data->dready_trig)
-               iio_triggered_buffer_cleanup(indio_dev);
+       iio_triggered_buffer_cleanup(indio_dev);
 err_trigger_unregister:
        if (data->dready_trig)
                iio_trigger_unregister(data->dready_trig);
@@ -1618,8 +1617,8 @@ static int kxcjk1013_remove(struct i2c_client *client)
        pm_runtime_disable(&client->dev);
        pm_runtime_set_suspended(&client->dev);
 
+       iio_triggered_buffer_cleanup(indio_dev);
        if (data->dready_trig) {
-               iio_triggered_buffer_cleanup(indio_dev);
                iio_trigger_unregister(data->dready_trig);
                iio_trigger_unregister(data->motion_trig);
        }
index 2faf85ca996e1daffb218df82361da71a29c02da..552eba5e8b4fda35799ebcae61e9b91e6f15ecff 100644 (file)
@@ -224,14 +224,14 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p)
                               hw_values.chan,
                               sizeof(hw_values.chan));
        if (ret) {
-               dev_err(st->dev,
-                       "error reading data\n");
-               return ret;
+               dev_err(st->dev, "error reading data: %d\n", ret);
+               goto out;
        }
 
        iio_push_to_buffers_with_timestamp(indio_dev,
                                           &hw_values,
                                           iio_get_time_ns(indio_dev));
+out:
        iio_trigger_notify_done(indio_dev->trig);
 
        return IRQ_HANDLED;
index 715b8138fb715cb64897635364c853437a4afd08..09c7f10fefb6e426c02736c84242e8ccf3d4d9d6 100644 (file)
@@ -1470,7 +1470,7 @@ static int mma8452_trigger_setup(struct iio_dev *indio_dev)
        if (ret)
                return ret;
 
-       indio_dev->trig = trig;
+       indio_dev->trig = iio_trigger_get(trig);
 
        return 0;
 }
index 8bf5b62a73f423ebf358e43719449673b7fa0866..3363af15a43f886ced8ae53a05b7b5db3e625158 100644 (file)
@@ -532,7 +532,7 @@ config IMX7D_ADC
 
 config IMX8QXP_ADC
        tristate "NXP IMX8QXP ADC driver"
-       depends on ARCH_MXC_ARM64 || COMPILE_TEST
+       depends on ARCH_MXC || COMPILE_TEST
        depends on HAS_IOMEM
        help
          Say yes here to build support for IMX8QXP ADC.
index 2c5c8a3672b2d5d28f9392e16ff8bcfaa8d599e5..aa42ba759fa1a0013bf544558bfae39813076e8d 100644 (file)
@@ -480,8 +480,8 @@ static irqreturn_t ad7768_trigger_handler(int irq, void *p)
        iio_push_to_buffers_with_timestamp(indio_dev, &st->data.scan,
                                           iio_get_time_ns(indio_dev));
 
-       iio_trigger_notify_done(indio_dev->trig);
 err_unlock:
+       iio_trigger_notify_done(indio_dev->trig);
        mutex_unlock(&st->lock);
 
        return IRQ_HANDLED;
index 4c922ef634f8e81b5c45ce4dfcdf4f31722f03ca..92a57cf10fba4a05d797b7308404a66255263824 100644 (file)
@@ -1586,7 +1586,8 @@ static int at91_adc_read_info_raw(struct iio_dev *indio_dev,
                *val = st->conversion_value;
                ret = at91_adc_adjust_val_osr(st, val);
                if (chan->scan_type.sign == 's')
-                       *val = sign_extend32(*val, 11);
+                       *val = sign_extend32(*val,
+                                            chan->scan_type.realbits - 1);
                st->conversion_done = false;
        }
 
index 3e0c0233b43156c899398c34b240001097d40058..df99f1365c398306524389f376002060247b6fb0 100644 (file)
@@ -251,19 +251,8 @@ static int axp22x_adc_raw(struct iio_dev *indio_dev,
                          struct iio_chan_spec const *chan, int *val)
 {
        struct axp20x_adc_iio *info = iio_priv(indio_dev);
-       int size;
 
-       /*
-        * N.B.: Unlike the Chinese datasheets tell, the charging current is
-        * stored on 12 bits, not 13 bits. Only discharging current is on 13
-        * bits.
-        */
-       if (chan->type == IIO_CURRENT && chan->channel == AXP22X_BATT_DISCHRG_I)
-               size = 13;
-       else
-               size = 12;
-
-       *val = axp20x_read_variable_width(info->regmap, chan->address, size);
+       *val = axp20x_read_variable_width(info->regmap, chan->address, 12);
        if (*val < 0)
                return *val;
 
@@ -386,9 +375,8 @@ static int axp22x_adc_scale(struct iio_chan_spec const *chan, int *val,
                return IIO_VAL_INT_PLUS_MICRO;
 
        case IIO_CURRENT:
-               *val = 0;
-               *val2 = 500000;
-               return IIO_VAL_INT_PLUS_MICRO;
+               *val = 1;
+               return IIO_VAL_INT;
 
        case IIO_TEMP:
                *val = 100;
index 16407664182ce8005642ccd7d09ec15f4958a596..97d162a3cba4eaaec1442718898d539698529951 100644 (file)
@@ -248,7 +248,6 @@ static int dln2_adc_set_chan_period(struct dln2_adc *dln2,
 static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel)
 {
        int ret, i;
-       struct iio_dev *indio_dev = platform_get_drvdata(dln2->pdev);
        u16 conflict;
        __le16 value;
        int olen = sizeof(value);
@@ -257,13 +256,9 @@ static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel)
                .chan = channel,
        };
 
-       ret = iio_device_claim_direct_mode(indio_dev);
-       if (ret < 0)
-               return ret;
-
        ret = dln2_adc_set_chan_enabled(dln2, channel, true);
        if (ret < 0)
-               goto release_direct;
+               return ret;
 
        ret = dln2_adc_set_port_enabled(dln2, true, &conflict);
        if (ret < 0) {
@@ -300,8 +295,6 @@ disable_port:
        dln2_adc_set_port_enabled(dln2, false, NULL);
 disable_chan:
        dln2_adc_set_chan_enabled(dln2, channel, false);
-release_direct:
-       iio_device_release_direct_mode(indio_dev);
 
        return ret;
 }
@@ -337,10 +330,16 @@ static int dln2_adc_read_raw(struct iio_dev *indio_dev,
 
        switch (mask) {
        case IIO_CHAN_INFO_RAW:
+               ret = iio_device_claim_direct_mode(indio_dev);
+               if (ret < 0)
+                       return ret;
+
                mutex_lock(&dln2->mutex);
                ret = dln2_adc_read(dln2, chan->channel);
                mutex_unlock(&dln2->mutex);
 
+               iio_device_release_direct_mode(indio_dev);
+
                if (ret < 0)
                        return ret;
 
@@ -656,7 +655,11 @@ static int dln2_adc_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
        iio_trigger_set_drvdata(dln2->trig, dln2);
-       devm_iio_trigger_register(dev, dln2->trig);
+       ret = devm_iio_trigger_register(dev, dln2->trig);
+       if (ret) {
+               dev_err(dev, "failed to register trigger: %d\n", ret);
+               return ret;
+       }
        iio_trigger_set_immutable(indio_dev, dln2->trig);
 
        ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
index 6245434f8377954b635a6e6db556500b75acbd67..8cd258cb2682e6620bac9e7963670cf927be6eff 100644 (file)
@@ -1117,6 +1117,7 @@ static void stm32h7_adc_unprepare(struct iio_dev *indio_dev)
 {
        struct stm32_adc *adc = iio_priv(indio_dev);
 
+       stm32_adc_writel(adc, STM32H7_ADC_PCSEL, 0);
        stm32h7_adc_disable(indio_dev);
        stm32_adc_int_ch_disable(adc);
        stm32h7_adc_enter_pwr_down(adc);
@@ -1986,7 +1987,7 @@ static int stm32_adc_populate_int_ch(struct iio_dev *indio_dev, const char *ch_n
                        /* Get calibration data for vrefint channel */
                        ret = nvmem_cell_read_u16(&indio_dev->dev, "vrefint", &vrefint);
                        if (ret && ret != -ENOENT) {
-                               return dev_err_probe(&indio_dev->dev, ret,
+                               return dev_err_probe(indio_dev->dev.parent, ret,
                                                     "nvmem access error\n");
                        }
                        if (ret == -ENOENT)
index 3e0734ddafe363355c17614a3c14b885ae7164ea..600e9725da7882ce2f143217d0f94844d4e104a7 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/bitfield.h>
+#include <linux/bitops.h>
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/kernel.h>
@@ -124,7 +125,7 @@ static int adxrs290_get_rate_data(struct iio_dev *indio_dev, const u8 cmd, int *
                goto err_unlock;
        }
 
-       *val = temp;
+       *val = sign_extend32(temp, 15);
 
 err_unlock:
        mutex_unlock(&st->lock);
@@ -146,7 +147,7 @@ static int adxrs290_get_temp_data(struct iio_dev *indio_dev, int *val)
        }
 
        /* extract lower 12 bits temperature reading */
-       *val = temp & 0x0FFF;
+       *val = sign_extend32(temp, 11);
 
 err_unlock:
        mutex_unlock(&st->lock);
index 04dd6a7969ea79cf62a3c5bb2e0d6582a83c0e24..4cfa0d43956053e173686fa85ac708903396e50c 100644 (file)
@@ -61,9 +61,9 @@ static irqreturn_t itg3200_trigger_handler(int irq, void *p)
 
        iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp);
 
+error_ret:
        iio_trigger_notify_done(indio_dev->trig);
 
-error_ret:
        return IRQ_HANDLED;
 }
 
index b23caa2f2aa1fd30327ee5f5c1ed1251e6b3768a..93990ff1dfe39e5a760e9b8c82099f4af8f5a282 100644 (file)
@@ -556,7 +556,6 @@ struct iio_trigger *viio_trigger_alloc(struct device *parent,
                irq_modify_status(trig->subirq_base + i,
                                  IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
        }
-       get_device(&trig->dev);
 
        return trig;
 
index 7e51aaac0bf86db63dd1717b816bfd3b3f6a3b12..b2983b1a9ed1cc05ddda4ec5a08372b0b2cac493 100644 (file)
@@ -1275,7 +1275,7 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
                ret = regmap_bulk_read(data->regmap, LTR501_ALS_DATA1,
                                       als_buf, sizeof(als_buf));
                if (ret < 0)
-                       return ret;
+                       goto done;
                if (test_bit(0, indio_dev->active_scan_mask))
                        scan.channels[j++] = le16_to_cpu(als_buf[1]);
                if (test_bit(1, indio_dev->active_scan_mask))
index 07e91846307c7d38474b46e2fafc68767b997724..fc63856ed54debe80b55e815f43a6e9cfc8ba6e6 100644 (file)
@@ -546,9 +546,8 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private)
        mutex_lock(&data->lock);
        ret = regmap_field_read(data->reg_flag_nf, &dir);
        if (ret < 0) {
-               dev_err(&data->client->dev, "register read failed\n");
-               mutex_unlock(&data->lock);
-               return ret;
+               dev_err(&data->client->dev, "register read failed: %d\n", ret);
+               goto out;
        }
        event = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 1,
                                     IIO_EV_TYPE_THRESH,
@@ -560,6 +559,7 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private)
        ret = regmap_field_write(data->reg_flag_psint, 0);
        if (ret < 0)
                dev_err(&data->client->dev, "failed to reset interrupts\n");
+out:
        mutex_unlock(&data->lock);
 
        return IRQ_HANDLED;
index 33083877cd19d134857f647b8f80030c7cc7e3a1..4353b749ecef2795cd11d055f30f4c364397208b 100644 (file)
@@ -912,6 +912,6 @@ static struct platform_driver stm32_timer_trigger_driver = {
 };
 module_platform_driver(stm32_timer_trigger_driver);
 
-MODULE_ALIAS("platform: stm32-timer-trigger");
+MODULE_ALIAS("platform:stm32-timer-trigger");
 MODULE_DESCRIPTION("STMicroelectronics STM32 Timer Trigger driver");
 MODULE_LICENSE("GPL v2");
index ec37f4fd8e96bac28efd784d757bc6ff12e1dbb0..f1245c94ae2629d329d68e19349360b37bf7f725 100644 (file)
@@ -8415,6 +8415,8 @@ static void receive_interrupt_common(struct hfi1_ctxtdata *rcd)
  */
 static void __hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
 {
+       if (!rcd->rcvhdrq)
+               return;
        clear_recv_intr(rcd);
        if (check_packet_present(rcd))
                force_recv_intr(rcd);
index 61f341c3005cb972dc56a8c34e64fbbc7cd03131..e2c634af40e990103d9010c5e0eab5cd80faa663 100644 (file)
@@ -1012,6 +1012,8 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
        struct hfi1_packet packet;
        int skip_pkt = 0;
 
+       if (!rcd->rcvhdrq)
+               return RCV_PKT_OK;
        /* Control context will always use the slow path interrupt handler */
        needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 0 : 1;
 
index dbd1c31830b9c22e0f405db3149d5c9effdaf59b..4436ed41547c4fcc82479a6a5baaaa75dcc87e44 100644 (file)
@@ -113,7 +113,6 @@ static int hfi1_create_kctxt(struct hfi1_devdata *dd,
        rcd->fast_handler = get_dma_rtail_setting(rcd) ?
                                handle_receive_interrupt_dma_rtail :
                                handle_receive_interrupt_nodma_rtail;
-       rcd->slow_handler = handle_receive_interrupt;
 
        hfi1_set_seq_cnt(rcd, 1);
 
@@ -334,6 +333,8 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
                rcd->numa_id = numa;
                rcd->rcv_array_groups = dd->rcv_entries.ngroups;
                rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;
+               rcd->slow_handler = handle_receive_interrupt;
+               rcd->do_interrupt = rcd->slow_handler;
                rcd->msix_intr = CCE_NUM_MSIX_VECTORS;
 
                mutex_init(&rcd->exp_mutex);
@@ -874,18 +875,6 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
        if (ret)
                goto done;
 
-       /* allocate dummy tail memory for all receive contexts */
-       dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
-                                                        sizeof(u64),
-                                                        &dd->rcvhdrtail_dummy_dma,
-                                                        GFP_KERNEL);
-
-       if (!dd->rcvhdrtail_dummy_kvaddr) {
-               dd_dev_err(dd, "cannot allocate dummy tail memory\n");
-               ret = -ENOMEM;
-               goto done;
-       }
-
        /* dd->rcd can be NULL if early initialization failed */
        for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
                /*
@@ -898,8 +887,6 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
                if (!rcd)
                        continue;
 
-               rcd->do_interrupt = &handle_receive_interrupt;
-
                lastfail = hfi1_create_rcvhdrq(dd, rcd);
                if (!lastfail)
                        lastfail = hfi1_setup_eagerbufs(rcd);
@@ -1120,7 +1107,7 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
        rcd->egrbufs.rcvtids = NULL;
 
        for (e = 0; e < rcd->egrbufs.alloced; e++) {
-               if (rcd->egrbufs.buffers[e].dma)
+               if (rcd->egrbufs.buffers[e].addr)
                        dma_free_coherent(&dd->pcidev->dev,
                                          rcd->egrbufs.buffers[e].len,
                                          rcd->egrbufs.buffers[e].addr,
@@ -1201,6 +1188,11 @@ void hfi1_free_devdata(struct hfi1_devdata *dd)
        dd->tx_opstats    = NULL;
        kfree(dd->comp_vect);
        dd->comp_vect = NULL;
+       if (dd->rcvhdrtail_dummy_kvaddr)
+               dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
+                                 (void *)dd->rcvhdrtail_dummy_kvaddr,
+                                 dd->rcvhdrtail_dummy_dma);
+       dd->rcvhdrtail_dummy_kvaddr = NULL;
        sdma_clean(dd, dd->num_sdma);
        rvt_dealloc_device(&dd->verbs_dev.rdi);
 }
@@ -1298,6 +1290,15 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
                goto bail;
        }
 
+       /* allocate dummy tail memory for all receive contexts */
+       dd->rcvhdrtail_dummy_kvaddr =
+               dma_alloc_coherent(&dd->pcidev->dev, sizeof(u64),
+                                  &dd->rcvhdrtail_dummy_dma, GFP_KERNEL);
+       if (!dd->rcvhdrtail_dummy_kvaddr) {
+               ret = -ENOMEM;
+               goto bail;
+       }
+
        atomic_set(&dd->ipoib_rsm_usr_num, 0);
        return dd;
 
@@ -1505,13 +1506,6 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
 
        free_credit_return(dd);
 
-       if (dd->rcvhdrtail_dummy_kvaddr) {
-               dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
-                                 (void *)dd->rcvhdrtail_dummy_kvaddr,
-                                 dd->rcvhdrtail_dummy_dma);
-               dd->rcvhdrtail_dummy_kvaddr = NULL;
-       }
-
        /*
         * Free any resources still in use (usually just kernel contexts)
         * at unload; we do for ctxtcnt, because that's what we allocate.
index 2b6c24b7b58655b2d847585db6db71c4c5d85b13..f07d328689d3d4de4e554867aa08d283c95ebb74 100644 (file)
@@ -838,8 +838,8 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
        if (current->nr_cpus_allowed != 1)
                goto out;
 
-       cpu_id = smp_processor_id();
        rcu_read_lock();
+       cpu_id = smp_processor_id();
        rht_node = rhashtable_lookup(dd->sdma_rht, &cpu_id,
                                     sdma_rht_params);
 
index 9bfbaddd1763de473a5f15dd7243b1f382d9b82b..eb0defa80d0dcb1ad90382220ade69525d37407b 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/acpi.h>
 #include <linux/etherdevice.h>
 #include <linux/interrupt.h>
+#include <linux/iopoll.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <net/addrconf.h>
@@ -1050,9 +1051,14 @@ static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
                                        unsigned long instance_stage,
                                        unsigned long reset_stage)
 {
+#define HW_RESET_TIMEOUT_US 1000000
+#define HW_RESET_SLEEP_US 1000
+
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hnae3_handle *handle = priv->handle;
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+       unsigned long val;
+       int ret;
 
        /* When hardware reset is detected, we should stop sending mailbox&cmq&
         * doorbell to hardware. If now in .init_instance() function, we should
@@ -1064,7 +1070,11 @@ static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
         * again.
         */
        hr_dev->dis_db = true;
-       if (!ops->get_hw_reset_stat(handle))
+
+       ret = read_poll_timeout(ops->ae_dev_reset_cnt, val,
+                               val > hr_dev->reset_cnt, HW_RESET_SLEEP_US,
+                               HW_RESET_TIMEOUT_US, false, handle);
+       if (!ret)
                hr_dev->is_reset = true;
 
        if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
@@ -1584,11 +1594,17 @@ static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
 {
        struct hns_roce_cmq_desc desc;
        struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+       u32 clock_cycles_of_1us;
 
        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
                                      false);
 
-       hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, 0x3e8);
+       if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
+               clock_cycles_of_1us = HNS_ROCE_1NS_CFG;
+       else
+               clock_cycles_of_1us = HNS_ROCE_1US_CFG;
+
+       hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, clock_cycles_of_1us);
        hr_reg_write(req, CFG_GLOBAL_PARAM_UDP_PORT, ROCE_V2_UDP_DPORT);
 
        return hns_roce_cmq_send(hr_dev, &desc, 1);
@@ -4792,6 +4808,30 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
        return ret;
 }
 
+static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout)
+{
+#define QP_ACK_TIMEOUT_MAX_HIP08 20
+#define QP_ACK_TIMEOUT_OFFSET 10
+#define QP_ACK_TIMEOUT_MAX 31
+
+       if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+               if (*timeout > QP_ACK_TIMEOUT_MAX_HIP08) {
+                       ibdev_warn(&hr_dev->ib_dev,
+                                  "Local ACK timeout shall be 0 to 20.\n");
+                       return false;
+               }
+               *timeout += QP_ACK_TIMEOUT_OFFSET;
+       } else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) {
+               if (*timeout > QP_ACK_TIMEOUT_MAX) {
+                       ibdev_warn(&hr_dev->ib_dev,
+                                  "Local ACK timeout shall be 0 to 31.\n");
+                       return false;
+               }
+       }
+
+       return true;
+}
+
 static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
                                      const struct ib_qp_attr *attr,
                                      int attr_mask,
@@ -4801,6 +4841,7 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
        int ret = 0;
+       u8 timeout;
 
        if (attr_mask & IB_QP_AV) {
                ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
@@ -4810,12 +4851,10 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
        }
 
        if (attr_mask & IB_QP_TIMEOUT) {
-               if (attr->timeout < 31) {
-                       hr_reg_write(context, QPC_AT, attr->timeout);
+               timeout = attr->timeout;
+               if (check_qp_timeout_cfg_range(hr_dev, &timeout)) {
+                       hr_reg_write(context, QPC_AT, timeout);
                        hr_reg_clear(qpc_mask, QPC_AT);
-               } else {
-                       ibdev_warn(&hr_dev->ib_dev,
-                                  "Local ACK timeout shall be 0 to 30.\n");
                }
        }
 
@@ -4872,7 +4911,9 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
                set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
 
        if (attr_mask & IB_QP_MIN_RNR_TIMER) {
-               hr_reg_write(context, QPC_MIN_RNR_TIME, attr->min_rnr_timer);
+               hr_reg_write(context, QPC_MIN_RNR_TIME,
+                           hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ?
+                           HNS_ROCE_RNR_TIMER_10NS : attr->min_rnr_timer);
                hr_reg_clear(qpc_mask, QPC_MIN_RNR_TIME);
        }
 
@@ -5489,6 +5530,16 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 
        hr_reg_write(cq_context, CQC_CQ_MAX_CNT, cq_count);
        hr_reg_clear(cqc_mask, CQC_CQ_MAX_CNT);
+
+       if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+               if (cq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
+                       dev_info(hr_dev->dev,
+                                "cq_period(%u) reached the upper limit, adjusted to 65.\n",
+                                cq_period);
+                       cq_period = HNS_ROCE_MAX_CQ_PERIOD;
+               }
+               cq_period *= HNS_ROCE_CLOCK_ADJUST;
+       }
        hr_reg_write(cq_context, CQC_CQ_PERIOD, cq_period);
        hr_reg_clear(cqc_mask, CQC_CQ_PERIOD);
 
@@ -5884,6 +5935,15 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
        hr_reg_write(eqc, EQC_EQ_PROD_INDX, HNS_ROCE_EQ_INIT_PROD_IDX);
        hr_reg_write(eqc, EQC_EQ_MAX_CNT, eq->eq_max_cnt);
 
+       if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+               if (eq->eq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
+                       dev_info(hr_dev->dev, "eq_period(%u) reached the upper limit, adjusted to 65.\n",
+                                eq->eq_period);
+                       eq->eq_period = HNS_ROCE_MAX_EQ_PERIOD;
+               }
+               eq->eq_period *= HNS_ROCE_CLOCK_ADJUST;
+       }
+
        hr_reg_write(eqc, EQC_EQ_PERIOD, eq->eq_period);
        hr_reg_write(eqc, EQC_EQE_REPORT_TIMER, HNS_ROCE_EQ_INIT_REPORT_TIMER);
        hr_reg_write(eqc, EQC_EQE_BA_L, bt_ba >> 3);
@@ -6387,10 +6447,8 @@ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
        if (!hr_dev)
                return 0;
 
-       hr_dev->is_reset = true;
        hr_dev->active = false;
        hr_dev->dis_db = true;
-
        hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;
 
        return 0;
index 4d904d5e82be4efb7efb7b91b4dc656e00535b41..35c61da7ba156be04bb76d7cb041cc99eba0161b 100644 (file)
@@ -1444,6 +1444,14 @@ struct hns_roce_dip {
        struct list_head node;  /* all dips are on a list */
 };
 
+/* only for RNR timeout issue of HIP08 */
+#define HNS_ROCE_CLOCK_ADJUST 1000
+#define HNS_ROCE_MAX_CQ_PERIOD 65
+#define HNS_ROCE_MAX_EQ_PERIOD 65
+#define HNS_ROCE_RNR_TIMER_10NS 1
+#define HNS_ROCE_1US_CFG 999
+#define HNS_ROCE_1NS_CFG 0
+
 #define HNS_ROCE_AEQ_DEFAULT_BURST_NUM 0x0
 #define HNS_ROCE_AEQ_DEFAULT_INTERVAL  0x0
 #define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x0
index 6eee9deadd122c0ec1263c12fa7b5ceffe50aca4..e64ef6903fb4f627c822e34417b9a5f0ea544980 100644 (file)
@@ -259,7 +259,7 @@ static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
 
 static void free_srq_wrid(struct hns_roce_srq *srq)
 {
-       kfree(srq->wrid);
+       kvfree(srq->wrid);
        srq->wrid = NULL;
 }
 
index 4108dcabece2390229ff3efd014a1551cccfb6e8..b4c657f5f2f95cb6d0c2c681e51d5d9579133717 100644 (file)
@@ -60,6 +60,8 @@ static void irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq)
 {
        struct irdma_cq *cq = iwcq->back_cq;
 
+       if (!cq->user_mode)
+               cq->armed = false;
        if (cq->ibcq.comp_handler)
                cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
 }
@@ -146,6 +148,7 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
                qp->flush_code = FLUSH_PROT_ERR;
                break;
        case IRDMA_AE_AMP_BAD_QP:
+       case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
                qp->flush_code = FLUSH_LOC_QP_OP_ERR;
                break;
        case IRDMA_AE_AMP_BAD_STAG_KEY:
@@ -156,7 +159,6 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
        case IRDMA_AE_PRIV_OPERATION_DENIED:
        case IRDMA_AE_IB_INVALID_REQUEST:
        case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
-       case IRDMA_AE_IB_REMOTE_OP_ERROR:
                qp->flush_code = FLUSH_REM_ACCESS_ERR;
                qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
                break;
@@ -184,6 +186,9 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
        case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
                qp->flush_code = FLUSH_MW_BIND_ERR;
                break;
+       case IRDMA_AE_IB_REMOTE_OP_ERROR:
+               qp->flush_code = FLUSH_REM_OP_ERR;
+               break;
        default:
                qp->flush_code = FLUSH_FATAL_ERR;
                break;
index 91a497139ba3a690af37f75c5342480d43c7b8d3..cb218cab79ac12d9e9f195d4075bf0aa77156d55 100644 (file)
@@ -542,6 +542,7 @@ int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
                    void (*callback_fcn)(struct irdma_cqp_request *cqp_request),
                    void *cb_param);
 void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request);
+bool irdma_cq_empty(struct irdma_cq *iwcq);
 int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event,
                         void *ptr);
 int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event,
index aeeb1c310965decb8b3110cf8a93101c73500e65..fed49da770f3b66eec5a335941b5bd69914493d3 100644 (file)
@@ -25,8 +25,7 @@ void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
                list_del(&chunk->list);
                if (chunk->type == PBLE_SD_PAGED)
                        irdma_pble_free_paged_mem(chunk);
-               if (chunk->bitmapbuf)
-                       kfree(chunk->bitmapmem.va);
+               bitmap_free(chunk->bitmapbuf);
                kfree(chunk->chunkmem.va);
        }
 }
@@ -283,7 +282,6 @@ add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
                  "PBLE: next_fpm_addr = %llx chunk_size[%llu] = 0x%llx\n",
                  pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
        pble_rsrc->unallocated_pble -= (u32)(chunk->size >> 3);
-       list_add(&chunk->list, &pble_rsrc->pinfo.clist);
        sd_reg_val = (sd_entry_type == IRDMA_SD_TYPE_PAGED) ?
                             sd_entry->u.pd_table.pd_page_addr.pa :
                             sd_entry->u.bp.addr.pa;
@@ -295,12 +293,12 @@ add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
                        goto error;
        }
 
+       list_add(&chunk->list, &pble_rsrc->pinfo.clist);
        sd_entry->valid = true;
        return 0;
 
 error:
-       if (chunk->bitmapbuf)
-               kfree(chunk->bitmapmem.va);
+       bitmap_free(chunk->bitmapbuf);
        kfree(chunk->chunkmem.va);
 
        return ret_code;
index e1b3b8118a2ca4cd2d348f6c0c4414ddc2b95ca4..aa20827dcc9de53ef47ab241e87013d7bfe4c5bb 100644 (file)
@@ -78,7 +78,6 @@ struct irdma_chunk {
        u32 pg_cnt;
        enum irdma_alloc_type type;
        struct irdma_sc_dev *dev;
-       struct irdma_virt_mem bitmapmem;
        struct irdma_virt_mem chunkmem;
 };
 
index 8b42c43fc14fe89dfd504c90edffcf9975daa464..398736d8c78a4d0427830e2f18d70be80f415b0a 100644 (file)
@@ -2239,15 +2239,10 @@ enum irdma_status_code irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
 
        sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift;
 
-       pchunk->bitmapmem.size = sizeofbitmap >> 3;
-       pchunk->bitmapmem.va = kzalloc(pchunk->bitmapmem.size, GFP_KERNEL);
-
-       if (!pchunk->bitmapmem.va)
+       pchunk->bitmapbuf = bitmap_zalloc(sizeofbitmap, GFP_KERNEL);
+       if (!pchunk->bitmapbuf)
                return IRDMA_ERR_NO_MEMORY;
 
-       pchunk->bitmapbuf = pchunk->bitmapmem.va;
-       bitmap_zero(pchunk->bitmapbuf, sizeofbitmap);
-
        pchunk->sizeofbitmap = sizeofbitmap;
        /* each pble is 8 bytes hence shift by 3 */
        pprm->total_pble_alloc += pchunk->size >> 3;
@@ -2491,3 +2486,18 @@ void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
        ibevent.element.qp = &iwqp->ibqp;
        iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
 }
+
+bool irdma_cq_empty(struct irdma_cq *iwcq)
+{
+       struct irdma_cq_uk *ukcq;
+       u64 qword3;
+       __le64 *cqe;
+       u8 polarity;
+
+       ukcq  = &iwcq->sc_cq.cq_uk;
+       cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
+       get_64bit_val(cqe, 24, &qword3);
+       polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+
+       return polarity != ukcq->polarity;
+}
index 0f66e809d41850e39516f7465bcf8f2476639475..8cd5f9261692d45f00baa80d199f30cdaca37a15 100644 (file)
@@ -3584,18 +3584,31 @@ static int irdma_req_notify_cq(struct ib_cq *ibcq,
        struct irdma_cq *iwcq;
        struct irdma_cq_uk *ukcq;
        unsigned long flags;
-       enum irdma_cmpl_notify cq_notify = IRDMA_CQ_COMPL_EVENT;
+       enum irdma_cmpl_notify cq_notify;
+       bool promo_event = false;
+       int ret = 0;
 
+       cq_notify = notify_flags == IB_CQ_SOLICITED ?
+                   IRDMA_CQ_COMPL_SOLICITED : IRDMA_CQ_COMPL_EVENT;
        iwcq = to_iwcq(ibcq);
        ukcq = &iwcq->sc_cq.cq_uk;
-       if (notify_flags == IB_CQ_SOLICITED)
-               cq_notify = IRDMA_CQ_COMPL_SOLICITED;
 
        spin_lock_irqsave(&iwcq->lock, flags);
-       irdma_uk_cq_request_notification(ukcq, cq_notify);
+       /* Only promote to arm the CQ for any event if the last arm event was solicited. */
+       if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED && notify_flags != IB_CQ_SOLICITED)
+               promo_event = true;
+
+       if (!iwcq->armed || promo_event) {
+               iwcq->armed = true;
+               iwcq->last_notify = cq_notify;
+               irdma_uk_cq_request_notification(ukcq, cq_notify);
+       }
+
+       if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && !irdma_cq_empty(iwcq))
+               ret = 1;
        spin_unlock_irqrestore(&iwcq->lock, flags);
 
-       return 0;
+       return ret;
 }
 
 static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
index 5c244cd321a3ac79fac18fe60f2b5072fb366066..d0fdef8d09eada33149faed94ed2633577c7479b 100644 (file)
@@ -110,6 +110,8 @@ struct irdma_cq {
        u16 cq_size;
        u16 cq_num;
        bool user_mode;
+       bool armed;
+       enum irdma_cmpl_notify last_notify;
        u32 polled_cmpls;
        u32 cq_mem_size;
        struct irdma_dma_mem kmem;
index e636e954f6bf2a30061ed41d2dcaa4a91557e9e7..4a7a56ed740b9b35e84b40e870ec70e7cf7feffd 100644 (file)
@@ -664,7 +664,6 @@ struct mlx5_ib_mr {
 
        /* User MR data */
        struct mlx5_cache_ent *cache_ent;
-       struct ib_umem *umem;
 
        /* This is zero'd when the MR is allocated */
        union {
@@ -676,7 +675,7 @@ struct mlx5_ib_mr {
                        struct list_head list;
                };
 
-               /* Used only by kernel MRs (umem == NULL) */
+               /* Used only by kernel MRs */
                struct {
                        void *descs;
                        void *descs_alloc;
@@ -697,8 +696,9 @@ struct mlx5_ib_mr {
                        int data_length;
                };
 
-               /* Used only by User MRs (umem != NULL) */
+               /* Used only by User MRs */
                struct {
+                       struct ib_umem *umem;
                        unsigned int page_shift;
                        /* Current access_flags */
                        int access_flags;
index 157d862fb86429ba3ba51cd18e98adf632f4d1cf..63e2129f1142ba5e49ffac9d356fd18d2418f44a 100644 (file)
@@ -1904,19 +1904,18 @@ err:
        return ret;
 }
 
-static void
-mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
+static void mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
 {
-       if (!mr->umem && mr->descs) {
-               struct ib_device *device = mr->ibmr.device;
-               int size = mr->max_descs * mr->desc_size;
-               struct mlx5_ib_dev *dev = to_mdev(device);
+       struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+       int size = mr->max_descs * mr->desc_size;
 
-               dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
-                                DMA_TO_DEVICE);
-               kfree(mr->descs_alloc);
-               mr->descs = NULL;
-       }
+       if (!mr->descs)
+               return;
+
+       dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
+                        DMA_TO_DEVICE);
+       kfree(mr->descs_alloc);
+       mr->descs = NULL;
 }
 
 int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
@@ -1992,7 +1991,8 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
        if (mr->cache_ent) {
                mlx5_mr_cache_free(dev, mr);
        } else {
-               mlx5_free_priv_descs(mr);
+               if (!udata)
+                       mlx5_free_priv_descs(mr);
                kfree(mr);
        }
        return 0;
@@ -2079,7 +2079,6 @@ static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
        if (err)
                goto err_free_in;
 
-       mr->umem = NULL;
        kfree(in);
 
        return mr;
@@ -2206,7 +2205,6 @@ static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
        }
 
        mr->ibmr.device = pd->device;
-       mr->umem = NULL;
 
        switch (mr_type) {
        case IB_MR_TYPE_MEM_REG:
index ac11943a5ddb0f28f83f08b93a0fdae6b7c6a75a..bf2f30d67949dc386404a632c1ec28834d7269ff 100644 (file)
@@ -941,7 +941,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
                                               &addrlimit) ||
                            addrlimit > type_max(typeof(pkt->addrlimit))) {
                                ret = -EINVAL;
-                               goto free_pbc;
+                               goto free_pkt;
                        }
                        pkt->addrlimit = addrlimit;
 
index 975321812c870ff544c39a560ed7dd6a8d6ffc06..54b8711321c1e58753e7df39af6d404dc63ff443 100644 (file)
@@ -359,6 +359,7 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
 
 err2:
        rxe_queue_cleanup(qp->sq.queue);
+       qp->sq.queue = NULL;
 err1:
        qp->pd = NULL;
        qp->rcq = NULL;
index f7e459fe68be6e04e2adc31525daf5872942b892..76e4352fe3f63e6aaf17576e655ce63628264d90 100644 (file)
@@ -19,7 +19,7 @@ void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
        int cpu;
 
        cpu = raw_smp_processor_id();
-       s = this_cpu_ptr(stats->pcpu_stats);
+       s = get_cpu_ptr(stats->pcpu_stats);
        if (con->cpu != cpu) {
                s->cpu_migr.to++;
 
@@ -27,14 +27,16 @@ void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
                s = per_cpu_ptr(stats->pcpu_stats, con->cpu);
                atomic_inc(&s->cpu_migr.from);
        }
+       put_cpu_ptr(stats->pcpu_stats);
 }
 
 void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *stats)
 {
        struct rtrs_clt_stats_pcpu *s;
 
-       s = this_cpu_ptr(stats->pcpu_stats);
+       s = get_cpu_ptr(stats->pcpu_stats);
        s->rdma.failover_cnt++;
+       put_cpu_ptr(stats->pcpu_stats);
 }
 
 int rtrs_clt_stats_migration_from_cnt_to_str(struct rtrs_clt_stats *stats, char *buf)
@@ -169,9 +171,10 @@ static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats,
 {
        struct rtrs_clt_stats_pcpu *s;
 
-       s = this_cpu_ptr(stats->pcpu_stats);
+       s = get_cpu_ptr(stats->pcpu_stats);
        s->rdma.dir[d].cnt++;
        s->rdma.dir[d].size_total += size;
+       put_cpu_ptr(stats->pcpu_stats);
 }
 
 void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
index d57e996732cf4d9f8ddc2b7960bc808c64a9a67b..23b5dd9552dcc0539058a17ed2a19367cbb8add4 100644 (file)
@@ -456,9 +456,10 @@ struct iqs626_private {
        unsigned int suspend_mode;
 };
 
-static int iqs626_parse_events(struct iqs626_private *iqs626,
-                              const struct fwnode_handle *ch_node,
-                              enum iqs626_ch_id ch_id)
+static noinline_for_stack int
+iqs626_parse_events(struct iqs626_private *iqs626,
+                   const struct fwnode_handle *ch_node,
+                   enum iqs626_ch_id ch_id)
 {
        struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
        struct i2c_client *client = iqs626->client;
@@ -604,9 +605,10 @@ static int iqs626_parse_events(struct iqs626_private *iqs626,
        return 0;
 }
 
-static int iqs626_parse_ati_target(struct iqs626_private *iqs626,
-                                  const struct fwnode_handle *ch_node,
-                                  enum iqs626_ch_id ch_id)
+static noinline_for_stack int
+iqs626_parse_ati_target(struct iqs626_private *iqs626,
+                       const struct fwnode_handle *ch_node,
+                       enum iqs626_ch_id ch_id)
 {
        struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
        struct i2c_client *client = iqs626->client;
@@ -885,9 +887,10 @@ static int iqs626_parse_trackpad(struct iqs626_private *iqs626,
        return 0;
 }
 
-static int iqs626_parse_channel(struct iqs626_private *iqs626,
-                               const struct fwnode_handle *ch_node,
-                               enum iqs626_ch_id ch_id)
+static noinline_for_stack int
+iqs626_parse_channel(struct iqs626_private *iqs626,
+                    const struct fwnode_handle *ch_node,
+                    enum iqs626_ch_id ch_id)
 {
        struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
        struct i2c_client *client = iqs626->client;
index 956d9cd34796485cfde3b73da2300cea199cfe3f..ece97f8c6a3e399365b3f7c6a7e5d3f923c06ca7 100644 (file)
@@ -1588,7 +1588,13 @@ static const struct dmi_system_id no_hw_res_dmi_table[] = {
  */
 static int elantech_change_report_id(struct psmouse *psmouse)
 {
-       unsigned char param[2] = { 0x10, 0x03 };
+       /*
+        * NOTE: the code is expecting to receive param[] as an array of 3
+        * items (see __ps2_command()), even if in this case only 2 are
+        * actually needed. Make sure the array size is 3 to avoid potential
+        * stack out-of-bound accesses.
+        */
+       unsigned char param[3] = { 0x10, 0x03 };
 
        if (elantech_write_reg_params(psmouse, 0x7, param) ||
            elantech_read_reg_params(psmouse, 0x7, param) ||
index aedd0554104435e994793d7ad28cd2e26292a913..148a7c5fd0e22be932e922c6ba641b6df361e7b2 100644 (file)
@@ -995,6 +995,24 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
        { }
 };
 
+static const struct dmi_system_id i8042_dmi_probe_defer_table[] __initconst = {
+       {
+               /* ASUS ZenBook UX425UA */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"),
+               },
+       },
+       {
+               /* ASUS ZenBook UM325UA */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
+               },
+       },
+       { }
+};
+
 #endif /* CONFIG_X86 */
 
 #ifdef CONFIG_PNP
@@ -1315,6 +1333,9 @@ static int __init i8042_platform_init(void)
        if (dmi_check_system(i8042_dmi_kbdreset_table))
                i8042_kbdreset = true;
 
+       if (dmi_check_system(i8042_dmi_probe_defer_table))
+               i8042_probe_defer = true;
+
        /*
         * A20 was already enabled during early kernel init. But some buggy
         * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
index 0b9f1d0a8f8b0a703d1acf46058f27cdc460a441..3fc0a89cc785cb6b76bcaf49be8b573fb1682c8d 100644 (file)
@@ -45,6 +45,10 @@ static bool i8042_unlock;
 module_param_named(unlock, i8042_unlock, bool, 0);
 MODULE_PARM_DESC(unlock, "Ignore keyboard lock.");
 
+static bool i8042_probe_defer;
+module_param_named(probe_defer, i8042_probe_defer, bool, 0);
+MODULE_PARM_DESC(probe_defer, "Allow deferred probing.");
+
 enum i8042_controller_reset_mode {
        I8042_RESET_NEVER,
        I8042_RESET_ALWAYS,
@@ -711,7 +715,7 @@ static int i8042_set_mux_mode(bool multiplex, unsigned char *mux_version)
  * LCS/Telegraphics.
  */
 
-static int __init i8042_check_mux(void)
+static int i8042_check_mux(void)
 {
        unsigned char mux_version;
 
@@ -740,10 +744,10 @@ static int __init i8042_check_mux(void)
 /*
  * The following is used to test AUX IRQ delivery.
  */
-static struct completion i8042_aux_irq_delivered __initdata;
-static bool i8042_irq_being_tested __initdata;
+static struct completion i8042_aux_irq_delivered;
+static bool i8042_irq_being_tested;
 
-static irqreturn_t __init i8042_aux_test_irq(int irq, void *dev_id)
+static irqreturn_t i8042_aux_test_irq(int irq, void *dev_id)
 {
        unsigned long flags;
        unsigned char str, data;
@@ -770,7 +774,7 @@ static irqreturn_t __init i8042_aux_test_irq(int irq, void *dev_id)
  * verifies success by readinng CTR. Used when testing for presence of AUX
  * port.
  */
-static int __init i8042_toggle_aux(bool on)
+static int i8042_toggle_aux(bool on)
 {
        unsigned char param;
        int i;
@@ -798,7 +802,7 @@ static int __init i8042_toggle_aux(bool on)
  * the presence of an AUX interface.
  */
 
-static int __init i8042_check_aux(void)
+static int i8042_check_aux(void)
 {
        int retval = -1;
        bool irq_registered = false;
@@ -1005,7 +1009,7 @@ static int i8042_controller_init(void)
 
                if (i8042_command(&ctr[n++ % 2], I8042_CMD_CTL_RCTR)) {
                        pr_err("Can't read CTR while initializing i8042\n");
-                       return -EIO;
+                       return i8042_probe_defer ? -EPROBE_DEFER : -EIO;
                }
 
        } while (n < 2 || ctr[0] != ctr[1]);
@@ -1320,7 +1324,7 @@ static void i8042_shutdown(struct platform_device *dev)
        i8042_controller_reset(false);
 }
 
-static int __init i8042_create_kbd_port(void)
+static int i8042_create_kbd_port(void)
 {
        struct serio *serio;
        struct i8042_port *port = &i8042_ports[I8042_KBD_PORT_NO];
@@ -1349,7 +1353,7 @@ static int __init i8042_create_kbd_port(void)
        return 0;
 }
 
-static int __init i8042_create_aux_port(int idx)
+static int i8042_create_aux_port(int idx)
 {
        struct serio *serio;
        int port_no = idx < 0 ? I8042_AUX_PORT_NO : I8042_MUX_PORT_NO + idx;
@@ -1386,13 +1390,13 @@ static int __init i8042_create_aux_port(int idx)
        return 0;
 }
 
-static void __init i8042_free_kbd_port(void)
+static void i8042_free_kbd_port(void)
 {
        kfree(i8042_ports[I8042_KBD_PORT_NO].serio);
        i8042_ports[I8042_KBD_PORT_NO].serio = NULL;
 }
 
-static void __init i8042_free_aux_ports(void)
+static void i8042_free_aux_ports(void)
 {
        int i;
 
@@ -1402,7 +1406,7 @@ static void __init i8042_free_aux_ports(void)
        }
 }
 
-static void __init i8042_register_ports(void)
+static void i8042_register_ports(void)
 {
        int i;
 
@@ -1443,7 +1447,7 @@ static void i8042_free_irqs(void)
        i8042_aux_irq_registered = i8042_kbd_irq_registered = false;
 }
 
-static int __init i8042_setup_aux(void)
+static int i8042_setup_aux(void)
 {
        int (*aux_enable)(void);
        int error;
@@ -1485,7 +1489,7 @@ static int __init i8042_setup_aux(void)
        return error;
 }
 
-static int __init i8042_setup_kbd(void)
+static int i8042_setup_kbd(void)
 {
        int error;
 
@@ -1535,7 +1539,7 @@ static int i8042_kbd_bind_notifier(struct notifier_block *nb,
        return 0;
 }
 
-static int __init i8042_probe(struct platform_device *dev)
+static int i8042_probe(struct platform_device *dev)
 {
        int error;
 
@@ -1600,6 +1604,7 @@ static struct platform_driver i8042_driver = {
                .pm     = &i8042_pm_ops,
 #endif
        },
+       .probe          = i8042_probe,
        .remove         = i8042_remove,
        .shutdown       = i8042_shutdown,
 };
@@ -1610,7 +1615,6 @@ static struct notifier_block i8042_kbd_bind_notifier_block = {
 
 static int __init i8042_init(void)
 {
-       struct platform_device *pdev;
        int err;
 
        dbg_init();
@@ -1626,17 +1630,29 @@ static int __init i8042_init(void)
        /* Set this before creating the dev to allow i8042_command to work right away */
        i8042_present = true;
 
-       pdev = platform_create_bundle(&i8042_driver, i8042_probe, NULL, 0, NULL, 0);
-       if (IS_ERR(pdev)) {
-               err = PTR_ERR(pdev);
+       err = platform_driver_register(&i8042_driver);
+       if (err)
                goto err_platform_exit;
+
+       i8042_platform_device = platform_device_alloc("i8042", -1);
+       if (!i8042_platform_device) {
+               err = -ENOMEM;
+               goto err_unregister_driver;
        }
 
+       err = platform_device_add(i8042_platform_device);
+       if (err)
+               goto err_free_device;
+
        bus_register_notifier(&serio_bus, &i8042_kbd_bind_notifier_block);
        panic_blink = i8042_panic_blink;
 
        return 0;
 
+err_free_device:
+       platform_device_put(i8042_platform_device);
+err_unregister_driver:
+       platform_driver_unregister(&i8042_driver);
  err_platform_exit:
        i8042_platform_exit();
        return err;
index 05de92c0293bca82889c7a94912e748368b8b1b6..eb66cd2689b7c64487910cacc75a97f231e804ef 100644 (file)
@@ -1882,7 +1882,7 @@ static int mxt_read_info_block(struct mxt_data *data)
        if (error) {
                dev_err(&client->dev, "Error %d parsing object table\n", error);
                mxt_free_object_table(data);
-               goto err_free_mem;
+               return error;
        }
 
        data->object_table = (struct mxt_object *)(id_buf + MXT_OBJECT_START);
index 7e13a66a8a95c8f9729ba7791df4cb97737b5a49..879a4d984c9078fecad5b88f395218396663ec7f 100644 (file)
 #define ELAN_POWERON_DELAY_USEC        500
 #define ELAN_RESET_DELAY_MSEC  20
 
+/* FW boot code version */
+#define BC_VER_H_BYTE_FOR_EKTH3900x1_I2C        0x72
+#define BC_VER_H_BYTE_FOR_EKTH3900x2_I2C        0x82
+#define BC_VER_H_BYTE_FOR_EKTH3900x3_I2C        0x92
+#define BC_VER_H_BYTE_FOR_EKTH5312x1_I2C        0x6D
+#define BC_VER_H_BYTE_FOR_EKTH5312x2_I2C        0x6E
+#define BC_VER_H_BYTE_FOR_EKTH5312cx1_I2C       0x77
+#define BC_VER_H_BYTE_FOR_EKTH5312cx2_I2C       0x78
+#define BC_VER_H_BYTE_FOR_EKTH5312x1_I2C_USB    0x67
+#define BC_VER_H_BYTE_FOR_EKTH5312x2_I2C_USB    0x68
+#define BC_VER_H_BYTE_FOR_EKTH5312cx1_I2C_USB   0x74
+#define BC_VER_H_BYTE_FOR_EKTH5312cx2_I2C_USB   0x75
+
 enum elants_chip_id {
        EKTH3500,
        EKTF3624,
@@ -736,6 +749,37 @@ static int elants_i2c_validate_remark_id(struct elants_data *ts,
        return 0;
 }
 
+static bool elants_i2c_should_check_remark_id(struct elants_data *ts)
+{
+       struct i2c_client *client = ts->client;
+       const u8 bootcode_version = ts->iap_version;
+       bool check;
+
+       /* I2C eKTH3900 and eKTH5312 are NOT support Remark ID */
+       if ((bootcode_version == BC_VER_H_BYTE_FOR_EKTH3900x1_I2C) ||
+           (bootcode_version == BC_VER_H_BYTE_FOR_EKTH3900x2_I2C) ||
+           (bootcode_version == BC_VER_H_BYTE_FOR_EKTH3900x3_I2C) ||
+           (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312x1_I2C) ||
+           (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312x2_I2C) ||
+           (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312cx1_I2C) ||
+           (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312cx2_I2C) ||
+           (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312x1_I2C_USB) ||
+           (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312x2_I2C_USB) ||
+           (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312cx1_I2C_USB) ||
+           (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312cx2_I2C_USB)) {
+               dev_dbg(&client->dev,
+                       "eKTH3900/eKTH5312(0x%02x) are not support remark id\n",
+                       bootcode_version);
+               check = false;
+       } else if (bootcode_version >= 0x60) {
+               check = true;
+       } else {
+               check = false;
+       }
+
+       return check;
+}
+
 static int elants_i2c_do_update_firmware(struct i2c_client *client,
                                         const struct firmware *fw,
                                         bool force)
@@ -749,7 +793,7 @@ static int elants_i2c_do_update_firmware(struct i2c_client *client,
        u16 send_id;
        int page, n_fw_pages;
        int error;
-       bool check_remark_id = ts->iap_version >= 0x60;
+       bool check_remark_id = elants_i2c_should_check_remark_id(ts);
 
        /* Recovery mode detection! */
        if (force) {
index b5cc9178819530bc7605957efa8709cf7633caa4..aaa3c455e01eac4dd2616ee5340b22df209543cf 100644 (file)
@@ -102,6 +102,7 @@ static const struct goodix_chip_id goodix_chip_ids[] = {
        { .id = "911", .data = &gt911_chip_data },
        { .id = "9271", .data = &gt911_chip_data },
        { .id = "9110", .data = &gt911_chip_data },
+       { .id = "9111", .data = &gt911_chip_data },
        { .id = "927", .data = &gt911_chip_data },
        { .id = "928", .data = &gt911_chip_data },
 
@@ -650,10 +651,16 @@ int goodix_reset_no_int_sync(struct goodix_ts_data *ts)
 
        usleep_range(6000, 10000);              /* T4: > 5ms */
 
-       /* end select I2C slave addr */
-       error = gpiod_direction_input(ts->gpiod_rst);
-       if (error)
-               goto error;
+       /*
+        * Put the reset pin back in to input / high-impedance mode to save
+        * power. Only do this in the non ACPI case since some ACPI boards
+        * don't have a pull-up, so there the reset pin must stay active-high.
+        */
+       if (ts->irq_pin_access_method == IRQ_PIN_ACCESS_GPIO) {
+               error = gpiod_direction_input(ts->gpiod_rst);
+               if (error)
+                       goto error;
+       }
 
        return 0;
 
@@ -787,6 +794,14 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
                return -EINVAL;
        }
 
+       /*
+        * Normally we put the reset pin in input / high-impedance mode to save
+        * power. But some x86/ACPI boards don't have a pull-up, so for the ACPI
+        * case, leave the pin as is. This results in the pin not being touched
+        * at all on x86/ACPI boards, except when needed for error-recover.
+        */
+       ts->gpiod_rst_flags = GPIOD_ASIS;
+
        return devm_acpi_dev_add_driver_gpios(dev, gpio_mapping);
 }
 #else
@@ -812,6 +827,12 @@ static int goodix_get_gpio_config(struct goodix_ts_data *ts)
                return -EINVAL;
        dev = &ts->client->dev;
 
+       /*
+        * By default we request the reset pin as input, leaving it in
+        * high-impedance when not resetting the controller to save power.
+        */
+       ts->gpiod_rst_flags = GPIOD_IN;
+
        ts->avdd28 = devm_regulator_get(dev, "AVDD28");
        if (IS_ERR(ts->avdd28)) {
                error = PTR_ERR(ts->avdd28);
@@ -849,7 +870,7 @@ retry_get_irq_gpio:
        ts->gpiod_int = gpiod;
 
        /* Get the reset line GPIO pin number */
-       gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_RST_NAME, GPIOD_IN);
+       gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_RST_NAME, ts->gpiod_rst_flags);
        if (IS_ERR(gpiod)) {
                error = PTR_ERR(gpiod);
                if (error != -EPROBE_DEFER)
index 62138f930d1aaa010daeeb9059b83cd8d30d4ff9..02065d1c326354a87aa3f1270c7435115d621223 100644 (file)
@@ -87,6 +87,7 @@ struct goodix_ts_data {
        struct gpio_desc *gpiod_rst;
        int gpio_count;
        int gpio_int_idx;
+       enum gpiod_flags gpiod_rst_flags;
        char id[GOODIX_ID_MAX_LEN + 1];
        char cfg_name[64];
        u16 version;
index c1e7a241307822933cc2b65d046fdade5cc5ed06..191d4f38d991ec52493f66d4a0b4d474d1f8df38 100644 (file)
@@ -207,7 +207,7 @@ static int goodix_firmware_upload(struct goodix_ts_data *ts)
 
        error = goodix_reset_no_int_sync(ts);
        if (error)
-               return error;
+               goto release;
 
        error = goodix_enter_upload_mode(ts->client);
        if (error)
index 3759dc36cc8f73f664e10e8ed6c8fd2ceca1754f..2543ef65825b903438f16331de4c92f864fe125e 100644 (file)
@@ -707,7 +707,7 @@ static const struct irq_domain_ops aic_ipi_domain_ops = {
        .free = aic_ipi_free,
 };
 
-static int aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
+static int __init aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
 {
        struct irq_domain *ipi_domain;
        int base_ipi;
index 80906bfec845f7c2f25b06ce2adabf06164bce22..5b8d571c041dccfe80fbad1756ebfd77cf7fb7ed 100644 (file)
@@ -232,16 +232,12 @@ static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
        int hwirq, i;
 
        mutex_lock(&msi_used_lock);
+       hwirq = bitmap_find_free_region(msi_used, PCI_MSI_DOORBELL_NR,
+                                       order_base_2(nr_irqs));
+       mutex_unlock(&msi_used_lock);
 
-       hwirq = bitmap_find_next_zero_area(msi_used, PCI_MSI_DOORBELL_NR,
-                                          0, nr_irqs, 0);
-       if (hwirq >= PCI_MSI_DOORBELL_NR) {
-               mutex_unlock(&msi_used_lock);
+       if (hwirq < 0)
                return -ENOSPC;
-       }
-
-       bitmap_set(msi_used, hwirq, nr_irqs);
-       mutex_unlock(&msi_used_lock);
 
        for (i = 0; i < nr_irqs; i++) {
                irq_domain_set_info(domain, virq + i, hwirq + i,
@@ -250,7 +246,7 @@ static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
                                    NULL, NULL);
        }
 
-       return hwirq;
+       return 0;
 }
 
 static void armada_370_xp_msi_free(struct irq_domain *domain,
@@ -259,7 +255,7 @@ static void armada_370_xp_msi_free(struct irq_domain *domain,
        struct irq_data *d = irq_domain_get_irq_data(domain, virq);
 
        mutex_lock(&msi_used_lock);
-       bitmap_clear(msi_used, d->hwirq, nr_irqs);
+       bitmap_release_region(msi_used, d->hwirq, order_base_2(nr_irqs));
        mutex_unlock(&msi_used_lock);
 }
 
index f3c6855a4cefba5735ca64c73e9a76ffd82e6ab7..18b77c3e6db4ba939b79152b5df8b79316c60e86 100644 (file)
@@ -76,8 +76,8 @@ static void aspeed_scu_ic_irq_handler(struct irq_desc *desc)
                generic_handle_domain_irq(scu_ic->irq_domain,
                                          bit - scu_ic->irq_shift);
 
-               regmap_update_bits(scu_ic->scu, scu_ic->reg, mask,
-                                  BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT));
+               regmap_write_bits(scu_ic->scu, scu_ic->reg, mask,
+                                 BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT));
        }
 
        chained_irq_exit(chip, desc);
index d80e67a6aad2a13c21a4c38fe5eceaad44dd967f..bb6609cebdbce9bc907bbf13fb46e472001064c0 100644 (file)
@@ -238,6 +238,7 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
        }
 
        data->num_parent_irqs = platform_irq_count(pdev);
+       put_device(&pdev->dev);
        if (data->num_parent_irqs <= 0) {
                pr_err("invalid number of parent interrupts\n");
                ret = -ENOMEM;
index eb0882d1536661475086a132bb0db4efee9ae4e6..0cb584d9815b96b3d219a3fc493bc35981fc6413 100644 (file)
@@ -742,7 +742,7 @@ static struct its_collection *its_build_invall_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return NULL;
+       return desc->its_invall_cmd.col;
 }
 
 static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
index d02b05a067d950a0834a17f511af711603e1819b..ff89b36267dd4955e2a1a25184edb0daaf944e73 100644 (file)
@@ -9,6 +9,7 @@
 
 #define pr_fmt(fmt) "irq-mips-gic: " fmt
 
+#include <linux/bitfield.h>
 #include <linux/bitmap.h>
 #include <linux/clocksource.h>
 #include <linux/cpuhotplug.h>
@@ -735,8 +736,7 @@ static int __init gic_of_init(struct device_node *node,
        mips_gic_base = ioremap(gic_base, gic_len);
 
        gicconfig = read_gic_config();
-       gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
-       gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS);
+       gic_shared_intrs = FIELD_GET(GIC_CONFIG_NUMINTERRUPTS, gicconfig);
        gic_shared_intrs = (gic_shared_intrs + 1) * 8;
 
        if (cpu_has_veic) {
index 63bac3f78863a71d5e6fce81526ad0715185db24..ba4759b3e26930181873282e55513c163d646f38 100644 (file)
@@ -26,7 +26,7 @@
 
 #define NVIC_ISER              0x000
 #define NVIC_ICER              0x080
-#define NVIC_IPR               0x300
+#define NVIC_IPR               0x400
 
 #define NVIC_MAX_BANKS         16
 /*
index 86b9e355c583760f564beaeb4988b9c628676cc7..140f35dc0c4579bdd473629bbf86344b42a21e77 100644 (file)
@@ -1139,6 +1139,7 @@ static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
 static void cached_dev_detach_finish(struct work_struct *w)
 {
        struct cached_dev *dc = container_of(w, struct cached_dev, detach);
+       struct cache_set *c = dc->disk.c;
 
        BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
        BUG_ON(refcount_read(&dc->count));
@@ -1156,7 +1157,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
 
        bcache_device_detach(&dc->disk);
        list_move(&dc->list, &uncached_devices);
-       calc_cached_dev_sectors(dc->disk.c);
+       calc_cached_dev_sectors(c);
 
        clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
        clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);
index 6319deccbe09eb0446ae53a06a3456f0011fe30a..7af242de3202ee4ba9bbffd3ee1992e06ad43fe8 100644 (file)
@@ -1963,7 +1963,7 @@ static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
                n_sectors -= bv.bv_len >> SECTOR_SHIFT;
                bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
 retry_kmap:
-               mem = bvec_kmap_local(&bv);
+               mem = kmap_local_page(bv.bv_page);
                if (likely(dio->op == REQ_OP_WRITE))
                        flush_dcache_page(bv.bv_page);
 
index 5111ed966947e2dcc3e2b01f02e6c5fad7259fcd..41d6e2383517bbf940210d15f2110d96fd421e91 100644 (file)
@@ -2189,6 +2189,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
 
                if (!num_sectors || num_sectors > max_sectors)
                        num_sectors = max_sectors;
+               rdev->sb_start = sb_start;
        }
        sb = page_address(rdev->sb_page);
        sb->data_size = cpu_to_le64(num_sectors);
@@ -6270,7 +6271,8 @@ static void __md_stop(struct mddev *mddev)
        spin_lock(&mddev->lock);
        mddev->pers = NULL;
        spin_unlock(&mddev->lock);
-       pers->free(mddev, mddev->private);
+       if (mddev->private)
+               pers->free(mddev, mddev->private);
        mddev->private = NULL;
        if (pers->sync_request && mddev->to_remove == NULL)
                mddev->to_remove = &md_redundancy_group;
index 70532335c7c7ed7b864c392720a89f968918b918..cb670f16e98e9a2003d6a6b7c52e83c50bb284b0 100644 (file)
@@ -423,9 +423,9 @@ static int rebalance_children(struct shadow_spine *s,
 
                memcpy(n, dm_block_data(child),
                       dm_bm_block_size(dm_tm_get_bm(info->tm)));
-               dm_tm_unlock(info->tm, child);
 
                dm_tm_dec(info->tm, dm_block_location(child));
+               dm_tm_unlock(info->tm, child);
                return 0;
        }
 
index 8c72eb590f79dc8bd6bc92cdc7329c462d40f339..6ac509c1821c91b1bc3462beeff057d1f5c38111 100644 (file)
@@ -1803,8 +1803,6 @@ static int rtsx_pci_runtime_suspend(struct device *device)
        mutex_lock(&pcr->pcr_mutex);
        rtsx_pci_power_off(pcr, HOST_ENTER_S3);
 
-       free_irq(pcr->irq, (void *)pcr);
-
        mutex_unlock(&pcr->pcr_mutex);
 
        pcr->is_runtime_suspended = true;
@@ -1825,8 +1823,6 @@ static int rtsx_pci_runtime_resume(struct device *device)
        mutex_lock(&pcr->pcr_mutex);
 
        rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
-       rtsx_pci_acquire_irq(pcr);
-       synchronize_irq(pcr->irq);
 
        if (pcr->ops->fetch_vendor_settings)
                pcr->ops->fetch_vendor_settings(pcr);
index 632325474233a11f0916047ed42338a9eb342d76..b38978a3b3ffa4f9ab5ae8b86d366de9880fe21d 100644 (file)
@@ -376,7 +376,6 @@ MODULE_DEVICE_TABLE(spi, at25_spi_ids);
 static int at25_probe(struct spi_device *spi)
 {
        struct at25_data        *at25 = NULL;
-       struct spi_eeprom       chip;
        int                     err;
        int                     sr;
        u8 id[FM25_ID_LEN];
@@ -389,15 +388,18 @@ static int at25_probe(struct spi_device *spi)
        if (match && !strcmp(match->compatible, "cypress,fm25"))
                is_fram = 1;
 
+       at25 = devm_kzalloc(&spi->dev, sizeof(struct at25_data), GFP_KERNEL);
+       if (!at25)
+               return -ENOMEM;
+
        /* Chip description */
-       if (!spi->dev.platform_data) {
-               if (!is_fram) {
-                       err = at25_fw_to_chip(&spi->dev, &chip);
-                       if (err)
-                               return err;
-               }
-       } else
-               chip = *(struct spi_eeprom *)spi->dev.platform_data;
+       if (spi->dev.platform_data) {
+               memcpy(&at25->chip, spi->dev.platform_data, sizeof(at25->chip));
+       } else if (!is_fram) {
+               err = at25_fw_to_chip(&spi->dev, &at25->chip);
+               if (err)
+                       return err;
+       }
 
        /* Ping the chip ... the status register is pretty portable,
         * unlike probing manufacturer IDs.  We do expect that system
@@ -409,12 +411,7 @@ static int at25_probe(struct spi_device *spi)
                return -ENXIO;
        }
 
-       at25 = devm_kzalloc(&spi->dev, sizeof(struct at25_data), GFP_KERNEL);
-       if (!at25)
-               return -ENOMEM;
-
        mutex_init(&at25->lock);
-       at25->chip = chip;
        at25->spi = spi;
        spi_set_drvdata(spi, at25);
 
@@ -431,7 +428,7 @@ static int at25_probe(struct spi_device *spi)
                        dev_err(&spi->dev, "Error: unsupported size (id %02x)\n", id[7]);
                        return -ENODEV;
                }
-               chip.byte_len = int_pow(2, id[7] - 0x21 + 4) * 1024;
+               at25->chip.byte_len = int_pow(2, id[7] - 0x21 + 4) * 1024;
 
                if (at25->chip.byte_len > 64 * 1024)
                        at25->chip.flags |= EE_ADDR3;
@@ -464,7 +461,7 @@ static int at25_probe(struct spi_device *spi)
        at25->nvmem_config.type = is_fram ? NVMEM_TYPE_FRAM : NVMEM_TYPE_EEPROM;
        at25->nvmem_config.name = dev_name(&spi->dev);
        at25->nvmem_config.dev = &spi->dev;
-       at25->nvmem_config.read_only = chip.flags & EE_READONLY;
+       at25->nvmem_config.read_only = at25->chip.flags & EE_READONLY;
        at25->nvmem_config.root_only = true;
        at25->nvmem_config.owner = THIS_MODULE;
        at25->nvmem_config.compat = true;
@@ -474,17 +471,18 @@ static int at25_probe(struct spi_device *spi)
        at25->nvmem_config.priv = at25;
        at25->nvmem_config.stride = 1;
        at25->nvmem_config.word_size = 1;
-       at25->nvmem_config.size = chip.byte_len;
+       at25->nvmem_config.size = at25->chip.byte_len;
 
        at25->nvmem = devm_nvmem_register(&spi->dev, &at25->nvmem_config);
        if (IS_ERR(at25->nvmem))
                return PTR_ERR(at25->nvmem);
 
        dev_info(&spi->dev, "%d %s %s %s%s, pagesize %u\n",
-                (chip.byte_len < 1024) ? chip.byte_len : (chip.byte_len / 1024),
-                (chip.byte_len < 1024) ? "Byte" : "KByte",
+                (at25->chip.byte_len < 1024) ?
+                       at25->chip.byte_len : (at25->chip.byte_len / 1024),
+                (at25->chip.byte_len < 1024) ? "Byte" : "KByte",
                 at25->chip.name, is_fram ? "fram" : "eeprom",
-                (chip.flags & EE_READONLY) ? " (readonly)" : "",
+                (at25->chip.flags & EE_READONLY) ? " (readonly)" : "",
                 at25->chip.page_size);
        return 0;
 }
index 39aca775371993f079410d62fd19620b86736007..4ccbf43e6bfa942bc40075760841dc1ddef7a447 100644 (file)
@@ -719,16 +719,18 @@ static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
 static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
 {
        u64 size = 0;
-       int i;
+       int oix;
 
        size = ALIGN(metalen, FASTRPC_ALIGN);
-       for (i = 0; i < ctx->nscalars; i++) {
+       for (oix = 0; oix < ctx->nbufs; oix++) {
+               int i = ctx->olaps[oix].raix;
+
                if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {
 
-                       if (ctx->olaps[i].offset == 0)
+                       if (ctx->olaps[oix].offset == 0)
                                size = ALIGN(size, FASTRPC_ALIGN);
 
-                       size += (ctx->olaps[i].mend - ctx->olaps[i].mstart);
+                       size += (ctx->olaps[oix].mend - ctx->olaps[oix].mstart);
                }
        }
 
index 240c5af793dcebe03479df8066b7ef4c57cf78b4..368f10405e132ceedfb722278271bf99e422afeb 100644 (file)
@@ -2264,7 +2264,7 @@ void mmc_start_host(struct mmc_host *host)
        _mmc_detect_change(host, 0, false);
 }
 
-void mmc_stop_host(struct mmc_host *host)
+void __mmc_stop_host(struct mmc_host *host)
 {
        if (host->slot.cd_irq >= 0) {
                mmc_gpio_set_cd_wake(host, false);
@@ -2273,6 +2273,11 @@ void mmc_stop_host(struct mmc_host *host)
 
        host->rescan_disable = 1;
        cancel_delayed_work_sync(&host->detect);
+}
+
+void mmc_stop_host(struct mmc_host *host)
+{
+       __mmc_stop_host(host);
 
        /* clear pm flags now and let card drivers set them as needed */
        host->pm_flags = 0;
index 7931a4f0137d20e4ed2afc2b6eb0d80693132ed7..f5f3f623ea492660cf3653b2a1d09bd781ded0ca 100644 (file)
@@ -70,6 +70,7 @@ static inline void mmc_delay(unsigned int ms)
 
 void mmc_rescan(struct work_struct *work);
 void mmc_start_host(struct mmc_host *host);
+void __mmc_stop_host(struct mmc_host *host);
 void mmc_stop_host(struct mmc_host *host);
 
 void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
index d4683b1d263fd6dfa062fe014f3817e2201b8980..cf140f4ec864307e4c2788bc198df0c488a9638d 100644 (file)
@@ -80,9 +80,18 @@ static void mmc_host_classdev_release(struct device *dev)
        kfree(host);
 }
 
+static int mmc_host_classdev_shutdown(struct device *dev)
+{
+       struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+       __mmc_stop_host(host);
+       return 0;
+}
+
 static struct class mmc_host_class = {
        .name           = "mmc_host",
        .dev_release    = mmc_host_classdev_release,
+       .shutdown_pre   = mmc_host_classdev_shutdown,
        .pm             = MMC_HOST_CLASS_DEV_PM_OPS,
 };
 
index 7cd9c0ec2fcfe97cd42c0458d9d6f32a67953545..8fdd0bbbfa21fb0ee77a769446f0769f04749ec4 100644 (file)
@@ -135,6 +135,7 @@ static void meson_mx_sdhc_start_cmd(struct mmc_host *mmc,
                                    struct mmc_command *cmd)
 {
        struct meson_mx_sdhc_host *host = mmc_priv(mmc);
+       bool manual_stop = false;
        u32 ictl, send;
        int pack_len;
 
@@ -172,12 +173,27 @@ static void meson_mx_sdhc_start_cmd(struct mmc_host *mmc,
                else
                        /* software flush: */
                        ictl |= MESON_SDHC_ICTL_DATA_XFER_OK;
+
+               /*
+                * Mimic the logic from the vendor driver where (only)
+                * SD_IO_RW_EXTENDED commands with more than one block set the
+                * MESON_SDHC_MISC_MANUAL_STOP bit. This fixes the firmware
+                * download in the brcmfmac driver for a BCM43362/1 card.
+                * Without this sdio_memcpy_toio() (with a size of 219557
+                * bytes) times out if MESON_SDHC_MISC_MANUAL_STOP is not set.
+                */
+               manual_stop = cmd->data->blocks > 1 &&
+                             cmd->opcode == SD_IO_RW_EXTENDED;
        } else {
                pack_len = 0;
 
                ictl |= MESON_SDHC_ICTL_RESP_OK;
        }
 
+       regmap_update_bits(host->regmap, MESON_SDHC_MISC,
+                          MESON_SDHC_MISC_MANUAL_STOP,
+                          manual_stop ? MESON_SDHC_MISC_MANUAL_STOP : 0);
+
        if (cmd->opcode == MMC_STOP_TRANSMISSION)
                send |= MESON_SDHC_SEND_DATA_STOP;
 
index fdaa11f92fe6f5c31901f85128d5b60c2191f3a5..a75d3dd34d18cb41f24f4f201613916a3110fb09 100644 (file)
@@ -441,6 +441,8 @@ static int sdmmc_dlyb_phase_tuning(struct mmci_host *host, u32 opcode)
                return -EINVAL;
        }
 
+       writel_relaxed(0, dlyb->base + DLYB_CR);
+
        phase = end_of_len - max_len / 2;
        sdmmc_dlyb_set_cfgr(dlyb, dlyb->unit, phase, false);
 
index 943940b44e835e13635ea9318220d776f60a2add..632775217d35c5bd1242be1703d28d78248f4bfb 100644 (file)
@@ -2291,8 +2291,10 @@ static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card
                        sdr_set_field(host->base + PAD_DS_TUNE,
                                      PAD_DS_TUNE_DLY1, i);
                ret = mmc_get_ext_csd(card, &ext_csd);
-               if (!ret)
+               if (!ret) {
                        result_dly1 |= (1 << i);
+                       kfree(ext_csd);
+               }
        }
        host->hs400_tuning = false;
 
index a4407f391f66a6f2f05bebad1334748895c9fe77..f5b2684ad8058b5f40ec6393f1c487de9a6f57f7 100644 (file)
@@ -673,7 +673,7 @@ static int renesas_sdhi_execute_tuning(struct mmc_host *mmc, u32 opcode)
 
        /* Issue CMD19 twice for each tap */
        for (i = 0; i < 2 * priv->tap_num; i++) {
-               int cmd_error;
+               int cmd_error = 0;
 
                /* Set sampling clock position */
                sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, i % priv->tap_num);
index a5001875876b9715b1e08e0b0d3756e7bbf7bf7a..9762ffab2e236cdc90fe401b29490f715a1aa0b2 100644 (file)
@@ -356,23 +356,6 @@ static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
        }
 }
 
-static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
-                                             struct mmc_ios *ios)
-{
-       struct sdhci_host *host = mmc_priv(mmc);
-       u32 val;
-
-       val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
-
-       if (ios->enhanced_strobe)
-               val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
-       else
-               val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
-
-       sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
-
-}
-
 static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
 {
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -793,6 +776,32 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
        }
 }
 
+static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
+                                             struct mmc_ios *ios)
+{
+       struct sdhci_host *host = mmc_priv(mmc);
+       u32 val;
+
+       val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
+
+       if (ios->enhanced_strobe) {
+               val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
+               /*
+                * When CMD13 is sent from mmc_select_hs400es() after
+                * switching to HS400ES mode, the bus is operating at
+                * either MMC_HIGH_26_MAX_DTR or MMC_HIGH_52_MAX_DTR.
+                * To meet Tegra SDHCI requirement at HS400ES mode, force SDHCI
+                * interface clock to MMC_HS200_MAX_DTR (200 MHz) so that host
+                * controller CAR clock and the interface clock are rate matched.
+                */
+               tegra_sdhci_set_clock(host, MMC_HS200_MAX_DTR);
+       } else {
+               val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
+       }
+
+       sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
+}
+
 static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
 {
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
index 9802e265fca80a56cd7be99488713dc22dc55246..2b317ed6c103f16f823d2fdbbfc90dc5f5fd1834 100644 (file)
@@ -96,6 +96,13 @@ struct dataflash {
        struct mtd_info         mtd;
 };
 
+static const struct spi_device_id dataflash_dev_ids[] = {
+       { "at45" },
+       { "dataflash" },
+       { },
+};
+MODULE_DEVICE_TABLE(spi, dataflash_dev_ids);
+
 #ifdef CONFIG_OF
 static const struct of_device_id dataflash_dt_ids[] = {
        { .compatible = "atmel,at45", },
@@ -927,6 +934,7 @@ static struct spi_driver dataflash_driver = {
                .name           = "mtd_dataflash",
                .of_match_table = of_match_ptr(dataflash_dt_ids),
        },
+       .id_table = dataflash_dev_ids,
 
        .probe          = dataflash_probe,
        .remove         = dataflash_remove,
index 67b7cb67c0307b5cbe4288860b1f6a705d5530aa..0a45d3c6c15ba1fca6b042fa0249c0d21375c166 100644 (file)
@@ -26,7 +26,7 @@ config MTD_NAND_DENALI_PCI
 config MTD_NAND_DENALI_DT
        tristate "Denali NAND controller as a DT device"
        select MTD_NAND_DENALI
-       depends on HAS_DMA && HAVE_CLK && OF
+       depends on HAS_DMA && HAVE_CLK && OF && HAS_IOMEM
        help
          Enable the driver for NAND flash on platforms using a Denali NAND
          controller as a DT device.
index 658f0cbe7ce8ca3b545c28cba577258ed54f4e52..6b2bda815b880c969c239d9dc6336d2392dcbb42 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <linux/clk.h>
 #include <linux/completion.h>
+#include <linux/delay.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-direction.h>
 #include <linux/dma-mapping.h>
 
 #define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ)
 
+/*
+ * According to SPEAr300 Reference Manual (RM0082)
+ *  TOUDEL = 7ns (Output delay from the flip-flops to the board)
+ *  TINDEL = 5ns (Input delay from the board to the flipflop)
+ */
+#define TOUTDEL        7000
+#define TINDEL 5000
+
 struct fsmc_nand_timings {
        u8 tclr;
        u8 tar;
@@ -277,7 +286,7 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host,
 {
        unsigned long hclk = clk_get_rate(host->clk);
        unsigned long hclkn = NSEC_PER_SEC / hclk;
-       u32 thiz, thold, twait, tset;
+       u32 thiz, thold, twait, tset, twait_min;
 
        if (sdrt->tRC_min < 30000)
                return -EOPNOTSUPP;
@@ -309,13 +318,6 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host,
        else if (tims->thold > FSMC_THOLD_MASK)
                tims->thold = FSMC_THOLD_MASK;
 
-       twait = max(sdrt->tRP_min, sdrt->tWP_min);
-       tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1;
-       if (tims->twait == 0)
-               tims->twait = 1;
-       else if (tims->twait > FSMC_TWAIT_MASK)
-               tims->twait = FSMC_TWAIT_MASK;
-
        tset = max(sdrt->tCS_min - sdrt->tWP_min,
                   sdrt->tCEA_max - sdrt->tREA_max);
        tims->tset = DIV_ROUND_UP(tset / 1000, hclkn) - 1;
@@ -324,6 +326,21 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host,
        else if (tims->tset > FSMC_TSET_MASK)
                tims->tset = FSMC_TSET_MASK;
 
+       /*
+        * According to SPEAr300 Reference Manual (RM0082) which gives more
+        * information related to FSMSC timings than the SPEAr600 one (RM0305),
+        *   twait >= tCEA - (tset * TCLK) + TOUTDEL + TINDEL
+        */
+       twait_min = sdrt->tCEA_max - ((tims->tset + 1) * hclkn * 1000)
+                   + TOUTDEL + TINDEL;
+       twait = max3(sdrt->tRP_min, sdrt->tWP_min, twait_min);
+
+       tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1;
+       if (tims->twait == 0)
+               tims->twait = 1;
+       else if (tims->twait > FSMC_TWAIT_MASK)
+               tims->twait = FSMC_TWAIT_MASK;
+
        return 0;
 }
 
@@ -664,6 +681,9 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
                                                instr->ctx.waitrdy.timeout_ms);
                        break;
                }
+
+               if (instr->delay_ns)
+                       ndelay(instr->delay_ns);
        }
 
        return ret;
index 3d6c6e88052072751886f8391ae1561f960e8b72..a130320de4128e79d3aa15bcaf23b5a318993a16 100644 (file)
@@ -926,7 +926,7 @@ int nand_choose_best_sdr_timings(struct nand_chip *chip,
                                 struct nand_sdr_timings *spec_timings)
 {
        const struct nand_controller_ops *ops = chip->controller->ops;
-       int best_mode = 0, mode, ret;
+       int best_mode = 0, mode, ret = -EOPNOTSUPP;
 
        iface->type = NAND_SDR_IFACE;
 
@@ -977,7 +977,7 @@ int nand_choose_best_nvddr_timings(struct nand_chip *chip,
                                   struct nand_nvddr_timings *spec_timings)
 {
        const struct nand_controller_ops *ops = chip->controller->ops;
-       int best_mode = 0, mode, ret;
+       int best_mode = 0, mode, ret = -EOPNOTSUPP;
 
        iface->type = NAND_NVDDR_IFACE;
 
@@ -1837,7 +1837,7 @@ int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
                        NAND_OP_CMD(NAND_CMD_ERASE1, 0),
                        NAND_OP_ADDR(2, addrs, 0),
                        NAND_OP_CMD(NAND_CMD_ERASE2,
-                                   NAND_COMMON_TIMING_MS(conf, tWB_max)),
+                                   NAND_COMMON_TIMING_NS(conf, tWB_max)),
                        NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max),
                                         0),
                };
index 2ec8e015c7b3364ae8ab07d0750c4ded29d5815a..533e476988f2492bd56dd97e1e16f6cb80737086 100644 (file)
@@ -1501,14 +1501,14 @@ void bond_alb_monitor(struct work_struct *work)
        struct slave *slave;
 
        if (!bond_has_slaves(bond)) {
-               bond_info->tx_rebalance_counter = 0;
+               atomic_set(&bond_info->tx_rebalance_counter, 0);
                bond_info->lp_counter = 0;
                goto re_arm;
        }
 
        rcu_read_lock();
 
-       bond_info->tx_rebalance_counter++;
+       atomic_inc(&bond_info->tx_rebalance_counter);
        bond_info->lp_counter++;
 
        /* send learning packets */
@@ -1530,7 +1530,7 @@ void bond_alb_monitor(struct work_struct *work)
        }
 
        /* rebalance tx traffic */
-       if (bond_info->tx_rebalance_counter >= BOND_TLB_REBALANCE_TICKS) {
+       if (atomic_read(&bond_info->tx_rebalance_counter) >= BOND_TLB_REBALANCE_TICKS) {
                bond_for_each_slave_rcu(bond, slave, iter) {
                        tlb_clear_slave(bond, slave, 1);
                        if (slave == rcu_access_pointer(bond->curr_active_slave)) {
@@ -1540,7 +1540,7 @@ void bond_alb_monitor(struct work_struct *work)
                                bond_info->unbalanced_load = 0;
                        }
                }
-               bond_info->tx_rebalance_counter = 0;
+               atomic_set(&bond_info->tx_rebalance_counter, 0);
        }
 
        if (bond_info->rlb_enabled) {
@@ -1610,7 +1610,8 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
        tlb_init_slave(slave);
 
        /* order a rebalance ASAP */
-       bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
+       atomic_set(&bond->alb_info.tx_rebalance_counter,
+                  BOND_TLB_REBALANCE_TICKS);
 
        if (bond->alb_info.rlb_enabled)
                bond->alb_info.rlb_rebalance = 1;
@@ -1647,7 +1648,8 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
                        rlb_clear_slave(bond, slave);
        } else if (link == BOND_LINK_UP) {
                /* order a rebalance ASAP */
-               bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
+               atomic_set(&bond_info->tx_rebalance_counter,
+                          BOND_TLB_REBALANCE_TICKS);
                if (bond->alb_info.rlb_enabled) {
                        bond->alb_info.rlb_rebalance = 1;
                        /* If the updelay module parameter is smaller than the
index a8fde3bc458f6c360252f0a3e0648e533936524e..b93337b5a7211f0361eb05ef81f503a5440272aa 100644 (file)
@@ -1526,7 +1526,7 @@ static int bond_option_ad_actor_system_set(struct bonding *bond,
                mac = (u8 *)&newval->value;
        }
 
-       if (!is_valid_ether_addr(mac))
+       if (is_multicast_ether_addr(mac))
                goto err;
 
        netdev_dbg(bond->dev, "Setting ad_actor_system to %pM\n", mac);
index 74d9899fc904c099d51f23c591ec71b2acb6c37e..eb74cdf26b88c55e7df9fee2cc1d212c5b2a1e6b 100644 (file)
@@ -248,6 +248,9 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
 #define KVASER_PCIEFD_SPACK_EWLR BIT(23)
 #define KVASER_PCIEFD_SPACK_EPLR BIT(24)
 
+/* Kvaser KCAN_EPACK second word */
+#define KVASER_PCIEFD_EPACK_DIR_TX BIT(0)
+
 struct kvaser_pciefd;
 
 struct kvaser_pciefd_can {
@@ -1285,7 +1288,10 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
 
        can->err_rep_cnt++;
        can->can.can_stats.bus_error++;
-       stats->rx_errors++;
+       if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX)
+               stats->tx_errors++;
+       else
+               stats->rx_errors++;
 
        can->bec.txerr = bec.txerr;
        can->bec.rxerr = bec.rxerr;
index 2470c47b2e315b46cc55f94dd55520096f88b23e..c2a8421e7845c49ffdf16727dd168cdbf4df8c55 100644 (file)
@@ -204,16 +204,16 @@ enum m_can_reg {
 
 /* Interrupts for version 3.0.x */
 #define IR_ERR_LEC_30X (IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE)
-#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_ELO | IR_BEU | \
-                        IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
-                        IR_RF1L | IR_RF0L)
+#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_BEU | IR_BEC | \
+                        IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
+                        IR_RF0L)
 #define IR_ERR_ALL_30X (IR_ERR_STATE | IR_ERR_BUS_30X)
 
 /* Interrupts for version >= 3.1.x */
 #define IR_ERR_LEC_31X (IR_PED | IR_PEA)
-#define IR_ERR_BUS_31X      (IR_ERR_LEC_31X | IR_WDI | IR_ELO | IR_BEU | \
-                        IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
-                        IR_RF1L | IR_RF0L)
+#define IR_ERR_BUS_31X      (IR_ERR_LEC_31X | IR_WDI | IR_BEU | IR_BEC | \
+                        IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
+                        IR_RF0L)
 #define IR_ERR_ALL_31X (IR_ERR_STATE | IR_ERR_BUS_31X)
 
 /* Interrupt Line Select (ILS) */
@@ -517,7 +517,7 @@ static int m_can_read_fifo(struct net_device *dev, u32 rxfs)
                err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DATA,
                                      cf->data, DIV_ROUND_UP(cf->len, 4));
                if (err)
-                       goto out_fail;
+                       goto out_free_skb;
        }
 
        /* acknowledge rx fifo 0 */
@@ -532,6 +532,8 @@ static int m_can_read_fifo(struct net_device *dev, u32 rxfs)
 
        return 0;
 
+out_free_skb:
+       kfree_skb(skb);
 out_fail:
        netdev_err(dev, "FIFO read returned %d\n", err);
        return err;
@@ -810,8 +812,6 @@ static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
 {
        if (irqstatus & IR_WDI)
                netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
-       if (irqstatus & IR_ELO)
-               netdev_err(dev, "Error Logging Overflow\n");
        if (irqstatus & IR_BEU)
                netdev_err(dev, "Bit Error Uncorrected\n");
        if (irqstatus & IR_BEC)
@@ -1494,20 +1494,32 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
        case 30:
                /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */
                can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
-               cdev->can.bittiming_const = &m_can_bittiming_const_30X;
-               cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X;
+               cdev->can.bittiming_const = cdev->bit_timing ?
+                       cdev->bit_timing : &m_can_bittiming_const_30X;
+
+               cdev->can.data_bittiming_const = cdev->data_timing ?
+                       cdev->data_timing :
+                       &m_can_data_bittiming_const_30X;
                break;
        case 31:
                /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
                can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
-               cdev->can.bittiming_const = &m_can_bittiming_const_31X;
-               cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
+               cdev->can.bittiming_const = cdev->bit_timing ?
+                       cdev->bit_timing : &m_can_bittiming_const_31X;
+
+               cdev->can.data_bittiming_const = cdev->data_timing ?
+                       cdev->data_timing :
+                       &m_can_data_bittiming_const_31X;
                break;
        case 32:
        case 33:
                /* Support both MCAN version v3.2.x and v3.3.0 */
-               cdev->can.bittiming_const = &m_can_bittiming_const_31X;
-               cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
+               cdev->can.bittiming_const = cdev->bit_timing ?
+                       cdev->bit_timing : &m_can_bittiming_const_31X;
+
+               cdev->can.data_bittiming_const = cdev->data_timing ?
+                       cdev->data_timing :
+                       &m_can_data_bittiming_const_31X;
 
                cdev->can.ctrlmode_supported |=
                        (m_can_niso_supported(cdev) ?
index d18b515e6ccc76c33660ce9fd0bed098ebf18559..2c5d40997168616ca1ab85d1235560c654f2dfe7 100644 (file)
@@ -85,6 +85,9 @@ struct m_can_classdev {
        struct sk_buff *tx_skb;
        struct phy *transceiver;
 
+       const struct can_bittiming_const *bit_timing;
+       const struct can_bittiming_const *data_timing;
+
        struct m_can_ops *ops;
 
        int version;
index 89cc3d41e952bb9b49c85b77dc979f24a8979ffe..b56a54d6c5a9c4d274ecc76b637fbaa02ff5cf84 100644 (file)
 
 #define M_CAN_PCI_MMIO_BAR             0
 
-#define M_CAN_CLOCK_FREQ_EHL           100000000
 #define CTL_CSR_INT_CTL_OFFSET         0x508
 
+struct m_can_pci_config {
+       const struct can_bittiming_const *bit_timing;
+       const struct can_bittiming_const *data_timing;
+       unsigned int clock_freq;
+};
+
 struct m_can_pci_priv {
        struct m_can_classdev cdev;
 
@@ -42,8 +47,13 @@ static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg)
 static int iomap_read_fifo(struct m_can_classdev *cdev, int offset, void *val, size_t val_count)
 {
        struct m_can_pci_priv *priv = cdev_to_priv(cdev);
+       void __iomem *src = priv->base + offset;
 
-       ioread32_rep(priv->base + offset, val, val_count);
+       while (val_count--) {
+               *(unsigned int *)val = ioread32(src);
+               val += 4;
+               src += 4;
+       }
 
        return 0;
 }
@@ -61,8 +71,13 @@ static int iomap_write_fifo(struct m_can_classdev *cdev, int offset,
                            const void *val, size_t val_count)
 {
        struct m_can_pci_priv *priv = cdev_to_priv(cdev);
+       void __iomem *dst = priv->base + offset;
 
-       iowrite32_rep(priv->base + offset, val, val_count);
+       while (val_count--) {
+               iowrite32(*(unsigned int *)val, dst);
+               val += 4;
+               dst += 4;
+       }
 
        return 0;
 }
@@ -74,9 +89,40 @@ static struct m_can_ops m_can_pci_ops = {
        .read_fifo = iomap_read_fifo,
 };
 
+static const struct can_bittiming_const m_can_bittiming_const_ehl = {
+       .name = KBUILD_MODNAME,
+       .tseg1_min = 2,         /* Time segment 1 = prop_seg + phase_seg1 */
+       .tseg1_max = 64,
+       .tseg2_min = 1,         /* Time segment 2 = phase_seg2 */
+       .tseg2_max = 128,
+       .sjw_max = 128,
+       .brp_min = 1,
+       .brp_max = 512,
+       .brp_inc = 1,
+};
+
+static const struct can_bittiming_const m_can_data_bittiming_const_ehl = {
+       .name = KBUILD_MODNAME,
+       .tseg1_min = 2,         /* Time segment 1 = prop_seg + phase_seg1 */
+       .tseg1_max = 16,
+       .tseg2_min = 1,         /* Time segment 2 = phase_seg2 */
+       .tseg2_max = 8,
+       .sjw_max = 4,
+       .brp_min = 1,
+       .brp_max = 32,
+       .brp_inc = 1,
+};
+
+static const struct m_can_pci_config m_can_pci_ehl = {
+       .bit_timing = &m_can_bittiming_const_ehl,
+       .data_timing = &m_can_data_bittiming_const_ehl,
+       .clock_freq = 200000000,
+};
+
 static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
 {
        struct device *dev = &pci->dev;
+       const struct m_can_pci_config *cfg;
        struct m_can_classdev *mcan_class;
        struct m_can_pci_priv *priv;
        void __iomem *base;
@@ -104,6 +150,8 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
        if (!mcan_class)
                return -ENOMEM;
 
+       cfg = (const struct m_can_pci_config *)id->driver_data;
+
        priv = cdev_to_priv(mcan_class);
 
        priv->base = base;
@@ -115,7 +163,9 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
        mcan_class->dev = &pci->dev;
        mcan_class->net->irq = pci_irq_vector(pci, 0);
        mcan_class->pm_clock_support = 1;
-       mcan_class->can.clock.freq = id->driver_data;
+       mcan_class->bit_timing = cfg->bit_timing;
+       mcan_class->data_timing = cfg->data_timing;
+       mcan_class->can.clock.freq = cfg->clock_freq;
        mcan_class->ops = &m_can_pci_ops;
 
        pci_set_drvdata(pci, mcan_class);
@@ -168,8 +218,8 @@ static SIMPLE_DEV_PM_OPS(m_can_pci_pm_ops,
                         m_can_pci_suspend, m_can_pci_resume);
 
 static const struct pci_device_id m_can_pci_id_table[] = {
-       { PCI_VDEVICE(INTEL, 0x4bc1), M_CAN_CLOCK_FREQ_EHL, },
-       { PCI_VDEVICE(INTEL, 0x4bc2), M_CAN_CLOCK_FREQ_EHL, },
+       { PCI_VDEVICE(INTEL, 0x4bc1), (kernel_ulong_t)&m_can_pci_ehl, },
+       { PCI_VDEVICE(INTEL, 0x4bc2), (kernel_ulong_t)&m_can_pci_ehl, },
        {  }    /* Terminating Entry */
 };
 MODULE_DEVICE_TABLE(pci, m_can_pci_id_table);
index 92a54a5fd4c502c3238f500e9bf39b44b44582a6..964c8a09226a9fc1af3de261948d62638ff9f48e 100644 (file)
@@ -692,11 +692,11 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota)
                        cf->data[i + 1] = data_reg >> 8;
                }
 
-               netif_receive_skb(skb);
                rcv_pkts++;
                stats->rx_packets++;
                quota--;
                stats->rx_bytes += cf->len;
+               netif_receive_skb(skb);
 
                pch_fifo_thresh(priv, obj_num);
                obj_num++;
index e21b169c14c0122d9038fdd967dfc15cedde7da3..4642b6d4aaf7bb1f3055eb80ee563b9cd55fd1d5 100644 (file)
@@ -234,7 +234,12 @@ static int ems_pcmcia_add_card(struct pcmcia_device *pdev, unsigned long base)
                        free_sja1000dev(dev);
        }
 
-       err = request_irq(dev->irq, &ems_pcmcia_interrupt, IRQF_SHARED,
+       if (!card->channels) {
+               err = -ENODEV;
+               goto failure_cleanup;
+       }
+
+       err = request_irq(pdev->irq, &ems_pcmcia_interrupt, IRQF_SHARED,
                          DRV_NAME, card);
        if (!err)
                return 0;
index 59ba7c7beec00f4e10c19bcc03995941017c158b..f7af1bf5ab46d92bc7c117c91e1a61de4017838d 100644 (file)
 
 #include "kvaser_usb.h"
 
-/* Forward declaration */
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg;
-
-#define CAN_USB_CLOCK                  8000000
 #define MAX_USBCAN_NET_DEVICES         2
 
 /* Command header size */
@@ -80,6 +76,12 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg;
 
 #define CMD_LEAF_LOG_MESSAGE           106
 
+/* Leaf frequency options */
+#define KVASER_USB_LEAF_SWOPTION_FREQ_MASK 0x60
+#define KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK 0
+#define KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK BIT(5)
+#define KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK BIT(6)
+
 /* error factors */
 #define M16C_EF_ACKE                   BIT(0)
 #define M16C_EF_CRCE                   BIT(1)
@@ -340,6 +342,50 @@ struct kvaser_usb_err_summary {
        };
 };
 
+static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
+       .name = "kvaser_usb",
+       .tseg1_min = KVASER_USB_TSEG1_MIN,
+       .tseg1_max = KVASER_USB_TSEG1_MAX,
+       .tseg2_min = KVASER_USB_TSEG2_MIN,
+       .tseg2_max = KVASER_USB_TSEG2_MAX,
+       .sjw_max = KVASER_USB_SJW_MAX,
+       .brp_min = KVASER_USB_BRP_MIN,
+       .brp_max = KVASER_USB_BRP_MAX,
+       .brp_inc = KVASER_USB_BRP_INC,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_8mhz = {
+       .clock = {
+               .freq = 8000000,
+       },
+       .timestamp_freq = 1,
+       .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_16mhz = {
+       .clock = {
+               .freq = 16000000,
+       },
+       .timestamp_freq = 1,
+       .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_24mhz = {
+       .clock = {
+               .freq = 24000000,
+       },
+       .timestamp_freq = 1,
+       .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_32mhz = {
+       .clock = {
+               .freq = 32000000,
+       },
+       .timestamp_freq = 1,
+       .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+};
+
 static void *
 kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
                             const struct sk_buff *skb, int *frame_len,
@@ -471,6 +517,27 @@ static int kvaser_usb_leaf_send_simple_cmd(const struct kvaser_usb *dev,
        return rc;
 }
 
+static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev,
+                                                  const struct leaf_cmd_softinfo *softinfo)
+{
+       u32 sw_options = le32_to_cpu(softinfo->sw_options);
+
+       dev->fw_version = le32_to_cpu(softinfo->fw_version);
+       dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx);
+
+       switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
+       case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
+               dev->cfg = &kvaser_usb_leaf_dev_cfg_16mhz;
+               break;
+       case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK:
+               dev->cfg = &kvaser_usb_leaf_dev_cfg_24mhz;
+               break;
+       case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK:
+               dev->cfg = &kvaser_usb_leaf_dev_cfg_32mhz;
+               break;
+       }
+}
+
 static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
 {
        struct kvaser_cmd cmd;
@@ -486,14 +553,13 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
 
        switch (dev->card_data.leaf.family) {
        case KVASER_LEAF:
-               dev->fw_version = le32_to_cpu(cmd.u.leaf.softinfo.fw_version);
-               dev->max_tx_urbs =
-                       le16_to_cpu(cmd.u.leaf.softinfo.max_outstanding_tx);
+               kvaser_usb_leaf_get_software_info_leaf(dev, &cmd.u.leaf.softinfo);
                break;
        case KVASER_USBCAN:
                dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version);
                dev->max_tx_urbs =
                        le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx);
+               dev->cfg = &kvaser_usb_leaf_dev_cfg_8mhz;
                break;
        }
 
@@ -1225,24 +1291,11 @@ static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev)
 {
        struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
 
-       dev->cfg = &kvaser_usb_leaf_dev_cfg;
        card_data->ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
 
        return 0;
 }
 
-static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
-       .name = "kvaser_usb",
-       .tseg1_min = KVASER_USB_TSEG1_MIN,
-       .tseg1_max = KVASER_USB_TSEG1_MAX,
-       .tseg2_min = KVASER_USB_TSEG2_MIN,
-       .tseg2_max = KVASER_USB_TSEG2_MAX,
-       .sjw_max = KVASER_USB_SJW_MAX,
-       .brp_min = KVASER_USB_BRP_MIN,
-       .brp_max = KVASER_USB_BRP_MAX,
-       .brp_inc = KVASER_USB_BRP_INC,
-};
-
 static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
 {
        struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
@@ -1348,11 +1401,3 @@ const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = {
        .dev_read_bulk_callback = kvaser_usb_leaf_read_bulk_callback,
        .dev_frame_to_cmd = kvaser_usb_leaf_frame_to_cmd,
 };
-
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg = {
-       .clock = {
-               .freq = CAN_USB_CLOCK,
-       },
-       .timestamp_freq = 1,
-       .bittiming_const = &kvaser_usb_leaf_bittiming_const,
-};
index 01e37b75471e111430fcd30c1a159afc7d5540bc..2b88f03e5252182251a47e056221f60802311543 100644 (file)
@@ -349,6 +349,19 @@ static const struct of_device_id b53_spi_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, b53_spi_of_match);
 
+static const struct spi_device_id b53_spi_ids[] = {
+       { .name = "bcm5325" },
+       { .name = "bcm5365" },
+       { .name = "bcm5395" },
+       { .name = "bcm5397" },
+       { .name = "bcm5398" },
+       { .name = "bcm53115" },
+       { .name = "bcm53125" },
+       { .name = "bcm53128" },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, b53_spi_ids);
+
 static struct spi_driver b53_spi_driver = {
        .driver = {
                .name   = "b53-switch",
@@ -357,6 +370,7 @@ static struct spi_driver b53_spi_driver = {
        .probe  = b53_spi_probe,
        .remove = b53_spi_remove,
        .shutdown = b53_spi_shutdown,
+       .id_table = b53_spi_ids,
 };
 
 module_spi_driver(b53_spi_driver);
index f00cbf5753b914040be2dc3b31f2a15cb4274976..cd8462d1e27c04f09dae7702d8ff2a391cd35373 100644 (file)
@@ -471,6 +471,12 @@ static int mv88e6xxx_port_ppu_updates(struct mv88e6xxx_chip *chip, int port)
        u16 reg;
        int err;
 
+       /* The 88e6250 family does not have the PHY detect bit. Instead,
+        * report whether the port is internal.
+        */
+       if (chip->info->family == MV88E6XXX_FAMILY_6250)
+               return port < chip->info->num_internal_phys;
+
        err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
        if (err) {
                dev_err(chip->dev,
@@ -692,44 +698,48 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
 {
        struct mv88e6xxx_chip *chip = ds->priv;
        struct mv88e6xxx_port *p;
-       int err;
+       int err = 0;
 
        p = &chip->ports[port];
 
-       /* FIXME: is this the correct test? If we're in fixed mode on an
-        * internal port, why should we process this any different from
-        * PHY mode? On the other hand, the port may be automedia between
-        * an internal PHY and the serdes...
-        */
-       if ((mode == MLO_AN_PHY) && mv88e6xxx_phy_is_internal(ds, port))
-               return;
-
        mv88e6xxx_reg_lock(chip);
-       /* In inband mode, the link may come up at any time while the link
-        * is not forced down. Force the link down while we reconfigure the
-        * interface mode.
-        */
-       if (mode == MLO_AN_INBAND && p->interface != state->interface &&
-           chip->info->ops->port_set_link)
-               chip->info->ops->port_set_link(chip, port, LINK_FORCED_DOWN);
 
-       err = mv88e6xxx_port_config_interface(chip, port, state->interface);
-       if (err && err != -EOPNOTSUPP)
-               goto err_unlock;
-
-       err = mv88e6xxx_serdes_pcs_config(chip, port, mode, state->interface,
-                                         state->advertising);
-       /* FIXME: we should restart negotiation if something changed - which
-        * is something we get if we convert to using phylinks PCS operations.
-        */
-       if (err > 0)
-               err = 0;
+       if (mode != MLO_AN_PHY || !mv88e6xxx_phy_is_internal(ds, port)) {
+               /* In inband mode, the link may come up at any time while the
+                * link is not forced down. Force the link down while we
+                * reconfigure the interface mode.
+                */
+               if (mode == MLO_AN_INBAND &&
+                   p->interface != state->interface &&
+                   chip->info->ops->port_set_link)
+                       chip->info->ops->port_set_link(chip, port,
+                                                      LINK_FORCED_DOWN);
+
+               err = mv88e6xxx_port_config_interface(chip, port,
+                                                     state->interface);
+               if (err && err != -EOPNOTSUPP)
+                       goto err_unlock;
+
+               err = mv88e6xxx_serdes_pcs_config(chip, port, mode,
+                                                 state->interface,
+                                                 state->advertising);
+               /* FIXME: we should restart negotiation if something changed -
+                * which is something we get if we convert to using phylinks
+                * PCS operations.
+                */
+               if (err > 0)
+                       err = 0;
+       }
 
        /* Undo the forced down state above after completing configuration
-        * irrespective of its state on entry, which allows the link to come up.
+        * irrespective of its state on entry, which allows the link to come
+        * up in the in-band case where there is no separate SERDES. Also
+        * ensure that the link can come up if the PPU is in use and we are
+        * in PHY mode (we treat the PPU as an effective in-band mechanism.)
         */
-       if (mode == MLO_AN_INBAND && p->interface != state->interface &&
-           chip->info->ops->port_set_link)
+       if (chip->info->ops->port_set_link &&
+           ((mode == MLO_AN_INBAND && p->interface != state->interface) ||
+            (mode == MLO_AN_PHY && mv88e6xxx_port_ppu_updates(chip, port))))
                chip->info->ops->port_set_link(chip, port, LINK_UNFORCED);
 
        p->interface = state->interface;
@@ -752,13 +762,16 @@ static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port,
        ops = chip->info->ops;
 
        mv88e6xxx_reg_lock(chip);
-       /* Internal PHYs propagate their configuration directly to the MAC.
-        * External PHYs depend on whether the PPU is enabled for this port.
+       /* Force the link down if we know the port may not be automatically
+        * updated by the switch or if we are using fixed-link mode.
         */
-       if (((!mv88e6xxx_phy_is_internal(ds, port) &&
-             !mv88e6xxx_port_ppu_updates(chip, port)) ||
+       if ((!mv88e6xxx_port_ppu_updates(chip, port) ||
             mode == MLO_AN_FIXED) && ops->port_sync_link)
                err = ops->port_sync_link(chip, port, mode, false);
+
+       if (!err && ops->port_set_speed_duplex)
+               err = ops->port_set_speed_duplex(chip, port, SPEED_UNFORCED,
+                                                DUPLEX_UNFORCED);
        mv88e6xxx_reg_unlock(chip);
 
        if (err)
@@ -779,11 +792,11 @@ static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port,
        ops = chip->info->ops;
 
        mv88e6xxx_reg_lock(chip);
-       /* Internal PHYs propagate their configuration directly to the MAC.
-        * External PHYs depend on whether the PPU is enabled for this port.
+       /* Configure and force the link up if we know that the port may not
+        * be automatically updated by the switch or if we are using fixed-link
+        * mode.
         */
-       if ((!mv88e6xxx_phy_is_internal(ds, port) &&
-            !mv88e6xxx_port_ppu_updates(chip, port)) ||
+       if (!mv88e6xxx_port_ppu_updates(chip, port) ||
            mode == MLO_AN_FIXED) {
                /* FIXME: for an automedia port, should we force the link
                 * down here - what if the link comes up due to "other" media
index d9817b20ea641f9b642435e693b5ee7e86398a7e..ab41619a809b3e98c3da9a53ec455c42a79a7293 100644 (file)
@@ -283,7 +283,7 @@ static int mv88e6xxx_port_set_speed_duplex(struct mv88e6xxx_chip *chip,
        if (err)
                return err;
 
-       if (speed)
+       if (speed != SPEED_UNFORCED)
                dev_dbg(chip->dev, "p%d: Speed set to %d Mbps\n", port, speed);
        else
                dev_dbg(chip->dev, "p%d: Speed unforced\n", port);
@@ -516,7 +516,7 @@ int mv88e6393x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
        if (err)
                return err;
 
-       if (speed)
+       if (speed != SPEED_UNFORCED)
                dev_dbg(chip->dev, "p%d: Speed set to %d Mbps\n", port, speed);
        else
                dev_dbg(chip->dev, "p%d: Speed unforced\n", port);
index 6ea003678798651f70df5f8235863f7c79973c85..2b05ead515cdcbe756ec4d34d67e823912174e49 100644 (file)
@@ -50,11 +50,22 @@ static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip,
 }
 
 static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
-                                         u16 status, u16 lpa,
+                                         u16 ctrl, u16 status, u16 lpa,
                                          struct phylink_link_state *state)
 {
+       state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
+
        if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) {
-               state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
+               /* The Speed and Duplex Resolved register is 1 if AN is enabled
+                * and complete, or if AN is disabled. So with disabled AN we
+                * still get here on link up. But we want to set an_complete
+                * only if AN was enabled, thus we look at BMCR_ANENABLE.
+                * (According to 802.3-2008 section 22.2.4.2.10, we should be
+                *  able to get this same value from BMSR_ANEGCAPABLE, but tests
+                *  show that these Marvell PHYs don't conform to this part of
+                *  the specification - BMSR_ANEGCAPABLE is simply always 1.)
+                */
+               state->an_complete = !!(ctrl & BMCR_ANENABLE);
                state->duplex = status &
                                MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ?
                                                 DUPLEX_FULL : DUPLEX_HALF;
@@ -81,6 +92,18 @@ static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
                        dev_err(chip->dev, "invalid PHY speed\n");
                        return -EINVAL;
                }
+       } else if (state->link &&
+                  state->interface != PHY_INTERFACE_MODE_SGMII) {
+               /* If Speed and Duplex Resolved register is 0 and link is up, it
+                * means that AN was enabled, but link partner had it disabled
+                * and the PHY invoked the Auto-Negotiation Bypass feature and
+                * linked anyway.
+                */
+               state->duplex = DUPLEX_FULL;
+               if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+                       state->speed = SPEED_2500;
+               else
+                       state->speed = SPEED_1000;
        } else {
                state->link = false;
        }
@@ -168,9 +191,15 @@ int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
 int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
                                   int lane, struct phylink_link_state *state)
 {
-       u16 lpa, status;
+       u16 lpa, status, ctrl;
        int err;
 
+       err = mv88e6352_serdes_read(chip, MII_BMCR, &ctrl);
+       if (err) {
+               dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
+               return err;
+       }
+
        err = mv88e6352_serdes_read(chip, 0x11, &status);
        if (err) {
                dev_err(chip->dev, "can't read Serdes PHY status: %d\n", err);
@@ -183,7 +212,7 @@ int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
                return err;
        }
 
-       return mv88e6xxx_serdes_pcs_get_state(chip, status, lpa, state);
+       return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
 }
 
 int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
@@ -801,7 +830,7 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
                           bool up)
 {
        u8 cmode = chip->ports[port].cmode;
-       int err = 0;
+       int err;
 
        switch (cmode) {
        case MV88E6XXX_PORT_STS_CMODE_SGMII:
@@ -813,6 +842,9 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
        case MV88E6XXX_PORT_STS_CMODE_RXAUI:
                err = mv88e6390_serdes_power_10g(chip, lane, up);
                break;
+       default:
+               err = -EINVAL;
+               break;
        }
 
        if (!err && up)
@@ -883,9 +915,16 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
 static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
        int port, int lane, struct phylink_link_state *state)
 {
-       u16 lpa, status;
+       u16 lpa, status, ctrl;
        int err;
 
+       err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+                                   MV88E6390_SGMII_BMCR, &ctrl);
+       if (err) {
+               dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
+               return err;
+       }
+
        err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
                                    MV88E6390_SGMII_PHY_STATUS, &status);
        if (err) {
@@ -900,7 +939,7 @@ static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
                return err;
        }
 
-       return mv88e6xxx_serdes_pcs_get_state(chip, status, lpa, state);
+       return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
 }
 
 static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip,
@@ -1271,9 +1310,31 @@ void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
        }
 }
 
-static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane)
+static int mv88e6393x_serdes_power_lane(struct mv88e6xxx_chip *chip, int lane,
+                                       bool on)
 {
-       u16 reg, pcs;
+       u16 reg;
+       int err;
+
+       err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+                                   MV88E6393X_SERDES_CTRL1, &reg);
+       if (err)
+               return err;
+
+       if (on)
+               reg &= ~(MV88E6393X_SERDES_CTRL1_TX_PDOWN |
+                        MV88E6393X_SERDES_CTRL1_RX_PDOWN);
+       else
+               reg |= MV88E6393X_SERDES_CTRL1_TX_PDOWN |
+                      MV88E6393X_SERDES_CTRL1_RX_PDOWN;
+
+       return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+                                     MV88E6393X_SERDES_CTRL1, reg);
+}
+
+static int mv88e6393x_serdes_erratum_4_6(struct mv88e6xxx_chip *chip, int lane)
+{
+       u16 reg;
        int err;
 
        /* mv88e6393x family errata 4.6:
@@ -1284,26 +1345,45 @@ static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane)
         * It seems that after this workaround the SERDES is automatically
         * powered up (the bit is cleared), so power it down.
         */
-       if (lane == MV88E6393X_PORT0_LANE || lane == MV88E6393X_PORT9_LANE ||
-           lane == MV88E6393X_PORT10_LANE) {
-               err = mv88e6390_serdes_read(chip, lane,
-                                           MDIO_MMD_PHYXS,
-                                           MV88E6393X_SERDES_POC, &reg);
-               if (err)
-                       return err;
+       err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+                                   MV88E6393X_SERDES_POC, &reg);
+       if (err)
+               return err;
 
-               reg &= ~MV88E6393X_SERDES_POC_PDOWN;
-               reg |= MV88E6393X_SERDES_POC_RESET;
+       reg &= ~MV88E6393X_SERDES_POC_PDOWN;
+       reg |= MV88E6393X_SERDES_POC_RESET;
 
-               err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
-                                            MV88E6393X_SERDES_POC, reg);
-               if (err)
-                       return err;
+       err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+                                    MV88E6393X_SERDES_POC, reg);
+       if (err)
+               return err;
 
-               err = mv88e6390_serdes_power_sgmii(chip, lane, false);
-               if (err)
-                       return err;
-       }
+       err = mv88e6390_serdes_power_sgmii(chip, lane, false);
+       if (err)
+               return err;
+
+       return mv88e6393x_serdes_power_lane(chip, lane, false);
+}
+
+int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip)
+{
+       int err;
+
+       err = mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT0_LANE);
+       if (err)
+               return err;
+
+       err = mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT9_LANE);
+       if (err)
+               return err;
+
+       return mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT10_LANE);
+}
+
+static int mv88e6393x_serdes_erratum_4_8(struct mv88e6xxx_chip *chip, int lane)
+{
+       u16 reg, pcs;
+       int err;
 
        /* mv88e6393x family errata 4.8:
         * When a SERDES port is operating in 1000BASE-X or SGMII mode link may
@@ -1334,38 +1414,152 @@ static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane)
                                      MV88E6393X_ERRATA_4_8_REG, reg);
 }
 
-int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip)
+static int mv88e6393x_serdes_erratum_5_2(struct mv88e6xxx_chip *chip, int lane,
+                                        u8 cmode)
+{
+       static const struct {
+               u16 dev, reg, val, mask;
+       } fixes[] = {
+               { MDIO_MMD_VEND1, 0x8093, 0xcb5a, 0xffff },
+               { MDIO_MMD_VEND1, 0x8171, 0x7088, 0xffff },
+               { MDIO_MMD_VEND1, 0x80c9, 0x311a, 0xffff },
+               { MDIO_MMD_VEND1, 0x80a2, 0x8000, 0xff7f },
+               { MDIO_MMD_VEND1, 0x80a9, 0x0000, 0xfff0 },
+               { MDIO_MMD_VEND1, 0x80a3, 0x0000, 0xf8ff },
+               { MDIO_MMD_PHYXS, MV88E6393X_SERDES_POC,
+                 MV88E6393X_SERDES_POC_RESET, MV88E6393X_SERDES_POC_RESET },
+       };
+       int err, i;
+       u16 reg;
+
+       /* mv88e6393x family errata 5.2:
+        * For optimal signal integrity the following sequence should be applied
+        * to SERDES operating in 10G mode. These registers only apply to 10G
+        * operation and have no effect on other speeds.
+        */
+       if (cmode != MV88E6393X_PORT_STS_CMODE_10GBASER)
+               return 0;
+
+       for (i = 0; i < ARRAY_SIZE(fixes); ++i) {
+               err = mv88e6390_serdes_read(chip, lane, fixes[i].dev,
+                                           fixes[i].reg, &reg);
+               if (err)
+                       return err;
+
+               reg &= ~fixes[i].mask;
+               reg |= fixes[i].val;
+
+               err = mv88e6390_serdes_write(chip, lane, fixes[i].dev,
+                                            fixes[i].reg, reg);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+static int mv88e6393x_serdes_fix_2500basex_an(struct mv88e6xxx_chip *chip,
+                                             int lane, u8 cmode, bool on)
 {
+       u16 reg;
        int err;
 
-       err = mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT0_LANE);
+       if (cmode != MV88E6XXX_PORT_STS_CMODE_2500BASEX)
+               return 0;
+
+       /* Inband AN is broken on Amethyst in 2500base-x mode when set by
+        * standard mechanism (via cmode).
+        * We can get around this by configuring the PCS mode to 1000base-x
+        * and then writing value 0x58 to register 1e.8000. (This must be done
+        * while SerDes receiver and transmitter are disabled, which is, when
+        * this function is called.)
+        * It seems that when we do this configuration to 2500base-x mode (by
+        * changing PCS mode to 1000base-x and frequency to 3.125 GHz from
+        * 1.25 GHz) and then configure to sgmii or 1000base-x, the device
+        * thinks that it already has SerDes at 1.25 GHz and does not change
+        * the 1e.8000 register, leaving SerDes at 3.125 GHz.
+        * To avoid this, change PCS mode back to 2500base-x when disabling
+        * SerDes from 2500base-x mode.
+        */
+       err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+                                   MV88E6393X_SERDES_POC, &reg);
+       if (err)
+               return err;
+
+       reg &= ~(MV88E6393X_SERDES_POC_PCS_MASK | MV88E6393X_SERDES_POC_AN);
+       if (on)
+               reg |= MV88E6393X_SERDES_POC_PCS_1000BASEX |
+                      MV88E6393X_SERDES_POC_AN;
+       else
+               reg |= MV88E6393X_SERDES_POC_PCS_2500BASEX;
+       reg |= MV88E6393X_SERDES_POC_RESET;
+
+       err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+                                    MV88E6393X_SERDES_POC, reg);
        if (err)
                return err;
 
-       err = mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT9_LANE);
+       err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_VEND1, 0x8000, 0x58);
        if (err)
                return err;
 
-       return mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT10_LANE);
+       return 0;
 }
 
 int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
                            bool on)
 {
        u8 cmode = chip->ports[port].cmode;
+       int err;
 
        if (port != 0 && port != 9 && port != 10)
                return -EOPNOTSUPP;
 
+       if (on) {
+               err = mv88e6393x_serdes_erratum_4_8(chip, lane);
+               if (err)
+                       return err;
+
+               err = mv88e6393x_serdes_erratum_5_2(chip, lane, cmode);
+               if (err)
+                       return err;
+
+               err = mv88e6393x_serdes_fix_2500basex_an(chip, lane, cmode,
+                                                        true);
+               if (err)
+                       return err;
+
+               err = mv88e6393x_serdes_power_lane(chip, lane, true);
+               if (err)
+                       return err;
+       }
+
        switch (cmode) {
        case MV88E6XXX_PORT_STS_CMODE_SGMII:
        case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
        case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
-               return mv88e6390_serdes_power_sgmii(chip, lane, on);
+               err = mv88e6390_serdes_power_sgmii(chip, lane, on);
+               break;
        case MV88E6393X_PORT_STS_CMODE_5GBASER:
        case MV88E6393X_PORT_STS_CMODE_10GBASER:
-               return mv88e6390_serdes_power_10g(chip, lane, on);
+               err = mv88e6390_serdes_power_10g(chip, lane, on);
+               break;
+       default:
+               err = -EINVAL;
+               break;
        }
 
-       return 0;
+       if (err)
+               return err;
+
+       if (!on) {
+               err = mv88e6393x_serdes_power_lane(chip, lane, false);
+               if (err)
+                       return err;
+
+               err = mv88e6393x_serdes_fix_2500basex_an(chip, lane, cmode,
+                                                        false);
+       }
+
+       return err;
 }
index cbb3ba30caea9db2e5f9f3fa3820946b50d1c4fe..8dd8ed225b4594f5421d91fb3755dfe242cb0bfb 100644 (file)
 #define MV88E6393X_SERDES_POC_PCS_MASK         0x0007
 #define MV88E6393X_SERDES_POC_RESET            BIT(15)
 #define MV88E6393X_SERDES_POC_PDOWN            BIT(5)
+#define MV88E6393X_SERDES_POC_AN               BIT(3)
+#define MV88E6393X_SERDES_CTRL1                        0xf003
+#define MV88E6393X_SERDES_CTRL1_TX_PDOWN       BIT(9)
+#define MV88E6393X_SERDES_CTRL1_RX_PDOWN       BIT(8)
 
 #define MV88E6393X_ERRATA_4_8_REG              0xF074
 #define MV88E6393X_ERRATA_4_8_BIT              BIT(14)
index 327cc46548065461995cbaf3b23f2abfaba129a5..f1a05e7dc81811ea36a5e9825d553badccbefbbb 100644 (file)
@@ -290,8 +290,11 @@ static int felix_setup_mmio_filtering(struct felix *felix)
                }
        }
 
-       if (cpu < 0)
+       if (cpu < 0) {
+               kfree(tagging_rule);
+               kfree(redirect_rule);
                return -EINVAL;
+       }
 
        tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE;
        *(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588);
index baaae97283c5e259ef3cbd7e293c3db3f257e459..078ca4cd716057ee3674be7516de58185c03a533 100644 (file)
 #define RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC    2112
 
 /* Family-specific data and limits */
+#define RTL8365MB_PHYADDRMAX   7
 #define RTL8365MB_NUM_PHYREGS  32
 #define RTL8365MB_PHYREGMAX    (RTL8365MB_NUM_PHYREGS - 1)
 #define RTL8365MB_MAX_NUM_PORTS        (RTL8365MB_CPU_PORT_NUM_8365MB_VC + 1)
 #define RTL8365MB_INDIRECT_ACCESS_STATUS_REG                   0x1F01
 #define RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG                  0x1F02
 #define   RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK    GENMASK(4, 0)
-#define   RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK                GENMASK(6, 5)
+#define   RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK                GENMASK(7, 5)
 #define   RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK    GENMASK(11, 8)
 #define   RTL8365MB_PHY_BASE                                   0x2000
 #define RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG               0x1F03
@@ -679,6 +680,9 @@ static int rtl8365mb_phy_read(struct realtek_smi *smi, int phy, int regnum)
        u16 val;
        int ret;
 
+       if (phy > RTL8365MB_PHYADDRMAX)
+               return -EINVAL;
+
        if (regnum > RTL8365MB_PHYREGMAX)
                return -EINVAL;
 
@@ -704,6 +708,9 @@ static int rtl8365mb_phy_write(struct realtek_smi *smi, int phy, int regnum,
        u32 ocp_addr;
        int ret;
 
+       if (phy > RTL8365MB_PHYADDRMAX)
+               return -EINVAL;
+
        if (regnum > RTL8365MB_PHYREGMAX)
                return -EINVAL;
 
index d75d95a97dd93207df9604b210dfb82e5cd09aa4..993b2fb429612c403821e060d0fb63cb83a19906 100644 (file)
@@ -1430,16 +1430,19 @@ static int altera_tse_probe(struct platform_device *pdev)
                priv->rxdescmem_busaddr = dma_res->start;
 
        } else {
+               ret = -ENODEV;
                goto err_free_netdev;
        }
 
-       if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
+       if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) {
                dma_set_coherent_mask(priv->device,
                                      DMA_BIT_MASK(priv->dmaops->dmamask));
-       else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
+       } else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) {
                dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
-       else
+       } else {
+               ret = -EIO;
                goto err_free_netdev;
+       }
 
        /* MAC address space */
        ret = request_and_map(pdev, "control_port", &control_port,
index 23b2d390fcdda8af72fe7e474fc5fef7194a9274..ace691d7cd759f52d38d116ce8cfe979089d7f25 100644 (file)
 
 #define AQ_DEVICE_ID_AQC113DEV 0x00C0
 #define AQ_DEVICE_ID_AQC113CS  0x94C0
+#define AQ_DEVICE_ID_AQC113CA  0x34C0
 #define AQ_DEVICE_ID_AQC114CS  0x93C0
 #define AQ_DEVICE_ID_AQC113    0x04C0
 #define AQ_DEVICE_ID_AQC113C   0x14C0
 #define AQ_DEVICE_ID_AQC115C   0x12C0
+#define AQ_DEVICE_ID_AQC116C   0x11C0
 
 #define HW_ATL_NIC_NAME "Marvell (aQuantia) AQtion 10Gbit Network Adapter"
 
 
 #define AQ_NIC_RATE_10G                BIT(0)
 #define AQ_NIC_RATE_5G         BIT(1)
-#define AQ_NIC_RATE_5GSR       BIT(2)
-#define AQ_NIC_RATE_2G5                BIT(3)
-#define AQ_NIC_RATE_1G         BIT(4)
-#define AQ_NIC_RATE_100M       BIT(5)
-#define AQ_NIC_RATE_10M                BIT(6)
-#define AQ_NIC_RATE_1G_HALF    BIT(7)
-#define AQ_NIC_RATE_100M_HALF  BIT(8)
-#define AQ_NIC_RATE_10M_HALF   BIT(9)
+#define AQ_NIC_RATE_2G5                BIT(2)
+#define AQ_NIC_RATE_1G         BIT(3)
+#define AQ_NIC_RATE_100M       BIT(4)
+#define AQ_NIC_RATE_10M                BIT(5)
+#define AQ_NIC_RATE_1G_HALF    BIT(6)
+#define AQ_NIC_RATE_100M_HALF  BIT(7)
+#define AQ_NIC_RATE_10M_HALF   BIT(8)
 
-#define AQ_NIC_RATE_EEE_10G    BIT(10)
-#define AQ_NIC_RATE_EEE_5G     BIT(11)
-#define AQ_NIC_RATE_EEE_2G5    BIT(12)
-#define AQ_NIC_RATE_EEE_1G     BIT(13)
-#define AQ_NIC_RATE_EEE_100M   BIT(14)
+#define AQ_NIC_RATE_EEE_10G    BIT(9)
+#define AQ_NIC_RATE_EEE_5G     BIT(10)
+#define AQ_NIC_RATE_EEE_2G5    BIT(11)
+#define AQ_NIC_RATE_EEE_1G     BIT(12)
+#define AQ_NIC_RATE_EEE_100M   BIT(13)
 #define AQ_NIC_RATE_EEE_MSK     (AQ_NIC_RATE_EEE_10G |\
                                 AQ_NIC_RATE_EEE_5G |\
                                 AQ_NIC_RATE_EEE_2G5 |\
index 062a300a566a55c505cb556530a3ca0aec83c703..dbd28466013580aca8cab6b99bbc168dd9c40e99 100644 (file)
@@ -80,6 +80,8 @@ struct aq_hw_link_status_s {
 };
 
 struct aq_stats_s {
+       u64 brc;
+       u64 btc;
        u64 uprc;
        u64 mprc;
        u64 bprc;
index 1acf544afeb4449b55ad4ce93781c13ccfa98a51..33f1a1377588bda47db7a76c94421a3f84d3ac4f 100644 (file)
@@ -316,18 +316,22 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
        aq_macsec_init(self);
 #endif
 
-       mutex_lock(&self->fwreq_mutex);
-       err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr);
-       mutex_unlock(&self->fwreq_mutex);
-       if (err)
-               goto err_exit;
+       if (platform_get_ethdev_address(&self->pdev->dev, self->ndev) != 0) {
+               // If DT has none or an invalid one, ask device for MAC address
+               mutex_lock(&self->fwreq_mutex);
+               err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr);
+               mutex_unlock(&self->fwreq_mutex);
 
-       eth_hw_addr_set(self->ndev, addr);
+               if (err)
+                       goto err_exit;
 
-       if (!is_valid_ether_addr(self->ndev->dev_addr) ||
-           !aq_nic_is_valid_ether_addr(self->ndev->dev_addr)) {
-               netdev_warn(self->ndev, "MAC is invalid, will use random.");
-               eth_hw_addr_random(self->ndev);
+               if (is_valid_ether_addr(addr) &&
+                   aq_nic_is_valid_ether_addr(addr)) {
+                       eth_hw_addr_set(self->ndev, addr);
+               } else {
+                       netdev_warn(self->ndev, "MAC is invalid, will use random.");
+                       eth_hw_addr_random(self->ndev);
+               }
        }
 
 #if defined(AQ_CFG_MAC_ADDR_PERMANENT)
@@ -905,8 +909,14 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
        data[++i] = stats->mbtc;
        data[++i] = stats->bbrc;
        data[++i] = stats->bbtc;
-       data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
-       data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
+       if (stats->brc)
+               data[++i] = stats->brc;
+       else
+               data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
+       if (stats->btc)
+               data[++i] = stats->btc;
+       else
+               data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
        data[++i] = stats->dma_pkt_rc;
        data[++i] = stats->dma_pkt_tc;
        data[++i] = stats->dma_oct_rc;
index d4b1976ee69b934d4a5dffac2d13da5697d78066..797a95142d1f44dbc0ed454e2daf955ec8fc90ea 100644 (file)
@@ -49,6 +49,8 @@ static const struct pci_device_id aq_pci_tbl[] = {
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113C), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC115C), },
+       { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113CA), },
+       { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC116C), },
 
        {}
 };
@@ -85,7 +87,10 @@ static const struct aq_board_revision_s hw_atl_boards[] = {
        { AQ_DEVICE_ID_AQC113CS,        AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
        { AQ_DEVICE_ID_AQC114CS,        AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
        { AQ_DEVICE_ID_AQC113C,         AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
-       { AQ_DEVICE_ID_AQC115C,         AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+       { AQ_DEVICE_ID_AQC115C,         AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc115c, },
+       { AQ_DEVICE_ID_AQC113CA,        AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+       { AQ_DEVICE_ID_AQC116C,         AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc116c, },
+
 };
 
 MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
index d281322d7dd29074185e2645aa0a17bb344ef404..f4774cf051c9780cfc434450673bb82d175e2736 100644 (file)
@@ -362,9 +362,6 @@ unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u
 {
        unsigned int count;
 
-       WARN_ONCE(!aq_vec_is_valid_tc(self, tc),
-                 "Invalid tc %u (#rx=%u, #tx=%u)\n",
-                 tc, self->rx_rings, self->tx_rings);
        if (!aq_vec_is_valid_tc(self, tc))
                return 0;
 
index 3f1704cbe1cb96bc0cf80ab1480242e7e06c4463..7e88d7234b14588c1816b2808ab1421ecf23b4a8 100644 (file)
@@ -867,12 +867,20 @@ static int hw_atl_fw1x_deinit(struct aq_hw_s *self)
 int hw_atl_utils_update_stats(struct aq_hw_s *self)
 {
        struct aq_stats_s *cs = &self->curr_stats;
+       struct aq_stats_s curr_stats = *cs;
        struct hw_atl_utils_mbox mbox;
+       bool corrupted_stats = false;
 
        hw_atl_utils_mpi_read_stats(self, &mbox);
 
-#define AQ_SDELTA(_N_) (self->curr_stats._N_ += \
-                       mbox.stats._N_ - self->last_stats._N_)
+#define AQ_SDELTA(_N_)  \
+do { \
+       if (!corrupted_stats && \
+           ((s64)(mbox.stats._N_ - self->last_stats._N_)) >= 0) \
+               curr_stats._N_ += mbox.stats._N_ - self->last_stats._N_; \
+       else \
+               corrupted_stats = true; \
+} while (0)
 
        if (self->aq_link_status.mbps) {
                AQ_SDELTA(uprc);
@@ -892,6 +900,9 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self)
                AQ_SDELTA(bbrc);
                AQ_SDELTA(bbtc);
                AQ_SDELTA(dpc);
+
+               if (!corrupted_stats)
+                       *cs = curr_stats;
        }
 #undef AQ_SDELTA
 
index eac631c45c565a4c17802123e38c4120bfef848d..4d4cfbc91e19cf658fccd181d037c876a864e8f2 100644 (file)
@@ -132,9 +132,6 @@ static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed)
        if (speed & AQ_NIC_RATE_5G)
                rate |= FW2X_RATE_5G;
 
-       if (speed & AQ_NIC_RATE_5GSR)
-               rate |= FW2X_RATE_5G;
-
        if (speed & AQ_NIC_RATE_2G5)
                rate |= FW2X_RATE_2G5;
 
index c98708bb044cad11597fba703882b761ec46f132..5dfc751572edc5d4817d3a0bab21ebddd376bded 100644 (file)
@@ -65,11 +65,25 @@ const struct aq_hw_caps_s hw_atl2_caps_aqc113 = {
                          AQ_NIC_RATE_5G  |
                          AQ_NIC_RATE_2G5 |
                          AQ_NIC_RATE_1G  |
-                         AQ_NIC_RATE_1G_HALF   |
                          AQ_NIC_RATE_100M      |
-                         AQ_NIC_RATE_100M_HALF |
-                         AQ_NIC_RATE_10M       |
-                         AQ_NIC_RATE_10M_HALF,
+                         AQ_NIC_RATE_10M,
+};
+
+const struct aq_hw_caps_s hw_atl2_caps_aqc115c = {
+       DEFAULT_BOARD_BASIC_CAPABILITIES,
+       .media_type = AQ_HW_MEDIA_TYPE_TP,
+       .link_speed_msk = AQ_NIC_RATE_2G5 |
+                         AQ_NIC_RATE_1G  |
+                         AQ_NIC_RATE_100M      |
+                         AQ_NIC_RATE_10M,
+};
+
+const struct aq_hw_caps_s hw_atl2_caps_aqc116c = {
+       DEFAULT_BOARD_BASIC_CAPABILITIES,
+       .media_type = AQ_HW_MEDIA_TYPE_TP,
+       .link_speed_msk = AQ_NIC_RATE_1G  |
+                         AQ_NIC_RATE_100M      |
+                         AQ_NIC_RATE_10M,
 };
 
 static u32 hw_atl2_sem_act_rslvr_get(struct aq_hw_s *self)
index de8723f1c28a13f0e98214f9011ac771028f2eff..346f0dc9912e500014d253135ab431f67be8eb3c 100644 (file)
@@ -9,6 +9,8 @@
 #include "aq_common.h"
 
 extern const struct aq_hw_caps_s hw_atl2_caps_aqc113;
+extern const struct aq_hw_caps_s hw_atl2_caps_aqc115c;
+extern const struct aq_hw_caps_s hw_atl2_caps_aqc116c;
 extern const struct aq_hw_ops hw_atl2_ops;
 
 #endif /* HW_ATL2_H */
index b66fa346581ce30b94627f810cf013d02f63323c..6bad64c77b87c94a258b4bee5391ffa66a52c031 100644 (file)
@@ -239,7 +239,8 @@ struct version_s {
                u8 minor;
                u16 build;
        } phy;
-       u32 rsvd;
+       u32 drv_iface_ver:4;
+       u32 rsvd:28;
 };
 
 struct link_status_s {
@@ -424,7 +425,7 @@ struct cable_diag_status_s {
        u16 rsvd2;
 };
 
-struct statistics_s {
+struct statistics_a0_s {
        struct {
                u32 link_up;
                u32 link_down;
@@ -457,6 +458,33 @@ struct statistics_s {
        u32 reserve_fw_gap;
 };
 
+struct __packed statistics_b0_s {
+       u64 rx_good_octets;
+       u64 rx_pause_frames;
+       u64 rx_good_frames;
+       u64 rx_errors;
+       u64 rx_unicast_frames;
+       u64 rx_multicast_frames;
+       u64 rx_broadcast_frames;
+
+       u64 tx_good_octets;
+       u64 tx_pause_frames;
+       u64 tx_good_frames;
+       u64 tx_errors;
+       u64 tx_unicast_frames;
+       u64 tx_multicast_frames;
+       u64 tx_broadcast_frames;
+
+       u32 main_loop_cycles;
+};
+
+struct __packed statistics_s {
+       union __packed {
+               struct statistics_a0_s a0;
+               struct statistics_b0_s b0;
+       };
+};
+
 struct filter_caps_s {
        u8 l2_filters_base_index:6;
        u8 flexible_filter_mask:2;
@@ -545,7 +573,7 @@ struct management_status_s {
        u32 rsvd5;
 };
 
-struct fw_interface_out {
+struct __packed fw_interface_out {
        struct transaction_counter_s transaction_id;
        struct version_s version;
        struct link_status_s link_status;
@@ -569,7 +597,6 @@ struct fw_interface_out {
        struct core_dump_s core_dump;
        u32 rsvd11;
        struct statistics_s stats;
-       u32 rsvd12;
        struct filter_caps_s filter_caps;
        struct device_caps_s device_caps;
        u32 rsvd13;
@@ -592,6 +619,9 @@ struct fw_interface_out {
 #define  AQ_HOST_MODE_LOW_POWER    3U
 #define  AQ_HOST_MODE_SHUTDOWN     4U
 
+#define  AQ_A2_FW_INTERFACE_A0     0
+#define  AQ_A2_FW_INTERFACE_B0     1
+
 int hw_atl2_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops);
 
 int hw_atl2_utils_soft_reset(struct aq_hw_s *self);
index dd259c8f2f4f397adbed0b5ea284bd8881a48735..58d426dda3edbf13b4e852b3fab14e720d55d3b2 100644 (file)
@@ -84,7 +84,7 @@ static int hw_atl2_shared_buffer_read_block(struct aq_hw_s *self,
                        if (cnt > AQ_A2_FW_READ_TRY_MAX)
                                return -ETIME;
                        if (tid1.transaction_cnt_a != tid1.transaction_cnt_b)
-                               udelay(1);
+                               mdelay(1);
                } while (tid1.transaction_cnt_a != tid1.transaction_cnt_b);
 
                hw_atl2_mif_shared_buf_read(self, offset, (u32 *)data, dwords);
@@ -154,7 +154,7 @@ static void a2_link_speed_mask2fw(u32 speed,
 {
        link_options->rate_10G = !!(speed & AQ_NIC_RATE_10G);
        link_options->rate_5G = !!(speed & AQ_NIC_RATE_5G);
-       link_options->rate_N5G = !!(speed & AQ_NIC_RATE_5GSR);
+       link_options->rate_N5G = link_options->rate_5G;
        link_options->rate_2P5G = !!(speed & AQ_NIC_RATE_2G5);
        link_options->rate_N2P5G = link_options->rate_2P5G;
        link_options->rate_1G = !!(speed & AQ_NIC_RATE_1G);
@@ -192,8 +192,6 @@ static u32 a2_fw_lkp_to_mask(struct lkp_link_caps_s *lkp_link_caps)
                rate |= AQ_NIC_RATE_10G;
        if (lkp_link_caps->rate_5G)
                rate |= AQ_NIC_RATE_5G;
-       if (lkp_link_caps->rate_N5G)
-               rate |= AQ_NIC_RATE_5GSR;
        if (lkp_link_caps->rate_2P5G)
                rate |= AQ_NIC_RATE_2G5;
        if (lkp_link_caps->rate_1G)
@@ -335,15 +333,22 @@ static int aq_a2_fw_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
        return 0;
 }
 
-static int aq_a2_fw_update_stats(struct aq_hw_s *self)
+static void aq_a2_fill_a0_stats(struct aq_hw_s *self,
+                               struct statistics_s *stats)
 {
        struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
-       struct statistics_s stats;
-
-       hw_atl2_shared_buffer_read_safe(self, stats, &stats);
-
-#define AQ_SDELTA(_N_, _F_) (self->curr_stats._N_ += \
-                       stats.msm._F_ - priv->last_stats.msm._F_)
+       struct aq_stats_s *cs = &self->curr_stats;
+       struct aq_stats_s curr_stats = *cs;
+       bool corrupted_stats = false;
+
+#define AQ_SDELTA(_N, _F)  \
+do { \
+       if (!corrupted_stats && \
+           ((s64)(stats->a0.msm._F - priv->last_stats.a0.msm._F)) >= 0) \
+               curr_stats._N += stats->a0.msm._F - priv->last_stats.a0.msm._F;\
+       else \
+               corrupted_stats = true; \
+} while (0)
 
        if (self->aq_link_status.mbps) {
                AQ_SDELTA(uprc, rx_unicast_frames);
@@ -362,17 +367,76 @@ static int aq_a2_fw_update_stats(struct aq_hw_s *self)
                AQ_SDELTA(mbtc, tx_multicast_octets);
                AQ_SDELTA(bbrc, rx_broadcast_octets);
                AQ_SDELTA(bbtc, tx_broadcast_octets);
+
+               if (!corrupted_stats)
+                       *cs = curr_stats;
        }
 #undef AQ_SDELTA
-       self->curr_stats.dma_pkt_rc =
-               hw_atl_stats_rx_dma_good_pkt_counter_get(self);
-       self->curr_stats.dma_pkt_tc =
-               hw_atl_stats_tx_dma_good_pkt_counter_get(self);
-       self->curr_stats.dma_oct_rc =
-               hw_atl_stats_rx_dma_good_octet_counter_get(self);
-       self->curr_stats.dma_oct_tc =
-               hw_atl_stats_tx_dma_good_octet_counter_get(self);
-       self->curr_stats.dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
+
+}
+
+static void aq_a2_fill_b0_stats(struct aq_hw_s *self,
+                               struct statistics_s *stats)
+{
+       struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+       struct aq_stats_s *cs = &self->curr_stats;
+       struct aq_stats_s curr_stats = *cs;
+       bool corrupted_stats = false;
+
+#define AQ_SDELTA(_N, _F)  \
+do { \
+       if (!corrupted_stats && \
+           ((s64)(stats->b0._F - priv->last_stats.b0._F)) >= 0) \
+               curr_stats._N += stats->b0._F - priv->last_stats.b0._F; \
+       else \
+               corrupted_stats = true; \
+} while (0)
+
+       if (self->aq_link_status.mbps) {
+               AQ_SDELTA(uprc, rx_unicast_frames);
+               AQ_SDELTA(mprc, rx_multicast_frames);
+               AQ_SDELTA(bprc, rx_broadcast_frames);
+               AQ_SDELTA(erpr, rx_errors);
+               AQ_SDELTA(brc, rx_good_octets);
+
+               AQ_SDELTA(uptc, tx_unicast_frames);
+               AQ_SDELTA(mptc, tx_multicast_frames);
+               AQ_SDELTA(bptc, tx_broadcast_frames);
+               AQ_SDELTA(erpt, tx_errors);
+               AQ_SDELTA(btc, tx_good_octets);
+
+               if (!corrupted_stats)
+                       *cs = curr_stats;
+       }
+#undef AQ_SDELTA
+}
+
+static int aq_a2_fw_update_stats(struct aq_hw_s *self)
+{
+       struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+       struct aq_stats_s *cs = &self->curr_stats;
+       struct statistics_s stats;
+       struct version_s version;
+       int err;
+
+       err = hw_atl2_shared_buffer_read_safe(self, version, &version);
+       if (err)
+               return err;
+
+       err = hw_atl2_shared_buffer_read_safe(self, stats, &stats);
+       if (err)
+               return err;
+
+       if (version.drv_iface_ver == AQ_A2_FW_INTERFACE_A0)
+               aq_a2_fill_a0_stats(self, &stats);
+       else
+               aq_a2_fill_b0_stats(self, &stats);
+
+       cs->dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counter_get(self);
+       cs->dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counter_get(self);
+       cs->dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counter_get(self);
+       cs->dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counter_get(self);
+       cs->dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
 
        memcpy(&priv->last_stats, &stats, sizeof(stats));
 
@@ -499,9 +563,9 @@ u32 hw_atl2_utils_get_fw_version(struct aq_hw_s *self)
        hw_atl2_shared_buffer_read_safe(self, version, &version);
 
        /* A2 FW version is stored in reverse order */
-       return version.mac.major << 24 |
-              version.mac.minor << 16 |
-              version.mac.build;
+       return version.bundle.major << 24 |
+              version.bundle.minor << 16 |
+              version.bundle.build;
 }
 
 int hw_atl2_utils_get_action_resolve_table_caps(struct aq_hw_s *self,
index 7cc5213c575a1d40cc012d16a0feaf44c611c3cf..b07cb9bc5f2d0bbd5a02364a6100f6e1be806295 100644 (file)
@@ -708,7 +708,9 @@ static int bcm4908_enet_probe(struct platform_device *pdev)
 
        enet->irq_tx = platform_get_irq_byname(pdev, "tx");
 
-       dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+       err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+       if (err)
+               return err;
 
        err = bcm4908_enet_dma_alloc(enet);
        if (err)
index 40933bf5a71007ca2e31c2b4ffbf23e7d4f92ada..60dde29974bfea53f8092fda078f348d36c6a6ef 100644 (file)
@@ -1309,11 +1309,11 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        struct device *kdev = &priv->pdev->dev;
        struct bcm_sysport_tx_ring *ring;
+       unsigned long flags, desc_flags;
        struct bcm_sysport_cb *cb;
        struct netdev_queue *txq;
        u32 len_status, addr_lo;
        unsigned int skb_len;
-       unsigned long flags;
        dma_addr_t mapping;
        u16 queue;
        int ret;
@@ -1373,8 +1373,10 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
        ring->desc_count--;
 
        /* Ports are latched, so write upper address first */
+       spin_lock_irqsave(&priv->desc_lock, desc_flags);
        tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index));
        tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index));
+       spin_unlock_irqrestore(&priv->desc_lock, desc_flags);
 
        /* Check ring space and update SW control flow */
        if (ring->desc_count == 0)
@@ -2013,6 +2015,7 @@ static int bcm_sysport_open(struct net_device *dev)
        }
 
        /* Initialize both hardware and software ring */
+       spin_lock_init(&priv->desc_lock);
        for (i = 0; i < dev->num_tx_queues; i++) {
                ret = bcm_sysport_init_tx_ring(priv, i);
                if (ret) {
index 984f76e74b43effde480fd2ce8f4612d75300a6c..16b73bb9acc783b41aefb2e6198f354d9e9cc6bd 100644 (file)
@@ -711,6 +711,7 @@ struct bcm_sysport_priv {
        int                     wol_irq;
 
        /* Transmit rings */
+       spinlock_t              desc_lock;
        struct bcm_sysport_tx_ring *tx_rings;
 
        /* Receive queue */
index 5f259641437a720c65dd4b3d70867a7bedbe5d0a..c888ddee1fc41628fc13036aa1fa1838b2c3e055 100644 (file)
@@ -589,9 +589,9 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
                 * Internal or external PHY with MDIO access
                 */
                phydev = phy_attach(priv->dev, phy_name, pd->phy_interface);
-               if (!phydev) {
+               if (IS_ERR(phydev)) {
                        dev_err(kdev, "failed to register PHY device\n");
-                       return -ENODEV;
+                       return PTR_ERR(phydev);
                }
        } else {
                /*
index 6451c8383639fcedbe61098ad3c8e244a4fd0a69..8e643567abce2a0673eb32eb3e55d3cc6c15b1bd 100644 (file)
@@ -4550,6 +4550,8 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
 
        fsl_mc_portal_free(priv->mc_io);
 
+       destroy_workqueue(priv->dpaa2_ptp_wq);
+
        dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
 
        free_netdev(net_dev);
index 2085844227fe535d190df8ec73d52975638367d2..e54e70ebdd059fa1a4ed98af2721b4f726d865af 100644 (file)
@@ -388,6 +388,8 @@ struct dpaa2_eth_ch_stats {
        __u64 bytes_per_cdan;
 };
 
+#define DPAA2_ETH_CH_STATS     7
+
 /* Maximum number of queues associated with a DPNI */
 #define DPAA2_ETH_MAX_TCS              8
 #define DPAA2_ETH_MAX_RX_QUEUES_PER_TC 16
index adb8ce5306ee84240f6b8b637d1dcd64d8694d61..3fdbf87dccb1ea5abb0841d6c4d452e673c10b01 100644 (file)
@@ -278,7 +278,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
        /* Per-channel stats */
        for (k = 0; k < priv->num_channels; k++) {
                ch_stats = &priv->channel[k]->stats;
-               for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64) - 1; j++)
+               for (j = 0; j < DPAA2_ETH_CH_STATS; j++)
                        *((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
        }
        i += j;
index 7b4961daa25402003df2d190ccbff610b78c50b6..ed7301b6916941268d1f6ad54adab7c1d3615ca5 100644 (file)
@@ -377,6 +377,9 @@ struct bufdesc_ex {
 #define FEC_ENET_WAKEUP        ((uint)0x00020000)      /* Wakeup request */
 #define FEC_ENET_TXF   (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2)
 #define FEC_ENET_RXF   (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2)
+#define FEC_ENET_RXF_GET(X)    (((X) == 0) ? FEC_ENET_RXF_0 :  \
+                               (((X) == 1) ? FEC_ENET_RXF_1 :  \
+                               FEC_ENET_RXF_2))
 #define FEC_ENET_TS_AVAIL       ((uint)0x00010000)
 #define FEC_ENET_TS_TIMER       ((uint)0x00008000)
 
index bc418b910999fc5821cf6917097435c9316745ed..1b1f7f2a61306e78d4dcfc07f1502dfda5b5de43 100644 (file)
@@ -1480,7 +1480,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
                        break;
                pkt_received++;
 
-               writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
+               writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT);
 
                /* Check for errors. */
                status ^= BD_ENET_RX_LAST;
index 83ae56c310d3b9322ec351ba1151ae461fa7876f..326b56b49216a8c4e1d3868384622bfa81860f63 100644 (file)
@@ -738,10 +738,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
         * is not set to GqiRda, choose the queue format in a priority order:
         * DqoRda, GqiRda, GqiQpl. Use GqiQpl as default.
         */
-       if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
-               dev_info(&priv->pdev->dev,
-                        "Driver is running with GQI RDA queue format.\n");
-       } else if (dev_op_dqo_rda) {
+       if (dev_op_dqo_rda) {
                priv->queue_format = GVE_DQO_RDA_FORMAT;
                dev_info(&priv->pdev->dev,
                         "Driver is running with DQO RDA queue format.\n");
@@ -753,6 +750,9 @@ int gve_adminq_describe_device(struct gve_priv *priv)
                         "Driver is running with GQI RDA queue format.\n");
                supported_features_mask =
                        be32_to_cpu(dev_op_gqi_rda->supported_features_mask);
+       } else if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
+               dev_info(&priv->pdev->dev,
+                        "Driver is running with GQI RDA queue format.\n");
        } else {
                priv->queue_format = GVE_GQI_QPL_FORMAT;
                if (dev_op_gqi_qpl)
index 88ca49cbc1e2987031ab54a96ab343786fca42ea..d57508bc4307fe3901146983710c6f53734eb4e5 100644 (file)
@@ -68,6 +68,9 @@ struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
                set_protocol = ctx->curr_frag_cnt == ctx->expected_frag_cnt - 1;
        } else {
                skb = napi_alloc_skb(napi, len);
+
+               if (unlikely(!skb))
+                       return NULL;
                set_protocol = true;
        }
        __skb_put(skb, len);
index 3f7a9a4c59d566a9f368c47348aa3994bf254585..63f5abcc6bf4113e29f280b1c234c378b356b0e3 100644 (file)
@@ -839,6 +839,8 @@ struct hnae3_handle {
 
        u8 netdev_flags;
        struct dentry *hnae3_dbgfs;
+       /* protects concurrent contention between debugfs commands */
+       struct mutex dbgfs_lock;
 
        /* Network interface message level enabled bits */
        u32 msg_enable;
index 081295bff7654930d243c3eee10379cf65ae4b46..c381f8af67f082f154ad65cdb279c36e64fe7256 100644 (file)
@@ -1226,6 +1226,7 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
        if (ret)
                return ret;
 
+       mutex_lock(&handle->dbgfs_lock);
        save_buf = &hns3_dbg_cmd[index].buf;
 
        if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
@@ -1238,15 +1239,15 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
                read_buf = *save_buf;
        } else {
                read_buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL);
-               if (!read_buf)
-                       return -ENOMEM;
+               if (!read_buf) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
 
                /* save the buffer addr until the last read operation */
                *save_buf = read_buf;
-       }
 
-       /* get data ready for the first time to read */
-       if (!*ppos) {
+               /* get data ready for the first time to read */
                ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
                                        read_buf, hns3_dbg_cmd[index].buf_len);
                if (ret)
@@ -1255,8 +1256,10 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
 
        size = simple_read_from_buffer(buffer, count, ppos, read_buf,
                                       strlen(read_buf));
-       if (size > 0)
+       if (size > 0) {
+               mutex_unlock(&handle->dbgfs_lock);
                return size;
+       }
 
 out:
        /* free the buffer for the last read operation */
@@ -1265,6 +1268,7 @@ out:
                *save_buf = NULL;
        }
 
+       mutex_unlock(&handle->dbgfs_lock);
        return ret;
 }
 
@@ -1337,6 +1341,8 @@ int hns3_dbg_init(struct hnae3_handle *handle)
                        debugfs_create_dir(hns3_dbg_dentry[i].name,
                                           handle->hnae3_dbgfs);
 
+       mutex_init(&handle->dbgfs_lock);
+
        for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
                if ((hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_TM_NODES &&
                     ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) ||
@@ -1363,6 +1369,7 @@ int hns3_dbg_init(struct hnae3_handle *handle)
        return 0;
 
 out:
+       mutex_destroy(&handle->dbgfs_lock);
        debugfs_remove_recursive(handle->hnae3_dbgfs);
        handle->hnae3_dbgfs = NULL;
        return ret;
@@ -1378,6 +1385,7 @@ void hns3_dbg_uninit(struct hnae3_handle *handle)
                        hns3_dbg_cmd[i].buf = NULL;
                }
 
+       mutex_destroy(&handle->dbgfs_lock);
        debugfs_remove_recursive(handle->hnae3_dbgfs);
        handle->hnae3_dbgfs = NULL;
 }
index fdc66fae096011a21a86b84b590604f0de152b27..c5ac6ecf36e107e6bdaf4a111814eae62d877f89 100644 (file)
@@ -114,7 +114,8 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
 
        memcpy(&req->msg, send_msg, sizeof(struct hclge_vf_to_pf_msg));
 
-       trace_hclge_vf_mbx_send(hdev, req);
+       if (test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state))
+               trace_hclge_vf_mbx_send(hdev, req);
 
        /* synchronous send */
        if (need_resp) {
index a78c398bf5b25688759be25f5fda981befbae336..01e7d3c0b68ed3020afeee4bb7df8da6794f8b0a 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/interrupt.h>
 #include <linux/etherdevice.h>
 #include <linux/netdevice.h>
+#include <linux/module.h>
 
 #include "hinic_hw_dev.h"
 #include "hinic_dev.h"
index 3cca51735421a7435f4c7f32fa3f5af9003f2d37..0bb3911dd014d08c3611ea209961fe4e6f7125d4 100644 (file)
@@ -628,17 +628,9 @@ static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
        old_buff_size = adapter->prev_rx_buf_sz;
        new_buff_size = adapter->cur_rx_buf_sz;
 
-       /* Require buff size to be exactly same for now */
-       if (old_buff_size != new_buff_size)
-               return false;
-
-       if (old_num_pools == new_num_pools && old_pool_size == new_pool_size)
-               return true;
-
-       if (old_num_pools < adapter->min_rx_queues ||
-           old_num_pools > adapter->max_rx_queues ||
-           old_pool_size < adapter->min_rx_add_entries_per_subcrq ||
-           old_pool_size > adapter->max_rx_add_entries_per_subcrq)
+       if (old_buff_size != new_buff_size ||
+           old_num_pools != new_num_pools ||
+           old_pool_size != new_pool_size)
                return false;
 
        return true;
@@ -874,17 +866,9 @@ static bool reuse_tx_pools(struct ibmvnic_adapter *adapter)
        old_mtu = adapter->prev_mtu;
        new_mtu = adapter->req_mtu;
 
-       /* Require MTU to be exactly same to reuse pools for now */
-       if (old_mtu != new_mtu)
-               return false;
-
-       if (old_num_pools == new_num_pools && old_pool_size == new_pool_size)
-               return true;
-
-       if (old_num_pools < adapter->min_tx_queues ||
-           old_num_pools > adapter->max_tx_queues ||
-           old_pool_size < adapter->min_tx_entries_per_subcrq ||
-           old_pool_size > adapter->max_tx_entries_per_subcrq)
+       if (old_mtu != new_mtu ||
+           old_num_pools != new_num_pools ||
+           old_pool_size != new_pool_size)
                return false;
 
        return true;
index 291e61ac3e4489eb0fc1e2420723861c914ed18b..2c1b1da1220eca3bb0b5e8a8cf4fe54ce1a70ed2 100644 (file)
@@ -553,6 +553,14 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
                dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
                return;
        }
+       if (vsi->type != I40E_VSI_MAIN &&
+           vsi->type != I40E_VSI_FDIR &&
+           vsi->type != I40E_VSI_VMDQ2) {
+               dev_info(&pf->pdev->dev,
+                        "vsi %d type %d descriptor rings not available\n",
+                        vsi_seid, vsi->type);
+               return;
+       }
        if (type == RING_TYPE_XDP && !i40e_enabled_xdp_vsi(vsi)) {
                dev_info(&pf->pdev->dev, "XDP not enabled on VSI %d\n", vsi_seid);
                return;
index 80ae264c99ba08f764cd3afeba185fb087465ee6..2ea4deb8fc44c60c45f8b74eab9596e8edee9469 100644 (file)
@@ -1948,6 +1948,32 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
        return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
 }
 
+/**
+ * i40e_sync_vf_state
+ * @vf: pointer to the VF info
+ * @state: VF state
+ *
+ * Called from a VF message to synchronize the service with a potential
+ * VF reset state
+ **/
+static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state)
+{
+       int i;
+
+       /* When handling some messages, it needs VF state to be set.
+        * It is possible that this flag is cleared during VF reset,
+        * so there is a need to wait until the end of the reset to
+        * handle the request message correctly.
+        */
+       for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) {
+               if (test_bit(state, &vf->vf_states))
+                       return true;
+               usleep_range(10000, 20000);
+       }
+
+       return test_bit(state, &vf->vf_states);
+}
+
 /**
  * i40e_vc_get_version_msg
  * @vf: pointer to the VF info
@@ -2008,7 +2034,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
        size_t len = 0;
        int ret;
 
-       if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
@@ -2131,7 +2157,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
        bool allmulti = false;
        bool alluni = false;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto err_out;
        }
@@ -2219,7 +2245,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
        struct i40e_vsi *vsi;
        u16 num_qps_all = 0;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
@@ -2368,7 +2394,7 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
        i40e_status aq_ret = 0;
        int i;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
@@ -2540,7 +2566,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
        struct i40e_pf *pf = vf->pf;
        i40e_status aq_ret = 0;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
@@ -2590,7 +2616,7 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
        u8 cur_pairs = vf->num_queue_pairs;
        struct i40e_pf *pf = vf->pf;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE))
                return -EINVAL;
 
        if (req_pairs > I40E_MAX_VF_QUEUES) {
@@ -2635,7 +2661,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
 
        memset(&stats, 0, sizeof(struct i40e_eth_stats));
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
@@ -2752,7 +2778,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
        i40e_status ret = 0;
        int i;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
            !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
                ret = I40E_ERR_PARAM;
                goto error_param;
@@ -2824,7 +2850,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
        i40e_status ret = 0;
        int i;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
            !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
                ret = I40E_ERR_PARAM;
                goto error_param;
@@ -2968,7 +2994,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
        i40e_status aq_ret = 0;
        int i;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
            !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
@@ -3088,9 +3114,9 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
        struct i40e_vsi *vsi = NULL;
        i40e_status aq_ret = 0;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
            !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
-           (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
+           vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
@@ -3119,9 +3145,9 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
        i40e_status aq_ret = 0;
        u16 i;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
            !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
-           (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
+           vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
@@ -3154,7 +3180,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
        i40e_status aq_ret = 0;
        int len = 0;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
@@ -3190,7 +3216,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
        struct i40e_hw *hw = &pf->hw;
        i40e_status aq_ret = 0;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
@@ -3215,7 +3241,7 @@ static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
        i40e_status aq_ret = 0;
        struct i40e_vsi *vsi;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
@@ -3241,7 +3267,7 @@ static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
        i40e_status aq_ret = 0;
        struct i40e_vsi *vsi;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
@@ -3468,7 +3494,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
        i40e_status aq_ret = 0;
        int i, ret;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
@@ -3599,7 +3625,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
        i40e_status aq_ret = 0;
        int i, ret;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto err_out;
        }
@@ -3708,7 +3734,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
        i40e_status aq_ret = 0;
        u64 speed = 0;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
@@ -3797,11 +3823,6 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
 
        /* set this flag only after making sure all inputs are sane */
        vf->adq_enabled = true;
-       /* num_req_queues is set when user changes number of queues via ethtool
-        * and this causes issue for default VSI(which depends on this variable)
-        * when ADq is enabled, hence reset it.
-        */
-       vf->num_req_queues = 0;
 
        /* reset the VF in order to allocate resources */
        i40e_vc_reset_vf(vf, true);
@@ -3824,7 +3845,7 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
        struct i40e_pf *pf = vf->pf;
        i40e_status aq_ret = 0;
 
-       if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+       if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
                aq_ret = I40E_ERR_PARAM;
                goto err;
        }
index 091e32c1bb46fa12dc4a91afa10293ccac535cfe..49575a640a84c5e6c3abeaaae05427818b06fd87 100644 (file)
@@ -18,6 +18,8 @@
 
 #define I40E_MAX_VF_PROMISC_FLAGS      3
 
+#define I40E_VF_STATE_WAIT_COUNT       20
+
 /* Various queue ctrls */
 enum i40e_queue_ctrl {
        I40E_QUEUE_CTRL_UNKNOWN = 0,
index 0cecaff38d0427c4d1ccf69e02c2d8d007e19ad1..461f5237a2f889f203be69379caf43b541036f56 100644 (file)
@@ -615,23 +615,44 @@ static int iavf_set_ringparam(struct net_device *netdev,
        if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
                return -EINVAL;
 
-       new_tx_count = clamp_t(u32, ring->tx_pending,
-                              IAVF_MIN_TXD,
-                              IAVF_MAX_TXD);
-       new_tx_count = ALIGN(new_tx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+       if (ring->tx_pending > IAVF_MAX_TXD ||
+           ring->tx_pending < IAVF_MIN_TXD ||
+           ring->rx_pending > IAVF_MAX_RXD ||
+           ring->rx_pending < IAVF_MIN_RXD) {
+               netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
+                          ring->tx_pending, ring->rx_pending, IAVF_MIN_TXD,
+                          IAVF_MAX_RXD, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+               return -EINVAL;
+       }
 
-       new_rx_count = clamp_t(u32, ring->rx_pending,
-                              IAVF_MIN_RXD,
-                              IAVF_MAX_RXD);
-       new_rx_count = ALIGN(new_rx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+       new_tx_count = ALIGN(ring->tx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+       if (new_tx_count != ring->tx_pending)
+               netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n",
+                           new_tx_count);
+
+       new_rx_count = ALIGN(ring->rx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+       if (new_rx_count != ring->rx_pending)
+               netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n",
+                           new_rx_count);
 
        /* if nothing to do return success */
        if ((new_tx_count == adapter->tx_desc_count) &&
-           (new_rx_count == adapter->rx_desc_count))
+           (new_rx_count == adapter->rx_desc_count)) {
+               netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
                return 0;
+       }
 
-       adapter->tx_desc_count = new_tx_count;
-       adapter->rx_desc_count = new_rx_count;
+       if (new_tx_count != adapter->tx_desc_count) {
+               netdev_dbg(netdev, "Changing Tx descriptor count from %d to %d\n",
+                          adapter->tx_desc_count, new_tx_count);
+               adapter->tx_desc_count = new_tx_count;
+       }
+
+       if (new_rx_count != adapter->rx_desc_count) {
+               netdev_dbg(netdev, "Changing Rx descriptor count from %d to %d\n",
+                          adapter->rx_desc_count, new_rx_count);
+               adapter->rx_desc_count = new_rx_count;
+       }
 
        if (netif_running(netdev)) {
                adapter->flags |= IAVF_FLAG_RESET_NEEDED;
index 14934a7a13efdc5a2879ffbca446cfacdf2f45cc..4e7c04047f91760a88e7232415588a1f1d91b16d 100644 (file)
@@ -2046,6 +2046,7 @@ static void iavf_watchdog_task(struct work_struct *work)
                }
                adapter->aq_required = 0;
                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+               mutex_unlock(&adapter->crit_lock);
                queue_delayed_work(iavf_wq,
                                   &adapter->watchdog_task,
                                   msecs_to_jiffies(10));
@@ -2076,16 +2077,14 @@ static void iavf_watchdog_task(struct work_struct *work)
                        iavf_detect_recover_hung(&adapter->vsi);
                break;
        case __IAVF_REMOVE:
-               mutex_unlock(&adapter->crit_lock);
-               return;
        default:
+               mutex_unlock(&adapter->crit_lock);
                return;
        }
 
        /* check for hw reset */
        reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
        if (!reg_val) {
-               iavf_change_state(adapter, __IAVF_RESETTING);
                adapter->flags |= IAVF_FLAG_RESET_PENDING;
                adapter->aq_required = 0;
                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
@@ -2248,6 +2247,7 @@ static void iavf_reset_task(struct work_struct *work)
        }
 
        pci_set_master(adapter->pdev);
+       pci_restore_msi_state(adapter->pdev);
 
        if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
                dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
index 1efc635cc0f5e2e774a0e2faee8435f750f168f5..fafe020e46eece18a11f65563ce17b25ba837c82 100644 (file)
@@ -6,6 +6,18 @@
 #include "ice_lib.h"
 #include "ice_dcb_lib.h"
 
+static bool ice_alloc_rx_buf_zc(struct ice_rx_ring *rx_ring)
+{
+       rx_ring->xdp_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->xdp_buf), GFP_KERNEL);
+       return !!rx_ring->xdp_buf;
+}
+
+static bool ice_alloc_rx_buf(struct ice_rx_ring *rx_ring)
+{
+       rx_ring->rx_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
+       return !!rx_ring->rx_buf;
+}
+
 /**
  * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
  * @qs_cfg: gathered variables needed for PF->VSI queues assignment
@@ -492,8 +504,11 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
                        xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
                                         ring->q_index, ring->q_vector->napi.napi_id);
 
+               kfree(ring->rx_buf);
                ring->xsk_pool = ice_xsk_pool(ring);
                if (ring->xsk_pool) {
+                       if (!ice_alloc_rx_buf_zc(ring))
+                               return -ENOMEM;
                        xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
 
                        ring->rx_buf_len =
@@ -508,6 +523,8 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
                        dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
                                 ring->q_index);
                } else {
+                       if (!ice_alloc_rx_buf(ring))
+                               return -ENOMEM;
                        if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
                                /* coverity[check_return] */
                                xdp_rxq_info_reg(&ring->xdp_rxq,
index 7fdeb411b6df44063b9bf74db5840d9d0a4c51ed..3eb01731e496b34d2ac3d681b6a5c25abfbe6a54 100644 (file)
@@ -97,6 +97,9 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
 
        new_cfg->etscfg.maxtcs = pf->hw.func_caps.common_cap.maxtc;
 
+       if (!bwcfg)
+               new_cfg->etscfg.tcbwtable[0] = 100;
+
        if (!bwrec)
                new_cfg->etsrec.tcbwtable[0] = 100;
 
@@ -167,15 +170,18 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
        if (mode == pf->dcbx_cap)
                return ICE_DCB_NO_HW_CHG;
 
-       pf->dcbx_cap = mode;
        qos_cfg = &pf->hw.port_info->qos_cfg;
-       if (mode & DCB_CAP_DCBX_VER_CEE) {
-               if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP)
-                       return ICE_DCB_NO_HW_CHG;
+
+       /* DSCP configuration is not DCBx negotiated */
+       if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP)
+               return ICE_DCB_NO_HW_CHG;
+
+       pf->dcbx_cap = mode;
+
+       if (mode & DCB_CAP_DCBX_VER_CEE)
                qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE;
-       } else {
+       else
                qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE;
-       }
 
        dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode);
        return ICE_DCB_HW_CHG_RST;
index 38960bcc384c03f94439977feefb27cdb96aff7a..b6e7f47c8c78fd68ddf7c4f863a8383d1e41e6ca 100644 (file)
@@ -1268,7 +1268,7 @@ ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input,
                bool is_tun = tun == ICE_FD_HW_SEG_TUN;
                int err;
 
-               if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num))
+               if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num, TNL_ALL))
                        continue;
                err = ice_fdir_write_fltr(pf, input, add, is_tun);
                if (err)
@@ -1652,7 +1652,7 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
        }
 
        /* return error if not an update and no available filters */
-       fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port) ? 2 : 1;
+       fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port, TNL_ALL) ? 2 : 1;
        if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) &&
            ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) {
                dev_err(dev, "Failed to add filter.  The maximum number of flow director filters has been reached.\n");
index cbd8424631e32a0a854bd468da9a70b41fb88ea6..4dca009bdd50f2fb7771ab5ac25d0b7863119d01 100644 (file)
@@ -924,7 +924,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
                memcpy(pkt, ice_fdir_pkt[idx].pkt, ice_fdir_pkt[idx].pkt_len);
                loc = pkt;
        } else {
-               if (!ice_get_open_tunnel_port(hw, &tnl_port))
+               if (!ice_get_open_tunnel_port(hw, &tnl_port, TNL_ALL))
                        return ICE_ERR_DOES_NOT_EXIST;
                if (!ice_fdir_pkt[idx].tun_pkt)
                        return ICE_ERR_PARAM;
index 23cfcceb1536dcac3cebf75cad36e58dd622c60f..6ad1c255972439a0dc60e256428f062e5138bf13 100644 (file)
@@ -1899,9 +1899,11 @@ static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
  * ice_get_open_tunnel_port - retrieve an open tunnel port
  * @hw: pointer to the HW structure
  * @port: returns open port
+ * @type: type of tunnel, can be TNL_LAST if it doesn't matter
  */
 bool
-ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port)
+ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port,
+                        enum ice_tunnel_type type)
 {
        bool res = false;
        u16 i;
@@ -1909,7 +1911,8 @@ ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port)
        mutex_lock(&hw->tnl_lock);
 
        for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
-               if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port) {
+               if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port &&
+                   (type == TNL_LAST || type == hw->tnl.tbl[i].type)) {
                        *port = hw->tnl.tbl[i].port;
                        res = true;
                        break;
index 344c2637facda3c821f1fed4f81ae13214334c92..a2863f38fd1fd5f6ef32d8375a5acbb31e4c2be9 100644 (file)
@@ -33,7 +33,8 @@ enum ice_status
 ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
                   unsigned long *bm, struct list_head *fv_list);
 bool
-ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port);
+ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port,
+                        enum ice_tunnel_type type);
 int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
                            unsigned int idx, struct udp_tunnel_info *ti);
 int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
index 4d1fc48c9744b920609f9aa2f1059c60c62abeb1..73c61cdb036f9a86302aaaf76b9f367fe1099129 100644 (file)
@@ -5881,6 +5881,9 @@ static int ice_up_complete(struct ice_vsi *vsi)
                netif_carrier_on(vsi->netdev);
        }
 
+       /* clear this now, and the first stats read will be used as baseline */
+       vsi->stat_offsets_loaded = false;
+
        ice_service_task_schedule(pf);
 
        return 0;
@@ -5927,14 +5930,15 @@ ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats st
 /**
  * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
  * @vsi: the VSI to be updated
+ * @vsi_stats: the stats struct to be updated
  * @rings: rings to work on
  * @count: number of rings
  */
 static void
-ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_tx_ring **rings,
-                            u16 count)
+ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
+                            struct rtnl_link_stats64 *vsi_stats,
+                            struct ice_tx_ring **rings, u16 count)
 {
-       struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
        u16 i;
 
        for (i = 0; i < count; i++) {
@@ -5958,15 +5962,13 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_tx_ring **rings,
  */
 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
 {
-       struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
+       struct rtnl_link_stats64 *vsi_stats;
        u64 pkts, bytes;
        int i;
 
-       /* reset netdev stats */
-       vsi_stats->tx_packets = 0;
-       vsi_stats->tx_bytes = 0;
-       vsi_stats->rx_packets = 0;
-       vsi_stats->rx_bytes = 0;
+       vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
+       if (!vsi_stats)
+               return;
 
        /* reset non-netdev (extended) stats */
        vsi->tx_restart = 0;
@@ -5978,7 +5980,8 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
        rcu_read_lock();
 
        /* update Tx rings counters */
-       ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq);
+       ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
+                                    vsi->num_txq);
 
        /* update Rx rings counters */
        ice_for_each_rxq(vsi, i) {
@@ -5993,10 +5996,17 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
 
        /* update XDP Tx rings counters */
        if (ice_is_xdp_ena_vsi(vsi))
-               ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings,
+               ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
                                             vsi->num_xdp_txq);
 
        rcu_read_unlock();
+
+       vsi->net_stats.tx_packets = vsi_stats->tx_packets;
+       vsi->net_stats.tx_bytes = vsi_stats->tx_bytes;
+       vsi->net_stats.rx_packets = vsi_stats->rx_packets;
+       vsi->net_stats.rx_bytes = vsi_stats->rx_bytes;
+
+       kfree(vsi_stats);
 }
 
 /**
index bf7247c6f58e259406e20cb456c750a8997a44d1..442b031b0edc04b5ffff932e65bb63d927dbddd8 100644 (file)
@@ -705,7 +705,7 @@ static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
                scaled_ppm = -scaled_ppm;
        }
 
-       while ((u64)scaled_ppm > div_u64(U64_MAX, incval)) {
+       while ((u64)scaled_ppm > div64_u64(U64_MAX, incval)) {
                /* handle overflow by scaling down the scaled_ppm and
                 * the divisor, losing some precision
                 */
@@ -1540,19 +1540,16 @@ static void ice_ptp_tx_tstamp_work(struct kthread_work *work)
                if (err)
                        continue;
 
-               /* Check if the timestamp is valid */
-               if (!(raw_tstamp & ICE_PTP_TS_VALID))
+               /* Check if the timestamp is invalid or stale */
+               if (!(raw_tstamp & ICE_PTP_TS_VALID) ||
+                   raw_tstamp == tx->tstamps[idx].cached_tstamp)
                        continue;
 
-               /* clear the timestamp register, so that it won't show valid
-                * again when re-used.
-                */
-               ice_clear_phy_tstamp(hw, tx->quad, phy_idx);
-
                /* The timestamp is valid, so we'll go ahead and clear this
                 * index and then send the timestamp up to the stack.
                 */
                spin_lock(&tx->lock);
+               tx->tstamps[idx].cached_tstamp = raw_tstamp;
                clear_bit(idx, tx->in_use);
                skb = tx->tstamps[idx].skb;
                tx->tstamps[idx].skb = NULL;
index f71ad317d6c8f033c66e5e424669173886d3764a..53c15fc9d9961f3e540b33b33d7278f12ffc6001 100644 (file)
@@ -55,15 +55,21 @@ struct ice_perout_channel {
  * struct ice_tx_tstamp - Tracking for a single Tx timestamp
  * @skb: pointer to the SKB for this timestamp request
  * @start: jiffies when the timestamp was first requested
+ * @cached_tstamp: last read timestamp
  *
  * This structure tracks a single timestamp request. The SKB pointer is
  * provided when initiating a request. The start time is used to ensure that
  * we discard old requests that were not fulfilled within a 2 second time
  * window.
+ * Timestamp values in the PHY are read only and do not get cleared except at
+ * hardware reset or when a new timestamp value is captured. The cached_tstamp
+ * field is used to detect the case where a new timestamp has not yet been
+ * captured, ensuring that we avoid sending stale timestamp data to the stack.
  */
 struct ice_tx_tstamp {
        struct sk_buff *skb;
        unsigned long start;
+       u64 cached_tstamp;
 };
 
 /**
index 793f4a9fc2cdb03fc233c4800eb52f546bdb89ac..183d9303389064dcd819eb0ed1f9173d333e6794 100644 (file)
@@ -3796,10 +3796,13 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
  * ice_find_recp - find a recipe
  * @hw: pointer to the hardware structure
  * @lkup_exts: extension sequence to match
+ * @tun_type: type of recipe tunnel
  *
  * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
  */
-static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
+static u16
+ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
+             enum ice_sw_tunnel_type tun_type)
 {
        bool refresh_required = true;
        struct ice_sw_recipe *recp;
@@ -3860,8 +3863,9 @@ static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
                        }
                        /* If for "i"th recipe the found was never set to false
                         * then it means we found our match
+                        * Also tun type of recipe needs to be checked
                         */
-                       if (found)
+                       if (found && recp[i].tun_type == tun_type)
                                return i; /* Return the recipe ID */
                }
        }
@@ -4651,11 +4655,12 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
        }
 
        /* Look for a recipe which matches our requested fv / mask list */
-       *rid = ice_find_recp(hw, lkup_exts);
+       *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
        if (*rid < ICE_MAX_NUM_RECIPES)
                /* Success if found a recipe that match the existing criteria */
                goto err_unroll;
 
+       rm->tun_type = rinfo->tun_type;
        /* Recipe we need does not exist, add a recipe */
        status = ice_add_sw_recipe(hw, rm, profiles);
        if (status)
@@ -4958,11 +4963,13 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
 
        switch (tun_type) {
        case ICE_SW_TUN_VXLAN:
+               if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
+                       return ICE_ERR_CFG;
+               break;
        case ICE_SW_TUN_GENEVE:
-               if (!ice_get_open_tunnel_port(hw, &open_port))
+               if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
                        return ICE_ERR_CFG;
                break;
-
        default:
                /* Nothing needs to be done for this tunnel type */
                return 0;
@@ -5555,7 +5562,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
        if (status)
                return status;
 
-       rid = ice_find_recp(hw, &lkup_exts);
+       rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
        /* If did not find a recipe that match the existing criteria */
        if (rid == ICE_MAX_NUM_RECIPES)
                return ICE_ERR_PARAM;
index e5d23feb6701772e921e9e1df835a094359ea6ac..25cca5c4ae575bbc237ddaa63c434231ce074d90 100644 (file)
@@ -74,21 +74,13 @@ static enum ice_protocol_type ice_proto_type_from_ipv6(bool inner)
        return inner ? ICE_IPV6_IL : ICE_IPV6_OFOS;
 }
 
-static enum ice_protocol_type
-ice_proto_type_from_l4_port(bool inner, u16 ip_proto)
+static enum ice_protocol_type ice_proto_type_from_l4_port(u16 ip_proto)
 {
-       if (inner) {
-               switch (ip_proto) {
-               case IPPROTO_UDP:
-                       return ICE_UDP_ILOS;
-               }
-       } else {
-               switch (ip_proto) {
-               case IPPROTO_TCP:
-                       return ICE_TCP_IL;
-               case IPPROTO_UDP:
-                       return ICE_UDP_OF;
-               }
+       switch (ip_proto) {
+       case IPPROTO_TCP:
+               return ICE_TCP_IL;
+       case IPPROTO_UDP:
+               return ICE_UDP_ILOS;
        }
 
        return 0;
@@ -191,8 +183,9 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
                i++;
        }
 
-       if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) {
-               list[i].type = ice_proto_type_from_l4_port(false, hdr->l3_key.ip_proto);
+       if ((flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) &&
+           hdr->l3_key.ip_proto == IPPROTO_UDP) {
+               list[i].type = ICE_UDP_OF;
                list[i].h_u.l4_hdr.dst_port = hdr->l4_key.dst_port;
                list[i].m_u.l4_hdr.dst_port = hdr->l4_mask.dst_port;
                i++;
@@ -317,7 +310,7 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
                     ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
                struct ice_tc_l4_hdr *l4_key, *l4_mask;
 
-               list[i].type = ice_proto_type_from_l4_port(inner, headers->l3_key.ip_proto);
+               list[i].type = ice_proto_type_from_l4_port(headers->l3_key.ip_proto);
                l4_key = &headers->l4_key;
                l4_mask = &headers->l4_mask;
 
@@ -802,7 +795,8 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
                headers->l3_mask.ttl = match.mask->ttl;
        }
 
-       if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
+       if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) &&
+           fltr->tunnel_type != TNL_VXLAN && fltr->tunnel_type != TNL_GENEVE) {
                struct flow_match_ports match;
 
                flow_rule_match_enc_ports(rule, &match);
index bc3ba19dc88f8a79de13cb7b61e4f900eeec347a..dccf09eefc75492ddd41fc3848b936818d550611 100644 (file)
@@ -419,7 +419,10 @@ void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
        }
 
 rx_skip_free:
-       memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
+       if (rx_ring->xsk_pool)
+               memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
+       else
+               memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));
 
        /* Zero out the descriptor ring */
        size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
@@ -446,8 +449,13 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
                if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
                        xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
        rx_ring->xdp_prog = NULL;
-       devm_kfree(rx_ring->dev, rx_ring->rx_buf);
-       rx_ring->rx_buf = NULL;
+       if (rx_ring->xsk_pool) {
+               kfree(rx_ring->xdp_buf);
+               rx_ring->xdp_buf = NULL;
+       } else {
+               kfree(rx_ring->rx_buf);
+               rx_ring->rx_buf = NULL;
+       }
 
        if (rx_ring->desc) {
                size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
@@ -475,8 +483,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
        /* warn if we are about to overwrite the pointer */
        WARN_ON(rx_ring->rx_buf);
        rx_ring->rx_buf =
-               devm_kcalloc(dev, sizeof(*rx_ring->rx_buf), rx_ring->count,
-                            GFP_KERNEL);
+               kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
        if (!rx_ring->rx_buf)
                return -ENOMEM;
 
@@ -505,7 +512,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
        return 0;
 
 err:
-       devm_kfree(dev, rx_ring->rx_buf);
+       kfree(rx_ring->rx_buf);
        rx_ring->rx_buf = NULL;
        return -ENOMEM;
 }
index c56dd17499031614b85baad828447e198f2d2854..b7b3bd4816f0d1ffa5a34e7e4e5d75193d759ccd 100644 (file)
@@ -24,7 +24,6 @@
 #define ICE_MAX_DATA_PER_TXD_ALIGNED \
        (~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)
 
-#define ICE_RX_BUF_WRITE       16      /* Must be power of 2 */
 #define ICE_MAX_TXQ_PER_TXQG   128
 
 /* Attempt to maximize the headroom available for incoming frames. We use a 2K
index 217ff5e9a6f1434d00c67b8945048411ab315024..6427e7ec93de6a6d11d65c9a44c3382c5610dcdf 100644 (file)
@@ -1617,6 +1617,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
                ice_vc_set_default_allowlist(vf);
 
                ice_vf_fdir_exit(vf);
+               ice_vf_fdir_init(vf);
                /* clean VF control VSI when resetting VFs since it should be
                 * setup only when VF creates its first FDIR rule.
                 */
@@ -1747,6 +1748,7 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
        }
 
        ice_vf_fdir_exit(vf);
+       ice_vf_fdir_init(vf);
        /* clean VF control VSI when resetting VF since it should be setup
         * only when VF creates its first FDIR rule.
         */
@@ -2021,6 +2023,10 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
        if (ret)
                goto err_unroll_sriov;
 
+       /* rearm global interrupts */
+       if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
+               ice_irq_dynamic_ena(hw, NULL, NULL);
+
        return 0;
 
 err_unroll_sriov:
index ff55cb415b110fb9e63bd8adc6a401e1c78887ad..c895351b25e0aebb72b6aa7cb466cbd9e52fa503 100644 (file)
 #include "ice_txrx_lib.h"
 #include "ice_lib.h"
 
+static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
+{
+       return &rx_ring->xdp_buf[idx];
+}
+
 /**
  * ice_qp_reset_stats - Resets all stats for rings of given index
  * @vsi: VSI that contains rings of interest
@@ -372,7 +377,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
        dma_addr_t dma;
 
        rx_desc = ICE_RX_DESC(rx_ring, ntu);
-       xdp = &rx_ring->xdp_buf[ntu];
+       xdp = ice_xdp_buf(rx_ring, ntu);
 
        nb_buffs = min_t(u16, count, rx_ring->count - ntu);
        nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
@@ -383,20 +388,16 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
        while (i--) {
                dma = xsk_buff_xdp_get_dma(*xdp);
                rx_desc->read.pkt_addr = cpu_to_le64(dma);
+               rx_desc->wb.status_error0 = 0;
 
                rx_desc++;
                xdp++;
        }
 
        ntu += nb_buffs;
-       if (ntu == rx_ring->count) {
-               rx_desc = ICE_RX_DESC(rx_ring, 0);
-               xdp = rx_ring->xdp_buf;
+       if (ntu == rx_ring->count)
                ntu = 0;
-       }
 
-       /* clear the status bits for the next_to_use descriptor */
-       rx_desc->wb.status_error0 = 0;
        ice_release_rx_desc(rx_ring, ntu);
 
        return count == nb_buffs;
@@ -418,19 +419,18 @@ static void ice_bump_ntc(struct ice_rx_ring *rx_ring)
 /**
  * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
  * @rx_ring: Rx ring
- * @xdp_arr: Pointer to the SW ring of xdp_buff pointers
+ * @xdp: Pointer to XDP buffer
  *
  * This function allocates a new skb from a zero-copy Rx buffer.
  *
  * Returns the skb on success, NULL on failure.
  */
 static struct sk_buff *
-ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff **xdp_arr)
+ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
 {
-       struct xdp_buff *xdp = *xdp_arr;
+       unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
        unsigned int metasize = xdp->data - xdp->data_meta;
        unsigned int datasize = xdp->data_end - xdp->data;
-       unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
        struct sk_buff *skb;
 
        skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
@@ -444,7 +444,6 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff **xdp_arr)
                skb_metadata_set(skb, metasize);
 
        xsk_buff_free(xdp);
-       *xdp_arr = NULL;
        return skb;
 }
 
@@ -506,7 +505,6 @@ out_failure:
 int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
 {
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
-       u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
        struct ice_tx_ring *xdp_ring;
        unsigned int xdp_xmit = 0;
        struct bpf_prog *xdp_prog;
@@ -521,7 +519,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
        while (likely(total_rx_packets < (unsigned int)budget)) {
                union ice_32b_rx_flex_desc *rx_desc;
                unsigned int size, xdp_res = 0;
-               struct xdp_buff **xdp;
+               struct xdp_buff *xdp;
                struct sk_buff *skb;
                u16 stat_err_bits;
                u16 vlan_tag = 0;
@@ -539,31 +537,35 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
                 */
                dma_rmb();
 
+               xdp = *ice_xdp_buf(rx_ring, rx_ring->next_to_clean);
+
                size = le16_to_cpu(rx_desc->wb.pkt_len) &
                                   ICE_RX_FLX_DESC_PKT_LEN_M;
-               if (!size)
-                       break;
+               if (!size) {
+                       xdp->data = NULL;
+                       xdp->data_end = NULL;
+                       xdp->data_hard_start = NULL;
+                       xdp->data_meta = NULL;
+                       goto construct_skb;
+               }
 
-               xdp = &rx_ring->xdp_buf[rx_ring->next_to_clean];
-               xsk_buff_set_size(*xdp, size);
-               xsk_buff_dma_sync_for_cpu(*xdp, rx_ring->xsk_pool);
+               xsk_buff_set_size(xdp, size);
+               xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);
 
-               xdp_res = ice_run_xdp_zc(rx_ring, *xdp, xdp_prog, xdp_ring);
+               xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring);
                if (xdp_res) {
                        if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
                                xdp_xmit |= xdp_res;
                        else
-                               xsk_buff_free(*xdp);
+                               xsk_buff_free(xdp);
 
-                       *xdp = NULL;
                        total_rx_bytes += size;
                        total_rx_packets++;
-                       cleaned_count++;
 
                        ice_bump_ntc(rx_ring);
                        continue;
                }
-
+construct_skb:
                /* XDP_PASS path */
                skb = ice_construct_skb_zc(rx_ring, xdp);
                if (!skb) {
@@ -571,7 +573,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
                        break;
                }
 
-               cleaned_count++;
                ice_bump_ntc(rx_ring);
 
                if (eth_skb_pad(skb)) {
@@ -593,8 +594,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
                ice_receive_skb(rx_ring, skb, vlan_tag);
        }
 
-       if (cleaned_count >= ICE_RX_BUF_WRITE)
-               failure = !ice_alloc_rx_bufs_zc(rx_ring, cleaned_count);
+       failure = !ice_alloc_rx_bufs_zc(rx_ring, ICE_DESC_UNUSED(rx_ring));
 
        ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
        ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
@@ -810,15 +810,14 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
  */
 void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
 {
-       u16 i;
-
-       for (i = 0; i < rx_ring->count; i++) {
-               struct xdp_buff **xdp = &rx_ring->xdp_buf[i];
+       u16 count_mask = rx_ring->count - 1;
+       u16 ntc = rx_ring->next_to_clean;
+       u16 ntu = rx_ring->next_to_use;
 
-               if (!xdp)
-                       continue;
+       for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+               struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);
 
-               *xdp = NULL;
+               xsk_buff_free(xdp);
        }
 }
 
index fd54d3ef890bc191503d5ace25e91ead932a2098..446894dde18204a52944753a0d1a69683ef77461 100644 (file)
@@ -7648,6 +7648,20 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
        struct vf_mac_filter *entry = NULL;
        int ret = 0;
 
+       if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
+           !vf_data->trusted) {
+               dev_warn(&pdev->dev,
+                        "VF %d requested MAC filter but is administratively denied\n",
+                         vf);
+               return -EINVAL;
+       }
+       if (!is_valid_ether_addr(addr)) {
+               dev_warn(&pdev->dev,
+                        "VF %d attempted to set invalid MAC filter\n",
+                         vf);
+               return -EINVAL;
+       }
+
        switch (info) {
        case E1000_VF_MAC_FILTER_CLR:
                /* remove all unicast MAC filters related to the current VF */
@@ -7661,20 +7675,6 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
                }
                break;
        case E1000_VF_MAC_FILTER_ADD:
-               if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
-                   !vf_data->trusted) {
-                       dev_warn(&pdev->dev,
-                                "VF %d requested MAC filter but is administratively denied\n",
-                                vf);
-                       return -EINVAL;
-               }
-               if (!is_valid_ether_addr(addr)) {
-                       dev_warn(&pdev->dev,
-                                "VF %d attempted to set invalid MAC filter\n",
-                                vf);
-                       return -EINVAL;
-               }
-
                /* try to find empty slot in the list */
                list_for_each(pos, &adapter->vf_macs.l) {
                        entry = list_entry(pos, struct vf_mac_filter, l);
@@ -9254,7 +9254,7 @@ static int __maybe_unused igb_suspend(struct device *dev)
        return __igb_shutdown(to_pci_dev(dev), NULL, 0);
 }
 
-static int __maybe_unused igb_resume(struct device *dev)
+static int __maybe_unused __igb_resume(struct device *dev, bool rpm)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct net_device *netdev = pci_get_drvdata(pdev);
@@ -9297,17 +9297,24 @@ static int __maybe_unused igb_resume(struct device *dev)
 
        wr32(E1000_WUS, ~0);
 
-       rtnl_lock();
+       if (!rpm)
+               rtnl_lock();
        if (!err && netif_running(netdev))
                err = __igb_open(netdev, true);
 
        if (!err)
                netif_device_attach(netdev);
-       rtnl_unlock();
+       if (!rpm)
+               rtnl_unlock();
 
        return err;
 }
 
+static int __maybe_unused igb_resume(struct device *dev)
+{
+       return __igb_resume(dev, false);
+}
+
 static int __maybe_unused igb_runtime_idle(struct device *dev)
 {
        struct net_device *netdev = dev_get_drvdata(dev);
@@ -9326,7 +9333,7 @@ static int __maybe_unused igb_runtime_suspend(struct device *dev)
 
 static int __maybe_unused igb_runtime_resume(struct device *dev)
 {
-       return igb_resume(dev);
+       return __igb_resume(dev, true);
 }
 
 static void igb_shutdown(struct pci_dev *pdev)
@@ -9442,7 +9449,7 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
  *  @pdev: Pointer to PCI device
  *
  *  Restart the card from scratch, as if from a cold-boot. Implementation
- *  resembles the first-half of the igb_resume routine.
+ *  resembles the first-half of the __igb_resume routine.
  **/
 static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
 {
@@ -9482,7 +9489,7 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
  *
  *  This callback is called when the error recovery driver tells us that
  *  its OK to resume normal operation. Implementation resembles the
- *  second-half of the igb_resume routine.
+ *  second-half of the __igb_resume routine.
  */
 static void igb_io_resume(struct pci_dev *pdev)
 {
index 74ccd622251a2396d3628dc38f5f287534de3eec..4d988da68394dc1c0efc63cb56614231c8d39bc2 100644 (file)
@@ -2859,6 +2859,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        return 0;
 
 err_hw_init:
+       netif_napi_del(&adapter->rx_ring->napi);
        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);
 err_sw_init:
index b2ef9fde97b38a7def197b6b844b5e6909c54d96..b6807e16eea9393304c7abb68f90648a7bd1c968 100644 (file)
@@ -636,7 +636,7 @@ s32 igc_set_ltr_i225(struct igc_hw *hw, bool link)
                ltrv = rd32(IGC_LTRMAXV);
                if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) {
                        ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max |
-                              (scale_min << IGC_LTRMAXV_SCALE_SHIFT);
+                              (scale_max << IGC_LTRMAXV_SCALE_SHIFT);
                        wr32(IGC_LTRMAXV, ltrv);
                }
        }
index 0f9f022260d70f0653960690a19e89d6ac4bed8c..45e2ec4d264d9aa2b300e14429b857271a9ef14c 100644 (file)
@@ -5531,6 +5531,10 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
        if (!speed && hw->mac.ops.get_link_capabilities) {
                ret = hw->mac.ops.get_link_capabilities(hw, &speed,
                                                        &autoneg);
+               /* remove NBASE-T speeds from default autonegotiation
+                * to accommodate broken network switches in the field
+                * which cannot cope with advertised NBASE-T speeds
+                */
                speed &= ~(IXGBE_LINK_SPEED_5GB_FULL |
                           IXGBE_LINK_SPEED_2_5GB_FULL);
        }
index 9724ffb165189e4629bc8665390d6b939896cffe..e4b50c7781ffaca5cbda799a0c3633d82648e6d8 100644 (file)
@@ -3405,6 +3405,9 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
        /* flush pending Tx transactions */
        ixgbe_clear_tx_pending(hw);
 
+       /* set MDIO speed before talking to the PHY in case it's the 1st time */
+       ixgbe_set_mdio_speed(hw);
+
        /* PHY ops must be identified and initialized prior to reset */
        status = hw->phy.ops.init(hw);
        if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
index 0da09ea81980981b809f9b2c7eb2956e0ae4198e..96bd6f2b21ed9ba57b265ac2dc0d332074992c09 100644 (file)
@@ -71,6 +71,8 @@ struct xrx200_priv {
        struct xrx200_chan chan_tx;
        struct xrx200_chan chan_rx;
 
+       u16 rx_buf_size;
+
        struct net_device *net_dev;
        struct device *dev;
 
@@ -97,6 +99,16 @@ static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set,
        xrx200_pmac_w32(priv, val, offset);
 }
 
+static int xrx200_max_frame_len(int mtu)
+{
+       return VLAN_ETH_HLEN + mtu;
+}
+
+static int xrx200_buffer_size(int mtu)
+{
+       return round_up(xrx200_max_frame_len(mtu), 4 * XRX200_DMA_BURST_LEN);
+}
+
 /* drop all the packets from the DMA ring */
 static void xrx200_flush_dma(struct xrx200_chan *ch)
 {
@@ -109,8 +121,7 @@ static void xrx200_flush_dma(struct xrx200_chan *ch)
                        break;
 
                desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
-                           (ch->priv->net_dev->mtu + VLAN_ETH_HLEN +
-                            ETH_FCS_LEN);
+                           ch->priv->rx_buf_size;
                ch->dma.desc++;
                ch->dma.desc %= LTQ_DESC_NUM;
        }
@@ -158,21 +169,21 @@ static int xrx200_close(struct net_device *net_dev)
 
 static int xrx200_alloc_skb(struct xrx200_chan *ch)
 {
-       int len = ch->priv->net_dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
        struct sk_buff *skb = ch->skb[ch->dma.desc];
+       struct xrx200_priv *priv = ch->priv;
        dma_addr_t mapping;
        int ret = 0;
 
-       ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
-                                                         len);
+       ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(priv->net_dev,
+                                                         priv->rx_buf_size);
        if (!ch->skb[ch->dma.desc]) {
                ret = -ENOMEM;
                goto skip;
        }
 
-       mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
-                                len, DMA_FROM_DEVICE);
-       if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
+       mapping = dma_map_single(priv->dev, ch->skb[ch->dma.desc]->data,
+                                priv->rx_buf_size, DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(priv->dev, mapping))) {
                dev_kfree_skb_any(ch->skb[ch->dma.desc]);
                ch->skb[ch->dma.desc] = skb;
                ret = -ENOMEM;
@@ -184,7 +195,7 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
        wmb();
 skip:
        ch->dma.desc_base[ch->dma.desc].ctl =
-               LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | len;
+               LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | priv->rx_buf_size;
 
        return ret;
 }
@@ -356,6 +367,7 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
        int ret = 0;
 
        net_dev->mtu = new_mtu;
+       priv->rx_buf_size = xrx200_buffer_size(new_mtu);
 
        if (new_mtu <= old_mtu)
                return ret;
@@ -375,6 +387,7 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
                ret = xrx200_alloc_skb(ch_rx);
                if (ret) {
                        net_dev->mtu = old_mtu;
+                       priv->rx_buf_size = xrx200_buffer_size(old_mtu);
                        break;
                }
                dev_kfree_skb_any(skb);
@@ -505,7 +518,8 @@ static int xrx200_probe(struct platform_device *pdev)
        net_dev->netdev_ops = &xrx200_netdev_ops;
        SET_NETDEV_DEV(net_dev, dev);
        net_dev->min_mtu = ETH_ZLEN;
-       net_dev->max_mtu = XRX200_DMA_DATA_LEN - VLAN_ETH_HLEN - ETH_FCS_LEN;
+       net_dev->max_mtu = XRX200_DMA_DATA_LEN - xrx200_max_frame_len(0);
+       priv->rx_buf_size = xrx200_buffer_size(ETH_DATA_LEN);
 
        /* load the memory ranges */
        priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
index ce486e16489c59609c6c07c14fe9f2f4f5981b13..6da8a595026bbbb08c8a99e288579455316f31ba 100644 (file)
@@ -2960,11 +2960,11 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
        mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
 
        if (priv->percpu_pools) {
-               err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->id, 0);
+               err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->logic_rxq, 0);
                if (err < 0)
                        goto err_free_dma;
 
-               err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->id, 0);
+               err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->logic_rxq, 0);
                if (err < 0)
                        goto err_unregister_rxq_short;
 
@@ -7458,7 +7458,7 @@ static int mvpp2_probe(struct platform_device *pdev)
 
        shared = num_present_cpus() - priv->nthreads;
        if (shared > 0)
-               bitmap_fill(&priv->lock_map,
+               bitmap_set(&priv->lock_map, 0,
                            min_t(int, shared, MVPP2_MAX_THREADS));
 
        for (i = 0; i < MVPP2_MAX_THREADS; i++) {
index cb56e171ddd4c27d420f8f38604f51c1275aa3db..3ca6b942ebe2539ab9fc2d10f8cbe48934157588 100644 (file)
@@ -2341,7 +2341,7 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
                        goto free_regions;
                break;
        default:
-               return err;
+               goto free_regions;
        }
 
        mw->mbox_wq = alloc_workqueue(name,
index 0ef68fdd1f26bf4e80f62da41c0dd8e9471da228..61c20907315f4d7cfe86e19f927954c1dbc5bc40 100644 (file)
@@ -5,6 +5,8 @@
  *
  */
 
+#include <linux/module.h>
+
 #include "otx2_common.h"
 #include "otx2_ptp.h"
 
index 4369a3ffad45b7c660c5a93ecb20225e5afa7f3c..c687dc9aa97372a8c33d009ff18be2de14e75e96 100644 (file)
@@ -54,12 +54,14 @@ int prestera_port_pvid_set(struct prestera_port *port, u16 vid)
 struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
                                                 u32 dev_id, u32 hw_id)
 {
-       struct prestera_port *port = NULL;
+       struct prestera_port *port = NULL, *tmp;
 
        read_lock(&sw->port_list_lock);
-       list_for_each_entry(port, &sw->port_list, list) {
-               if (port->dev_id == dev_id && port->hw_id == hw_id)
+       list_for_each_entry(tmp, &sw->port_list, list) {
+               if (tmp->dev_id == dev_id && tmp->hw_id == hw_id) {
+                       port = tmp;
                        break;
+               }
        }
        read_unlock(&sw->port_list_lock);
 
@@ -68,12 +70,14 @@ struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
 
 struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id)
 {
-       struct prestera_port *port = NULL;
+       struct prestera_port *port = NULL, *tmp;
 
        read_lock(&sw->port_list_lock);
-       list_for_each_entry(port, &sw->port_list, list) {
-               if (port->id == id)
+       list_for_each_entry(tmp, &sw->port_list, list) {
+               if (tmp->id == id) {
+                       port = tmp;
                        break;
+               }
        }
        read_unlock(&sw->port_list_lock);
 
@@ -764,23 +768,27 @@ static int prestera_netdev_port_event(struct net_device *lower,
                                      struct net_device *dev,
                                      unsigned long event, void *ptr)
 {
-       struct netdev_notifier_changeupper_info *info = ptr;
+       struct netdev_notifier_info *info = ptr;
+       struct netdev_notifier_changeupper_info *cu_info;
        struct prestera_port *port = netdev_priv(dev);
        struct netlink_ext_ack *extack;
        struct net_device *upper;
 
-       extack = netdev_notifier_info_to_extack(&info->info);
-       upper = info->upper_dev;
+       extack = netdev_notifier_info_to_extack(info);
+       cu_info = container_of(info,
+                              struct netdev_notifier_changeupper_info,
+                              info);
 
        switch (event) {
        case NETDEV_PRECHANGEUPPER:
+               upper = cu_info->upper_dev;
                if (!netif_is_bridge_master(upper) &&
                    !netif_is_lag_master(upper)) {
                        NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
                        return -EINVAL;
                }
 
-               if (!info->linking)
+               if (!cu_info->linking)
                        break;
 
                if (netdev_has_any_upper_dev(upper)) {
@@ -789,7 +797,7 @@ static int prestera_netdev_port_event(struct net_device *lower,
                }
 
                if (netif_is_lag_master(upper) &&
-                   !prestera_lag_master_check(upper, info->upper_info, extack))
+                   !prestera_lag_master_check(upper, cu_info->upper_info, extack))
                        return -EOPNOTSUPP;
                if (netif_is_lag_master(upper) && vlan_uses_dev(dev)) {
                        NL_SET_ERR_MSG_MOD(extack,
@@ -805,14 +813,15 @@ static int prestera_netdev_port_event(struct net_device *lower,
                break;
 
        case NETDEV_CHANGEUPPER:
+               upper = cu_info->upper_dev;
                if (netif_is_bridge_master(upper)) {
-                       if (info->linking)
+                       if (cu_info->linking)
                                return prestera_bridge_port_join(upper, port,
                                                                 extack);
                        else
                                prestera_bridge_port_leave(upper, port);
                } else if (netif_is_lag_master(upper)) {
-                       if (info->linking)
+                       if (cu_info->linking)
                                return prestera_lag_port_add(port, upper);
                        else
                                prestera_lag_port_del(port);
index 066d79e4ecfc28d8bfc5fa7ced33f3192a339cb9..10238bedd694fe7752edb5734a2dc1aa4a5183b3 100644 (file)
@@ -670,7 +670,7 @@ void __init mlx4_en_init_ptys2ethtool_map(void)
        MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_T, SPEED_1000,
                                       ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
        MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_CX_SGMII, SPEED_1000,
-                                      ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
+                                      ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
        MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_KX, SPEED_1000,
                                       ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
        MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_T, SPEED_10000,
@@ -682,9 +682,9 @@ void __init mlx4_en_init_ptys2ethtool_map(void)
        MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KR, SPEED_10000,
                                       ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
        MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CR, SPEED_10000,
-                                      ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+                                      ETHTOOL_LINK_MODE_10000baseCR_Full_BIT);
        MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_SR, SPEED_10000,
-                                      ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+                                      ETHTOOL_LINK_MODE_10000baseSR_Full_BIT);
        MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_20GBASE_KR2, SPEED_20000,
                                       ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT,
                                       ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT);
index 3f6d5c38463723e23be94af802bdf707f9a7c4e2..f1c10f2bda780a1d4a9dabe3c1a126b80809e5e9 100644 (file)
@@ -2286,9 +2286,14 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
                                bool carry_xdp_prog)
 {
        struct bpf_prog *xdp_prog;
-       int i, t;
+       int i, t, ret;
 
-       mlx4_en_copy_priv(tmp, priv, prof);
+       ret = mlx4_en_copy_priv(tmp, priv, prof);
+       if (ret) {
+               en_warn(priv, "%s: mlx4_en_copy_priv() failed, return\n",
+                       __func__);
+               return ret;
+       }
 
        if (mlx4_en_alloc_resources(tmp)) {
                en_warn(priv,
index 8eaa24d865c55b0c50088db22abf9e2c0c271989..a46284ca517200dde86231bf3aa741c226774e48 100644 (file)
@@ -341,6 +341,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_DEALLOC_SF:
        case MLX5_CMD_OP_DESTROY_UCTX:
        case MLX5_CMD_OP_DESTROY_UMEM:
+       case MLX5_CMD_OP_MODIFY_RQT:
                return MLX5_CMD_STAT_OK;
 
        case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -446,7 +447,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_MODIFY_TIS:
        case MLX5_CMD_OP_QUERY_TIS:
        case MLX5_CMD_OP_CREATE_RQT:
-       case MLX5_CMD_OP_MODIFY_RQT:
        case MLX5_CMD_OP_QUERY_RQT:
 
        case MLX5_CMD_OP_CREATE_FLOW_TABLE:
index 14295384799606cc69cdc7e421ad835d9f32d67c..0015a81eb9a17b6550cb5038baa8b5271b7580de 100644 (file)
@@ -13,6 +13,9 @@ struct mlx5e_rx_res {
        unsigned int max_nch;
        u32 drop_rqn;
 
+       struct mlx5e_packet_merge_param pkt_merge_param;
+       struct rw_semaphore pkt_merge_param_sem;
+
        struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS];
        bool rss_active;
        u32 rss_rqns[MLX5E_INDIR_RQT_SIZE];
@@ -392,6 +395,7 @@ static int mlx5e_rx_res_ptp_init(struct mlx5e_rx_res *res)
        if (err)
                goto out;
 
+       /* Separated from the channels RQs, does not share pkt_merge state with them */
        mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
                                    mlx5e_rqt_get_rqtn(&res->ptp.rqt),
                                    inner_ft_support);
@@ -447,6 +451,9 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
        res->max_nch = max_nch;
        res->drop_rqn = drop_rqn;
 
+       res->pkt_merge_param = *init_pkt_merge_param;
+       init_rwsem(&res->pkt_merge_param_sem);
+
        err = mlx5e_rx_res_rss_init_def(res, init_pkt_merge_param, init_nch);
        if (err)
                goto err_out;
@@ -513,7 +520,7 @@ u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res)
        return mlx5e_tir_get_tirn(&res->ptp.tir);
 }
 
-u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
+static u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
 {
        return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt);
 }
@@ -656,6 +663,9 @@ int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
        if (!builder)
                return -ENOMEM;
 
+       down_write(&res->pkt_merge_param_sem);
+       res->pkt_merge_param = *pkt_merge_param;
+
        mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);
 
        final_err = 0;
@@ -681,6 +691,7 @@ int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
                }
        }
 
+       up_write(&res->pkt_merge_param_sem);
        mlx5e_tir_builder_free(builder);
        return final_err;
 }
@@ -689,3 +700,31 @@ struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *
 {
        return mlx5e_rss_get_hash(res->rss[0]);
 }
+
+int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq,
+                               struct mlx5e_tir *tir)
+{
+       bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
+       struct mlx5e_tir_builder *builder;
+       u32 rqtn;
+       int err;
+
+       builder = mlx5e_tir_builder_alloc(false);
+       if (!builder)
+               return -ENOMEM;
+
+       rqtn = mlx5e_rx_res_get_rqtn_direct(res, rxq);
+
+       mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, rqtn,
+                                   inner_ft_support);
+       mlx5e_tir_builder_build_direct(builder);
+       mlx5e_tir_builder_build_tls(builder);
+       down_read(&res->pkt_merge_param_sem);
+       mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
+       err = mlx5e_tir_init(tir, builder, res->mdev, false);
+       up_read(&res->pkt_merge_param_sem);
+
+       mlx5e_tir_builder_free(builder);
+
+       return err;
+}
index d09f7d174a5180a18cb0db8948bf043323325e82..b39b20a720e0fa244b2047d2ee3ad969f18bfdc9 100644 (file)
@@ -37,9 +37,6 @@ u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types
 u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
 u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res);
 
-/* RQTN getters for modules that create their own TIRs */
-u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix);
-
 /* Activate/deactivate API */
 void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs);
 void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res);
@@ -69,4 +66,7 @@ struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx);
 /* Workaround for hairpin */
 struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res);
 
+/* Accel TIRs */
+int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq,
+                               struct mlx5e_tir *tir);
 #endif /* __MLX5_EN_RX_RES_H__ */
index fb5397324aa4f2b597a1f1b0bcd355836e0c382f..2db9573a3fe69d9f175c663334cc6c6c14ad5ec0 100644 (file)
@@ -191,7 +191,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
                        eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
                        eseg->swp_inner_l4_offset =
                                (skb->csum_start + skb->head - skb->data) / 2;
-                       if (skb->protocol == htons(ETH_P_IPV6))
+                       if (inner_ip_hdr(skb)->version == 6)
                                eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
                        break;
                default:
index a2a9f68579dd82d544b64a9e12a179552119e824..15711814d2d28d8498eec641230470706df14115 100644 (file)
@@ -100,25 +100,6 @@ mlx5e_ktls_rx_resync_create_resp_list(void)
        return resp_list;
 }
 
-static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 rqtn)
-{
-       struct mlx5e_tir_builder *builder;
-       int err;
-
-       builder = mlx5e_tir_builder_alloc(false);
-       if (!builder)
-               return -ENOMEM;
-
-       mlx5e_tir_builder_build_rqt(builder, mdev->mlx5e_res.hw_objs.td.tdn, rqtn, false);
-       mlx5e_tir_builder_build_direct(builder);
-       mlx5e_tir_builder_build_tls(builder);
-       err = mlx5e_tir_init(tir, builder, mdev, false);
-
-       mlx5e_tir_builder_free(builder);
-
-       return err;
-}
-
 static void accel_rule_handle_work(struct work_struct *work)
 {
        struct mlx5e_ktls_offload_context_rx *priv_rx;
@@ -609,7 +590,6 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
        struct mlx5_core_dev *mdev;
        struct mlx5e_priv *priv;
        int rxq, err;
-       u32 rqtn;
 
        tls_ctx = tls_get_ctx(sk);
        priv = netdev_priv(netdev);
@@ -635,9 +615,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
        priv_rx->sw_stats = &priv->tls->sw_stats;
        mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);
 
-       rqtn = mlx5e_rx_res_get_rqtn_direct(priv->rx_res, rxq);
-
-       err = mlx5e_ktls_create_tir(mdev, &priv_rx->tir, rqtn);
+       err = mlx5e_rx_res_tls_tir_create(priv->rx_res, rxq, &priv_rx->tir);
        if (err)
                goto err_create_tir;
 
index e58a9ec4255322538e6cb847f8dddecc6659754c..48895d79796a82634441668f2a0369a5e7b7e96d 100644 (file)
@@ -1080,6 +1080,10 @@ static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
        &MLX5E_STATS_GRP(pme),
        &MLX5E_STATS_GRP(channels),
        &MLX5E_STATS_GRP(per_port_buff_congest),
+#ifdef CONFIG_MLX5_EN_IPSEC
+       &MLX5E_STATS_GRP(ipsec_sw),
+       &MLX5E_STATS_GRP(ipsec_hw),
+#endif
 };
 
 static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
index 96967b0a24418c5aa4018f5396a4a73f731a0c37..793511d5ee4cd969d15c16c7e20af5914161e8f7 100644 (file)
@@ -543,13 +543,13 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
                                     u16 klm_entries, u16 index)
 {
        struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
-       u16 entries, pi, i, header_offset, err, wqe_bbs, new_entries;
+       u16 entries, pi, header_offset, err, wqe_bbs, new_entries;
        u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
        struct page *page = shampo->last_page;
        u64 addr = shampo->last_addr;
        struct mlx5e_dma_info *dma_info;
        struct mlx5e_umr_wqe *umr_wqe;
-       int headroom;
+       int headroom, i;
 
        headroom = rq->buff.headroom;
        new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1));
@@ -601,9 +601,7 @@ update_klm:
 
 err_unmap:
        while (--i >= 0) {
-               if (--index < 0)
-                       index = shampo->hd_per_wq - 1;
-               dma_info = &shampo->info[index];
+               dma_info = &shampo->info[--index];
                if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) {
                        dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE);
                        mlx5e_page_release(rq, dma_info, true);
index c6cc67cb4f6add88e0b1f24f00c6112fd8c521d1..d377ddc70fc70b072c441d8a512ce98781e17e71 100644 (file)
@@ -130,7 +130,7 @@ static u32 esw_qos_calculate_min_rate_divider(struct mlx5_eswitch *esw,
        /* If vports min rate divider is 0 but their group has bw_share configured, then
         * need to set bw_share for vports to minimal value.
         */
-       if (!group_level && !max_guarantee && group->bw_share)
+       if (!group_level && !max_guarantee && group && group->bw_share)
                return 1;
        return 0;
 }
@@ -423,7 +423,7 @@ static int esw_qos_vport_update_group(struct mlx5_eswitch *esw,
                return err;
 
        /* Recalculate bw share weights of old and new groups */
-       if (vport->qos.bw_share) {
+       if (vport->qos.bw_share || new_group->bw_share) {
                esw_qos_normalize_vports_min_rate(esw, curr_group, extack);
                esw_qos_normalize_vports_min_rate(esw, new_group, extack);
        }
index a46455694f7ae79eec21c461f9668ff39401f540..32bc08a399256c5ce3bcfed8b9de181ec76045b2 100644 (file)
@@ -329,14 +329,25 @@ static bool
 esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
 {
        struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+       bool result = false;
        int i;
 
-       for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
+       /* Indirect table is supported only for flows with in_port uplink
+        * and the destination is vport on the same eswitch as the uplink,
+        * return false in case at least one of destinations doesn't meet
+        * this criteria.
+        */
+       for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
                if (esw_attr->dests[i].rep &&
                    mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
-                                               esw_attr->dests[i].mdev))
-                       return true;
-       return false;
+                                               esw_attr->dests[i].mdev)) {
+                       result = true;
+               } else {
+                       result = false;
+                       break;
+               }
+       }
+       return result;
 }
 
 static int
@@ -2512,6 +2523,7 @@ static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
        struct mlx5_eswitch *esw = master->priv.eswitch;
        struct mlx5_flow_table_attr ft_attr = {
                .max_fte = 1, .prio = 0, .level = 0,
+               .flags = MLX5_FLOW_TABLE_OTHER_VPORT,
        };
        struct mlx5_flow_namespace *egress_ns;
        struct mlx5_flow_table *acl;
index 64f1abc4dc367fe1d05787a571742b9522cdfd93..3ca998874c50d583bbe3713d950bcaf7aa7f950b 100644 (file)
@@ -835,6 +835,9 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
 
        health->timer.expires = jiffies + msecs_to_jiffies(poll_interval_ms);
        add_timer(&health->timer);
+
+       if (mlx5_core_is_pf(dev) && MLX5_CAP_MCAM_REG(dev, mrtc))
+               queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0);
 }
 
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
@@ -902,8 +905,6 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
        INIT_WORK(&health->fatal_report_work, mlx5_fw_fatal_reporter_err_work);
        INIT_WORK(&health->report_work, mlx5_fw_reporter_err_work);
        INIT_DELAYED_WORK(&health->update_fw_log_ts_work, mlx5_health_log_ts_update);
-       if (mlx5_core_is_pf(dev))
-               queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0);
 
        return 0;
 
index ad63dd45c8fb9dbf4d401e16c35f5343b41af857..a6592f9c3c05fc8d2c38f9ea491e3360e9527e80 100644 (file)
@@ -608,4 +608,5 @@ void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev)
        if (port_sel->tunnel)
                mlx5_destroy_ttc_table(port_sel->inner.ttc);
        mlx5_lag_destroy_definers(ldev);
+       memset(port_sel, 0, sizeof(*port_sel));
 }
index 0dd96a6b140dddfd993ca1a57aa5ffd8acc7d2c0..c1df0d3595d87e283994af8d7a798d93da7842b6 100644 (file)
@@ -31,11 +31,11 @@ static void tout_set(struct mlx5_core_dev *dev, u64 val, enum mlx5_timeouts_type
        dev->timeouts->to[type] = val;
 }
 
-static void tout_set_def_val(struct mlx5_core_dev *dev)
+void mlx5_tout_set_def_val(struct mlx5_core_dev *dev)
 {
        int i;
 
-       for (i = MLX5_TO_FW_PRE_INIT_TIMEOUT_MS; i < MAX_TIMEOUT_TYPES; i++)
+       for (i = 0; i < MAX_TIMEOUT_TYPES; i++)
                tout_set(dev, tout_def_sw_val[i], i);
 }
 
@@ -45,7 +45,6 @@ int mlx5_tout_init(struct mlx5_core_dev *dev)
        if (!dev->timeouts)
                return -ENOMEM;
 
-       tout_set_def_val(dev);
        return 0;
 }
 
index 31faa5c17aa91c89d9e428bb82ef2074e4cfc19a..1c42ead782fa7f4470a3f58dc1d9b2cd4c57e990 100644 (file)
@@ -34,6 +34,7 @@ int mlx5_tout_init(struct mlx5_core_dev *dev);
 void mlx5_tout_cleanup(struct mlx5_core_dev *dev);
 void mlx5_tout_query_iseg(struct mlx5_core_dev *dev);
 int mlx5_tout_query_dtor(struct mlx5_core_dev *dev);
+void mlx5_tout_set_def_val(struct mlx5_core_dev *dev);
 u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type);
 
 #define mlx5_tout_ms(dev, type) _mlx5_tout_ms(dev, MLX5_TO_##type##_MS)
index a92a92a52346d8c33c2d272fb8a087bb81a07a22..7df9c7f8d9c8ad27fd624bffb98ddfd80ae75303 100644 (file)
@@ -992,11 +992,7 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
        if (mlx5_core_is_pf(dev))
                pcie_print_link_status(dev->pdev);
 
-       err = mlx5_tout_init(dev);
-       if (err) {
-               mlx5_core_err(dev, "Failed initializing timeouts, aborting\n");
-               return err;
-       }
+       mlx5_tout_set_def_val(dev);
 
        /* wait for firmware to accept initialization segments configurations
         */
@@ -1005,13 +1001,13 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
        if (err) {
                mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n",
                              mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
-               goto err_tout_cleanup;
+               return err;
        }
 
        err = mlx5_cmd_init(dev);
        if (err) {
                mlx5_core_err(dev, "Failed initializing command interface, aborting\n");
-               goto err_tout_cleanup;
+               return err;
        }
 
        mlx5_tout_query_iseg(dev);
@@ -1075,18 +1071,16 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
 
        mlx5_set_driver_version(dev);
 
-       mlx5_start_health_poll(dev);
-
        err = mlx5_query_hca_caps(dev);
        if (err) {
                mlx5_core_err(dev, "query hca failed\n");
-               goto stop_health;
+               goto reclaim_boot_pages;
        }
 
+       mlx5_start_health_poll(dev);
+
        return 0;
 
-stop_health:
-       mlx5_stop_health_poll(dev, boot);
 reclaim_boot_pages:
        mlx5_reclaim_startup_pages(dev);
 err_disable_hca:
@@ -1094,8 +1088,6 @@ err_disable_hca:
 err_cmd_cleanup:
        mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
        mlx5_cmd_cleanup(dev);
-err_tout_cleanup:
-       mlx5_tout_cleanup(dev);
 
        return err;
 }
@@ -1114,7 +1106,6 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
        mlx5_core_disable_hca(dev, 0);
        mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
        mlx5_cmd_cleanup(dev);
-       mlx5_tout_cleanup(dev);
 
        return 0;
 }
@@ -1476,6 +1467,12 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
                                            mlx5_debugfs_root);
        INIT_LIST_HEAD(&priv->traps);
 
+       err = mlx5_tout_init(dev);
+       if (err) {
+               mlx5_core_err(dev, "Failed initializing timeouts, aborting\n");
+               goto err_timeout_init;
+       }
+
        err = mlx5_health_init(dev);
        if (err)
                goto err_health_init;
@@ -1501,6 +1498,8 @@ err_adev_init:
 err_pagealloc_init:
        mlx5_health_cleanup(dev);
 err_health_init:
+       mlx5_tout_cleanup(dev);
+err_timeout_init:
        debugfs_remove(dev->priv.dbg_root);
        mutex_destroy(&priv->pgdir_mutex);
        mutex_destroy(&priv->alloc_mutex);
@@ -1518,6 +1517,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
        mlx5_adev_cleanup(dev);
        mlx5_pagealloc_cleanup(dev);
        mlx5_health_cleanup(dev);
+       mlx5_tout_cleanup(dev);
        debugfs_remove_recursive(dev->priv.dbg_root);
        mutex_destroy(&priv->pgdir_mutex);
        mutex_destroy(&priv->alloc_mutex);
index 217e3b351dfe6048f90e9f93930a23fde8b2e09f..c34833ff1dded21a29c0af0f296ec4749c13f454 100644 (file)
@@ -8494,7 +8494,8 @@ mlxsw_sp_rif_mac_profile_replace(struct mlxsw_sp *mlxsw_sp,
        u8 mac_profile;
        int err;
 
-       if (!mlxsw_sp_rif_mac_profile_is_shared(rif))
+       if (!mlxsw_sp_rif_mac_profile_is_shared(rif) &&
+           !mlxsw_sp_rif_mac_profile_find(mlxsw_sp, new_mac))
                return mlxsw_sp_rif_mac_profile_edit(rif, new_mac);
 
        err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, new_mac,
index 2e25798c610eed8f167710d53673504c00eff660..7f49042484bdcebd05def5bfd8be8956cb3d13df 100644 (file)
@@ -321,6 +321,8 @@ static int ks8851_probe_par(struct platform_device *pdev)
                return ret;
 
        netdev->irq = platform_get_irq(pdev, 0);
+       if (netdev->irq < 0)
+               return netdev->irq;
 
        return ks8851_probe_common(netdev, dev, msg_enable);
 }
index 34b971ff8ef8ba79c99b2b64eb49fba56a5552bf..078d6a5a0768876cda1f3762def4fa0fdc44b3ca 100644 (file)
@@ -480,16 +480,16 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc,
        if (err)
                goto out;
 
-       err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
-                                    &hwc_wq->msg_buf);
-       if (err)
-               goto out;
-
        hwc_wq->hwc = hwc;
        hwc_wq->gdma_wq = queue;
        hwc_wq->queue_depth = q_depth;
        hwc_wq->hwc_cq = hwc_cq;
 
+       err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
+                                    &hwc_wq->msg_buf);
+       if (err)
+               goto out;
+
        *hwc_wq_ptr = hwc_wq;
        return 0;
 out:
index 409cde1e59c6f2caeb34848ca405cc873007c7ed..1e4ad953cffbc5fc066954c75f6b2bf9469142e9 100644 (file)
@@ -1563,8 +1563,10 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
        }
 
        err = ocelot_setup_ptp_traps(ocelot, port, l2, l4);
-       if (err)
+       if (err) {
+               mutex_unlock(&ocelot->ptp_lock);
                return err;
+       }
 
        if (l2 && l4)
                cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
index ca4686094701c60eff5a9991990575257f1310ca..0a02d8bd0a3e57ed996c798a5e4fa7875f422338 100644 (file)
@@ -120,7 +120,7 @@ static const struct net_device_ops xtsonic_netdev_ops = {
        .ndo_set_mac_address    = eth_mac_addr,
 };
 
-static int __init sonic_probe1(struct net_device *dev)
+static int sonic_probe1(struct net_device *dev)
 {
        unsigned int silicon_revision;
        struct sonic_local *lp = netdev_priv(dev);
index d7ac0307797fd8f2fc60b6f1b6b4873e80688091..34c0d2ddf9ef6aad21755a7a453a05cdcf2a58d6 100644 (file)
@@ -803,8 +803,10 @@ int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size)
                return -ENOMEM;
 
        cache = kzalloc(sizeof(*cache), GFP_KERNEL);
-       if (!cache)
+       if (!cache) {
+               nfp_cpp_area_free(area);
                return -ENOMEM;
+       }
 
        cache->id = 0;
        cache->addr = 0;
index 065e9004598ee8f37e4669f48b5239a39426d62b..999abcfe3310a81a51554ba01cf7ff7b5aaf7077 100644 (file)
@@ -1643,6 +1643,13 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                        data_split = true;
                }
        } else {
+               if (unlikely(skb->len > ETH_TX_MAX_NON_LSO_PKT_LEN)) {
+                       DP_ERR(edev, "Unexpected non LSO skb length = 0x%x\n", skb->len);
+                       qede_free_failed_tx_pkt(txq, first_bd, 0, false);
+                       qede_update_tx_producer(txq);
+                       return NETDEV_TX_OK;
+               }
+
                val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
                         ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
        }
index 1e6d72adfe43994064aaadddc26e56b2fe20c799..71523d747e93f3ae109cc1b27319c567e6008d3c 100644 (file)
@@ -3480,20 +3480,19 @@ static int ql_adapter_up(struct ql3_adapter *qdev)
 
        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 
-       err = ql_wait_for_drvr_lock(qdev);
-       if (err) {
-               err = ql_adapter_initialize(qdev);
-               if (err) {
-                       netdev_err(ndev, "Unable to initialize adapter\n");
-                       goto err_init;
-               }
-               netdev_err(ndev, "Releasing driver lock\n");
-               ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
-       } else {
+       if (!ql_wait_for_drvr_lock(qdev)) {
                netdev_err(ndev, "Could not acquire driver lock\n");
+               err = -ENODEV;
                goto err_lock;
        }
 
+       err = ql_adapter_initialize(qdev);
+       if (err) {
+               netdev_err(ndev, "Unable to initialize adapter\n");
+               goto err_init;
+       }
+       ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
+
        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 
        set_bit(QL_ADAPTER_UP, &qdev->flags);
index d51bac7ba5afadca6df37a8761838432fba08b1f..bd06076803295fb5a6a0946db8be6bf4ad901076 100644 (file)
@@ -1077,8 +1077,14 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
        sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
        context_id = recv_ctx->context_id;
        num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS;
-       ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
-                                   QLCNIC_CMD_ADD_RCV_RINGS);
+       err = ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
+                                       QLCNIC_CMD_ADD_RCV_RINGS);
+       if (err) {
+               dev_err(&adapter->pdev->dev,
+                       "Failed to alloc mbx args %d\n", err);
+               return err;
+       }
+
        cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16);
 
        /* set up status rings, mbx 2-81 */
index 7160b42f51ddd0ab11bfd8c94f77d506efd8efca..d0111cb3b40e1c5f1e0804e7d8b5dddcbc863541 100644 (file)
@@ -201,7 +201,7 @@ int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *,
                                   struct qlcnic_info *, u16);
 int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8);
 void qlcnic_sriov_free_vlans(struct qlcnic_adapter *);
-void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
+int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
 bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *);
 void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *,
                              struct qlcnic_vf_info *, u16);
index dd03be3fc82a972b9560049ff41703ec1a0ba501..42a44c97572aec68ea0ca3b69cc90b5e7abec43a 100644 (file)
@@ -432,7 +432,7 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
                                            struct qlcnic_cmd_args *cmd)
 {
        struct qlcnic_sriov *sriov = adapter->ahw->sriov;
-       int i, num_vlans;
+       int i, num_vlans, ret;
        u16 *vlans;
 
        if (sriov->allowed_vlans)
@@ -443,7 +443,9 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
        dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
                 sriov->num_allowed_vlans);
 
-       qlcnic_sriov_alloc_vlans(adapter);
+       ret = qlcnic_sriov_alloc_vlans(adapter);
+       if (ret)
+               return ret;
 
        if (!sriov->any_vlan)
                return 0;
@@ -2154,7 +2156,7 @@ static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
        return err;
 }
 
-void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
+int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
 {
        struct qlcnic_sriov *sriov = adapter->ahw->sriov;
        struct qlcnic_vf_info *vf;
@@ -2164,7 +2166,11 @@ void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
                vf = &sriov->vf_info[i];
                vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
                                          sizeof(*vf->sriov_vlans), GFP_KERNEL);
+               if (!vf->sriov_vlans)
+                       return -ENOMEM;
        }
+
+       return 0;
 }
 
 void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
index 447720b93e5ab3b04622429d0eb3d20b4f4edeb4..e90fa97c0ae6c36bf18fffb822d78ce4f77448f8 100644 (file)
@@ -597,7 +597,9 @@ static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
        if (err)
                goto del_flr_queue;
 
-       qlcnic_sriov_alloc_vlans(adapter);
+       err = qlcnic_sriov_alloc_vlans(adapter);
+       if (err)
+               goto del_flr_queue;
 
        return err;
 
index 6aa81229b68a9e02ff34273cbb82563fd2d3001c..e77a5cb4e40d794ace1b4efee5bc4aee51b45e1b 100644 (file)
@@ -609,6 +609,9 @@ static size_t ef100_update_stats(struct efx_nic *efx,
        ef100_common_stat_mask(mask);
        ef100_ethtool_stat_mask(mask);
 
+       if (!mc_stats)
+               return 0;
+
        efx_nic_copy_stats(efx, mc_stats);
        efx_nic_update_stats(ef100_stat_desc, EF100_STAT_COUNT, mask,
                             stats, mc_stats, false);
index 966f13e7475ddb7e650b8c05516e90810862d02e..11a6aee852e92ec4e6a6556e4213837c6531736c 100644 (file)
@@ -728,7 +728,10 @@ static void ef4_init_rx_recycle_ring(struct ef4_nic *efx,
                                            efx->rx_bufs_per_page);
        rx_queue->page_ring = kcalloc(page_ring_size,
                                      sizeof(*rx_queue->page_ring), GFP_KERNEL);
-       rx_queue->page_ptr_mask = page_ring_size - 1;
+       if (!rx_queue->page_ring)
+               rx_queue->page_ptr_mask = 0;
+       else
+               rx_queue->page_ptr_mask = page_ring_size - 1;
 }
 
 void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue)
index 68fc7d317693b5532f647f36ef2d62ee66e8765f..0983abc0cc5f08ad3cfac02e5362807bb1707e07 100644 (file)
@@ -150,7 +150,10 @@ static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
                                            efx->rx_bufs_per_page);
        rx_queue->page_ring = kcalloc(page_ring_size,
                                      sizeof(*rx_queue->page_ring), GFP_KERNEL);
-       rx_queue->page_ptr_mask = page_ring_size - 1;
+       if (!rx_queue->page_ring)
+               rx_queue->page_ptr_mask = 0;
+       else
+               rx_queue->page_ptr_mask = page_ring_size - 1;
 }
 
 static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
index 89381f79698552d30858e429687d6a39c5286db0..dd6f69ced4ee31f758681a878b3363a7388b3148 100644 (file)
@@ -2072,6 +2072,11 @@ static int smc911x_drv_probe(struct platform_device *pdev)
 
        ndev->dma = (unsigned char)-1;
        ndev->irq = platform_get_irq(pdev, 0);
+       if (ndev->irq < 0) {
+               ret = ndev->irq;
+               goto release_both;
+       }
+
        lp = netdev_priv(ndev);
        lp->netdev = ndev;
 #ifdef SMC_DYNAMIC_BUS_CONFIG
index 6924a6aacbd53c8310a1476b82d5978f87094b5b..c469abc91fa1b04966f276042c60e1fa0f8696a3 100644 (file)
@@ -33,6 +33,7 @@ struct rk_gmac_ops {
        void (*set_rgmii_speed)(struct rk_priv_data *bsp_priv, int speed);
        void (*set_rmii_speed)(struct rk_priv_data *bsp_priv, int speed);
        void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv);
+       bool regs_valid;
        u32 regs[];
 };
 
@@ -1092,6 +1093,7 @@ static const struct rk_gmac_ops rk3568_ops = {
        .set_to_rmii = rk3568_set_to_rmii,
        .set_rgmii_speed = rk3568_set_gmac_speed,
        .set_rmii_speed = rk3568_set_gmac_speed,
+       .regs_valid = true,
        .regs = {
                0xfe2a0000, /* gmac0 */
                0xfe010000, /* gmac1 */
@@ -1383,7 +1385,7 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
         * to be distinguished.
         */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (res) {
+       if (res && ops->regs_valid) {
                int i = 0;
 
                while (ops->regs[i]) {
index 66fc8be34bb71738d1e98774d96d23059eadc0a0..e2e0f977875d7026b030564c3828af022271721f 100644 (file)
@@ -26,7 +26,7 @@
 #define ETHER_CLK_SEL_FREQ_SEL_125M    (BIT(9) | BIT(8))
 #define ETHER_CLK_SEL_FREQ_SEL_50M     BIT(9)
 #define ETHER_CLK_SEL_FREQ_SEL_25M     BIT(8)
-#define ETHER_CLK_SEL_FREQ_SEL_2P5M    BIT(0)
+#define ETHER_CLK_SEL_FREQ_SEL_2P5M    0
 #define ETHER_CLK_SEL_TX_CLK_EXT_SEL_IN BIT(0)
 #define ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC BIT(10)
 #define ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV BIT(11)
index 5f129733aabd2e914a84a89b72e90bd3a41caaa2..873b9e3e5da25ba1e357db7761514b85e0ebb9c1 100644 (file)
@@ -172,6 +172,19 @@ struct stmmac_flow_entry {
        int is_l4;
 };
 
+/* Rx Frame Steering */
+enum stmmac_rfs_type {
+       STMMAC_RFS_T_VLAN,
+       STMMAC_RFS_T_MAX,
+};
+
+struct stmmac_rfs_entry {
+       unsigned long cookie;
+       int in_use;
+       int type;
+       int tc;
+};
+
 struct stmmac_priv {
        /* Frequently used values are kept adjacent for cache effect */
        u32 tx_coal_frames[MTL_MAX_TX_QUEUES];
@@ -289,6 +302,10 @@ struct stmmac_priv {
        struct stmmac_tc_entry *tc_entries;
        unsigned int flow_entries_max;
        struct stmmac_flow_entry *flow_entries;
+       unsigned int rfs_entries_max[STMMAC_RFS_T_MAX];
+       unsigned int rfs_entries_cnt[STMMAC_RFS_T_MAX];
+       unsigned int rfs_entries_total;
+       struct stmmac_rfs_entry *rfs_entries;
 
        /* Pulse Per Second output */
        struct stmmac_pps_cfg pps[STMMAC_PPS_MAX];
index 748195697e5a09bf09b7a3206ece0ec19807232c..8ded4be08b001fdc7429871883907b4a93113b64 100644 (file)
@@ -1461,16 +1461,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 {
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
+       gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+       if (priv->dma_cap.addr64 <= 32)
+               gfp |= GFP_DMA32;
 
        if (!buf->page) {
-               buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
+               buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
                if (!buf->page)
                        return -ENOMEM;
                buf->page_offset = stmmac_rx_offset(priv);
        }
 
        if (priv->sph && !buf->sec_page) {
-               buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
+               buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
                if (!buf->sec_page)
                        return -ENOMEM;
 
@@ -4482,6 +4486,10 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        int dirty = stmmac_rx_dirty(priv, queue);
        unsigned int entry = rx_q->dirty_rx;
+       gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+       if (priv->dma_cap.addr64 <= 32)
+               gfp |= GFP_DMA32;
 
        while (dirty-- > 0) {
                struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
@@ -4494,13 +4502,13 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
                        p = rx_q->dma_rx + entry;
 
                if (!buf->page) {
-                       buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
+                       buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
                        if (!buf->page)
                                break;
                }
 
                if (priv->sph && !buf->sec_page) {
-                       buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
+                       buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
                        if (!buf->sec_page)
                                break;
 
@@ -5540,8 +5548,6 @@ static int stmmac_set_features(struct net_device *netdev,
                               netdev_features_t features)
 {
        struct stmmac_priv *priv = netdev_priv(netdev);
-       bool sph_en;
-       u32 chan;
 
        /* Keep the COE Type in case of csum is supporting */
        if (features & NETIF_F_RXCSUM)
@@ -5553,10 +5559,13 @@ static int stmmac_set_features(struct net_device *netdev,
         */
        stmmac_rx_ipc(priv, priv->hw);
 
-       sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+       if (priv->sph_cap) {
+               bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+               u32 chan;
 
-       for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
-               stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+               for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
+                       stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+       }
 
        return 0;
 }
index 580cc035536bd8893ffc0daca0c51231e3969ae6..be9b58b2abf9bec07743969f75ca2e902acff7b6 100644 (file)
@@ -102,7 +102,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
                time.tv_nsec = priv->plat->est->btr_reserve[0];
                time.tv_sec = priv->plat->est->btr_reserve[1];
                basetime = timespec64_to_ktime(time);
-               cycle_time = priv->plat->est->ctr[1] * NSEC_PER_SEC +
+               cycle_time = (u64)priv->plat->est->ctr[1] * NSEC_PER_SEC +
                             priv->plat->est->ctr[0];
                time = stmmac_calc_tas_basetime(basetime,
                                                current_time_ns,
index 1c4ea0b1b845b3c162f533656a0eadca56f8600e..d0a2b289f4603ce1b594ff03689240c6f52f8e1a 100644 (file)
@@ -232,11 +232,33 @@ static int tc_setup_cls_u32(struct stmmac_priv *priv,
        }
 }
 
+static int tc_rfs_init(struct stmmac_priv *priv)
+{
+       int i;
+
+       priv->rfs_entries_max[STMMAC_RFS_T_VLAN] = 8;
+
+       for (i = 0; i < STMMAC_RFS_T_MAX; i++)
+               priv->rfs_entries_total += priv->rfs_entries_max[i];
+
+       priv->rfs_entries = devm_kcalloc(priv->device,
+                                        priv->rfs_entries_total,
+                                        sizeof(*priv->rfs_entries),
+                                        GFP_KERNEL);
+       if (!priv->rfs_entries)
+               return -ENOMEM;
+
+       dev_info(priv->device, "Enabled RFS Flow TC (entries=%d)\n",
+                priv->rfs_entries_total);
+
+       return 0;
+}
+
 static int tc_init(struct stmmac_priv *priv)
 {
        struct dma_features *dma_cap = &priv->dma_cap;
        unsigned int count;
-       int i;
+       int ret, i;
 
        if (dma_cap->l3l4fnum) {
                priv->flow_entries_max = dma_cap->l3l4fnum;
@@ -250,10 +272,14 @@ static int tc_init(struct stmmac_priv *priv)
                for (i = 0; i < priv->flow_entries_max; i++)
                        priv->flow_entries[i].idx = i;
 
-               dev_info(priv->device, "Enabled Flow TC (entries=%d)\n",
+               dev_info(priv->device, "Enabled L3L4 Flow TC (entries=%d)\n",
                         priv->flow_entries_max);
        }
 
+       ret = tc_rfs_init(priv);
+       if (ret)
+               return -ENOMEM;
+
        if (!priv->plat->fpe_cfg) {
                priv->plat->fpe_cfg = devm_kzalloc(priv->device,
                                                   sizeof(*priv->plat->fpe_cfg),
@@ -607,16 +633,45 @@ static int tc_del_flow(struct stmmac_priv *priv,
        return ret;
 }
 
+static struct stmmac_rfs_entry *tc_find_rfs(struct stmmac_priv *priv,
+                                           struct flow_cls_offload *cls,
+                                           bool get_free)
+{
+       int i;
+
+       for (i = 0; i < priv->rfs_entries_total; i++) {
+               struct stmmac_rfs_entry *entry = &priv->rfs_entries[i];
+
+               if (entry->cookie == cls->cookie)
+                       return entry;
+               if (get_free && entry->in_use == false)
+                       return entry;
+       }
+
+       return NULL;
+}
+
 #define VLAN_PRIO_FULL_MASK (0x07)
 
 static int tc_add_vlan_flow(struct stmmac_priv *priv,
                            struct flow_cls_offload *cls)
 {
+       struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        struct flow_dissector *dissector = rule->match.dissector;
        int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
        struct flow_match_vlan match;
 
+       if (!entry) {
+               entry = tc_find_rfs(priv, cls, true);
+               if (!entry)
+                       return -ENOENT;
+       }
+
+       if (priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN] >=
+           priv->rfs_entries_max[STMMAC_RFS_T_VLAN])
+               return -ENOENT;
+
        /* Nothing to do here */
        if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN))
                return -EINVAL;
@@ -638,6 +693,12 @@ static int tc_add_vlan_flow(struct stmmac_priv *priv,
 
                prio = BIT(match.key->vlan_priority);
                stmmac_rx_queue_prio(priv, priv->hw, prio, tc);
+
+               entry->in_use = true;
+               entry->cookie = cls->cookie;
+               entry->tc = tc;
+               entry->type = STMMAC_RFS_T_VLAN;
+               priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]++;
        }
 
        return 0;
@@ -646,20 +707,19 @@ static int tc_add_vlan_flow(struct stmmac_priv *priv,
 static int tc_del_vlan_flow(struct stmmac_priv *priv,
                            struct flow_cls_offload *cls)
 {
-       struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
-       struct flow_dissector *dissector = rule->match.dissector;
-       int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
+       struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
 
-       /* Nothing to do here */
-       if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN))
-               return -EINVAL;
+       if (!entry || !entry->in_use || entry->type != STMMAC_RFS_T_VLAN)
+               return -ENOENT;
 
-       if (tc < 0) {
-               netdev_err(priv->dev, "Invalid traffic class\n");
-               return -EINVAL;
-       }
+       stmmac_rx_queue_prio(priv, priv->hw, 0, entry->tc);
+
+       entry->in_use = false;
+       entry->cookie = 0;
+       entry->tc = 0;
+       entry->type = 0;
 
-       stmmac_rx_queue_prio(priv, priv->hw, 0, tc);
+       priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]--;
 
        return 0;
 }
index c092cb61416a180a5ce1d0d28bd163e4a1dab302..ffbbda8f4d41671d556fa1a019f9495b23fa83eb 100644 (file)
@@ -1844,13 +1844,14 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
                if (ret < 0) {
                        dev_err(dev, "%pOF error reading port_id %d\n",
                                port_np, ret);
-                       return ret;
+                       goto of_node_put;
                }
 
                if (!port_id || port_id > common->port_num) {
                        dev_err(dev, "%pOF has invalid port_id %u %s\n",
                                port_np, port_id, port_np->name);
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto of_node_put;
                }
 
                port = am65_common_get_port(common, port_id);
@@ -1866,8 +1867,10 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
                                (AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1));
 
                port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base);
-               if (IS_ERR(port->slave.mac_sl))
-                       return PTR_ERR(port->slave.mac_sl);
+               if (IS_ERR(port->slave.mac_sl)) {
+                       ret = PTR_ERR(port->slave.mac_sl);
+                       goto of_node_put;
+               }
 
                port->disabled = !of_device_is_available(port_np);
                if (port->disabled) {
@@ -1880,7 +1883,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
                        ret = PTR_ERR(port->slave.ifphy);
                        dev_err(dev, "%pOF error retrieving port phy: %d\n",
                                port_np, ret);
-                       return ret;
+                       goto of_node_put;
                }
 
                port->slave.mac_only =
@@ -1889,10 +1892,12 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
                /* get phy/link info */
                if (of_phy_is_fixed_link(port_np)) {
                        ret = of_phy_register_fixed_link(port_np);
-                       if (ret)
-                               return dev_err_probe(dev, ret,
+                       if (ret) {
+                               ret = dev_err_probe(dev, ret,
                                                     "failed to register fixed-link phy %pOF\n",
                                                     port_np);
+                               goto of_node_put;
+                       }
                        port->slave.phy_node = of_node_get(port_np);
                } else {
                        port->slave.phy_node =
@@ -1902,14 +1907,15 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
                if (!port->slave.phy_node) {
                        dev_err(dev,
                                "slave[%d] no phy found\n", port_id);
-                       return -ENODEV;
+                       ret = -ENODEV;
+                       goto of_node_put;
                }
 
                ret = of_get_phy_mode(port_np, &port->slave.phy_if);
                if (ret) {
                        dev_err(dev, "%pOF read phy-mode err %d\n",
                                port_np, ret);
-                       return ret;
+                       goto of_node_put;
                }
 
                ret = of_get_mac_address(port_np, port->slave.mac_addr);
@@ -1932,6 +1938,11 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
        }
 
        return 0;
+
+of_node_put:
+       of_node_put(port_np);
+       of_node_put(node);
+       return ret;
 }
 
 static void am65_cpsw_pcpu_stats_free(void *data)
index b06c17ac8d4eedeb8a2903c9e5c9fa73f191b90b..ebd287039a54631aa9a1e2e317a50476dab88315 100644 (file)
@@ -1262,6 +1262,11 @@ static int fjes_probe(struct platform_device *plat_dev)
        hw->hw_res.start = res->start;
        hw->hw_res.size = resource_size(res);
        hw->hw_res.irq = platform_get_irq(plat_dev, 0);
+       if (hw->hw_res.irq < 0) {
+               err = hw->hw_res.irq;
+               goto err_free_control_wq;
+       }
+
        err = fjes_hw_init(&adapter->hw);
        if (err)
                goto err_free_control_wq;
index 7da2bb8a443c02ba6680edb36a3102fcac95023c..edde9c3ae12b90e7a53e76ea5a2d44b4f34f52c8 100644 (file)
@@ -794,14 +794,14 @@ static void mkiss_close(struct tty_struct *tty)
         */
        netif_stop_queue(ax->dev);
 
-       ax->tty = NULL;
-
        unregister_netdev(ax->dev);
 
        /* Free all AX25 frame buffers after unreg. */
        kfree(ax->rbuff);
        kfree(ax->xbuff);
 
+       ax->tty = NULL;
+
        free_netdev(ax->dev);
 }
 
index 90aafb56f1409ed7be70dd5c4e8b4a8cce7f7dda..a438202129323860798fc23708d1105d1ad5d446 100644 (file)
@@ -514,6 +514,7 @@ nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap)
                                goto err_free;
                        key = nmap->entry[i].key;
                        *key = i;
+                       memset(nmap->entry[i].value, 0, offmap->map.value_size);
                }
        }
 
index 0ab6a40be61147bde13b41279e32a8e9296417dd..a6a713b31aad93c807722458e27f3eec594118b4 100644 (file)
@@ -77,7 +77,10 @@ static int nsim_set_ringparam(struct net_device *dev,
 {
        struct netdevsim *ns = netdev_priv(dev);
 
-       memcpy(&ns->ethtool.ring, ring, sizeof(ns->ethtool.ring));
+       ns->ethtool.ring.rx_pending = ring->rx_pending;
+       ns->ethtool.ring.rx_jumbo_pending = ring->rx_jumbo_pending;
+       ns->ethtool.ring.rx_mini_pending = ring->rx_mini_pending;
+       ns->ethtool.ring.tx_pending = ring->tx_pending;
        return 0;
 }
 
index c204067f189026266373c4f06b149071da9c632c..c198722e4871d73685fbd012d68d31d0e592250f 100644 (file)
@@ -460,6 +460,9 @@ static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
 
                if (addr == mdiodev->addr) {
                        device_set_node(dev, of_fwnode_handle(child));
+                       /* The refcount on "child" is passed to the mdio
+                        * device. Do _not_ use of_node_put(child) here.
+                        */
                        return;
                }
        }
index 5904546acae616938ca27b9e6050b21088eec469..ea82ea5660e788709d676c3d8c35f02416d56bff 100644 (file)
@@ -1388,6 +1388,7 @@ EXPORT_SYMBOL_GPL(phylink_stop);
  * @mac_wol: true if the MAC needs to receive packets for Wake-on-Lan
  *
  * Handle a network device suspend event. There are several cases:
+ *
  * - If Wake-on-Lan is not active, we can bring down the link between
  *   the MAC and PHY by calling phylink_stop().
  * - If Wake-on-Lan is active, and being handled only by the PHY, we
index 1572878c340319953fc07ab2721dc9a994ed1b88..45a67e72a02c6c66102c9419a4b476b292b67da3 100644 (file)
@@ -209,6 +209,9 @@ struct tun_struct {
        struct tun_prog __rcu *steering_prog;
        struct tun_prog __rcu *filter_prog;
        struct ethtool_link_ksettings link_ksettings;
+       /* init args */
+       struct file *file;
+       struct ifreq *ifr;
 };
 
 struct veth {
@@ -216,6 +219,9 @@ struct veth {
        __be16 h_vlan_TCI;
 };
 
+static void tun_flow_init(struct tun_struct *tun);
+static void tun_flow_uninit(struct tun_struct *tun);
+
 static int tun_napi_receive(struct napi_struct *napi, int budget)
 {
        struct tun_file *tfile = container_of(napi, struct tun_file, napi);
@@ -953,6 +959,49 @@ static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
 
 static const struct ethtool_ops tun_ethtool_ops;
 
+static int tun_net_init(struct net_device *dev)
+{
+       struct tun_struct *tun = netdev_priv(dev);
+       struct ifreq *ifr = tun->ifr;
+       int err;
+
+       dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+       if (!dev->tstats)
+               return -ENOMEM;
+
+       spin_lock_init(&tun->lock);
+
+       err = security_tun_dev_alloc_security(&tun->security);
+       if (err < 0) {
+               free_percpu(dev->tstats);
+               return err;
+       }
+
+       tun_flow_init(tun);
+
+       dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
+                          TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
+                          NETIF_F_HW_VLAN_STAG_TX;
+       dev->features = dev->hw_features | NETIF_F_LLTX;
+       dev->vlan_features = dev->features &
+                            ~(NETIF_F_HW_VLAN_CTAG_TX |
+                              NETIF_F_HW_VLAN_STAG_TX);
+
+       tun->flags = (tun->flags & ~TUN_FEATURES) |
+                     (ifr->ifr_flags & TUN_FEATURES);
+
+       INIT_LIST_HEAD(&tun->disabled);
+       err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI,
+                        ifr->ifr_flags & IFF_NAPI_FRAGS, false);
+       if (err < 0) {
+               tun_flow_uninit(tun);
+               security_tun_dev_free_security(tun->security);
+               free_percpu(dev->tstats);
+               return err;
+       }
+       return 0;
+}
+
 /* Net device detach from fd. */
 static void tun_net_uninit(struct net_device *dev)
 {
@@ -1169,6 +1218,7 @@ static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
 }
 
 static const struct net_device_ops tun_netdev_ops = {
+       .ndo_init               = tun_net_init,
        .ndo_uninit             = tun_net_uninit,
        .ndo_open               = tun_net_open,
        .ndo_stop               = tun_net_close,
@@ -1252,6 +1302,7 @@ static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
 }
 
 static const struct net_device_ops tap_netdev_ops = {
+       .ndo_init               = tun_net_init,
        .ndo_uninit             = tun_net_uninit,
        .ndo_open               = tun_net_open,
        .ndo_stop               = tun_net_close,
@@ -1292,7 +1343,7 @@ static void tun_flow_uninit(struct tun_struct *tun)
 #define MAX_MTU 65535
 
 /* Initialize net device. */
-static void tun_net_init(struct net_device *dev)
+static void tun_net_initialize(struct net_device *dev)
 {
        struct tun_struct *tun = netdev_priv(dev);
 
@@ -2206,11 +2257,6 @@ static void tun_free_netdev(struct net_device *dev)
        BUG_ON(!(list_empty(&tun->disabled)));
 
        free_percpu(dev->tstats);
-       /* We clear tstats so that tun_set_iff() can tell if
-        * tun_free_netdev() has been called from register_netdevice().
-        */
-       dev->tstats = NULL;
-
        tun_flow_uninit(tun);
        security_tun_dev_free_security(tun->security);
        __tun_set_ebpf(tun, &tun->steering_prog, NULL);
@@ -2716,41 +2762,16 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                tun->rx_batched = 0;
                RCU_INIT_POINTER(tun->steering_prog, NULL);
 
-               dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
-               if (!dev->tstats) {
-                       err = -ENOMEM;
-                       goto err_free_dev;
-               }
-
-               spin_lock_init(&tun->lock);
-
-               err = security_tun_dev_alloc_security(&tun->security);
-               if (err < 0)
-                       goto err_free_stat;
-
-               tun_net_init(dev);
-               tun_flow_init(tun);
+               tun->ifr = ifr;
+               tun->file = file;
 
-               dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
-                                  TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
-                                  NETIF_F_HW_VLAN_STAG_TX;
-               dev->features = dev->hw_features | NETIF_F_LLTX;
-               dev->vlan_features = dev->features &
-                                    ~(NETIF_F_HW_VLAN_CTAG_TX |
-                                      NETIF_F_HW_VLAN_STAG_TX);
-
-               tun->flags = (tun->flags & ~TUN_FEATURES) |
-                             (ifr->ifr_flags & TUN_FEATURES);
-
-               INIT_LIST_HEAD(&tun->disabled);
-               err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
-                                ifr->ifr_flags & IFF_NAPI_FRAGS, false);
-               if (err < 0)
-                       goto err_free_flow;
+               tun_net_initialize(dev);
 
                err = register_netdevice(tun->dev);
-               if (err < 0)
-                       goto err_detach;
+               if (err < 0) {
+                       free_netdev(dev);
+                       return err;
+               }
                /* free_netdev() won't check refcnt, to avoid race
                 * with dev_put() we need publish tun after registration.
                 */
@@ -2767,24 +2788,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
        strcpy(ifr->ifr_name, tun->dev->name);
        return 0;
-
-err_detach:
-       tun_detach_all(dev);
-       /* We are here because register_netdevice() has failed.
-        * If register_netdevice() already called tun_free_netdev()
-        * while dealing with the error, dev->stats has been cleared.
-        */
-       if (!dev->tstats)
-               goto err_free_dev;
-
-err_free_flow:
-       tun_flow_uninit(tun);
-       security_tun_dev_free_security(tun->security);
-err_free_stat:
-       free_percpu(dev->tstats);
-err_free_dev:
-       free_netdev(dev);
-       return err;
 }
 
 static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
index 42ba4af68090726eaf71750477b92b9bd16582ac..71682970be584ea2debf3ac1cfe132b9177f16e9 100644 (file)
@@ -9,6 +9,8 @@
 
 #include "asix.h"
 
+#define AX_HOST_EN_RETRIES     30
+
 int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
                  u16 size, void *data, int in_pm)
 {
@@ -68,7 +70,7 @@ static int asix_check_host_enable(struct usbnet *dev, int in_pm)
        int i, ret;
        u8 smsr;
 
-       for (i = 0; i < 30; ++i) {
+       for (i = 0; i < AX_HOST_EN_RETRIES; ++i) {
                ret = asix_set_sw_mii(dev, in_pm);
                if (ret == -ENODEV || ret == -ETIMEDOUT)
                        break;
@@ -77,13 +79,13 @@ static int asix_check_host_enable(struct usbnet *dev, int in_pm)
                                    0, 0, 1, &smsr, in_pm);
                if (ret == -ENODEV)
                        break;
-               else if (ret < 0)
+               else if (ret < sizeof(smsr))
                        continue;
                else if (smsr & AX_HOST_EN)
                        break;
        }
 
-       return ret;
+       return i >= AX_HOST_EN_RETRIES ? -ETIMEDOUT : ret;
 }
 
 static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx)
index 24753a4da7e606ef956d6fcb45d2da51d6f67f5b..e303b522efb50a4bb86443da3ffd20794e666734 100644 (file)
@@ -181,6 +181,8 @@ static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx)
                min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth32);
 
        max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
+       if (max == 0)
+               max = CDC_NCM_NTB_MAX_SIZE_TX; /* dwNtbOutMaxSize not set */
 
        /* some devices set dwNtbOutMaxSize too low for the above default */
        min = min(min, max);
index f20376c1ef3fb1f3cc924a7e06a52801aac7696a..075f8abde5cd7ff77208a9e648e73868c04642ef 100644 (file)
@@ -76,6 +76,8 @@
 #define LAN7801_USB_PRODUCT_ID         (0x7801)
 #define LAN78XX_EEPROM_MAGIC           (0x78A5)
 #define LAN78XX_OTP_MAGIC              (0x78F3)
+#define AT29M2AF_USB_VENDOR_ID         (0x07C9)
+#define AT29M2AF_USB_PRODUCT_ID        (0x0012)
 
 #define        MII_READ                        1
 #define        MII_WRITE                       0
@@ -2228,7 +2230,7 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
        if (dev->domain_data.phyirq > 0)
                phydev->irq = dev->domain_data.phyirq;
        else
-               phydev->irq = 0;
+               phydev->irq = PHY_POLL;
        netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
 
        /* set to AUTOMDIX */
@@ -4734,6 +4736,10 @@ static const struct usb_device_id products[] = {
        /* LAN7801 USB Gigabit Ethernet Device */
        USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
        },
+       {
+       /* ATM2-AF USB Gigabit Ethernet Device */
+       USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
+       },
        {},
 };
 MODULE_DEVICE_TABLE(usb, products);
index 86b814e99224c54ff99ee9b5c82fb73821486df1..f510e82194705d5ccc50e94606bc10acf4774006 100644 (file)
@@ -1358,6 +1358,7 @@ static const struct usb_device_id products[] = {
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
+       {QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */
        {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)},    /* Telit ME910 */
        {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},    /* Telit ME910 dual modem */
        {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
index f9877a3e83acf8194a923e5dbdb129f00da202b9..3085e8118d7fa7b7c16da711a76e968c4c6ce968 100644 (file)
@@ -32,7 +32,7 @@
 #define NETNEXT_VERSION                "12"
 
 /* Information for net */
-#define NET_VERSION            "11"
+#define NET_VERSION            "12"
 
 #define DRIVER_VERSION         "v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -4016,6 +4016,11 @@ static void rtl_clear_bp(struct r8152 *tp, u16 type)
        ocp_write_word(tp, type, PLA_BP_BA, 0);
 }
 
+static inline void rtl_reset_ocp_base(struct r8152 *tp)
+{
+       tp->ocp_base = -1;
+}
+
 static int rtl_phy_patch_request(struct r8152 *tp, bool request, bool wait)
 {
        u16 data, check;
@@ -4087,8 +4092,6 @@ static int rtl_post_ram_code(struct r8152 *tp, u16 key_addr, bool wait)
 
        rtl_phy_patch_request(tp, false, wait);
 
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, tp->ocp_base);
-
        return 0;
 }
 
@@ -4800,6 +4803,8 @@ static void rtl_ram_code_speed_up(struct r8152 *tp, struct fw_phy_speed_up *phy,
        u32 len;
        u8 *data;
 
+       rtl_reset_ocp_base(tp);
+
        if (sram_read(tp, SRAM_GPHY_FW_VER) >= __le16_to_cpu(phy->version)) {
                dev_dbg(&tp->intf->dev, "PHY firmware has been the newest\n");
                return;
@@ -4845,7 +4850,8 @@ static void rtl_ram_code_speed_up(struct r8152 *tp, struct fw_phy_speed_up *phy,
                }
        }
 
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, tp->ocp_base);
+       rtl_reset_ocp_base(tp);
+
        rtl_phy_patch_request(tp, false, wait);
 
        if (sram_read(tp, SRAM_GPHY_FW_VER) == __le16_to_cpu(phy->version))
@@ -4861,6 +4867,8 @@ static int rtl8152_fw_phy_ver(struct r8152 *tp, struct fw_phy_ver *phy_ver)
        ver_addr = __le16_to_cpu(phy_ver->ver.addr);
        ver = __le16_to_cpu(phy_ver->ver.data);
 
+       rtl_reset_ocp_base(tp);
+
        if (sram_read(tp, ver_addr) >= ver) {
                dev_dbg(&tp->intf->dev, "PHY firmware has been the newest\n");
                return 0;
@@ -4877,6 +4885,8 @@ static void rtl8152_fw_phy_fixup(struct r8152 *tp, struct fw_phy_fixup *fix)
 {
        u16 addr, data;
 
+       rtl_reset_ocp_base(tp);
+
        addr = __le16_to_cpu(fix->setting.addr);
        data = ocp_reg_read(tp, addr);
 
@@ -4908,6 +4918,8 @@ static void rtl8152_fw_phy_union_apply(struct r8152 *tp, struct fw_phy_union *ph
        u32 length;
        int i, num;
 
+       rtl_reset_ocp_base(tp);
+
        num = phy->pre_num;
        for (i = 0; i < num; i++)
                sram_write(tp, __le16_to_cpu(phy->pre_set[i].addr),
@@ -4938,6 +4950,8 @@ static void rtl8152_fw_phy_nc_apply(struct r8152 *tp, struct fw_phy_nc *phy)
        u32 length, i, num;
        __le16 *data;
 
+       rtl_reset_ocp_base(tp);
+
        mode_reg = __le16_to_cpu(phy->mode_reg);
        sram_write(tp, mode_reg, __le16_to_cpu(phy->mode_pre));
        sram_write(tp, __le16_to_cpu(phy->ba_reg),
@@ -5107,6 +5121,7 @@ post_fw:
        if (rtl_fw->post_fw)
                rtl_fw->post_fw(tp);
 
+       rtl_reset_ocp_base(tp);
        strscpy(rtl_fw->version, fw_hdr->version, RTL_VER_SIZE);
        dev_info(&tp->intf->dev, "load %s successfully\n", rtl_fw->version);
 }
@@ -6584,6 +6599,21 @@ static bool rtl8153_in_nway(struct r8152 *tp)
                return true;
 }
 
+static void r8156_mdio_force_mode(struct r8152 *tp)
+{
+       u16 data;
+
+       /* Select force mode through 0xa5b4 bit 15
+        * 0: MDIO force mode
+        * 1: MMD force mode
+        */
+       data = ocp_reg_read(tp, 0xa5b4);
+       if (data & BIT(15)) {
+               data &= ~BIT(15);
+               ocp_reg_write(tp, 0xa5b4, data);
+       }
+}
+
 static void set_carrier(struct r8152 *tp)
 {
        struct net_device *netdev = tp->netdev;
@@ -8016,6 +8046,7 @@ static void r8156_init(struct r8152 *tp)
        ocp_data |= ACT_ODMA;
        ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_CONFIG, ocp_data);
 
+       r8156_mdio_force_mode(tp);
        rtl_tally_reset(tp);
 
        tp->coalesce = 15000;   /* 15 us */
@@ -8145,6 +8176,7 @@ static void r8156b_init(struct r8152 *tp)
        ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
        ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
 
+       r8156_mdio_force_mode(tp);
        rtl_tally_reset(tp);
 
        tp->coalesce = 15000;   /* 15 us */
@@ -8467,6 +8499,8 @@ static int rtl8152_resume(struct usb_interface *intf)
 
        mutex_lock(&tp->control);
 
+       rtl_reset_ocp_base(tp);
+
        if (test_bit(SELECTIVE_SUSPEND, &tp->flags))
                ret = rtl8152_runtime_resume(tp);
        else
@@ -8482,6 +8516,7 @@ static int rtl8152_reset_resume(struct usb_interface *intf)
        struct r8152 *tp = usb_get_intfdata(intf);
 
        clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+       rtl_reset_ocp_base(tp);
        tp->rtl_ops.init(tp);
        queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
        set_ethernet_addr(tp, true);
index 50eb43e5bf459bb998e264d399bc85d4e9d73594..2acdb8ad6c7136827ac150e1f732cf50714f1286 100644 (file)
@@ -879,8 +879,12 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget,
 
                        stats->xdp_bytes += skb->len;
                        skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
-                       if (skb)
-                               napi_gro_receive(&rq->xdp_napi, skb);
+                       if (skb) {
+                               if (skb_shared(skb) || skb_unclone(skb, GFP_ATOMIC))
+                                       netif_receive_skb(skb);
+                               else
+                                       napi_gro_receive(&rq->xdp_napi, skb);
+                       }
                }
                done++;
        }
index 55db6a336f7ead862ab98ab99e4518a7d2b3b87f..b107835242ade6c2f7411b2a019e44a9b0c85748 100644 (file)
@@ -733,7 +733,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
                pr_debug("%s: rx error: len %u exceeds max size %d\n",
                         dev->name, len, GOOD_PACKET_LEN);
                dev->stats.rx_length_errors++;
-               goto err_len;
+               goto err;
        }
 
        if (likely(!vi->xdp_enabled)) {
@@ -825,10 +825,8 @@ static struct sk_buff *receive_small(struct net_device *dev,
 
 skip_xdp:
        skb = build_skb(buf, buflen);
-       if (!skb) {
-               put_page(page);
+       if (!skb)
                goto err;
-       }
        skb_reserve(skb, headroom - delta);
        skb_put(skb, len);
        if (!xdp_prog) {
@@ -839,13 +837,12 @@ skip_xdp:
        if (metasize)
                skb_metadata_set(skb, metasize);
 
-err:
        return skb;
 
 err_xdp:
        rcu_read_unlock();
        stats->xdp_drops++;
-err_len:
+err:
        stats->drops++;
        put_page(page);
 xdp_xmit:
index 14fae317bc70f738be004ddc31be61ce6f95974c..fd407c0e28569b171d4234c0d7f4b6ef04c6198a 100644 (file)
@@ -3261,7 +3261,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
 
 #ifdef CONFIG_PCI_MSI
        if (adapter->intr.type == VMXNET3_IT_MSIX) {
-               int i, nvec;
+               int i, nvec, nvec_allocated;
 
                nvec  = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
                        1 : adapter->num_tx_queues;
@@ -3274,14 +3274,15 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
                for (i = 0; i < nvec; i++)
                        adapter->intr.msix_entries[i].entry = i;
 
-               nvec = vmxnet3_acquire_msix_vectors(adapter, nvec);
-               if (nvec < 0)
+               nvec_allocated = vmxnet3_acquire_msix_vectors(adapter, nvec);
+               if (nvec_allocated < 0)
                        goto msix_err;
 
                /* If we cannot allocate one MSIx vector per queue
                 * then limit the number of rx queues to 1
                 */
-               if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) {
+               if (nvec_allocated == VMXNET3_LINUX_MIN_MSIX_VECT &&
+                   nvec != VMXNET3_LINUX_MIN_MSIX_VECT) {
                        if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
                            || adapter->num_rx_queues != 1) {
                                adapter->share_intr = VMXNET3_INTR_TXSHARE;
@@ -3291,14 +3292,14 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
                        }
                }
 
-               adapter->intr.num_intrs = nvec;
+               adapter->intr.num_intrs = nvec_allocated;
                return;
 
 msix_err:
                /* If we cannot allocate MSIx vectors use only one rx queue */
                dev_info(&adapter->pdev->dev,
                         "Failed to enable MSI-X, error %d. "
-                        "Limiting #rx queues to 1, try MSI.\n", nvec);
+                        "Limiting #rx queues to 1, try MSI.\n", nvec_allocated);
 
                adapter->intr.type = VMXNET3_IT_MSI;
        }
index ccf677015d5bc7f7a93d42f1c71571eb08b38e0f..b2242a082431c2c7621a5fbc1f21fb750c7e2da5 100644 (file)
@@ -497,6 +497,7 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
        /* strip the ethernet header added for pass through VRF device */
        __skb_pull(skb, skb_network_offset(skb));
 
+       memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
        ret = vrf_ip6_local_out(net, skb->sk, skb);
        if (unlikely(net_xmit_eval(ret)))
                dev->stats.tx_errors++;
@@ -579,6 +580,7 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
                                               RT_SCOPE_LINK);
        }
 
+       memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
        ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
        if (unlikely(net_xmit_eval(ret)))
                vrf_dev->stats.tx_errors++;
@@ -768,8 +770,6 @@ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
 
        skb->dev = vrf_dev;
 
-       vrf_nf_set_untracked(skb);
-
        err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
                      skb, NULL, vrf_dev, vrf_ip6_out_direct_finish);
 
@@ -790,6 +790,8 @@ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
        if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
                return skb;
 
+       vrf_nf_set_untracked(skb);
+
        if (qdisc_tx_is_default(vrf_dev) ||
            IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
                return vrf_ip6_out_direct(vrf_dev, sk, skb);
@@ -998,8 +1000,6 @@ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
 
        skb->dev = vrf_dev;
 
-       vrf_nf_set_untracked(skb);
-
        err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
                      skb, NULL, vrf_dev, vrf_ip_out_direct_finish);
 
@@ -1021,6 +1021,8 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
            ipv4_is_lbcast(ip_hdr(skb)->daddr))
                return skb;
 
+       vrf_nf_set_untracked(skb);
+
        if (qdisc_tx_is_default(vrf_dev) ||
            IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
                return vrf_ip_out_direct(vrf_dev, sk, skb);
index b7197e80f2264053d4e4e28bdf69a33038335294..9a4c8ff32d9dd9407ec50591a44008570f4e7411 100644 (file)
@@ -163,7 +163,7 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
        return exact;
 }
 
-static inline void connect_node(struct allowedips_node **parent, u8 bit, struct allowedips_node *node)
+static inline void connect_node(struct allowedips_node __rcu **parent, u8 bit, struct allowedips_node *node)
 {
        node->parent_bit_packed = (unsigned long)parent | bit;
        rcu_assign_pointer(*parent, node);
index 551ddaaaf5400e6eb3e138853f0b4710e0a48901..a46067c38bf5def99accd4b885d3c3bd3095ad4b 100644 (file)
@@ -98,6 +98,7 @@ static int wg_stop(struct net_device *dev)
 {
        struct wg_device *wg = netdev_priv(dev);
        struct wg_peer *peer;
+       struct sk_buff *skb;
 
        mutex_lock(&wg->device_update_lock);
        list_for_each_entry(peer, &wg->peer_list, peer_list) {
@@ -108,7 +109,9 @@ static int wg_stop(struct net_device *dev)
                wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
        }
        mutex_unlock(&wg->device_update_lock);
-       skb_queue_purge(&wg->incoming_handshakes);
+       while ((skb = ptr_ring_consume(&wg->handshake_queue.ring)) != NULL)
+               kfree_skb(skb);
+       atomic_set(&wg->handshake_queue_len, 0);
        wg_socket_reinit(wg, NULL, NULL);
        return 0;
 }
@@ -235,14 +238,13 @@ static void wg_destruct(struct net_device *dev)
        destroy_workqueue(wg->handshake_receive_wq);
        destroy_workqueue(wg->handshake_send_wq);
        destroy_workqueue(wg->packet_crypt_wq);
-       wg_packet_queue_free(&wg->decrypt_queue);
-       wg_packet_queue_free(&wg->encrypt_queue);
+       wg_packet_queue_free(&wg->handshake_queue, true);
+       wg_packet_queue_free(&wg->decrypt_queue, false);
+       wg_packet_queue_free(&wg->encrypt_queue, false);
        rcu_barrier(); /* Wait for all the peers to be actually freed. */
        wg_ratelimiter_uninit();
        memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
-       skb_queue_purge(&wg->incoming_handshakes);
        free_percpu(dev->tstats);
-       free_percpu(wg->incoming_handshakes_worker);
        kvfree(wg->index_hashtable);
        kvfree(wg->peer_hashtable);
        mutex_unlock(&wg->device_update_lock);
@@ -298,7 +300,6 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
        init_rwsem(&wg->static_identity.lock);
        mutex_init(&wg->socket_update_lock);
        mutex_init(&wg->device_update_lock);
-       skb_queue_head_init(&wg->incoming_handshakes);
        wg_allowedips_init(&wg->peer_allowedips);
        wg_cookie_checker_init(&wg->cookie_checker, wg);
        INIT_LIST_HEAD(&wg->peer_list);
@@ -316,16 +317,10 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
        if (!dev->tstats)
                goto err_free_index_hashtable;
 
-       wg->incoming_handshakes_worker =
-               wg_packet_percpu_multicore_worker_alloc(
-                               wg_packet_handshake_receive_worker, wg);
-       if (!wg->incoming_handshakes_worker)
-               goto err_free_tstats;
-
        wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s",
                        WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name);
        if (!wg->handshake_receive_wq)
-               goto err_free_incoming_handshakes;
+               goto err_free_tstats;
 
        wg->handshake_send_wq = alloc_workqueue("wg-kex-%s",
                        WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name);
@@ -347,10 +342,15 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
        if (ret < 0)
                goto err_free_encrypt_queue;
 
-       ret = wg_ratelimiter_init();
+       ret = wg_packet_queue_init(&wg->handshake_queue, wg_packet_handshake_receive_worker,
+                                  MAX_QUEUED_INCOMING_HANDSHAKES);
        if (ret < 0)
                goto err_free_decrypt_queue;
 
+       ret = wg_ratelimiter_init();
+       if (ret < 0)
+               goto err_free_handshake_queue;
+
        ret = register_netdevice(dev);
        if (ret < 0)
                goto err_uninit_ratelimiter;
@@ -367,18 +367,18 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
 
 err_uninit_ratelimiter:
        wg_ratelimiter_uninit();
+err_free_handshake_queue:
+       wg_packet_queue_free(&wg->handshake_queue, false);
 err_free_decrypt_queue:
-       wg_packet_queue_free(&wg->decrypt_queue);
+       wg_packet_queue_free(&wg->decrypt_queue, false);
 err_free_encrypt_queue:
-       wg_packet_queue_free(&wg->encrypt_queue);
+       wg_packet_queue_free(&wg->encrypt_queue, false);
 err_destroy_packet_crypt:
        destroy_workqueue(wg->packet_crypt_wq);
 err_destroy_handshake_send:
        destroy_workqueue(wg->handshake_send_wq);
 err_destroy_handshake_receive:
        destroy_workqueue(wg->handshake_receive_wq);
-err_free_incoming_handshakes:
-       free_percpu(wg->incoming_handshakes_worker);
 err_free_tstats:
        free_percpu(dev->tstats);
 err_free_index_hashtable:
@@ -398,6 +398,7 @@ static struct rtnl_link_ops link_ops __read_mostly = {
 static void wg_netns_pre_exit(struct net *net)
 {
        struct wg_device *wg;
+       struct wg_peer *peer;
 
        rtnl_lock();
        list_for_each_entry(wg, &device_list, device_list) {
@@ -407,6 +408,8 @@ static void wg_netns_pre_exit(struct net *net)
                        mutex_lock(&wg->device_update_lock);
                        rcu_assign_pointer(wg->creating_net, NULL);
                        wg_socket_reinit(wg, NULL, NULL);
+                       list_for_each_entry(peer, &wg->peer_list, peer_list)
+                               wg_socket_clear_peer_endpoint_src(peer);
                        mutex_unlock(&wg->device_update_lock);
                }
        }
index 854bc3d97150e1c1dab3befbe64966add4f65746..43c7cebbf50b08f2a1868f0017d0bee8aee700f8 100644 (file)
@@ -39,21 +39,18 @@ struct prev_queue {
 
 struct wg_device {
        struct net_device *dev;
-       struct crypt_queue encrypt_queue, decrypt_queue;
+       struct crypt_queue encrypt_queue, decrypt_queue, handshake_queue;
        struct sock __rcu *sock4, *sock6;
        struct net __rcu *creating_net;
        struct noise_static_identity static_identity;
-       struct workqueue_struct *handshake_receive_wq, *handshake_send_wq;
-       struct workqueue_struct *packet_crypt_wq;
-       struct sk_buff_head incoming_handshakes;
-       int incoming_handshake_cpu;
-       struct multicore_worker __percpu *incoming_handshakes_worker;
+       struct workqueue_struct *packet_crypt_wq,*handshake_receive_wq, *handshake_send_wq;
        struct cookie_checker cookie_checker;
        struct pubkey_hashtable *peer_hashtable;
        struct index_hashtable *index_hashtable;
        struct allowedips peer_allowedips;
        struct mutex device_update_lock, socket_update_lock;
        struct list_head device_list, peer_list;
+       atomic_t handshake_queue_len;
        unsigned int num_peers, device_update_gen;
        u32 fwmark;
        u16 incoming_port;
index 75dbe77b0b4b4aeacbc75d77524108248b37a2fa..ee4da9ab8013c3ad2721e0e1d4432b2fe007886b 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/genetlink.h>
 #include <net/rtnetlink.h>
 
-static int __init mod_init(void)
+static int __init wg_mod_init(void)
 {
        int ret;
 
@@ -60,7 +60,7 @@ err_allowedips:
        return ret;
 }
 
-static void __exit mod_exit(void)
+static void __exit wg_mod_exit(void)
 {
        wg_genetlink_uninit();
        wg_device_uninit();
@@ -68,8 +68,8 @@ static void __exit mod_exit(void)
        wg_allowedips_slab_uninit();
 }
 
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(wg_mod_init);
+module_exit(wg_mod_exit);
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("WireGuard secure network tunnel");
 MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
index 48e7b982a30736bc147712ce52957517e3e862af..1de413b19e3424a2ace2edcbcf0d0d49c4be6167 100644 (file)
@@ -38,11 +38,11 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
        return 0;
 }
 
-void wg_packet_queue_free(struct crypt_queue *queue)
+void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
 {
        free_percpu(queue->worker);
-       WARN_ON(!__ptr_ring_empty(&queue->ring));
-       ptr_ring_cleanup(&queue->ring, NULL);
+       WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
+       ptr_ring_cleanup(&queue->ring, purge ? (void(*)(void*))kfree_skb : NULL);
 }
 
 #define NEXT(skb) ((skb)->prev)
index 4ef2944a68bc906ebec5167d1e17e281ea67be61..e2388107f7fdc9c040841adfc164459e0c777d7d 100644 (file)
@@ -23,7 +23,7 @@ struct sk_buff;
 /* queueing.c APIs: */
 int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
                         unsigned int len);
-void wg_packet_queue_free(struct crypt_queue *queue);
+void wg_packet_queue_free(struct crypt_queue *queue, bool purge);
 struct multicore_worker __percpu *
 wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);
 
index 3fedd1d21f5ee019917a7280294cfc7398e65b11..dd55e5c26f468f71518cc5956f4b84480af5d9c8 100644 (file)
@@ -176,12 +176,12 @@ int wg_ratelimiter_init(void)
                        (1U << 14) / sizeof(struct hlist_head)));
        max_entries = table_size * 8;
 
-       table_v4 = kvzalloc(table_size * sizeof(*table_v4), GFP_KERNEL);
+       table_v4 = kvcalloc(table_size, sizeof(*table_v4), GFP_KERNEL);
        if (unlikely(!table_v4))
                goto err_kmemcache;
 
 #if IS_ENABLED(CONFIG_IPV6)
-       table_v6 = kvzalloc(table_size * sizeof(*table_v6), GFP_KERNEL);
+       table_v6 = kvcalloc(table_size, sizeof(*table_v6), GFP_KERNEL);
        if (unlikely(!table_v6)) {
                kvfree(table_v4);
                goto err_kmemcache;
index 7dc84bcca26139991be00759c0228d3126df2671..7b8df406c7737398f0270361afcb196af4b6a76e 100644 (file)
@@ -116,8 +116,8 @@ static void wg_receive_handshake_packet(struct wg_device *wg,
                return;
        }
 
-       under_load = skb_queue_len(&wg->incoming_handshakes) >=
-                    MAX_QUEUED_INCOMING_HANDSHAKES / 8;
+       under_load = atomic_read(&wg->handshake_queue_len) >=
+                       MAX_QUEUED_INCOMING_HANDSHAKES / 8;
        if (under_load) {
                last_under_load = ktime_get_coarse_boottime_ns();
        } else if (last_under_load) {
@@ -212,13 +212,14 @@ static void wg_receive_handshake_packet(struct wg_device *wg,
 
 void wg_packet_handshake_receive_worker(struct work_struct *work)
 {
-       struct wg_device *wg = container_of(work, struct multicore_worker,
-                                           work)->ptr;
+       struct crypt_queue *queue = container_of(work, struct multicore_worker, work)->ptr;
+       struct wg_device *wg = container_of(queue, struct wg_device, handshake_queue);
        struct sk_buff *skb;
 
-       while ((skb = skb_dequeue(&wg->incoming_handshakes)) != NULL) {
+       while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
                wg_receive_handshake_packet(wg, skb);
                dev_kfree_skb(skb);
+               atomic_dec(&wg->handshake_queue_len);
                cond_resched();
        }
 }
@@ -553,22 +554,28 @@ void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb)
        case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION):
        case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE):
        case cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE): {
-               int cpu;
-
-               if (skb_queue_len(&wg->incoming_handshakes) >
-                           MAX_QUEUED_INCOMING_HANDSHAKES ||
-                   unlikely(!rng_is_initialized())) {
+               int cpu, ret = -EBUSY;
+
+               if (unlikely(!rng_is_initialized()))
+                       goto drop;
+               if (atomic_read(&wg->handshake_queue_len) > MAX_QUEUED_INCOMING_HANDSHAKES / 2) {
+                       if (spin_trylock_bh(&wg->handshake_queue.ring.producer_lock)) {
+                               ret = __ptr_ring_produce(&wg->handshake_queue.ring, skb);
+                               spin_unlock_bh(&wg->handshake_queue.ring.producer_lock);
+                       }
+               } else
+                       ret = ptr_ring_produce_bh(&wg->handshake_queue.ring, skb);
+               if (ret) {
+       drop:
                        net_dbg_skb_ratelimited("%s: Dropping handshake packet from %pISpfsc\n",
                                                wg->dev->name, skb);
                        goto err;
                }
-               skb_queue_tail(&wg->incoming_handshakes, skb);
-               /* Queues up a call to packet_process_queued_handshake_
-                * packets(skb):
-                */
-               cpu = wg_cpumask_next_online(&wg->incoming_handshake_cpu);
+               atomic_inc(&wg->handshake_queue_len);
+               cpu = wg_cpumask_next_online(&wg->handshake_queue.last_cpu);
+               /* Queues up a call to packet_process_queued_handshake_packets(skb): */
                queue_work_on(cpu, wg->handshake_receive_wq,
-                       &per_cpu_ptr(wg->incoming_handshakes_worker, cpu)->work);
+                             &per_cpu_ptr(wg->handshake_queue.worker, cpu)->work);
                break;
        }
        case cpu_to_le32(MESSAGE_DATA):
index 8c496b7471082eb6c093154d7a05662718f9999c..6f07b949cb81d037842934d6836f3c8b79e4d0ed 100644 (file)
@@ -308,7 +308,7 @@ void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer)
 {
        write_lock_bh(&peer->endpoint_lock);
        memset(&peer->endpoint.src6, 0, sizeof(peer->endpoint.src6));
-       dst_cache_reset(&peer->endpoint_cache);
+       dst_cache_reset_now(&peer->endpoint_cache);
        write_unlock_bh(&peer->endpoint_lock);
 }
 
index 26c7ae242db67bd41f88c69f30f106b9a8bf577c..49c0b1ad40a02d4abcb18874dd1d3ca9e6cbc5ad 100644 (file)
@@ -533,7 +533,11 @@ static int ath11k_mhi_set_state(struct ath11k_pci *ab_pci,
                ret = mhi_pm_suspend(ab_pci->mhi_ctrl);
                break;
        case ATH11K_MHI_RESUME:
-               ret = mhi_pm_resume(ab_pci->mhi_ctrl);
+               /* Do force MHI resume as some devices like QCA6390, WCN6855
+                * are not in M3 state but they are functional. So just ignore
+                * the MHI state while resuming.
+                */
+               ret = mhi_pm_resume_force(ab_pci->mhi_ctrl);
                break;
        case ATH11K_MHI_TRIGGER_RDDM:
                ret = mhi_force_rddm_mode(ab_pci->mhi_ctrl);
index 5bf2318763c55b5a11a7d2cebc3114ffd61a6373..3a1a35b5672f1a27911a287579a7ce737a157f0e 100644 (file)
@@ -7,16 +7,20 @@ config BRCMSMAC
        depends on MAC80211
        depends on BCMA_POSSIBLE
        select BCMA
-       select NEW_LEDS if BCMA_DRIVER_GPIO
-       select LEDS_CLASS if BCMA_DRIVER_GPIO
        select BRCMUTIL
        select FW_LOADER
        select CORDIC
        help
          This module adds support for PCIe wireless adapters based on Broadcom
-         IEEE802.11n SoftMAC chipsets. It also has WLAN led support, which will
-         be available if you select BCMA_DRIVER_GPIO. If you choose to build a
-         module, the driver will be called brcmsmac.ko.
+         IEEE802.11n SoftMAC chipsets. If you choose to build a module, the
+         driver will be called brcmsmac.ko.
+
+config BRCMSMAC_LEDS
+       def_bool BRCMSMAC && BCMA_DRIVER_GPIO && MAC80211_LEDS
+       help
+         The brcmsmac LED support depends on the presence of the
+         BCMA_DRIVER_GPIO driver, and it only works if LED support
+         is enabled and reachable from the driver module.
 
 source "drivers/net/wireless/broadcom/brcm80211/brcmfmac/Kconfig"
 
index 482d7737764da76c4ae253641f0d30408dbe28ec..090757730ba60db0708afb5093ddb4cc90ce8e00 100644 (file)
@@ -42,6 +42,6 @@ brcmsmac-y := \
        brcms_trace_events.o \
        debug.o
 
-brcmsmac-$(CONFIG_BCMA_DRIVER_GPIO) += led.o
+brcmsmac-$(CONFIG_BRCMSMAC_LEDS) += led.o
 
 obj-$(CONFIG_BRCMSMAC) += brcmsmac.o
index d65f5c268fd77490a3b2a7d573fe229542408f06..2a5cbeb9e783122b00266b809b3b8853815d246e 100644 (file)
@@ -24,7 +24,7 @@ struct brcms_led {
        struct gpio_desc *gpiod;
 };
 
-#ifdef CONFIG_BCMA_DRIVER_GPIO
+#ifdef CONFIG_BRCMSMAC_LEDS
 void brcms_led_unregister(struct brcms_info *wl);
 int brcms_led_register(struct brcms_info *wl);
 #else
index 24fe3f63c3215aca38913279318a77abb3001431..7eacc8e58ee1440be729eb07fc5f82270d0e4773 100644 (file)
@@ -2,14 +2,13 @@
 config IWLEGACY
        tristate
        select FW_LOADER
-       select NEW_LEDS
-       select LEDS_CLASS
        select LEDS_TRIGGERS
        select MAC80211_LEDS
 
 config IWL4965
        tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
        depends on PCI && MAC80211
+       depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211
        select IWLEGACY
        help
          This option enables support for
@@ -38,6 +37,7 @@ config IWL4965
 config IWL3945
        tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
        depends on PCI && MAC80211
+       depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211
        select IWLEGACY
        help
          Select to build the driver supporting the:
index 1085afbefba873f792b0ee38dba15c4a7a358795..418ae4f870ab7086a773e78a9bb2f14c1485c08e 100644 (file)
@@ -47,7 +47,7 @@ if IWLWIFI
 
 config IWLWIFI_LEDS
        bool
-       depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI
+       depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211
        depends on IWLMVM || IWLDVM
        select LEDS_TRIGGERS
        select MAC80211_LEDS
index c875bf35533ce4b8126f88e78ae9bb271d951129..009dd4be597b0c8c096582eeb876cbcb30ba658c 100644 (file)
@@ -86,6 +86,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
                if (len < tlv_len) {
                        IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
                                len, tlv_len);
+                       kfree(reduce_power_data);
                        reduce_power_data = ERR_PTR(-EINVAL);
                        goto out;
                }
@@ -105,6 +106,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
                                IWL_DEBUG_FW(trans,
                                             "Couldn't allocate (more) reduce_power_data\n");
 
+                               kfree(reduce_power_data);
                                reduce_power_data = ERR_PTR(-ENOMEM);
                                goto out;
                        }
@@ -134,6 +136,10 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
 done:
        if (!size) {
                IWL_DEBUG_FW(trans, "Empty REDUCE_POWER, skipping.\n");
+               /* Better safe than sorry, but 'reduce_power_data' should
+                * always be NULL if !size.
+                */
+               kfree(reduce_power_data);
                reduce_power_data = ERR_PTR(-ENOENT);
                goto out;
        }
index 36196e07b1a04597d61b5cadca7f7d0d927fc432..5cec467b995bb665f340d95b6aa33f10436ab0e0 100644 (file)
@@ -1313,23 +1313,31 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
        const struct iwl_op_mode_ops *ops = op->ops;
        struct dentry *dbgfs_dir = NULL;
        struct iwl_op_mode *op_mode = NULL;
+       int retry, max_retry = !!iwlwifi_mod_params.fw_restart * IWL_MAX_INIT_RETRY;
+
+       for (retry = 0; retry <= max_retry; retry++) {
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
-       drv->dbgfs_op_mode = debugfs_create_dir(op->name,
-                                               drv->dbgfs_drv);
-       dbgfs_dir = drv->dbgfs_op_mode;
+               drv->dbgfs_op_mode = debugfs_create_dir(op->name,
+                                                       drv->dbgfs_drv);
+               dbgfs_dir = drv->dbgfs_op_mode;
 #endif
 
-       op_mode = ops->start(drv->trans, drv->trans->cfg, &drv->fw, dbgfs_dir);
+               op_mode = ops->start(drv->trans, drv->trans->cfg,
+                                    &drv->fw, dbgfs_dir);
+
+               if (op_mode)
+                       return op_mode;
+
+               IWL_ERR(drv, "retry init count %d\n", retry);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
-       if (!op_mode) {
                debugfs_remove_recursive(drv->dbgfs_op_mode);
                drv->dbgfs_op_mode = NULL;
-       }
 #endif
+       }
 
-       return op_mode;
+       return NULL;
 }
 
 static void _iwl_op_mode_stop(struct iwl_drv *drv)
index 2e2d60a586925d8a03714ab5919344da50ae4438..0fd009e6d6857f5939e73356c9f73a46001fe4d5 100644 (file)
@@ -89,4 +89,7 @@ void iwl_drv_stop(struct iwl_drv *drv);
 #define IWL_EXPORT_SYMBOL(sym)
 #endif
 
+/* max retry for init flow */
+#define IWL_MAX_INIT_RETRY 2
+
 #endif /* __iwl_drv_h__ */
index 9fb9c7dad314f1bcbbf6e4f67645befdb3d31d08..897e3b91ddb2fec9e3d2f85842fbcc85f44281ca 100644 (file)
@@ -16,6 +16,7 @@
 #include <net/ieee80211_radiotap.h>
 #include <net/tcp.h>
 
+#include "iwl-drv.h"
 #include "iwl-op-mode.h"
 #include "iwl-io.h"
 #include "mvm.h"
@@ -1117,9 +1118,30 @@ static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        int ret;
+       int retry, max_retry = 0;
 
        mutex_lock(&mvm->mutex);
-       ret = __iwl_mvm_mac_start(mvm);
+
+       /* we are starting the mac not in error flow, and restart is enabled */
+       if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) &&
+           iwlwifi_mod_params.fw_restart) {
+               max_retry = IWL_MAX_INIT_RETRY;
+               /*
+                * This will prevent mac80211 recovery flows to trigger during
+                * init failures
+                */
+               set_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
+       }
+
+       for (retry = 0; retry <= max_retry; retry++) {
+               ret = __iwl_mvm_mac_start(mvm);
+               if (!ret)
+                       break;
+
+               IWL_ERR(mvm, "mac start retry %d\n", retry);
+       }
+       clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
+
        mutex_unlock(&mvm->mutex);
 
        return ret;
index 2b1dcd60e00f65598d77712b042493247b4a2b91..a72d85086fe331cb256a2e88c88293a5e4b55bf0 100644 (file)
@@ -1123,6 +1123,8 @@ struct iwl_mvm {
  * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
  * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
  * @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it)
+ * @IWL_MVM_STATUS_STARTING: starting mac,
+ *     used to disable restart flow while in STARTING state
  */
 enum iwl_mvm_status {
        IWL_MVM_STATUS_HW_RFKILL,
@@ -1134,6 +1136,7 @@ enum iwl_mvm_status {
        IWL_MVM_STATUS_FIRMWARE_RUNNING,
        IWL_MVM_STATUS_NEED_FLUSH_P2P,
        IWL_MVM_STATUS_IN_D3,
+       IWL_MVM_STATUS_STARTING,
 };
 
 /* Keep track of completed init configuration */
index 232ad531d612a2d2b4e85fc7a8558919b119ad8d..cd08e289cd9a0bf8c3746e1734ea72664277074b 100644 (file)
@@ -686,6 +686,7 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
        int ret;
 
        rtnl_lock();
+       wiphy_lock(mvm->hw->wiphy);
        mutex_lock(&mvm->mutex);
 
        ret = iwl_run_init_mvm_ucode(mvm);
@@ -701,6 +702,7 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
                iwl_mvm_stop_device(mvm);
 
        mutex_unlock(&mvm->mutex);
+       wiphy_unlock(mvm->hw->wiphy);
        rtnl_unlock();
 
        if (ret < 0)
@@ -1600,6 +1602,9 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
         */
        if (!mvm->fw_restart && fw_error) {
                iwl_fw_error_collect(&mvm->fwrt, false);
+       } else if (test_bit(IWL_MVM_STATUS_STARTING,
+                           &mvm->status)) {
+               IWL_ERR(mvm, "Starting mac, retry will be triggered anyway\n");
        } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
                struct iwl_mvm_reprobe *reprobe;
 
index bdd4ee43254835e05d1a8dd18cd67d9c4acbb507..76e0b7b45980d779f19b0a969ed7a2162c583c76 100644 (file)
@@ -269,17 +269,18 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
        u8 rate_plcp;
        u32 rate_flags = 0;
        bool is_cck;
-       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
        /* info->control is only relevant for non HW rate control */
        if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) {
+               struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
                /* HT rate doesn't make sense for a non data frame */
                WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS &&
                          !ieee80211_is_data(fc),
                          "Got a HT rate (flags:0x%x/mcs:%d/fc:0x%x/state:%d) for a non data frame\n",
                          info->control.rates[0].flags,
                          info->control.rates[0].idx,
-                         le16_to_cpu(fc), mvmsta->sta_state);
+                         le16_to_cpu(fc), sta ? mvmsta->sta_state : -1);
 
                rate_idx = info->control.rates[0].idx;
        }
index c574f041f0969268132a1fe3f68b347bea3b903a..5ce07f28e7c33e853768581f851e3b72e9bd715d 100644 (file)
@@ -1339,9 +1339,13 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
                      u16 mac_type, u8 mac_step,
                      u16 rf_type, u8 cdb, u8 rf_id, u8 no_160, u8 cores)
 {
+       int num_devices = ARRAY_SIZE(iwl_dev_info_table);
        int i;
 
-       for (i = ARRAY_SIZE(iwl_dev_info_table) - 1; i >= 0; i--) {
+       if (!num_devices)
+               return NULL;
+
+       for (i = num_devices - 1; i >= 0; i--) {
                const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i];
 
                if (dev_info->device != (u16)IWL_CFG_ANY &&
@@ -1442,8 +1446,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         */
        if (iwl_trans->trans_cfg->rf_id &&
            iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000 &&
-           !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && get_crf_id(iwl_trans))
+           !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && get_crf_id(iwl_trans)) {
+               ret = -EINVAL;
                goto out_free_trans;
+       }
 
        dev_info = iwl_pci_find_dev_info(pdev->device, pdev->subsystem_device,
                                         CSR_HW_REV_TYPE(iwl_trans->hw_rev),
index 79ab850a45a28eb81b997204ac3c485fb902e044..c78ae4b897619827148ba14cbd245483222fbb8c 100644 (file)
@@ -34,4 +34,4 @@ obj-$(CONFIG_MT76x2_COMMON) += mt76x2/
 obj-$(CONFIG_MT7603E) += mt7603/
 obj-$(CONFIG_MT7615_COMMON) += mt7615/
 obj-$(CONFIG_MT7915E) += mt7915/
-obj-$(CONFIG_MT7921E) += mt7921/
+obj-$(CONFIG_MT7921_COMMON) += mt7921/
index 5ee52cd70a4b45a1be50b518015a8e0943b71879..d1806f198aed99e866aa9683d8114eba2e92be5c 100644 (file)
@@ -143,8 +143,6 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
        if (!wcid)
                wcid = &dev->mt76.global_wcid;
 
-       pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
-
        if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && msta) {
                struct mt7615_phy *phy = &dev->phy;
 
@@ -164,6 +162,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
        if (id < 0)
                return id;
 
+       pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
        mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
                              pid, key, false);
 
index bd2939ebcbf4841a4265e162b0cfacdee11a7338..5a6d7829c6e04f7fd9c101e62fbc047d75486b6b 100644 (file)
@@ -43,19 +43,11 @@ EXPORT_SYMBOL_GPL(mt7663_usb_sdio_reg_map);
 static void
 mt7663_usb_sdio_write_txwi(struct mt7615_dev *dev, struct mt76_wcid *wcid,
                           enum mt76_txq_id qid, struct ieee80211_sta *sta,
+                          struct ieee80211_key_conf *key, int pid,
                           struct sk_buff *skb)
 {
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_key_conf *key = info->control.hw_key;
-       __le32 *txwi;
-       int pid;
-
-       if (!wcid)
-               wcid = &dev->mt76.global_wcid;
-
-       pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
+       __le32 *txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE);
 
-       txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE);
        memset(txwi, 0, MT_USB_TXD_SIZE);
        mt7615_mac_write_txwi(dev, txwi, skb, wcid, sta, pid, key, false);
        skb_push(skb, MT_USB_TXD_SIZE);
@@ -194,10 +186,14 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
        struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
        struct sk_buff *skb = tx_info->skb;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_key_conf *key = info->control.hw_key;
        struct mt7615_sta *msta;
-       int pad;
+       int pad, err, pktid;
 
        msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL;
+       if (!wcid)
+               wcid = &dev->mt76.global_wcid;
+
        if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) &&
            msta && !msta->rate_probe) {
                /* request to configure sampling rate */
@@ -207,7 +203,8 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
                spin_unlock_bh(&dev->mt76.lock);
        }
 
-       mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, skb);
+       pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
+       mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, key, pktid, skb);
        if (mt76_is_usb(mdev)) {
                u32 len = skb->len;
 
@@ -217,7 +214,12 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
                pad = round_up(skb->len, 4) - skb->len;
        }
 
-       return mt76_skb_adjust_pad(skb, pad);
+       err = mt76_skb_adjust_pad(skb, pad);
+       if (err)
+               /* Release pktid in case of error. */
+               idr_remove(&wcid->pktid, pktid);
+
+       return err;
 }
 EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_prepare_skb);
 
index efd70ddc2fd109b754df8e8c12c71c5b2403f9d0..2c6c03809b20eb628b99a22bb851ff07f14be2a6 100644 (file)
@@ -72,6 +72,7 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
        bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU;
        enum mt76_qsel qsel;
        u32 flags;
+       int err;
 
        mt76_insert_hdr_pad(tx_info->skb);
 
@@ -106,7 +107,12 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
                ewma_pktlen_add(&msta->pktlen, tx_info->skb->len);
        }
 
-       return mt76x02u_skb_dma_info(tx_info->skb, WLAN_PORT, flags);
+       err = mt76x02u_skb_dma_info(tx_info->skb, WLAN_PORT, flags);
+       if (err && wcid)
+               /* Release pktid in case of error. */
+               idr_remove(&wcid->pktid, pid);
+
+       return err;
 }
 EXPORT_SYMBOL_GPL(mt76x02u_tx_prepare_skb);
 
index 5fcf35f2d9fbe42760c7ff28b53c3a96f68d62f8..809dc18e5083c21206447e4b8f4e811fd0c3c41f 100644 (file)
@@ -1151,8 +1151,14 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
                }
        }
 
-       pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+       t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
+       t->skb = tx_info->skb;
+
+       id = mt76_token_consume(mdev, &t);
+       if (id < 0)
+               return id;
 
+       pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
        mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid, key,
                              false);
 
@@ -1178,13 +1184,6 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
                txp->bss_idx = mvif->idx;
        }
 
-       t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
-       t->skb = tx_info->skb;
-
-       id = mt76_token_consume(mdev, &t);
-       if (id < 0)
-               return id;
-
        txp->token = cpu_to_le16(id);
        if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
                txp->rept_wds_wcid = cpu_to_le16(wcid->idx);
index 899957b9d0f19c6c11ccb3f0c52654d49e3dec50..852d5d97c70b1517902af6177b5f16a0c0a98698 100644 (file)
@@ -176,7 +176,7 @@ mt7915_get_phy_mode(struct ieee80211_vif *vif, struct ieee80211_sta *sta)
                if (ht_cap->ht_supported)
                        mode |= PHY_MODE_GN;
 
-               if (he_cap->has_he)
+               if (he_cap && he_cap->has_he)
                        mode |= PHY_MODE_AX_24G;
        } else if (band == NL80211_BAND_5GHZ) {
                mode |= PHY_MODE_A;
@@ -187,7 +187,7 @@ mt7915_get_phy_mode(struct ieee80211_vif *vif, struct ieee80211_sta *sta)
                if (vht_cap->vht_supported)
                        mode |= PHY_MODE_AC;
 
-               if (he_cap->has_he)
+               if (he_cap && he_cap->has_he)
                        mode |= PHY_MODE_AX_5G;
        }
 
index 137f86a6dbf875d3122c23b2a26e670843a16db4..bdec508b6b9ffa00a65875df4de56259ad5be02e 100644 (file)
@@ -142,15 +142,11 @@ out:
 static void
 mt7921s_write_txwi(struct mt7921_dev *dev, struct mt76_wcid *wcid,
                   enum mt76_txq_id qid, struct ieee80211_sta *sta,
+                  struct ieee80211_key_conf *key, int pid,
                   struct sk_buff *skb)
 {
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_key_conf *key = info->control.hw_key;
-       __le32 *txwi;
-       int pid;
+       __le32 *txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE);
 
-       pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
-       txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE);
        memset(txwi, 0, MT_SDIO_TXD_SIZE);
        mt7921_mac_write_txwi(dev, txwi, skb, wcid, key, pid, false);
        skb_push(skb, MT_SDIO_TXD_SIZE);
@@ -163,8 +159,9 @@ int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 {
        struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+       struct ieee80211_key_conf *key = info->control.hw_key;
        struct sk_buff *skb = tx_info->skb;
-       int pad;
+       int err, pad, pktid;
 
        if (unlikely(tx_info->skb->len <= ETH_HLEN))
                return -EINVAL;
@@ -181,12 +178,18 @@ int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
                }
        }
 
-       mt7921s_write_txwi(dev, wcid, qid, sta, skb);
+       pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
+       mt7921s_write_txwi(dev, wcid, qid, sta, key, pktid, skb);
 
        mt7921_skb_add_sdio_hdr(skb, MT7921_SDIO_DATA);
        pad = round_up(skb->len, 4) - skb->len;
 
-       return mt76_skb_adjust_pad(skb, pad);
+       err = mt76_skb_adjust_pad(skb, pad);
+       if (err)
+               /* Release pktid in case of error. */
+               idr_remove(&wcid->pktid, pktid);
+
+       return err;
 }
 
 void mt7921s_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
index 11719ef034d888482144f30dab21d0270e480ffc..6b8c9dc80542554f19a2232f4080bb5cd9b79da8 100644 (file)
@@ -173,7 +173,7 @@ mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
                        if (!(cb->flags & MT_TX_CB_DMA_DONE))
                                continue;
 
-                       if (!time_is_after_jiffies(cb->jiffies +
+                       if (time_is_after_jiffies(cb->jiffies +
                                                   MT_TX_STATUS_SKB_TIMEOUT))
                                continue;
                }
index e4473a5512415241d012f973862e12d122b3845e..74c3d8cb31002d0ec583fbbda66df4d9efcde6db 100644 (file)
@@ -25,6 +25,9 @@ static bool rt2x00usb_check_usb_error(struct rt2x00_dev *rt2x00dev, int status)
        if (status == -ENODEV || status == -ENOENT)
                return true;
 
+       if (!test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
+               return false;
+
        if (status == -EPROTO || status == -ETIMEDOUT)
                rt2x00dev->num_proto_errs++;
        else
index 212aaf577d3c5eca878c0793749f142720658a0f..65ef3dc9d061415acc9ce97cd11ebafb742b2b36 100644 (file)
@@ -91,7 +91,6 @@ static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
        info->section_num = GET_FW_HDR_SEC_NUM(fw);
        info->hdr_len = RTW89_FW_HDR_SIZE +
                        info->section_num * RTW89_FW_SECTION_HDR_SIZE;
-       SET_FW_HDR_PART_SIZE(fw, FWDL_SECTION_PER_PKT_LEN);
 
        bin = fw + info->hdr_len;
 
@@ -275,6 +274,7 @@ static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 l
        }
 
        skb_put_data(skb, fw, len);
+       SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN);
        rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
                                   H2C_CAT_MAC, H2C_CL_MAC_FWDL,
                                   H2C_FUNC_MAC_FWHDR_DL, len);
index 7ee0d932331075359d0af2f14ba2048822e0d531..36e8d0da6c1e78a0af948c8e4d5caaf4dd7e48a8 100644 (file)
@@ -282,8 +282,10 @@ struct rtw89_h2creg_sch_tx_en {
        le32_get_bits(*((__le32 *)(fwhdr) + 6), GENMASK(15, 8))
 #define GET_FW_HDR_CMD_VERSERION(fwhdr)        \
        le32_get_bits(*((__le32 *)(fwhdr) + 7), GENMASK(31, 24))
-#define SET_FW_HDR_PART_SIZE(fwhdr, val)       \
-       le32p_replace_bits((__le32 *)(fwhdr) + 7, val, GENMASK(15, 0))
+static inline void SET_FW_HDR_PART_SIZE(void *fwhdr, u32 val)
+{
+       le32p_replace_bits((__le32 *)fwhdr + 7, val, GENMASK(15, 0));
+}
 
 #define SET_CTRL_INFO_MACID(table, val) \
        le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0))
index cff3b43ca4d7d3f9ce813aef5e9148c10f0c1979..12c03dacb5dd0c032d7963ac7bfb4c6f9e4a3082 100644 (file)
@@ -181,9 +181,9 @@ void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer)
 bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
 {
        struct ipc_mem_channel *channel;
+       bool hpda_ctrl_pending = false;
        struct sk_buff_head *ul_list;
        bool hpda_pending = false;
-       bool forced_hpdu = false;
        struct ipc_pipe *pipe;
        int i;
 
@@ -200,15 +200,19 @@ bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
                ul_list = &channel->ul_list;
 
                /* Fill the transfer descriptor with the uplink buffer info. */
-               hpda_pending |= ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
+               if (!ipc_imem_check_wwan_ips(channel)) {
+                       hpda_ctrl_pending |=
+                               ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
                                                        pipe, ul_list);
-
-               /* forced HP update needed for non data channels */
-               if (hpda_pending && !ipc_imem_check_wwan_ips(channel))
-                       forced_hpdu = true;
+               } else {
+                       hpda_pending |=
+                               ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
+                                                       pipe, ul_list);
+               }
        }
 
-       if (forced_hpdu) {
+       /* forced HP update needed for non data channels */
+       if (hpda_ctrl_pending) {
                hpda_pending = false;
                ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
                                              IPC_HP_UL_WRITE_TD);
@@ -527,6 +531,9 @@ static void ipc_imem_run_state_worker(struct work_struct *instance)
                return;
        }
 
+       if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
+               ipc_devlink_deinit(ipc_imem->ipc_devlink);
+
        if (!ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg))
                ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);
 
@@ -1167,7 +1174,7 @@ void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
                ipc_port_deinit(ipc_imem->ipc_port);
        }
 
-       if (ipc_imem->ipc_devlink)
+       if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
                ipc_devlink_deinit(ipc_imem->ipc_devlink);
 
        ipc_imem_device_ipc_uninit(ipc_imem);
@@ -1263,7 +1270,6 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
 
        ipc_imem->pci_device_id = device_id;
 
-       ipc_imem->ev_cdev_write_pending = false;
        ipc_imem->cp_version = 0;
        ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP;
 
@@ -1331,6 +1337,8 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
 
                if (ipc_flash_link_establish(ipc_imem))
                        goto devlink_channel_fail;
+
+               set_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag);
        }
        return ipc_imem;
 devlink_channel_fail:
index 6be6708b4eec86c71309513880d9242501a3d624..6b8a837faef2fe5f064d48681595e8f1724f49f9 100644 (file)
@@ -101,6 +101,7 @@ struct ipc_chnl_cfg;
 #define IOSM_CHIP_INFO_SIZE_MAX 100
 
 #define FULLY_FUNCTIONAL 0
+#define IOSM_DEVLINK_INIT 1
 
 /* List of the supported UL/DL pipes. */
 enum ipc_mem_pipes {
@@ -335,8 +336,6 @@ enum ipc_phase {
  *                             process the irq actions.
  * @flag:                      Flag to monitor the state of driver
  * @td_update_timer_suspended: if true then td update timer suspend
- * @ev_cdev_write_pending:     0 means inform the IPC tasklet to pass
- *                             the accumulated uplink buffers to CP.
  * @ev_mux_net_transmit_pending:0 means inform the IPC tasklet to pass
  * @reset_det_n:               Reset detect flag
  * @pcie_wake_n:               Pcie wake flag
@@ -374,7 +373,6 @@ struct iosm_imem {
        u8 ev_irq_pending[IPC_IRQ_VECTORS];
        unsigned long flag;
        u8 td_update_timer_suspended:1,
-          ev_cdev_write_pending:1,
           ev_mux_net_transmit_pending:1,
           reset_det_n:1,
           pcie_wake_n:1;
index 825e8e5ffb2aedc8c32f0066e1451ab9b778b0b5..831cdae28e8a9f56884187cce380966213037205 100644 (file)
@@ -41,7 +41,6 @@ void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
 static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg,
                                  void *msg, size_t size)
 {
-       ipc_imem->ev_cdev_write_pending = false;
        ipc_imem_ul_send(ipc_imem);
 
        return 0;
@@ -50,11 +49,6 @@ static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg,
 /* Through tasklet to do sio write. */
 static int ipc_imem_call_cdev_write(struct iosm_imem *ipc_imem)
 {
-       if (ipc_imem->ev_cdev_write_pending)
-               return -1;
-
-       ipc_imem->ev_cdev_write_pending = true;
-
        return ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_cdev_write, 0,
                                        NULL, 0, false);
 }
@@ -450,6 +444,7 @@ void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink)
        /* Release the pipe resources */
        ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
        ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
+       ipc_imem->nr_of_channels--;
 }
 
 void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink,
index 4a16d6e33c093009937c326694fabaff4bf87636..d9dea4829c86e4ada03c6af4f70e3f9a5cd103ba 100644 (file)
@@ -203,6 +203,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
        unsigned int rx_queue_max;
        unsigned int rx_queue_len;
        unsigned long last_rx_time;
+       unsigned int rx_slots_needed;
        bool stalled;
 
        struct xenvif_copy_state rx_copy;
index accc991d153f7c787c1b83bedb14183d5019c1c4..dbac4c03d21a14d12a2eee9e7b3418c9cc184d88 100644 (file)
 #include <xen/xen.h>
 #include <xen/events.h>
 
-static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
+/*
+ * Update the needed ring page slots for the first SKB queued.
+ * Note that any call sequence outside the RX thread calling this function
+ * needs to wake up the RX thread via a call of xenvif_kick_thread()
+ * afterwards in order to avoid a race with putting the thread to sleep.
+ */
+static void xenvif_update_needed_slots(struct xenvif_queue *queue,
+                                      const struct sk_buff *skb)
 {
-       RING_IDX prod, cons;
-       struct sk_buff *skb;
-       int needed;
-       unsigned long flags;
-
-       spin_lock_irqsave(&queue->rx_queue.lock, flags);
+       unsigned int needed = 0;
 
-       skb = skb_peek(&queue->rx_queue);
-       if (!skb) {
-               spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
-               return false;
+       if (skb) {
+               needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
+               if (skb_is_gso(skb))
+                       needed++;
+               if (skb->sw_hash)
+                       needed++;
        }
 
-       needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
-       if (skb_is_gso(skb))
-               needed++;
-       if (skb->sw_hash)
-               needed++;
+       WRITE_ONCE(queue->rx_slots_needed, needed);
+}
 
-       spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
+static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
+{
+       RING_IDX prod, cons;
+       unsigned int needed;
+
+       needed = READ_ONCE(queue->rx_slots_needed);
+       if (!needed)
+               return false;
 
        do {
                prod = queue->rx.sring->req_prod;
@@ -80,13 +88,19 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
 
        spin_lock_irqsave(&queue->rx_queue.lock, flags);
 
-       __skb_queue_tail(&queue->rx_queue, skb);
-
-       queue->rx_queue_len += skb->len;
-       if (queue->rx_queue_len > queue->rx_queue_max) {
+       if (queue->rx_queue_len >= queue->rx_queue_max) {
                struct net_device *dev = queue->vif->dev;
 
                netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
+               kfree_skb(skb);
+               queue->vif->dev->stats.rx_dropped++;
+       } else {
+               if (skb_queue_empty(&queue->rx_queue))
+                       xenvif_update_needed_slots(queue, skb);
+
+               __skb_queue_tail(&queue->rx_queue, skb);
+
+               queue->rx_queue_len += skb->len;
        }
 
        spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
@@ -100,6 +114,8 @@ static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
 
        skb = __skb_dequeue(&queue->rx_queue);
        if (skb) {
+               xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));
+
                queue->rx_queue_len -= skb->len;
                if (queue->rx_queue_len < queue->rx_queue_max) {
                        struct netdev_queue *txq;
@@ -134,6 +150,7 @@ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
                        break;
                xenvif_rx_dequeue(queue);
                kfree_skb(skb);
+               queue->vif->dev->stats.rx_dropped++;
        }
 }
 
@@ -487,27 +504,31 @@ void xenvif_rx_action(struct xenvif_queue *queue)
        xenvif_rx_copy_flush(queue);
 }
 
-static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
+static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue)
 {
        RING_IDX prod, cons;
 
        prod = queue->rx.sring->req_prod;
        cons = queue->rx.req_cons;
 
+       return prod - cons;
+}
+
+static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue)
+{
+       unsigned int needed = READ_ONCE(queue->rx_slots_needed);
+
        return !queue->stalled &&
-               prod - cons < 1 &&
+               xenvif_rx_queue_slots(queue) < needed &&
                time_after(jiffies,
                           queue->last_rx_time + queue->vif->stall_timeout);
 }
 
 static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
 {
-       RING_IDX prod, cons;
-
-       prod = queue->rx.sring->req_prod;
-       cons = queue->rx.req_cons;
+       unsigned int needed = READ_ONCE(queue->rx_slots_needed);
 
-       return queue->stalled && prod - cons >= 1;
+       return queue->stalled && xenvif_rx_queue_slots(queue) >= needed;
 }
 
 bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
index 911f43986a8c9fdad0afa0d384f4629d32972c53..d514d96027a6ff40b3ec7ef6aac7d03de1490669 100644 (file)
@@ -148,6 +148,9 @@ struct netfront_queue {
        grant_ref_t gref_rx_head;
        grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
 
+       unsigned int rx_rsp_unconsumed;
+       spinlock_t rx_cons_lock;
+
        struct page_pool *page_pool;
        struct xdp_rxq_info xdp_rxq;
 };
@@ -376,12 +379,13 @@ static int xennet_open(struct net_device *dev)
        return 0;
 }
 
-static void xennet_tx_buf_gc(struct netfront_queue *queue)
+static bool xennet_tx_buf_gc(struct netfront_queue *queue)
 {
        RING_IDX cons, prod;
        unsigned short id;
        struct sk_buff *skb;
        bool more_to_do;
+       bool work_done = false;
        const struct device *dev = &queue->info->netdev->dev;
 
        BUG_ON(!netif_carrier_ok(queue->info->netdev));
@@ -398,6 +402,8 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
                for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
                        struct xen_netif_tx_response txrsp;
 
+                       work_done = true;
+
                        RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
                        if (txrsp.status == XEN_NETIF_RSP_NULL)
                                continue;
@@ -441,11 +447,13 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
 
        xennet_maybe_wake_tx(queue);
 
-       return;
+       return work_done;
 
  err:
        queue->info->broken = true;
        dev_alert(dev, "Disabled for further use\n");
+
+       return work_done;
 }
 
 struct xennet_gnttab_make_txreq {
@@ -834,6 +842,16 @@ static int xennet_close(struct net_device *dev)
        return 0;
 }
 
+static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&queue->rx_cons_lock, flags);
+       queue->rx.rsp_cons = val;
+       queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
+       spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
+}
+
 static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
                                grant_ref_t ref)
 {
@@ -885,7 +903,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
                xennet_move_rx_slot(queue, skb, ref);
        } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
 
-       queue->rx.rsp_cons = cons;
+       xennet_set_rx_rsp_cons(queue, cons);
        return err;
 }
 
@@ -1039,7 +1057,7 @@ next:
        }
 
        if (unlikely(err))
-               queue->rx.rsp_cons = cons + slots;
+               xennet_set_rx_rsp_cons(queue, cons + slots);
 
        return err;
 }
@@ -1093,7 +1111,8 @@ static int xennet_fill_frags(struct netfront_queue *queue,
                        __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
                }
                if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
-                       queue->rx.rsp_cons = ++cons + skb_queue_len(list);
+                       xennet_set_rx_rsp_cons(queue,
+                                              ++cons + skb_queue_len(list));
                        kfree_skb(nskb);
                        return -ENOENT;
                }
@@ -1106,7 +1125,7 @@ static int xennet_fill_frags(struct netfront_queue *queue,
                kfree_skb(nskb);
        }
 
-       queue->rx.rsp_cons = cons;
+       xennet_set_rx_rsp_cons(queue, cons);
 
        return 0;
 }
@@ -1229,7 +1248,9 @@ err:
 
                        if (unlikely(xennet_set_skb_gso(skb, gso))) {
                                __skb_queue_head(&tmpq, skb);
-                               queue->rx.rsp_cons += skb_queue_len(&tmpq);
+                               xennet_set_rx_rsp_cons(queue,
+                                                      queue->rx.rsp_cons +
+                                                      skb_queue_len(&tmpq));
                                goto err;
                        }
                }
@@ -1253,7 +1274,8 @@ err:
 
                __skb_queue_tail(&rxq, skb);
 
-               i = ++queue->rx.rsp_cons;
+               i = queue->rx.rsp_cons + 1;
+               xennet_set_rx_rsp_cons(queue, i);
                work_done++;
        }
        if (need_xdp_flush)
@@ -1417,40 +1439,79 @@ static int xennet_set_features(struct net_device *dev,
        return 0;
 }
 
-static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
+static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
 {
-       struct netfront_queue *queue = dev_id;
        unsigned long flags;
 
-       if (queue->info->broken)
-               return IRQ_HANDLED;
+       if (unlikely(queue->info->broken))
+               return false;
 
        spin_lock_irqsave(&queue->tx_lock, flags);
-       xennet_tx_buf_gc(queue);
+       if (xennet_tx_buf_gc(queue))
+               *eoi = 0;
        spin_unlock_irqrestore(&queue->tx_lock, flags);
 
+       return true;
+}
+
+static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
+{
+       unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
+
+       if (likely(xennet_handle_tx(dev_id, &eoiflag)))
+               xen_irq_lateeoi(irq, eoiflag);
+
        return IRQ_HANDLED;
 }
 
-static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
+static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
 {
-       struct netfront_queue *queue = dev_id;
-       struct net_device *dev = queue->info->netdev;
+       unsigned int work_queued;
+       unsigned long flags;
 
-       if (queue->info->broken)
-               return IRQ_HANDLED;
+       if (unlikely(queue->info->broken))
+               return false;
+
+       spin_lock_irqsave(&queue->rx_cons_lock, flags);
+       work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
+       if (work_queued > queue->rx_rsp_unconsumed) {
+               queue->rx_rsp_unconsumed = work_queued;
+               *eoi = 0;
+       } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
+               const struct device *dev = &queue->info->netdev->dev;
+
+               spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
+               dev_alert(dev, "RX producer index going backwards\n");
+               dev_alert(dev, "Disabled for further use\n");
+               queue->info->broken = true;
+               return false;
+       }
+       spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
 
-       if (likely(netif_carrier_ok(dev) &&
-                  RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
+       if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
                napi_schedule(&queue->napi);
 
+       return true;
+}
+
+static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
+{
+       unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
+
+       if (likely(xennet_handle_rx(dev_id, &eoiflag)))
+               xen_irq_lateeoi(irq, eoiflag);
+
        return IRQ_HANDLED;
 }
 
 static irqreturn_t xennet_interrupt(int irq, void *dev_id)
 {
-       xennet_tx_interrupt(irq, dev_id);
-       xennet_rx_interrupt(irq, dev_id);
+       unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
+
+       if (xennet_handle_tx(dev_id, &eoiflag) &&
+           xennet_handle_rx(dev_id, &eoiflag))
+               xen_irq_lateeoi(irq, eoiflag);
+
        return IRQ_HANDLED;
 }
 
@@ -1768,9 +1829,10 @@ static int setup_netfront_single(struct netfront_queue *queue)
        if (err < 0)
                goto fail;
 
-       err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
-                                       xennet_interrupt,
-                                       0, queue->info->netdev->name, queue);
+       err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
+                                               xennet_interrupt, 0,
+                                               queue->info->netdev->name,
+                                               queue);
        if (err < 0)
                goto bind_fail;
        queue->rx_evtchn = queue->tx_evtchn;
@@ -1798,18 +1860,18 @@ static int setup_netfront_split(struct netfront_queue *queue)
 
        snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
                 "%s-tx", queue->name);
-       err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
-                                       xennet_tx_interrupt,
-                                       0, queue->tx_irq_name, queue);
+       err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
+                                               xennet_tx_interrupt, 0,
+                                               queue->tx_irq_name, queue);
        if (err < 0)
                goto bind_tx_fail;
        queue->tx_irq = err;
 
        snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
                 "%s-rx", queue->name);
-       err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
-                                       xennet_rx_interrupt,
-                                       0, queue->rx_irq_name, queue);
+       err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
+                                               xennet_rx_interrupt, 0,
+                                               queue->rx_irq_name, queue);
        if (err < 0)
                goto bind_rx_fail;
        queue->rx_irq = err;
@@ -1911,6 +1973,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
 
        spin_lock_init(&queue->tx_lock);
        spin_lock_init(&queue->rx_lock);
+       spin_lock_init(&queue->rx_cons_lock);
 
        timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
 
index 4c63564adeaa6a4c25727893a89b935710966162..1af8a4513708a798b36726dda30787465bb10c33 100644 (file)
@@ -666,6 +666,7 @@ blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
                struct request *rq)
 {
        if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
+           ctrl->state != NVME_CTRL_DELETING &&
            ctrl->state != NVME_CTRL_DEAD &&
            !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
            !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
@@ -1749,9 +1750,20 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
                 */
                if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
                        return -EINVAL;
-               if (ctrl->max_integrity_segments)
-                       ns->features |=
-                               (NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
+
+               ns->features |= NVME_NS_EXT_LBAS;
+
+               /*
+                * The current fabrics transport drivers support namespace
+                * metadata formats only if nvme_ns_has_pi() returns true.
+                * Suppress support for all other formats so the namespace will
+                * have a 0 capacity and not be usable through the block stack.
+                *
+                * Note, this check will need to be modified if any drivers
+                * gain the ability to use other metadata formats.
+                */
+               if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns))
+                       ns->features |= NVME_NS_METADATA_SUPPORTED;
        } else {
                /*
                 * For PCIe controllers, we can't easily remap the separate
@@ -2696,8 +2708,9 @@ static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
 
                if (tmp->cntlid == ctrl->cntlid) {
                        dev_err(ctrl->device,
-                               "Duplicate cntlid %u with %s, rejecting\n",
-                               ctrl->cntlid, dev_name(tmp->device));
+                               "Duplicate cntlid %u with %s, subsys %s, rejecting\n",
+                               ctrl->cntlid, dev_name(tmp->device),
+                               subsys->subnqn);
                        return false;
                }
 
index 7f2071f2460c877d23cfeb0f499feb534705b79e..13e5d503ed0765af2e6eaed65b344a3dc2ef339f 100644 (file)
@@ -866,7 +866,7 @@ int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
        }
        if (ana_log_size > ctrl->ana_log_size) {
                nvme_mpath_stop(ctrl);
-               kfree(ctrl->ana_log_buf);
+               nvme_mpath_uninit(ctrl);
                ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL);
                if (!ctrl->ana_log_buf)
                        return -ENOMEM;
@@ -886,4 +886,5 @@ void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
 {
        kfree(ctrl->ana_log_buf);
        ctrl->ana_log_buf = NULL;
+       ctrl->ana_log_size = 0;
 }
index b334af8aa264285f508e1c06cb09ed59904e1499..9b095ee01364996da8841d49af66f74d62a27df6 100644 (file)
@@ -709,7 +709,7 @@ static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
                return true;
        if (ctrl->ops->flags & NVME_F_FABRICS &&
            ctrl->state == NVME_CTRL_DELETING)
-               return true;
+               return queue_live;
        return __nvme_check_ready(ctrl, rq, queue_live);
 }
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
index bfc259e0d7b87e11b7d9c3b595e413535842c5a7..9f81beb4df4eff844881854c3ed81ee82b07e821 100644 (file)
@@ -166,7 +166,10 @@ static int nvme_zone_parse_entry(struct nvme_ns *ns,
        zone.len = ns->zsze;
        zone.capacity = nvme_lba_to_sect(ns, le64_to_cpu(entry->zcap));
        zone.start = nvme_lba_to_sect(ns, le64_to_cpu(entry->zslba));
-       zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp));
+       if (zone.cond == BLK_ZONE_COND_FULL)
+               zone.wp = zone.start + zone.len;
+       else
+               zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp));
 
        return cb(&zone, idx, data);
 }
index cb6a473c3eafa50706dc9c0a7aa0bad173c7b3e9..7c1c43ce466bcbab9a98414bb3c0d74f1f35139b 100644 (file)
@@ -922,7 +922,14 @@ static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
        size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
        int ret;
 
-       if (!nvme_is_write(cmd->req.cmd) ||
+       /*
+        * This command has not been processed yet, hence we are trying to
+        * figure out if there is still pending data left to receive. If
+        * we don't, we can simply prepare for the next pdu and bail out,
+        * otherwise we will need to prepare a buffer and receive the
+        * stale data before continuing forward.
+        */
+       if (!nvme_is_write(cmd->req.cmd) || !data_len ||
            data_len > cmd->req.port->inline_data_size) {
                nvmet_prepare_receive_pdu(queue);
                return;
index b10f015b2e3775a930c6580502cdeb618c3e7cf3..2b07677a386b7c860d275c6145f7480536640e63 100644 (file)
@@ -76,6 +76,26 @@ struct device_node *of_irq_find_parent(struct device_node *child)
 }
 EXPORT_SYMBOL_GPL(of_irq_find_parent);
 
+/*
+ * These interrupt controllers abuse interrupt-map for unspeakable
+ * reasons and rely on the core code to *ignore* it (the drivers do
+ * their own parsing of the property).
+ *
+ * If you think of adding to the list for something *new*, think
+ * again. There is a high chance that you will be sent back to the
+ * drawing board.
+ */
+static const char * const of_irq_imap_abusers[] = {
+       "CBEA,platform-spider-pic",
+       "sti,platform-spider-pic",
+       "realtek,rtl-intc",
+       "fsl,ls1021a-extirq",
+       "fsl,ls1043a-extirq",
+       "fsl,ls1088a-extirq",
+       "renesas,rza1-irqc",
+       NULL,
+};
+
 /**
  * of_irq_parse_raw - Low level interrupt tree parsing
  * @addr:      address specifier (start of "reg" property of the device) in be32 format
@@ -159,12 +179,15 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
                /*
                 * Now check if cursor is an interrupt-controller and
                 * if it is then we are done, unless there is an
-                * interrupt-map which takes precedence.
+                * interrupt-map which takes precedence except on one
+                * of these broken platforms that want to parse
+                * interrupt-map themselves for $reason.
                 */
                bool intc = of_property_read_bool(ipar, "interrupt-controller");
 
                imap = of_get_property(ipar, "interrupt-map", &imaplen);
-               if (imap == NULL && intc) {
+               if (intc &&
+                   (!imap || of_device_compatible_match(ipar, of_irq_imap_abusers))) {
                        pr_debug(" -> got it !\n");
                        return 0;
                }
index 93b1411105373cd21fe4625bca0e339e8f446c66..7fc5135ffbbfd49be057fe5c33e68d38175d368d 100644 (file)
@@ -332,8 +332,8 @@ config PCIE_APPLE
          If unsure, say Y if you have an Apple Silicon system.
 
 config PCIE_MT7621
-       tristate "MediaTek MT7621 PCIe Controller"
-       depends on (RALINK && SOC_MT7621) || (MIPS && COMPILE_TEST)
+       bool "MediaTek MT7621 PCIe Controller"
+       depends on SOC_MT7621 || (MIPS && COMPILE_TEST)
        select PHY_MT7621_PCI
        default SOC_MT7621
        help
index c24dab383654b00a126e0b2f9e6185e47a6f42f4..722dacdd5a17f8135f110077ab519c424b60163b 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/platform_device.h>
 #include <linux/phy/phy.h>
 #include <linux/regulator/consumer.h>
+#include <linux/module.h>
 
 #include "pcie-designware.h"
 
index 7b17da2f9b3f8d9ddf2c713cce3cad45a46dde1d..cfe66bf04c1d38ba5dbbe8cb6f7772ca2e996a61 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/pm_domain.h>
 #include <linux/regmap.h>
 #include <linux/reset.h>
+#include <linux/module.h>
 
 #include "pcie-designware.h"
 
index c5300d49807a23109bbcf75792ba1f97e7f48c83..c3b725afa11fdbd0b96f0bcc5443731817ee49e8 100644 (file)
@@ -32,7 +32,6 @@
 #define PCIE_CORE_DEV_ID_REG                                   0x0
 #define PCIE_CORE_CMD_STATUS_REG                               0x4
 #define PCIE_CORE_DEV_REV_REG                                  0x8
-#define PCIE_CORE_EXP_ROM_BAR_REG                              0x30
 #define PCIE_CORE_PCIEXP_CAP                                   0xc0
 #define PCIE_CORE_ERR_CAPCTL_REG                               0x118
 #define     PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX                   BIT(5)
@@ -774,10 +773,6 @@ advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
                *value = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
                return PCI_BRIDGE_EMUL_HANDLED;
 
-       case PCI_ROM_ADDRESS1:
-               *value = advk_readl(pcie, PCIE_CORE_EXP_ROM_BAR_REG);
-               return PCI_BRIDGE_EMUL_HANDLED;
-
        case PCI_INTERRUPT_LINE: {
                /*
                 * From the whole 32bit register we support reading from HW only
@@ -810,10 +805,6 @@ advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
                advk_writel(pcie, new, PCIE_CORE_CMD_STATUS_REG);
                break;
 
-       case PCI_ROM_ADDRESS1:
-               advk_writel(pcie, new, PCIE_CORE_EXP_ROM_BAR_REG);
-               break;
-
        case PCI_INTERRUPT_LINE:
                if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
                        u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG);
index 1bf4d75b61be7834d56e5132115d4f65df8f59f8..b090924b41feefac638f5a127cdfa9bcf801f2a7 100644 (file)
@@ -516,7 +516,7 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
        int ret, i;
 
        reset = gpiod_get_from_of_node(np, "reset-gpios", 0,
-                                      GPIOD_OUT_LOW, "#PERST");
+                                      GPIOD_OUT_LOW, "PERST#");
        if (IS_ERR(reset))
                return PTR_ERR(reset);
 
@@ -539,12 +539,22 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
 
        rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK);
 
+       /* Assert PERST# before setting up the clock */
+       gpiod_set_value(reset, 1);
+
        ret = apple_pcie_setup_refclk(pcie, port);
        if (ret < 0)
                return ret;
 
+       /* The minimal Tperst-clk value is 100us (PCIe CEM r5.0, 2.9.2) */
+       usleep_range(100, 200);
+
+       /* Deassert PERST# */
        rmw_set(PORT_PERST_OFF, port->base + PORT_PERST);
-       gpiod_set_value(reset, 1);
+       gpiod_set_value(reset, 0);
+
+       /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
+       msleep(100);
 
        ret = readl_relaxed_poll_timeout(port->base + PORT_STATUS, stat,
                                         stat & PORT_STATUS_READY, 100, 250000);
index 48e3f4e47b293c94fd3b45fa14121e72f81d9002..d84cf30bb279086cc5b23ac8692c0b8987eba8c6 100644 (file)
@@ -722,9 +722,6 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
                goto out_disable;
        }
 
-       /* Ensure that all table entries are masked. */
-       msix_mask_all(base, tsize);
-
        ret = msix_setup_entries(dev, base, entries, nvec, affd);
        if (ret)
                goto out_disable;
@@ -751,6 +748,16 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
        /* Set MSI-X enabled bits and unmask the function */
        pci_intx_for_msi(dev, 0);
        dev->msix_enabled = 1;
+
+       /*
+        * Ensure that all table entries are masked to prevent
+        * stale entries from firing in a crash kernel.
+        *
+        * Done late to deal with a broken Marvell NVME device
+        * which takes the MSI-X mask bits into account even
+        * when MSI-X is disabled, which prevents MSI delivery.
+        */
+       msix_mask_all(base, tsize);
        pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
 
        pcibios_free_irq(dev);
@@ -777,7 +784,7 @@ out_free:
        free_msi_irqs(dev);
 
 out_disable:
-       pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+       pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE, 0);
 
        return ret;
 }
index c64c6679b1b9a47297adf7576b6c91c2ed7afa27..0ac9634b398dac5a44370ce549ca9c8135ee8dd3 100644 (file)
@@ -757,8 +757,8 @@ static int hi3670_pcie_phy_get_resources(struct hi3670_pcie_phy *phy,
                return PTR_ERR(phy->sysctrl);
 
        phy->pmctrl = syscon_regmap_lookup_by_compatible("hisilicon,hi3670-pmctrl");
-       if (IS_ERR(phy->sysctrl))
-               return PTR_ERR(phy->sysctrl);
+       if (IS_ERR(phy->pmctrl))
+               return PTR_ERR(phy->pmctrl);
 
        /* clocks */
        phy->phy_ref_clk = devm_clk_get(dev, "phy_ref");
index 08d178a4dc13f5c5405617af08bb89b5ef91de51..aa27c799461040c1a4869f65c22b2b6cb7d5a341 100644 (file)
@@ -82,9 +82,9 @@
  * struct mvebu_cp110_utmi - PHY driver data
  *
  * @regs: PHY registers
- * @syscom: Regmap with system controller registers
+ * @syscon: Regmap with system controller registers
  * @dev: device driver handle
- * @caps: PHY capabilities
+ * @ops: phy ops
  */
 struct mvebu_cp110_utmi {
        void __iomem *regs;
index bfff0c8c9130389e4c472d22a978548aaf7672f3..fec1da470d26da76e6e2cd6280ff71a4549a073d 100644 (file)
@@ -127,12 +127,13 @@ struct phy_drvdata {
 };
 
 /**
- * Write register and read back masked value to confirm it is written
+ * usb_phy_write_readback() - Write register and read back masked value to
+ * confirm it is written
  *
- * @base - QCOM DWC3 PHY base virtual address.
- * @offset - register offset.
- * @mask - register bitmask specifying what should be updated
- * @val - value to write.
+ * @phy_dwc3: QCOM DWC3 phy context
+ * @offset: register offset.
+ * @mask: register bitmask specifying what should be updated
+ * @val: value to write.
  */
 static inline void usb_phy_write_readback(struct usb_phy *phy_dwc3,
                                          u32 offset,
@@ -171,11 +172,11 @@ static int wait_for_latch(void __iomem *addr)
 }
 
 /**
- * Write SSPHY register
+ * usb_ss_write_phycreg() - Write SSPHY register
  *
- * @base - QCOM DWC3 PHY base virtual address.
- * @addr - SSPHY address to write.
- * @val - value to write.
+ * @phy_dwc3: QCOM DWC3 phy context
+ * @addr: SSPHY address to write.
+ * @val: value to write.
  */
 static int usb_ss_write_phycreg(struct usb_phy *phy_dwc3,
                                u32 addr, u32 val)
@@ -209,10 +210,11 @@ err_wait:
 }
 
 /**
- * Read SSPHY register.
+ * usb_ss_read_phycreg() - Read SSPHY register.
  *
- * @base - QCOM DWC3 PHY base virtual address.
- * @addr - SSPHY address to read.
+ * @phy_dwc3: QCOM DWC3 phy context
+ * @addr: SSPHY address to read.
+ * @val: pointer in which read is store.
  */
 static int usb_ss_read_phycreg(struct usb_phy *phy_dwc3,
                               u32 addr, u32 *val)
index 456a59d8c7d047ae17629107e6360f02d08cd888..c96639d5f5819ec2b6be258b499e8c7b267b098a 100644 (file)
@@ -2973,6 +2973,9 @@ struct qmp_phy_combo_cfg {
  * @qmp: QMP phy to which this lane belongs
  * @lane_rst: lane's reset controller
  * @mode: current PHY mode
+ * @dp_aux_cfg: Display port aux config
+ * @dp_opts: Display port optional config
+ * @dp_clks: Display port clocks
  */
 struct qmp_phy {
        struct phy *phy;
index 04d18d52f700d93e4d22a8f47bbae0f936788394..716a77748ed83684af9375ba0cc5b40e3390de5c 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * Copyright (C) 2016 Linaro Ltd
  */
 #include <linux/module.h>
index 7df6a63ad37ba7f1f7aaaa780a67f7750d3e70a3..e4f4a9be513200a43bbc343b32349f56687f1902 100644 (file)
@@ -478,7 +478,7 @@ static void stm32_usbphyc_phy_tuning(struct stm32_usbphyc *usbphyc,
        if (!of_property_read_bool(np, "st,no-lsfs-fb-cap"))
                usbphyc_phy->tune |= LFSCAPEN;
 
-       if (of_property_read_bool(np, "st,slow-hs-slew-rate"))
+       if (of_property_read_bool(np, "st,decrease-hs-slew-rate"))
                usbphyc_phy->tune |= HSDRVSLEW;
 
        ret = of_property_read_u32(np, "st,tune-hs-dc-level", &val);
index 2ff56ce77b307a5230c3651183da923a6b9df8cb..c1211c4f863cadf650d838b4d7ec9997594cdee2 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/**
+/*
  * PCIe SERDES driver for AM654x SoC
  *
  * Copyright (C) 2018 - 2019 Texas Instruments Incorporated - http://www.ti.com/
index 126f5b8735cc1afffe8dbf6deeaa1dedc3486934..b3384c31637ae7e59c04b17a19f988da877cbd27 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/**
+/*
  * Wrapper driver for SERDES used in J721E
  *
  * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
index ebceb1520ce88ade401a7c4e97a35806a90efb48..3a505fe5715addae9a61ce079491a11ef163ce65 100644 (file)
@@ -89,9 +89,9 @@ static inline void omap_usb_writel(void __iomem *addr, unsigned int offset,
 }
 
 /**
- * omap_usb2_set_comparator - links the comparator present in the system with
- *     this phy
- * @comparator - the companion phy(comparator) for this phy
+ * omap_usb2_set_comparator() - links the comparator present in the system with this phy
+ *
+ * @comparator the companion phy(comparator) for this phy
  *
  * The phy companion driver should call this API passing the phy_companion
  * filled with set_vbus and start_srp to be used by usb phy.
index a63213f5972a7e591061bf6dce32d05644528ba2..15c1c79e5c294dc66ab551cfad9fc72732289889 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * tusb1210.c - TUSB1210 USB ULPI PHY driver
  *
  * Copyright (C) 2015 Intel Corporation
index 2abcc6ce4eba338b1c62ef6f9f8db1b428ce0fa1..b607d10e4cbd84f96a96a6b03cbf373014ac07e9 100644 (file)
@@ -1244,6 +1244,18 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
                raw_spin_lock_init(&pc->irq_lock[i]);
        }
 
+       pc->pctl_desc = *pdata->pctl_desc;
+       pc->pctl_dev = devm_pinctrl_register(dev, &pc->pctl_desc, pc);
+       if (IS_ERR(pc->pctl_dev)) {
+               gpiochip_remove(&pc->gpio_chip);
+               return PTR_ERR(pc->pctl_dev);
+       }
+
+       pc->gpio_range = *pdata->gpio_range;
+       pc->gpio_range.base = pc->gpio_chip.base;
+       pc->gpio_range.gc = &pc->gpio_chip;
+       pinctrl_add_gpio_range(pc->pctl_dev, &pc->gpio_range);
+
        girq = &pc->gpio_chip.irq;
        girq->chip = &bcm2835_gpio_irq_chip;
        girq->parent_handler = bcm2835_gpio_irq_handler;
@@ -1251,8 +1263,10 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
        girq->parents = devm_kcalloc(dev, BCM2835_NUM_IRQS,
                                     sizeof(*girq->parents),
                                     GFP_KERNEL);
-       if (!girq->parents)
+       if (!girq->parents) {
+               pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range);
                return -ENOMEM;
+       }
 
        if (is_7211) {
                pc->wake_irq = devm_kcalloc(dev, BCM2835_NUM_IRQS,
@@ -1307,21 +1321,10 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
        err = gpiochip_add_data(&pc->gpio_chip, pc);
        if (err) {
                dev_err(dev, "could not add GPIO chip\n");
+               pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range);
                return err;
        }
 
-       pc->pctl_desc = *pdata->pctl_desc;
-       pc->pctl_dev = devm_pinctrl_register(dev, &pc->pctl_desc, pc);
-       if (IS_ERR(pc->pctl_dev)) {
-               gpiochip_remove(&pc->gpio_chip);
-               return PTR_ERR(pc->pctl_dev);
-       }
-
-       pc->gpio_range = *pdata->gpio_range;
-       pc->gpio_range.base = pc->gpio_chip.base;
-       pc->gpio_range.gc = &pc->gpio_chip;
-       pinctrl_add_gpio_range(pc->pctl_dev, &pc->gpio_range);
-
        return 0;
 }
 
index 91553b2fc1605f1a8db9c3db02f738485db57d45..53779822348da7cf6fdfe9c225647ecb11d82c8f 100644 (file)
@@ -285,8 +285,12 @@ static int mtk_xt_get_gpio_n(void *data, unsigned long eint_n,
        desc = (const struct mtk_pin_desc *)hw->soc->pins;
        *gpio_chip = &hw->chip;
 
-       /* Be greedy to guess first gpio_n is equal to eint_n */
-       if (desc[eint_n].eint.eint_n == eint_n)
+       /*
+        * Be greedy to guess first gpio_n is equal to eint_n.
+        * Only eint virtual eint number is greater than gpio number.
+        */
+       if (hw->soc->npins > eint_n &&
+           desc[eint_n].eint.eint_n == eint_n)
                *gpio_n = eint_n;
        else
                *gpio_n = mtk_xt_find_eint_num(hw, eint_n);
index 24764ebcc9368804dd9059d9907e9cee222ca002..9ed76473157076d2b3531b657e3bc4fa1043a151 100644 (file)
@@ -1251,10 +1251,10 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
                bank_nr = args.args[1] / STM32_GPIO_PINS_PER_BANK;
                bank->gpio_chip.base = args.args[1];
 
-               npins = args.args[2];
-               while (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3,
-                                                        ++i, &args))
-                       npins += args.args[2];
+               /* get the last defined gpio line (offset + nb of pins) */
+               npins = args.args[0] + args.args[2];
+               while (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, ++i, &args))
+                       npins = max(npins, (int)(args.args[0] + args.args[2]));
        } else {
                bank_nr = pctl->nbanks;
                bank->gpio_chip.base = bank_nr * STM32_GPIO_PINS_PER_BANK;
index 04bc3b50aa7a4ddc7724e514a0194403696bde66..65b4a819f1bdf85f49bc7ab61eb918f0bc06bc86 100644 (file)
@@ -1374,8 +1374,8 @@ static int mlxbf_pmc_map_counters(struct device *dev)
                pmc->block[i].counters = info[2];
                pmc->block[i].type = info[3];
 
-               if (IS_ERR(pmc->block[i].mmio_base))
-                       return PTR_ERR(pmc->block[i].mmio_base);
+               if (!pmc->block[i].mmio_base)
+                       return -ENOMEM;
 
                ret = mlxbf_pmc_create_groups(dev, i);
                if (ret)
index 21947806168396a6d358058fbf5017a4ea5a16f5..253a096b5dd8c173cb1e0a7f58430ffb1013640b 100644 (file)
@@ -68,7 +68,7 @@ obj-$(CONFIG_THINKPAD_ACPI)   += thinkpad_acpi.o
 obj-$(CONFIG_THINKPAD_LMI)     += think-lmi.o
 
 # Intel
-obj-$(CONFIG_X86_PLATFORM_DRIVERS_INTEL)               += intel/
+obj-y                          += intel/
 
 # MSI
 obj-$(CONFIG_MSI_LAPTOP)       += msi-laptop.o
index b7e50ed050a802f4fa9b6007ee820acfd20b9a15..230593ae5d6de51c47e2d47ede5a4bc0dfbf285f 100644 (file)
@@ -76,7 +76,7 @@
 #define AMD_CPU_ID_CZN                 AMD_CPU_ID_RN
 #define AMD_CPU_ID_YC                  0x14B5
 
-#define PMC_MSG_DELAY_MIN_US           100
+#define PMC_MSG_DELAY_MIN_US           50
 #define RESPONSE_REGISTER_LOOP_MAX     20000
 
 #define SOC_SUBSYSTEM_IP_MAX   12
@@ -508,7 +508,8 @@ static int __maybe_unused amd_pmc_resume(struct device *dev)
 }
 
 static const struct dev_pm_ops amd_pmc_pm_ops = {
-       SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(amd_pmc_suspend, amd_pmc_resume)
+       .suspend_noirq = amd_pmc_suspend,
+       .resume_noirq = amd_pmc_resume,
 };
 
 static const struct pci_device_id pmc_pci_ids[] = {
index 9aae45a452002cf842d9c5fb100010a2f843a6df..57553f9b4d1dcdc2f5514dc5382573a8d1b081bb 100644 (file)
@@ -625,7 +625,7 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
        }
 
        gmux_data->iostart = res->start;
-       gmux_data->iolen = res->end - res->start;
+       gmux_data->iolen = resource_size(res);
 
        if (gmux_data->iolen < GMUX_MIN_IO_LEN) {
                pr_err("gmux I/O region too small (%lu < %u)\n",
index 38ce3e3445892aeaefc0da1bf740d1f35874df69..40096b25994af7212123dc9e869289fb6d16ffea 100644 (file)
@@ -3,19 +3,6 @@
 # Intel x86 Platform Specific Drivers
 #
 
-menuconfig X86_PLATFORM_DRIVERS_INTEL
-       bool "Intel x86 Platform Specific Device Drivers"
-       default y
-       help
-         Say Y here to get to see options for device drivers for
-         various Intel x86 platforms, including vendor-specific
-         drivers. This option alone does not add any kernel code.
-
-         If you say N, all options in this submenu will be skipped
-         and disabled.
-
-if X86_PLATFORM_DRIVERS_INTEL
-
 source "drivers/platform/x86/intel/atomisp2/Kconfig"
 source "drivers/platform/x86/intel/int1092/Kconfig"
 source "drivers/platform/x86/intel/int33fe/Kconfig"
@@ -183,5 +170,3 @@ config INTEL_UNCORE_FREQ_CONTROL
 
          To compile this driver as a module, choose M here: the module
          will be called intel-uncore-frequency.
-
-endif # X86_PLATFORM_DRIVERS_INTEL
index 08598942a6d780388286ef501be45caa851625e5..13f8cf70b9aee559d17fcb100abc748652a9b68a 100644 (file)
@@ -99,6 +99,13 @@ static const struct dmi_system_id button_array_table[] = {
                        DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Tablet Gen 2"),
                },
        },
+       {
+               .ident = "Microsoft Surface Go 3",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"),
+               },
+       },
        { }
 };
 
index 73797680b895c370e7b1cf043010dc1f1ab53d4f..15ca8afdd973de2a2b0c7060e8b26b4beb2951bf 100644 (file)
@@ -65,7 +65,7 @@ static int __init pmc_core_platform_init(void)
 
        retval = platform_device_register(pmc_core_device);
        if (retval)
-               kfree(pmc_core_device);
+               platform_device_put(pmc_core_device);
 
        return retval;
 }
index ae9293024c77bd23427a9089f19659cb1f894d59..a91847a551a725336a063235697046d660c698dc 100644 (file)
@@ -657,6 +657,18 @@ static int acpi_add(struct acpi_device *device)
        if (product && strlen(product) > 4)
                switch (product[4]) {
                case '5':
+                       if (strlen(product) > 5)
+                               switch (product[5]) {
+                               case 'N':
+                                       year = 2021;
+                                       break;
+                               case '0':
+                                       year = 2016;
+                                       break;
+                               default:
+                                       year = 2022;
+                               }
+                       break;
                case '6':
                        year = 2016;
                        break;
index 8b292ee95a1448f3637fbeb887b557c009836e7e..7299ad08c838780bd27edd34fc2f78cd01f0e87c 100644 (file)
@@ -35,6 +35,7 @@ struct system76_data {
        union acpi_object *nfan;
        union acpi_object *ntmp;
        struct input_dev *input;
+       bool has_open_ec;
 };
 
 static const struct acpi_device_id device_ids[] = {
@@ -279,20 +280,12 @@ static struct acpi_battery_hook system76_battery_hook = {
 
 static void system76_battery_init(void)
 {
-       acpi_handle handle;
-
-       handle = ec_get_handle();
-       if (handle && acpi_has_method(handle, "GBCT"))
-               battery_hook_register(&system76_battery_hook);
+       battery_hook_register(&system76_battery_hook);
 }
 
 static void system76_battery_exit(void)
 {
-       acpi_handle handle;
-
-       handle = ec_get_handle();
-       if (handle && acpi_has_method(handle, "GBCT"))
-               battery_hook_unregister(&system76_battery_hook);
+       battery_hook_unregister(&system76_battery_hook);
 }
 
 // Get the airplane mode LED brightness
@@ -673,6 +666,10 @@ static int system76_add(struct acpi_device *acpi_dev)
        acpi_dev->driver_data = data;
        data->acpi_dev = acpi_dev;
 
+       // Some models do not run open EC firmware. Check for an ACPI method
+       // that only exists on open EC to guard functionality specific to it.
+       data->has_open_ec = acpi_has_method(acpi_device_handle(data->acpi_dev), "NFAN");
+
        err = system76_get(data, "INIT");
        if (err)
                return err;
@@ -718,27 +715,31 @@ static int system76_add(struct acpi_device *acpi_dev)
        if (err)
                goto error;
 
-       err = system76_get_object(data, "NFAN", &data->nfan);
-       if (err)
-               goto error;
+       if (data->has_open_ec) {
+               err = system76_get_object(data, "NFAN", &data->nfan);
+               if (err)
+                       goto error;
 
-       err = system76_get_object(data, "NTMP", &data->ntmp);
-       if (err)
-               goto error;
+               err = system76_get_object(data, "NTMP", &data->ntmp);
+               if (err)
+                       goto error;
 
-       data->therm = devm_hwmon_device_register_with_info(&acpi_dev->dev,
-               "system76_acpi", data, &thermal_chip_info, NULL);
-       err = PTR_ERR_OR_ZERO(data->therm);
-       if (err)
-               goto error;
+               data->therm = devm_hwmon_device_register_with_info(&acpi_dev->dev,
+                       "system76_acpi", data, &thermal_chip_info, NULL);
+               err = PTR_ERR_OR_ZERO(data->therm);
+               if (err)
+                       goto error;
 
-       system76_battery_init();
+               system76_battery_init();
+       }
 
        return 0;
 
 error:
-       kfree(data->ntmp);
-       kfree(data->nfan);
+       if (data->has_open_ec) {
+               kfree(data->ntmp);
+               kfree(data->nfan);
+       }
        return err;
 }
 
@@ -749,14 +750,15 @@ static int system76_remove(struct acpi_device *acpi_dev)
 
        data = acpi_driver_data(acpi_dev);
 
-       system76_battery_exit();
+       if (data->has_open_ec) {
+               system76_battery_exit();
+               kfree(data->nfan);
+               kfree(data->ntmp);
+       }
 
        devm_led_classdev_unregister(&acpi_dev->dev, &data->ap_led);
        devm_led_classdev_unregister(&acpi_dev->dev, &data->kb_led);
 
-       kfree(data->nfan);
-       kfree(data->ntmp);
-
        system76_get(data, "FINI");
 
        return 0;
index b3ac9c3f3b7c647199216c20d7c5096cb1022a71..bb1abb947e1eaf5061944fc6fac218affc67069e 100644 (file)
@@ -3015,6 +3015,8 @@ static struct attribute *hotkey_attributes[] = {
        &dev_attr_hotkey_all_mask.attr,
        &dev_attr_hotkey_adaptive_all_mask.attr,
        &dev_attr_hotkey_recommended_mask.attr,
+       &dev_attr_hotkey_tablet_mode.attr,
+       &dev_attr_hotkey_radio_sw.attr,
 #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
        &dev_attr_hotkey_source_mask.attr,
        &dev_attr_hotkey_poll_freq.attr,
@@ -5726,11 +5728,11 @@ static const char * const tpacpi_led_names[TPACPI_LED_NUMLEDS] = {
        "tpacpi::standby",
        "tpacpi::dock_status1",
        "tpacpi::dock_status2",
-       "tpacpi::unknown_led2",
+       "tpacpi::lid_logo_dot",
        "tpacpi::unknown_led3",
        "tpacpi::thinkvantage",
 };
-#define TPACPI_SAFE_LEDS       0x1081U
+#define TPACPI_SAFE_LEDS       0x1481U
 
 static inline bool tpacpi_is_led_restricted(const unsigned int led)
 {
index fa8812039b82b760eadd588665524f73da3327b6..17dd54d4b783c391cdf0fb7c84a7e3dd17dd6ef1 100644 (file)
@@ -905,6 +905,16 @@ static const struct ts_dmi_data trekstor_primetab_t13b_data = {
        .properties = trekstor_primetab_t13b_props,
 };
 
+static const struct property_entry trekstor_surftab_duo_w1_props[] = {
+       PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+       { }
+};
+
+static const struct ts_dmi_data trekstor_surftab_duo_w1_data = {
+       .acpi_name      = "GDIX1001:00",
+       .properties     = trekstor_surftab_duo_w1_props,
+};
+
 static const struct property_entry trekstor_surftab_twin_10_1_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-min-x", 20),
        PROPERTY_ENTRY_U32("touchscreen-min-y", 0),
@@ -1502,6 +1512,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Primetab T13B"),
                },
        },
+       {
+               /* TrekStor SurfTab duo W1 10.1 ST10432-10b */
+               .driver_data = (void *)&trekstor_surftab_duo_w1_data,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab duo W1 10.1 (VT4)"),
+               },
+       },
        {
                /* TrekStor SurfTab twin 10.1 ST10432-8 */
                .driver_data = (void *)&trekstor_surftab_twin_10_1_data,
index b9fac786246ab5c076da2d6248bfeb136adbbc31..2a5c1829aab790f6b4ade183ec96e2eca84c357b 100644 (file)
@@ -463,17 +463,12 @@ int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent)
 
 static int __init init_dtpm(void)
 {
-       struct dtpm_descr *dtpm_descr;
-
        pct = powercap_register_control_type(NULL, "dtpm", NULL);
        if (IS_ERR(pct)) {
                pr_err("Failed to register control type\n");
                return PTR_ERR(pct);
        }
 
-       for_each_dtpm_table(dtpm_descr)
-               dtpm_descr->init();
-
        return 0;
 }
 late_initcall(init_dtpm);
index 4c5bba52b10593890c9a95ccea929148db9cdc5f..24d3395964cc4ba2d3934a32299fef3f667cd45f 100644 (file)
@@ -20,7 +20,6 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
        struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
        struct mrq_reset_request request;
        struct tegra_bpmp_message msg;
-       int err;
 
        memset(&request, 0, sizeof(request));
        request.cmd = command;
@@ -31,13 +30,7 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
        msg.tx.data = &request;
        msg.tx.size = sizeof(request);
 
-       err = tegra_bpmp_transfer(bpmp, &msg);
-       if (err)
-               return err;
-       if (msg.rx.ret)
-               return -EINVAL;
-
-       return 0;
+       return tegra_bpmp_transfer(bpmp, &msg);
 }
 
 static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
index b940e0268f96fad37563e49782fafa7b240a0588..e83453bea2aee1eaf2957b5223f8b41685c7760c 100644 (file)
@@ -5095,14 +5095,9 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                /* NPort Recovery mode or node is just allocated */
                if (!lpfc_nlp_not_used(ndlp)) {
                        /* A LOGO is completing and the node is in NPR state.
-                        * If this a fabric node that cleared its transport
-                        * registration, release the rpi.
+                        * Just unregister the RPI because the node is still
+                        * required.
                         */
-                       spin_lock_irq(&ndlp->lock);
-                       ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
-                       if (phba->sli_rev == LPFC_SLI_REV4)
-                               ndlp->nlp_flag |= NLP_RELEASE_RPI;
-                       spin_unlock_irq(&ndlp->lock);
                        lpfc_unreg_rpi(vport, ndlp);
                } else {
                        /* Indicate the node has already released, should
index bed8cc125544841cf523ace529729203e1978b24..fbfeb0b046ddddfa490772a78e9a082a42084d0b 100644 (file)
@@ -282,12 +282,12 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
        if (rc) {
                pm8001_dbg(pm8001_ha, FAIL,
                           "pm8001_setup_irq failed [ret: %d]\n", rc);
-               goto err_out_shost;
+               goto err_out;
        }
        /* Request Interrupt */
        rc = pm8001_request_irq(pm8001_ha);
        if (rc)
-               goto err_out_shost;
+               goto err_out;
 
        count = pm8001_ha->max_q_num;
        /* Queues are chosen based on the number of cores/msix availability */
@@ -423,8 +423,6 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
        pm8001_tag_init(pm8001_ha);
        return 0;
 
-err_out_shost:
-       scsi_remove_host(pm8001_ha->shost);
 err_out_nodev:
        for (i = 0; i < pm8001_ha->max_memcnt; i++) {
                if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
index b9f6d83ff380c872bba3d5e40f65043d2a892d7f..2101fc5761c3c2c00f049255a6c2aa3162f09eb8 100644 (file)
@@ -3053,7 +3053,6 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
        struct smp_completion_resp *psmpPayload;
        struct task_status_struct *ts;
        struct pm8001_device *pm8001_dev;
-       char *pdma_respaddr = NULL;
 
        psmpPayload = (struct smp_completion_resp *)(piomb + 4);
        status = le32_to_cpu(psmpPayload->status);
@@ -3080,19 +3079,23 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
                if (pm8001_dev)
                        atomic_dec(&pm8001_dev->running_req);
                if (pm8001_ha->smp_exp_mode == SMP_DIRECT) {
+                       struct scatterlist *sg_resp = &t->smp_task.smp_resp;
+                       u8 *payload;
+                       void *to;
+
                        pm8001_dbg(pm8001_ha, IO,
                                   "DIRECT RESPONSE Length:%d\n",
                                   param);
-                       pdma_respaddr = (char *)(phys_to_virt(cpu_to_le64
-                                               ((u64)sg_dma_address
-                                               (&t->smp_task.smp_resp))));
+                       to = kmap_atomic(sg_page(sg_resp));
+                       payload = to + sg_resp->offset;
                        for (i = 0; i < param; i++) {
-                               *(pdma_respaddr+i) = psmpPayload->_r_a[i];
+                               *(payload + i) = psmpPayload->_r_a[i];
                                pm8001_dbg(pm8001_ha, IO,
                                           "SMP Byte%d DMA data 0x%x psmp 0x%x\n",
-                                          i, *(pdma_respaddr + i),
+                                          i, *(payload + i),
                                           psmpPayload->_r_a[i]);
                        }
+                       kunmap_atomic(to);
                }
                break;
        case IO_ABORTED:
@@ -4236,14 +4239,14 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
        struct sas_task *task = ccb->task;
        struct domain_device *dev = task->dev;
        struct pm8001_device *pm8001_dev = dev->lldd_dev;
-       struct scatterlist *sg_req, *sg_resp;
+       struct scatterlist *sg_req, *sg_resp, *smp_req;
        u32 req_len, resp_len;
        struct smp_req smp_cmd;
        u32 opc;
        struct inbound_queue_table *circularQ;
-       char *preq_dma_addr = NULL;
-       __le64 tmp_addr;
        u32 i, length;
+       u8 *payload;
+       u8 *to;
 
        memset(&smp_cmd, 0, sizeof(smp_cmd));
        /*
@@ -4280,8 +4283,9 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
                pm8001_ha->smp_exp_mode = SMP_INDIRECT;
 
 
-       tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
-       preq_dma_addr = (char *)phys_to_virt(tmp_addr);
+       smp_req = &task->smp_task.smp_req;
+       to = kmap_atomic(sg_page(smp_req));
+       payload = to + smp_req->offset;
 
        /* INDIRECT MODE command settings. Use DMA */
        if (pm8001_ha->smp_exp_mode == SMP_INDIRECT) {
@@ -4289,7 +4293,7 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
                /* for SPCv indirect mode. Place the top 4 bytes of
                 * SMP Request header here. */
                for (i = 0; i < 4; i++)
-                       smp_cmd.smp_req16[i] = *(preq_dma_addr + i);
+                       smp_cmd.smp_req16[i] = *(payload + i);
                /* exclude top 4 bytes for SMP req header */
                smp_cmd.long_smp_req.long_req_addr =
                        cpu_to_le64((u64)sg_dma_address
@@ -4320,20 +4324,20 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
                pm8001_dbg(pm8001_ha, IO, "SMP REQUEST DIRECT MODE\n");
                for (i = 0; i < length; i++)
                        if (i < 16) {
-                               smp_cmd.smp_req16[i] = *(preq_dma_addr+i);
+                               smp_cmd.smp_req16[i] = *(payload + i);
                                pm8001_dbg(pm8001_ha, IO,
                                           "Byte[%d]:%x (DMA data:%x)\n",
                                           i, smp_cmd.smp_req16[i],
-                                          *(preq_dma_addr));
+                                          *(payload));
                        } else {
-                               smp_cmd.smp_req[i] = *(preq_dma_addr+i);
+                               smp_cmd.smp_req[i] = *(payload + i);
                                pm8001_dbg(pm8001_ha, IO,
                                           "Byte[%d]:%x (DMA data:%x)\n",
                                           i, smp_cmd.smp_req[i],
-                                          *(preq_dma_addr));
+                                          *(payload));
                        }
        }
-
+       kunmap_atomic(to);
        build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag,
                                &smp_cmd, pm8001_ha->smp_exp_mode, length);
        rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &smp_cmd,
index 84a4204a2cb472f0cffe8df555b0bf56e6dec823..5916ed7662d56ea79ccce4be337f319e49baae06 100644 (file)
@@ -732,7 +732,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
 {
        struct qedi_work_map *work, *work_tmp;
        u32 proto_itt = cqe->itid;
-       itt_t protoitt = 0;
        int found = 0;
        struct qedi_cmd *qedi_cmd = NULL;
        u32 iscsi_cid;
@@ -812,16 +811,12 @@ unlock:
        return;
 
 check_cleanup_reqs:
-       if (qedi_conn->cmd_cleanup_req > 0) {
-               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+       if (atomic_inc_return(&qedi_conn->cmd_cleanup_cmpl) ==
+           qedi_conn->cmd_cleanup_req) {
+               QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
                          "Freeing tid=0x%x for cid=0x%x\n",
                          cqe->itid, qedi_conn->iscsi_conn_id);
-               qedi_conn->cmd_cleanup_cmpl++;
                wake_up(&qedi_conn->wait_queue);
-       } else {
-               QEDI_ERR(&qedi->dbg_ctx,
-                        "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x\n",
-                        protoitt, cqe->itid, qedi_conn->iscsi_conn_id);
        }
 }
 
@@ -1163,7 +1158,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
        }
 
        qedi_conn->cmd_cleanup_req = 0;
-       qedi_conn->cmd_cleanup_cmpl = 0;
+       atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0);
 
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
                  "active_cmd_count=%d, cid=0x%x, in_recovery=%d, lun_reset=%d\n",
@@ -1215,16 +1210,15 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
                  qedi_conn->iscsi_conn_id);
 
        rval  = wait_event_interruptible_timeout(qedi_conn->wait_queue,
-                                                ((qedi_conn->cmd_cleanup_req ==
-                                                qedi_conn->cmd_cleanup_cmpl) ||
-                                                test_bit(QEDI_IN_RECOVERY,
-                                                         &qedi->flags)),
-                                                5 * HZ);
+                               (qedi_conn->cmd_cleanup_req ==
+                                atomic_read(&qedi_conn->cmd_cleanup_cmpl)) ||
+                               test_bit(QEDI_IN_RECOVERY, &qedi->flags),
+                               5 * HZ);
        if (rval) {
                QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
                          "i/o cmd_cleanup_req=%d, equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
                          qedi_conn->cmd_cleanup_req,
-                         qedi_conn->cmd_cleanup_cmpl,
+                         atomic_read(&qedi_conn->cmd_cleanup_cmpl),
                          qedi_conn->iscsi_conn_id);
 
                return 0;
@@ -1233,7 +1227,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
                  "i/o cmd_cleanup_req=%d, not equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
                  qedi_conn->cmd_cleanup_req,
-                 qedi_conn->cmd_cleanup_cmpl,
+                 atomic_read(&qedi_conn->cmd_cleanup_cmpl),
                  qedi_conn->iscsi_conn_id);
 
        iscsi_host_for_each_session(qedi->shost,
@@ -1242,11 +1236,10 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
 
        /* Enable IOs for all other sessions except current.*/
        if (!wait_event_interruptible_timeout(qedi_conn->wait_queue,
-                                             (qedi_conn->cmd_cleanup_req ==
-                                              qedi_conn->cmd_cleanup_cmpl) ||
-                                              test_bit(QEDI_IN_RECOVERY,
-                                                       &qedi->flags),
-                                             5 * HZ)) {
+                               (qedi_conn->cmd_cleanup_req ==
+                                atomic_read(&qedi_conn->cmd_cleanup_cmpl)) ||
+                               test_bit(QEDI_IN_RECOVERY, &qedi->flags),
+                               5 * HZ)) {
                iscsi_host_for_each_session(qedi->shost,
                                            qedi_mark_device_available);
                return -1;
@@ -1266,7 +1259,7 @@ void qedi_clearsq(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
 
        qedi_ep = qedi_conn->ep;
        qedi_conn->cmd_cleanup_req = 0;
-       qedi_conn->cmd_cleanup_cmpl = 0;
+       atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0);
 
        if (!qedi_ep) {
                QEDI_WARN(&qedi->dbg_ctx,
index 88aa7d8b11c9a2508638135896c9a035e810683f..282ecb4e39bbdcf7f64fd63909ead221fad7722f 100644 (file)
@@ -412,7 +412,7 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
        qedi_conn->iscsi_conn_id = qedi_ep->iscsi_cid;
        qedi_conn->fw_cid = qedi_ep->fw_cid;
        qedi_conn->cmd_cleanup_req = 0;
-       qedi_conn->cmd_cleanup_cmpl = 0;
+       atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0);
 
        if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn)) {
                rc = -EINVAL;
index a282860da0aa08c4debeee9f457241645190d33f..9b9f2e44fdde480b53e950b5c8a1291033fd0c4a 100644 (file)
@@ -155,7 +155,7 @@ struct qedi_conn {
        spinlock_t list_lock;           /* internal conn lock */
        u32 active_cmd_count;
        u32 cmd_cleanup_req;
-       u32 cmd_cleanup_cmpl;
+       atomic_t cmd_cleanup_cmpl;
 
        u32 iscsi_conn_id;
        int itt;
index 25549a8a2d72dd7bbe1ad05a34c80f082bcf3403..7cf1f78cbaeee6e0fec9f0ba8460f8d187b6cac0 100644 (file)
@@ -2491,6 +2491,9 @@ ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
        struct va_format vaf;
        char pbuf[64];
 
+       if (!ql_mask_match(level) && !trace_ql_dbg_log_enabled())
+               return;
+
        va_start(va, fmt);
 
        vaf.fmt = fmt;
index 3c0da3770edf9193f192b7cb871e5db384be7ea4..2104973a35cd35695a5b47fd54912149a53f9f28 100644 (file)
@@ -4342,7 +4342,7 @@ static int resp_report_zones(struct scsi_cmnd *scp,
        rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
                            max_zones);
 
-       arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC);
+       arr = kzalloc(alloc_len, GFP_ATOMIC);
        if (!arr) {
                mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
                                INSUFF_RES_ASCQ);
index 51424557810dab378a5178d2f7beda751fa12a94..f725248ba57f428f107a822880a4ebde605135ab 100644 (file)
@@ -421,6 +421,13 @@ static int ufs_intel_lkf_init(struct ufs_hba *hba)
        return err;
 }
 
+static int ufs_intel_adl_init(struct ufs_hba *hba)
+{
+       hba->nop_out_timeout = 200;
+       hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
+       return ufs_intel_common_init(hba);
+}
+
 static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
        .name                   = "intel-pci",
        .init                   = ufs_intel_common_init,
@@ -449,6 +456,15 @@ static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
        .device_reset           = ufs_intel_device_reset,
 };
 
+static struct ufs_hba_variant_ops ufs_intel_adl_hba_vops = {
+       .name                   = "intel-pci",
+       .init                   = ufs_intel_adl_init,
+       .exit                   = ufs_intel_common_exit,
+       .link_startup_notify    = ufs_intel_link_startup_notify,
+       .resume                 = ufs_intel_resume,
+       .device_reset           = ufs_intel_device_reset,
+};
+
 #ifdef CONFIG_PM_SLEEP
 static int ufshcd_pci_restore(struct device *dev)
 {
@@ -563,6 +579,8 @@ static const struct pci_device_id ufshcd_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
        { PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
        { PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops },
+       { PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
+       { PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
        { }     /* terminate list */
 };
 
index 519b3651d1d966903b8c9e7bf0a1c21c550def12..c2f076b56e2471f1736baaf1042a6e6ffcd49e00 100644 (file)
@@ -17,6 +17,7 @@
 
 #define BLK_SFT_RSTN   0x0
 #define BLK_CLK_EN     0x4
+#define BLK_MIPI_RESET_DIV     0x8 /* Mini/Nano DISPLAY_BLK_CTRL only */
 
 struct imx8m_blk_ctrl_domain;
 
@@ -36,6 +37,15 @@ struct imx8m_blk_ctrl_domain_data {
        const char *gpc_name;
        u32 rst_mask;
        u32 clk_mask;
+
+       /*
+        * i.MX8M Mini and Nano have a third DISPLAY_BLK_CTRL register
+        * which is used to control the reset for the MIPI Phy.
+        * Since it's only present in certain circumstances,
+        * an if-statement should be used before setting and clearing this
+        * register.
+        */
+       u32 mipi_phy_rst_mask;
 };
 
 #define DOMAIN_MAX_CLKS 3
@@ -78,6 +88,8 @@ static int imx8m_blk_ctrl_power_on(struct generic_pm_domain *genpd)
 
        /* put devices into reset */
        regmap_clear_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
+       if (data->mipi_phy_rst_mask)
+               regmap_clear_bits(bc->regmap, BLK_MIPI_RESET_DIV, data->mipi_phy_rst_mask);
 
        /* enable upstream and blk-ctrl clocks to allow reset to propagate */
        ret = clk_bulk_prepare_enable(data->num_clks, domain->clks);
@@ -99,6 +111,8 @@ static int imx8m_blk_ctrl_power_on(struct generic_pm_domain *genpd)
 
        /* release reset */
        regmap_set_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
+       if (data->mipi_phy_rst_mask)
+               regmap_set_bits(bc->regmap, BLK_MIPI_RESET_DIV, data->mipi_phy_rst_mask);
 
        /* disable upstream clocks */
        clk_bulk_disable_unprepare(data->num_clks, domain->clks);
@@ -120,6 +134,9 @@ static int imx8m_blk_ctrl_power_off(struct generic_pm_domain *genpd)
        struct imx8m_blk_ctrl *bc = domain->bc;
 
        /* put devices into reset and disable clocks */
+       if (data->mipi_phy_rst_mask)
+               regmap_clear_bits(bc->regmap, BLK_MIPI_RESET_DIV, data->mipi_phy_rst_mask);
+
        regmap_clear_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
        regmap_clear_bits(bc->regmap, BLK_CLK_EN, data->clk_mask);
 
@@ -480,6 +497,7 @@ static const struct imx8m_blk_ctrl_domain_data imx8mm_disp_blk_ctl_domain_data[]
                .gpc_name = "mipi-dsi",
                .rst_mask = BIT(5),
                .clk_mask = BIT(8) | BIT(9),
+               .mipi_phy_rst_mask = BIT(17),
        },
        [IMX8MM_DISPBLK_PD_MIPI_CSI] = {
                .name = "dispblk-mipi-csi",
@@ -488,6 +506,7 @@ static const struct imx8m_blk_ctrl_domain_data imx8mm_disp_blk_ctl_domain_data[]
                .gpc_name = "mipi-csi",
                .rst_mask = BIT(3) | BIT(4),
                .clk_mask = BIT(10) | BIT(11),
+               .mipi_phy_rst_mask = BIT(16),
        },
 };
 
index ac6d856ba228d451d5abfdb8525c670c812ffdc4..77bc12039c3d444ef81a7404b767523d7acf166b 100644 (file)
@@ -36,6 +36,10 @@ static int __init imx_soc_device_init(void)
        int ret;
        int i;
 
+       /* Return early if this is running on devices with different SoCs */
+       if (!__mxc_cpu_type)
+               return 0;
+
        if (of_machine_is_compatible("fsl,ls1021a"))
                return 0;
 
index f2151815db585b3f22bd968f0f1e984069b3b3fb..e714ed3b61bc368a7100869253657bef32da25e7 100644 (file)
@@ -320,7 +320,7 @@ static struct platform_driver tegra_fuse_driver = {
 };
 builtin_platform_driver(tegra_fuse_driver);
 
-bool __init tegra_fuse_read_spare(unsigned int spare)
+u32 __init tegra_fuse_read_spare(unsigned int spare)
 {
        unsigned int offset = fuse->soc->info->spare + spare * 4;
 
index de58feba0435015fecd06f20acdcbd911f52f464..ecff0c08e9595ace7f69deb0d99183d4ba0a6235 100644 (file)
@@ -65,7 +65,7 @@ struct tegra_fuse {
 void tegra_init_revision(void);
 void tegra_init_apbmisc(void);
 
-bool __init tegra_fuse_read_spare(unsigned int spare);
+u32 __init tegra_fuse_read_spare(unsigned int spare);
 u32 __init tegra_fuse_read_early(unsigned int offset);
 
 u8 tegra_get_major_rev(void);
index 46feafe4e201c9b30ed006eb346fc47b5ac8887f..d8cc4b270644adfb06a0b55262eb8eecf9abf9d7 100644 (file)
@@ -901,7 +901,7 @@ static int a3700_spi_probe(struct platform_device *pdev)
        return 0;
 
 error_clk:
-       clk_disable_unprepare(spi->clk);
+       clk_unprepare(spi->clk);
 error:
        spi_master_put(master);
 out:
index da6b88e80dc07e0afd1fc1d7eb9235c27a53a60b..297dc62bca2986f014c4b4060c95d112a807fe7d 100644 (file)
@@ -203,9 +203,8 @@ static int copy_ta_binary(struct tee_context *ctx, void *ptr, void **ta,
 
        *ta_size = roundup(fw->size, PAGE_SIZE);
        *ta = (void *)__get_free_pages(GFP_KERNEL, get_order(*ta_size));
-       if (IS_ERR(*ta)) {
-               pr_err("%s: get_free_pages failed 0x%llx\n", __func__,
-                      (u64)*ta);
+       if (!*ta) {
+               pr_err("%s: get_free_pages failed\n", __func__);
                rc = -ENOMEM;
                goto rel_fw;
        }
index ab2edfcc6c7042bbdcea826ceb7c118ca06a5fe2..2a66a5203d2fad30b8943d0f5ade421ec76dbc7f 100644 (file)
@@ -48,10 +48,8 @@ int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm,
                        goto err;
                }
 
-               for (i = 0; i < nr_pages; i++) {
-                       pages[i] = page;
-                       page++;
-               }
+               for (i = 0; i < nr_pages; i++)
+                       pages[i] = page + i;
 
                shm->flags |= TEE_SHM_REGISTER;
                rc = shm_register(shm->ctx, shm, pages, nr_pages,
index 6196d7c3888f52e33eafa1c1d7314936c343a9a3..cf2e3293567d92e7ab69af5e33e400f057607061 100644 (file)
@@ -23,6 +23,7 @@
 #include "optee_private.h"
 #include "optee_smc.h"
 #include "optee_rpc_cmd.h"
+#include <linux/kmemleak.h>
 #define CREATE_TRACE_POINTS
 #include "optee_trace.h"
 
@@ -783,6 +784,7 @@ static void optee_handle_rpc(struct tee_context *ctx,
                        param->a4 = 0;
                        param->a5 = 0;
                }
+               kmemleak_not_leak(shm);
                break;
        case OPTEE_SMC_RPC_FUNC_FREE:
                shm = reg_pair_to_ptr(param->a1, param->a2);
index 8a8deb95e918e90b4ff17005579863528ad24c7c..499fccba3d74bd5471eea8aa609811c00ea17029 100644 (file)
@@ -1,20 +1,17 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2015-2016, Linaro Limited
+ * Copyright (c) 2015-2017, 2019-2021 Linaro Limited
  */
+#include <linux/anon_inodes.h>
 #include <linux/device.h>
-#include <linux/dma-buf.h>
-#include <linux/fdtable.h>
 #include <linux/idr.h>
+#include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/tee_drv.h>
 #include <linux/uio.h>
-#include <linux/module.h>
 #include "tee_private.h"
 
-MODULE_IMPORT_NS(DMA_BUF);
-
 static void release_registered_pages(struct tee_shm *shm)
 {
        if (shm->pages) {
@@ -31,16 +28,8 @@ static void release_registered_pages(struct tee_shm *shm)
        }
 }
 
-static void tee_shm_release(struct tee_shm *shm)
+static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
 {
-       struct tee_device *teedev = shm->ctx->teedev;
-
-       if (shm->flags & TEE_SHM_DMA_BUF) {
-               mutex_lock(&teedev->mutex);
-               idr_remove(&teedev->idr, shm->id);
-               mutex_unlock(&teedev->mutex);
-       }
-
        if (shm->flags & TEE_SHM_POOL) {
                struct tee_shm_pool_mgr *poolm;
 
@@ -67,45 +56,6 @@ static void tee_shm_release(struct tee_shm *shm)
        tee_device_put(teedev);
 }
 
-static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment
-                       *attach, enum dma_data_direction dir)
-{
-       return NULL;
-}
-
-static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach,
-                                    struct sg_table *table,
-                                    enum dma_data_direction dir)
-{
-}
-
-static void tee_shm_op_release(struct dma_buf *dmabuf)
-{
-       struct tee_shm *shm = dmabuf->priv;
-
-       tee_shm_release(shm);
-}
-
-static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
-{
-       struct tee_shm *shm = dmabuf->priv;
-       size_t size = vma->vm_end - vma->vm_start;
-
-       /* Refuse sharing shared memory provided by application */
-       if (shm->flags & TEE_SHM_USER_MAPPED)
-               return -EINVAL;
-
-       return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
-                              size, vma->vm_page_prot);
-}
-
-static const struct dma_buf_ops tee_shm_dma_buf_ops = {
-       .map_dma_buf = tee_shm_op_map_dma_buf,
-       .unmap_dma_buf = tee_shm_op_unmap_dma_buf,
-       .release = tee_shm_op_release,
-       .mmap = tee_shm_op_mmap,
-};
-
 struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
 {
        struct tee_device *teedev = ctx->teedev;
@@ -140,6 +90,7 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
                goto err_dev_put;
        }
 
+       refcount_set(&shm->refcount, 1);
        shm->flags = flags | TEE_SHM_POOL;
        shm->ctx = ctx;
        if (flags & TEE_SHM_DMA_BUF)
@@ -153,10 +104,7 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
                goto err_kfree;
        }
 
-
        if (flags & TEE_SHM_DMA_BUF) {
-               DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
                mutex_lock(&teedev->mutex);
                shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
                mutex_unlock(&teedev->mutex);
@@ -164,28 +112,11 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
                        ret = ERR_PTR(shm->id);
                        goto err_pool_free;
                }
-
-               exp_info.ops = &tee_shm_dma_buf_ops;
-               exp_info.size = shm->size;
-               exp_info.flags = O_RDWR;
-               exp_info.priv = shm;
-
-               shm->dmabuf = dma_buf_export(&exp_info);
-               if (IS_ERR(shm->dmabuf)) {
-                       ret = ERR_CAST(shm->dmabuf);
-                       goto err_rem;
-               }
        }
 
        teedev_ctx_get(ctx);
 
        return shm;
-err_rem:
-       if (flags & TEE_SHM_DMA_BUF) {
-               mutex_lock(&teedev->mutex);
-               idr_remove(&teedev->idr, shm->id);
-               mutex_unlock(&teedev->mutex);
-       }
 err_pool_free:
        poolm->ops->free(poolm, shm);
 err_kfree:
@@ -246,6 +177,7 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
                goto err;
        }
 
+       refcount_set(&shm->refcount, 1);
        shm->flags = flags | TEE_SHM_REGISTER;
        shm->ctx = ctx;
        shm->id = -1;
@@ -306,22 +238,6 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
                goto err;
        }
 
-       if (flags & TEE_SHM_DMA_BUF) {
-               DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
-               exp_info.ops = &tee_shm_dma_buf_ops;
-               exp_info.size = shm->size;
-               exp_info.flags = O_RDWR;
-               exp_info.priv = shm;
-
-               shm->dmabuf = dma_buf_export(&exp_info);
-               if (IS_ERR(shm->dmabuf)) {
-                       ret = ERR_CAST(shm->dmabuf);
-                       teedev->desc->ops->shm_unregister(ctx, shm);
-                       goto err;
-               }
-       }
-
        return shm;
 err:
        if (shm) {
@@ -339,6 +255,35 @@ err:
 }
 EXPORT_SYMBOL_GPL(tee_shm_register);
 
+static int tee_shm_fop_release(struct inode *inode, struct file *filp)
+{
+       tee_shm_put(filp->private_data);
+       return 0;
+}
+
+static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       struct tee_shm *shm = filp->private_data;
+       size_t size = vma->vm_end - vma->vm_start;
+
+       /* Refuse sharing shared memory provided by application */
+       if (shm->flags & TEE_SHM_USER_MAPPED)
+               return -EINVAL;
+
+       /* check for overflowing the buffer's size */
+       if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
+               return -EINVAL;
+
+       return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
+                              size, vma->vm_page_prot);
+}
+
+static const struct file_operations tee_shm_fops = {
+       .owner = THIS_MODULE,
+       .release = tee_shm_fop_release,
+       .mmap = tee_shm_fop_mmap,
+};
+
 /**
  * tee_shm_get_fd() - Increase reference count and return file descriptor
  * @shm:       Shared memory handle
@@ -351,10 +296,11 @@ int tee_shm_get_fd(struct tee_shm *shm)
        if (!(shm->flags & TEE_SHM_DMA_BUF))
                return -EINVAL;
 
-       get_dma_buf(shm->dmabuf);
-       fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
+       /* matched by tee_shm_put() in tee_shm_fop_release() */
+       refcount_inc(&shm->refcount);
+       fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR);
        if (fd < 0)
-               dma_buf_put(shm->dmabuf);
+               tee_shm_put(shm);
        return fd;
 }
 
@@ -364,17 +310,7 @@ int tee_shm_get_fd(struct tee_shm *shm)
  */
 void tee_shm_free(struct tee_shm *shm)
 {
-       /*
-        * dma_buf_put() decreases the dmabuf reference counter and will
-        * call tee_shm_release() when the last reference is gone.
-        *
-        * In the case of driver private memory we call tee_shm_release
-        * directly instead as it doesn't have a reference counter.
-        */
-       if (shm->flags & TEE_SHM_DMA_BUF)
-               dma_buf_put(shm->dmabuf);
-       else
-               tee_shm_release(shm);
+       tee_shm_put(shm);
 }
 EXPORT_SYMBOL_GPL(tee_shm_free);
 
@@ -481,10 +417,15 @@ struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
        teedev = ctx->teedev;
        mutex_lock(&teedev->mutex);
        shm = idr_find(&teedev->idr, id);
+       /*
+        * If the tee_shm was found in the IDR it must have a refcount
+        * larger than 0 due to the guarantee in tee_shm_put() below. So
+        * it's safe to use refcount_inc().
+        */
        if (!shm || shm->ctx != ctx)
                shm = ERR_PTR(-EINVAL);
-       else if (shm->flags & TEE_SHM_DMA_BUF)
-               get_dma_buf(shm->dmabuf);
+       else
+               refcount_inc(&shm->refcount);
        mutex_unlock(&teedev->mutex);
        return shm;
 }
@@ -496,7 +437,24 @@ EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
  */
 void tee_shm_put(struct tee_shm *shm)
 {
-       if (shm->flags & TEE_SHM_DMA_BUF)
-               dma_buf_put(shm->dmabuf);
+       struct tee_device *teedev = shm->ctx->teedev;
+       bool do_release = false;
+
+       mutex_lock(&teedev->mutex);
+       if (refcount_dec_and_test(&shm->refcount)) {
+               /*
+                * refcount has reached 0, we must now remove it from the
+                * IDR before releasing the mutex. This will guarantee that
+                * the refcount_inc() in tee_shm_get_from_id() never starts
+                * from 0.
+                */
+               if (shm->flags & TEE_SHM_DMA_BUF)
+                       idr_remove(&teedev->idr, shm->id);
+               do_release = true;
+       }
+       mutex_unlock(&teedev->mutex);
+
+       if (do_release)
+               tee_shm_release(teedev, shm);
 }
 EXPORT_SYMBOL_GPL(tee_shm_put);
index b25b54d4bac1ad0acb3e295b4c4acabb379d1322..e693ec8234fbcea7572accb8c733abab90945227 100644 (file)
@@ -29,7 +29,7 @@ static const char * const fivr_strings[] = {
 };
 
 static const struct mmio_reg tgl_fivr_mmio_regs[] = {
-       { 0, 0x5A18, 3, 0x7, 12}, /* vco_ref_code_lo */
+       { 0, 0x5A18, 3, 0x7, 11}, /* vco_ref_code_lo */
        { 0, 0x5A18, 8, 0xFF, 16}, /* vco_ref_code_hi */
        { 0, 0x5A08, 8, 0xFF, 0}, /* spread_spectrum_pct */
        { 0, 0x5A08, 1, 0x1, 8}, /* spread_spectrum_clk_enable */
index 71e0dd2c0ce5b76717c96232bb9fcedf635a8091..ebaf7500f48f1f4d6335cce34627d1354277d1b6 100644 (file)
@@ -37,6 +37,8 @@ struct xencons_info {
        struct xenbus_device *xbdev;
        struct xencons_interface *intf;
        unsigned int evtchn;
+       XENCONS_RING_IDX out_cons;
+       unsigned int out_cons_same;
        struct hvc_struct *hvc;
        int irq;
        int vtermno;
@@ -138,6 +140,8 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
        XENCONS_RING_IDX cons, prod;
        int recv = 0;
        struct xencons_info *xencons = vtermno_to_xencons(vtermno);
+       unsigned int eoiflag = 0;
+
        if (xencons == NULL)
                return -EINVAL;
        intf = xencons->intf;
@@ -157,7 +161,27 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
        mb();                   /* read ring before consuming */
        intf->in_cons = cons;
 
-       notify_daemon(xencons);
+       /*
+        * When to mark interrupt having been spurious:
+        * - there was no new data to be read, and
+        * - the backend did not consume some output bytes, and
+        * - the previous round with no read data didn't see consumed bytes
+        *   (we might have a race with an interrupt being in flight while
+        *   updating xencons->out_cons, so account for that by allowing one
+        *   round without any visible reason)
+        */
+       if (intf->out_cons != xencons->out_cons) {
+               xencons->out_cons = intf->out_cons;
+               xencons->out_cons_same = 0;
+       }
+       if (recv) {
+               notify_daemon(xencons);
+       } else if (xencons->out_cons_same++ > 1) {
+               eoiflag = XEN_EOI_FLAG_SPURIOUS;
+       }
+
+       xen_irq_lateeoi(xencons->irq, eoiflag);
+
        return recv;
 }
 
@@ -386,7 +410,7 @@ static int xencons_connect_backend(struct xenbus_device *dev,
        if (ret)
                return ret;
        info->evtchn = evtchn;
-       irq = bind_evtchn_to_irq(evtchn);
+       irq = bind_interdomain_evtchn_to_irq_lateeoi(dev, evtchn);
        if (irq < 0)
                return irq;
        info->irq = irq;
@@ -551,7 +575,7 @@ static int __init xen_hvc_init(void)
                        return r;
 
                info = vtermno_to_xencons(HVC_COOKIE);
-               info->irq = bind_evtchn_to_irq(info->evtchn);
+               info->irq = bind_evtchn_to_irq_lateeoi(info->evtchn);
        }
        if (info->irq < 0)
                info->irq = 0; /* NO_IRQ */
index 7e0884ecc74f522ba7ac1a9a7198e8977daf582c..23ba1fc99df8b369ed6318fa85581901fb815d8f 100644 (file)
@@ -140,6 +140,8 @@ struct n_hdlc {
        struct n_hdlc_buf_list  rx_buf_list;
        struct n_hdlc_buf_list  tx_free_buf_list;
        struct n_hdlc_buf_list  rx_free_buf_list;
+       struct work_struct      write_work;
+       struct tty_struct       *tty_for_write_work;
 };
 
 /*
@@ -154,6 +156,7 @@ static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list);
 /* Local functions */
 
 static struct n_hdlc *n_hdlc_alloc(void);
+static void n_hdlc_tty_write_work(struct work_struct *work);
 
 /* max frame size for memory allocations */
 static int maxframe = 4096;
@@ -210,6 +213,8 @@ static void n_hdlc_tty_close(struct tty_struct *tty)
        wake_up_interruptible(&tty->read_wait);
        wake_up_interruptible(&tty->write_wait);
 
+       cancel_work_sync(&n_hdlc->write_work);
+
        n_hdlc_free_buf_list(&n_hdlc->rx_free_buf_list);
        n_hdlc_free_buf_list(&n_hdlc->tx_free_buf_list);
        n_hdlc_free_buf_list(&n_hdlc->rx_buf_list);
@@ -241,6 +246,8 @@ static int n_hdlc_tty_open(struct tty_struct *tty)
                return -ENFILE;
        }
 
+       INIT_WORK(&n_hdlc->write_work, n_hdlc_tty_write_work);
+       n_hdlc->tty_for_write_work = tty;
        tty->disc_data = n_hdlc;
        tty->receive_room = 65536;
 
@@ -334,6 +341,20 @@ check_again:
                goto check_again;
 }      /* end of n_hdlc_send_frames() */
 
+/**
+ * n_hdlc_tty_write_work - Asynchronous callback for transmit wakeup
+ * @work: pointer to work_struct
+ *
+ * Called when low level device driver can accept more send data.
+ */
+static void n_hdlc_tty_write_work(struct work_struct *work)
+{
+       struct n_hdlc *n_hdlc = container_of(work, struct n_hdlc, write_work);
+       struct tty_struct *tty = n_hdlc->tty_for_write_work;
+
+       n_hdlc_send_frames(n_hdlc, tty);
+}      /* end of n_hdlc_tty_write_work() */
+
 /**
  * n_hdlc_tty_wakeup - Callback for transmit wakeup
  * @tty: pointer to associated tty instance data
@@ -344,7 +365,7 @@ static void n_hdlc_tty_wakeup(struct tty_struct *tty)
 {
        struct n_hdlc *n_hdlc = tty->disc_data;
 
-       n_hdlc_send_frames(n_hdlc, tty);
+       schedule_work(&n_hdlc->write_work);
 }      /* end of n_hdlc_tty_wakeup() */
 
 /**
index f1324fe99378dbb6091fee21d98ef6eea902ce56..92e3433276f8aea24b17399bbc04de17ee6c62d4 100644 (file)
@@ -727,10 +727,24 @@ static acpi_status acpi_serdev_add_device(acpi_handle handle, u32 level,
 static int acpi_serdev_register_devices(struct serdev_controller *ctrl)
 {
        acpi_status status;
+       bool skip;
+       int ret;
 
        if (!has_acpi_companion(ctrl->dev.parent))
                return -ENODEV;
 
+       /*
+        * Skip registration on boards where the ACPI tables are known to
+        * contain buggy devices. Note serdev_controller_add() must still
+        * succeed in this case, so that the proper serdev devices can be
+        * added "manually" later.
+        */
+       ret = acpi_quirk_skip_serdev_enumeration(ctrl->dev.parent, &skip);
+       if (ret)
+               return ret;
+       if (skip)
+               return 0;
+
        status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
                                     SERDEV_ACPI_MAX_SCAN_DEPTH,
                                     acpi_serdev_add_device, NULL, ctrl, NULL);
index 7f656fac503fef0dd15521b85ab02feec0d70750..5163d60756b7332845cdb2b1b518b7a592b8bed2 100644 (file)
@@ -237,6 +237,7 @@ struct brcmuart_priv {
        u32             rx_err;
        u32             rx_timeout;
        u32             rx_abort;
+       u32             saved_mctrl;
 };
 
 static struct dentry *brcmuart_debugfs_root;
@@ -1133,16 +1134,27 @@ static int brcmuart_remove(struct platform_device *pdev)
 static int __maybe_unused brcmuart_suspend(struct device *dev)
 {
        struct brcmuart_priv *priv = dev_get_drvdata(dev);
+       struct uart_8250_port *up = serial8250_get_port(priv->line);
+       struct uart_port *port = &up->port;
 
        serial8250_suspend_port(priv->line);
        clk_disable_unprepare(priv->baud_mux_clk);
 
+       /*
+        * This will prevent resume from enabling RTS before the
+        *  baud rate has been restored.
+        */
+       priv->saved_mctrl = port->mctrl;
+       port->mctrl = 0;
+
        return 0;
 }
 
 static int __maybe_unused brcmuart_resume(struct device *dev)
 {
        struct brcmuart_priv *priv = dev_get_drvdata(dev);
+       struct uart_8250_port *up = serial8250_get_port(priv->line);
+       struct uart_port *port = &up->port;
        int ret;
 
        ret = clk_prepare_enable(priv->baud_mux_clk);
@@ -1165,6 +1177,7 @@ static int __maybe_unused brcmuart_resume(struct device *dev)
                start_rx_dma(serial8250_get_port(priv->line));
        }
        serial8250_resume_port(priv->line);
+       port->mctrl = priv->saved_mctrl;
        return 0;
 }
 
index 31c9e83ea3cb2c5bcf62aea19e8957d7fff284a4..251f0018ae8cad4874aab6e0e9a58dd6f257ba1d 100644 (file)
@@ -290,25 +290,6 @@ static void fintek_8250_set_max_fifo(struct fintek_8250 *pdata)
        }
 }
 
-static void fintek_8250_goto_highspeed(struct uart_8250_port *uart,
-                             struct fintek_8250 *pdata)
-{
-       sio_write_reg(pdata, LDN, pdata->index);
-
-       switch (pdata->pid) {
-       case CHIP_ID_F81966:
-       case CHIP_ID_F81866: /* set uart clock for high speed serial mode */
-               sio_write_mask_reg(pdata, F81866_UART_CLK,
-                       F81866_UART_CLK_MASK,
-                       F81866_UART_CLK_14_769MHZ);
-
-               uart->port.uartclk = 921600 * 16;
-               break;
-       default: /* leave clock speed untouched */
-               break;
-       }
-}
-
 static void fintek_8250_set_termios(struct uart_port *port,
                                    struct ktermios *termios,
                                    struct ktermios *old)
@@ -430,7 +411,6 @@ static int probe_setup_port(struct fintek_8250 *pdata,
 
                                fintek_8250_set_irq_mode(pdata, level_mode);
                                fintek_8250_set_max_fifo(pdata);
-                               fintek_8250_goto_highspeed(uart, pdata);
 
                                fintek_8250_exit_key(addr[i]);
 
index 5d43de143f3399459e2d20a2d70bd45c7a7d9a41..60f8fffdfd7765ada0d9a310fa211a088d746ce6 100644 (file)
@@ -1324,29 +1324,33 @@ pericom_do_set_divisor(struct uart_port *port, unsigned int baud,
 {
        int scr;
        int lcr;
-       int actual_baud;
-       int tolerance;
 
-       for (scr = 5 ; scr <= 15 ; scr++) {
-               actual_baud = 921600 * 16 / scr;
-               tolerance = actual_baud / 50;
+       for (scr = 16; scr > 4; scr--) {
+               unsigned int maxrate = port->uartclk / scr;
+               unsigned int divisor = max(maxrate / baud, 1U);
+               int delta = maxrate / divisor - baud;
 
-               if ((baud < actual_baud + tolerance) &&
-                       (baud > actual_baud - tolerance)) {
+               if (baud > maxrate + baud / 50)
+                       continue;
 
+               if (delta > baud / 50)
+                       divisor++;
+
+               if (divisor > 0xffff)
+                       continue;
+
+               /* Update delta due to possible divisor change */
+               delta = maxrate / divisor - baud;
+               if (abs(delta) < baud / 50) {
                        lcr = serial_port_in(port, UART_LCR);
                        serial_port_out(port, UART_LCR, lcr | 0x80);
-
-                       serial_port_out(port, UART_DLL, 1);
-                       serial_port_out(port, UART_DLM, 0);
+                       serial_port_out(port, UART_DLL, divisor & 0xff);
+                       serial_port_out(port, UART_DLM, divisor >> 8 & 0xff);
                        serial_port_out(port, 2, 16 - scr);
                        serial_port_out(port, UART_LCR, lcr);
                        return;
-               } else if (baud > actual_baud) {
-                       break;
                }
        }
-       serial8250_do_set_divisor(port, baud, quot, quot_frac);
 }
 static int pci_pericom_setup(struct serial_private *priv,
                  const struct pciserial_board *board,
@@ -2291,12 +2295,19 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
                .setup      = pci_pericom_setup_four_at_eight,
        },
        {
-               .vendor     = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
+               .vendor     = PCI_VENDOR_ID_ACCESIO,
                .device     = PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
                .subvendor  = PCI_ANY_ID,
                .subdevice  = PCI_ANY_ID,
                .setup      = pci_pericom_setup_four_at_eight,
        },
+       {
+               .vendor     = PCI_VENDOR_ID_ACCESIO,
+               .device     = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
+               .subvendor  = PCI_ANY_ID,
+               .subdevice  = PCI_ANY_ID,
+               .setup      = pci_pericom_setup_four_at_eight,
+       },
        {
                .vendor     = PCI_VENDOR_ID_ACCESIO,
                .device     = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4,
index 5775cbff8f6ebf5916b61087384901b0ed684b4e..46e2079ad1aa2021d8a11ab3181a2ec9ca580ad4 100644 (file)
@@ -2024,13 +2024,6 @@ void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl)
        struct uart_8250_port *up = up_to_u8250p(port);
        unsigned char mcr;
 
-       if (port->rs485.flags & SER_RS485_ENABLED) {
-               if (serial8250_in_MCR(up) & UART_MCR_RTS)
-                       mctrl |= TIOCM_RTS;
-               else
-                       mctrl &= ~TIOCM_RTS;
-       }
-
        mcr = serial8250_TIOCM_to_MCR(mctrl);
 
        mcr = (mcr & up->mcr_mask) | up->mcr_force | up->mcr;
index 6ff94cfcd9dbd3f339c4d880c86b55407ffdbf1c..fc543ac97c13193b13218e71bb8dba88892622d1 100644 (file)
@@ -1533,7 +1533,7 @@ config SERIAL_LITEUART
        tristate "LiteUART serial port support"
        depends on HAS_IOMEM
        depends on OF || COMPILE_TEST
-       depends on LITEX
+       depends on LITEX || COMPILE_TEST
        select SERIAL_CORE
        help
          This driver is for the FPGA-based LiteUART serial controller from LiteX
index d361cd84ff8cfecf95b4d0b6b433796bdf700c24..52518a606c06a2c597a47c1b9c3d4735b2072a9c 100644 (file)
@@ -2947,6 +2947,7 @@ MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);
 
 static const struct acpi_device_id __maybe_unused sbsa_uart_acpi_match[] = {
        { "ARMH0011", 0 },
+       { "ARMHB000", 0 },
        {},
 };
 MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);
index b1e7190ae4836e4e34ffc05bc048de18f110c0c5..ac5112def40d1df35d93ef8eb453fe9d3884d95c 100644 (file)
@@ -2625,6 +2625,7 @@ OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup);
 OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup);
 OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1028a-lpuart", ls1028a_early_console_setup);
 OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup);
+OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8qxp-lpuart", lpuart32_imx_early_console_setup);
 EARLYCON_DECLARE(lpuart, lpuart_early_console_setup);
 EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup);
 
index dbc0559a9157546c6377a433a7071d0b0dd74251..2941659e52747f9d6b1833789b36dcdfc3122532 100644 (file)
@@ -270,8 +270,10 @@ static int liteuart_probe(struct platform_device *pdev)
 
        /* get membase */
        port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
-       if (IS_ERR(port->membase))
-               return PTR_ERR(port->membase);
+       if (IS_ERR(port->membase)) {
+               ret = PTR_ERR(port->membase);
+               goto err_erase_id;
+       }
 
        /* values not from device tree */
        port->dev = &pdev->dev;
@@ -285,7 +287,18 @@ static int liteuart_probe(struct platform_device *pdev)
        port->line = dev_id;
        spin_lock_init(&port->lock);
 
-       return uart_add_one_port(&liteuart_driver, &uart->port);
+       platform_set_drvdata(pdev, port);
+
+       ret = uart_add_one_port(&liteuart_driver, &uart->port);
+       if (ret)
+               goto err_erase_id;
+
+       return 0;
+
+err_erase_id:
+       xa_erase(&liteuart_array, uart->id);
+
+       return ret;
 }
 
 static int liteuart_remove(struct platform_device *pdev)
@@ -293,6 +306,7 @@ static int liteuart_remove(struct platform_device *pdev)
        struct uart_port *port = platform_get_drvdata(pdev);
        struct liteuart_port *uart = to_liteuart_port(port);
 
+       uart_remove_one_port(&liteuart_driver, port);
        xa_erase(&liteuart_array, uart->id);
 
        return 0;
index fcef7a961430b37aa5a4b0f7940912f575f52fb6..489d19274f9ade45497d86b8da52623b4ccd062a 100644 (file)
@@ -598,6 +598,9 @@ static void msm_start_rx_dma(struct msm_port *msm_port)
        u32 val;
        int ret;
 
+       if (IS_ENABLED(CONFIG_CONSOLE_POLL))
+               return;
+
        if (!dma->chan)
                return;
 
index 45e2e4109acd0d8a3557b379674bb2726a95310c..b6223fab0687da64f947e70f20b72b6b419590ff 100644 (file)
@@ -1506,7 +1506,7 @@ static struct tegra_uart_chip_data tegra20_uart_chip_data = {
        .fifo_mode_enable_status        = false,
        .uart_max_port                  = 5,
        .max_dma_burst_bytes            = 4,
-       .error_tolerance_low_range      = 0,
+       .error_tolerance_low_range      = -4,
        .error_tolerance_high_range     = 4,
 };
 
@@ -1517,7 +1517,7 @@ static struct tegra_uart_chip_data tegra30_uart_chip_data = {
        .fifo_mode_enable_status        = false,
        .uart_max_port                  = 5,
        .max_dma_burst_bytes            = 4,
-       .error_tolerance_low_range      = 0,
+       .error_tolerance_low_range      = -4,
        .error_tolerance_high_range     = 4,
 };
 
index 1e738f265eeaa210626df63cb124145f34d90751..61e3dd0222af141bb9f568299165889cb3ce25b5 100644 (file)
@@ -1075,6 +1075,11 @@ uart_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
                goto out;
 
        if (!tty_io_error(tty)) {
+               if (uport->rs485.flags & SER_RS485_ENABLED) {
+                       set &= ~TIOCM_RTS;
+                       clear &= ~TIOCM_RTS;
+               }
+
                uart_update_mctrl(uport, set, clear);
                ret = 0;
        }
@@ -1549,6 +1554,7 @@ static void uart_tty_port_shutdown(struct tty_port *port)
 {
        struct uart_state *state = container_of(port, struct uart_state, port);
        struct uart_port *uport = uart_port_check(state);
+       char *buf;
 
        /*
         * At this point, we stop accepting input.  To do this, we
@@ -1570,8 +1576,18 @@ static void uart_tty_port_shutdown(struct tty_port *port)
         */
        tty_port_set_suspended(port, 0);
 
-       uart_change_pm(state, UART_PM_STATE_OFF);
+       /*
+        * Free the transmit buffer.
+        */
+       spin_lock_irq(&uport->lock);
+       buf = state->xmit.buf;
+       state->xmit.buf = NULL;
+       spin_unlock_irq(&uport->lock);
 
+       if (buf)
+               free_page((unsigned long)buf);
+
+       uart_change_pm(state, UART_PM_STATE_OFF);
 }
 
 static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
index 1f3b4a1422126bff26011c4ff7fd48bfa1f38e8c..f9af7ebe003d7b917c24cc83f4b8a47ee4a67263 100644 (file)
@@ -337,19 +337,6 @@ static void cdns3_ep_inc_deq(struct cdns3_endpoint *priv_ep)
        cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs);
 }
 
-static void cdns3_move_deq_to_next_trb(struct cdns3_request *priv_req)
-{
-       struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
-       int current_trb = priv_req->start_trb;
-
-       while (current_trb != priv_req->end_trb) {
-               cdns3_ep_inc_deq(priv_ep);
-               current_trb = priv_ep->dequeue;
-       }
-
-       cdns3_ep_inc_deq(priv_ep);
-}
-
 /**
  * cdns3_allow_enable_l1 - enable/disable permits to transition to L1.
  * @priv_dev: Extended gadget object
@@ -1517,10 +1504,11 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
 
                trb = priv_ep->trb_pool + priv_ep->dequeue;
 
-               /* Request was dequeued and TRB was changed to TRB_LINK. */
-               if (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) {
+               /* The TRB was changed as link TRB, and the request was handled at ep_dequeue */
+               while (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) {
                        trace_cdns3_complete_trb(priv_ep, trb);
-                       cdns3_move_deq_to_next_trb(priv_req);
+                       cdns3_ep_inc_deq(priv_ep);
+                       trb = priv_ep->trb_pool + priv_ep->dequeue;
                }
 
                if (!request->stream_id) {
index 27df0c6978978cc5f2f1aa6ff7e8922f06d7e1c0..e85bf768c66daadc0d2382f00a8da2699b994eaa 100644 (file)
@@ -1541,15 +1541,27 @@ static int cdnsp_gadget_pullup(struct usb_gadget *gadget, int is_on)
 {
        struct cdnsp_device *pdev = gadget_to_cdnsp(gadget);
        struct cdns *cdns = dev_get_drvdata(pdev->dev);
+       unsigned long flags;
 
        trace_cdnsp_pullup(is_on);
 
+       /*
+        * Disable events handling while controller is being
+        * enabled/disabled.
+        */
+       disable_irq(cdns->dev_irq);
+       spin_lock_irqsave(&pdev->lock, flags);
+
        if (!is_on) {
                cdnsp_reset_device(pdev);
                cdns_clear_vbus(cdns);
        } else {
                cdns_set_vbus(cdns);
        }
+
+       spin_unlock_irqrestore(&pdev->lock, flags);
+       enable_irq(cdns->dev_irq);
+
        return 0;
 }
 
index ad9aee3f1e3982b1c07023772c088b3c394e07df..97866bfb2da9d941c9aa915f78f8c79b26060fe8 100644 (file)
@@ -987,6 +987,9 @@ int cdnsp_endpoint_init(struct cdnsp_device *pdev,
 
        /* Set up the endpoint ring. */
        pep->ring = cdnsp_ring_alloc(pdev, 2, ring_type, max_packet, mem_flags);
+       if (!pep->ring)
+               return -ENOMEM;
+
        pep->skip = false;
 
        /* Fill the endpoint context */
index 1b1438457fb0437a862430e2ed6743688c4b7aad..e45c3d6e1536cf6dba325fef8ce925c35f0eb9e8 100644 (file)
@@ -1029,6 +1029,8 @@ static void cdnsp_process_ctrl_td(struct cdnsp_device *pdev,
                return;
        }
 
+       *status = 0;
+
        cdnsp_finish_td(pdev, td, event, pep, status);
 }
 
@@ -1523,7 +1525,14 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
        spin_lock_irqsave(&pdev->lock, flags);
 
        if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) {
-               cdnsp_died(pdev);
+               /*
+                * While removing or stopping driver there may still be deferred
+                * not handled interrupt which should not be treated as error.
+                * Driver should simply ignore it.
+                */
+               if (pdev->gadget_driver)
+                       cdnsp_died(pdev);
+
                spin_unlock_irqrestore(&pdev->lock, flags);
                return IRQ_HANDLED;
        }
index 6a2571c6aa9ed7215a52c7f3cc6b4300dd858a83..5983dfb996537ef344eb6bcb086d4a479ec48214 100644 (file)
@@ -57,9 +57,9 @@ DECLARE_EVENT_CLASS(cdnsp_log_ep,
                __entry->first_prime_det = pep->stream_info.first_prime_det;
                __entry->drbls_count = pep->stream_info.drbls_count;
        ),
-       TP_printk("%s: SID: %08x ep state: %x stream: enabled: %d num  %d "
+       TP_printk("%s: SID: %08x, ep state: %x, stream: enabled: %d num %d "
                  "tds %d, first prime: %d drbls %d",
-                 __get_str(name), __entry->state, __entry->stream_id,
+                 __get_str(name), __entry->stream_id, __entry->state,
                  __entry->enabled, __entry->num_streams, __entry->td_count,
                  __entry->first_prime_det, __entry->drbls_count)
 );
index 84dadfa726aa6a816283b610ff258e0d2b53990a..9643b905e2d8b38da465ff48028d23274be0a0c5 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <linux/platform_device.h>
+#include <linux/slab.h>
 #include "core.h"
 #include "drd.h"
 #include "host-export.h"
index 16b1fd9dc60c959703368f330f75984763dddf93..48bc8a4814ac4e985e118242eecf73202bc7148f 100644 (file)
@@ -406,7 +406,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
         * the USB-2 spec requires such endpoints to have wMaxPacketSize = 0
         * (see the end of section 5.6.3), so don't warn about them.
         */
-       maxp = usb_endpoint_maxp(&endpoint->desc);
+       maxp = le16_to_cpu(endpoint->desc.wMaxPacketSize);
        if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) {
                dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n",
                    cfgno, inum, asnum, d->bEndpointAddress);
@@ -422,9 +422,9 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
                maxpacket_maxes = full_speed_maxpacket_maxes;
                break;
        case USB_SPEED_HIGH:
-               /* Bits 12..11 are allowed only for HS periodic endpoints */
+               /* Multiple-transactions bits are allowed only for HS periodic endpoints */
                if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) {
-                       i = maxp & (BIT(12) | BIT(11));
+                       i = maxp & USB_EP_MAXP_MULT_MASK;
                        maxp &= ~i;
                }
                fallthrough;
index 8239fe7129dd7a3b241813e215c04dd69df20d36..d3c14b5ed4a1f9fc438ce917390234b9d39f2275 100644 (file)
@@ -434,6 +434,12 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x1532, 0x0116), .driver_info =
                        USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
 
+       /* Lenovo USB-C to Ethernet Adapter RTL8153-04 */
+       { USB_DEVICE(0x17ef, 0x720c), .driver_info = USB_QUIRK_NO_LPM },
+
+       /* Lenovo Powered USB-C Travel Hub (4X90S92381, RTL8153 GigE) */
+       { USB_DEVICE(0x17ef, 0x721e), .driver_info = USB_QUIRK_NO_LPM },
+
        /* Lenovo ThinkCenter A630Z TI024Gen3 usb-audio */
        { USB_DEVICE(0x17ef, 0xa012), .driver_info =
                        USB_QUIRK_DISCONNECT_SUSPEND },
index c8f18f3ba9e355547e1f1c2ff146f69311905973..c331a5128c2c0e3d12d778d384c490f5be7970d2 100644 (file)
@@ -575,6 +575,9 @@ static int dwc2_driver_probe(struct platform_device *dev)
                ggpio |= GGPIO_STM32_OTG_GCCFG_IDEN;
                ggpio |= GGPIO_STM32_OTG_GCCFG_VBDEN;
                dwc2_writel(hsotg, ggpio, GGPIO);
+
+               /* ID/VBUS detection startup time */
+               usleep_range(5000, 7000);
        }
 
        retval = dwc2_drd_init(hsotg);
index 9abbd01028c5ffcc3508f37e1776908f9648284d..3cb01cdd02c29870a540a06a0539b1d3adba7448 100644 (file)
@@ -649,7 +649,6 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
        struct dwc3_qcom        *qcom = platform_get_drvdata(pdev);
        struct device_node      *np = pdev->dev.of_node, *dwc3_np;
        struct device           *dev = &pdev->dev;
-       struct property         *prop;
        int                     ret;
 
        dwc3_np = of_get_compatible_child(np, "snps,dwc3");
@@ -658,20 +657,6 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       prop = devm_kzalloc(dev, sizeof(*prop), GFP_KERNEL);
-       if (!prop) {
-               ret = -ENOMEM;
-               dev_err(dev, "unable to allocate memory for property\n");
-               goto node_put;
-       }
-
-       prop->name = "tx-fifo-resize";
-       ret = of_add_property(dwc3_np, prop);
-       if (ret) {
-               dev_err(dev, "unable to add property\n");
-               goto node_put;
-       }
-
        ret = of_platform_populate(np, NULL, NULL, dev);
        if (ret) {
                dev_err(dev, "failed to register dwc3 core - %d\n", ret);
index 933d77ad0a64212843a1b13ae80ae4fb75f87bbd..4502108069cd9252e9888c10e80989dcce5a3326 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/pci_ids.h>
 #include <linux/memblock.h>
 #include <linux/io.h>
-#include <linux/iopoll.h>
 #include <asm/pci-direct.h>
 #include <asm/fixmap.h>
 #include <linux/bcd.h>
@@ -136,9 +135,17 @@ static int handshake(void __iomem *ptr, u32 mask, u32 done, int wait, int delay)
 {
        u32 result;
 
-       return readl_poll_timeout_atomic(ptr, result,
-                                        ((result & mask) == done),
-                                        delay, wait);
+       /* Can not use readl_poll_timeout_atomic() for early boot things */
+       do {
+               result = readl(ptr);
+               result &= mask;
+               if (result == done)
+                       return 0;
+               udelay(delay);
+               wait -= delay;
+       } while (wait > 0);
+
+       return -ETIMEDOUT;
 }
 
 static void __init xdbc_bios_handoff(void)
index 504c1cbc255d14a9ba7fefbc1d4f1e10a67f264c..3789c329183ca6e5789ea1c529a6bc8a80759888 100644 (file)
@@ -1679,6 +1679,18 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
        struct usb_function             *f = NULL;
        u8                              endp;
 
+       if (w_length > USB_COMP_EP0_BUFSIZ) {
+               if (ctrl->bRequestType & USB_DIR_IN) {
+                       /* Cast away the const, we are going to overwrite on purpose. */
+                       __le16 *temp = (__le16 *)&ctrl->wLength;
+
+                       *temp = cpu_to_le16(USB_COMP_EP0_BUFSIZ);
+                       w_length = USB_COMP_EP0_BUFSIZ;
+               } else {
+                       goto done;
+               }
+       }
+
        /* partial re-init of the response message; the function or the
         * gadget might need to intercept e.g. a control-OUT completion
         * when we delegate to it.
@@ -2209,7 +2221,7 @@ int composite_dev_prepare(struct usb_composite_driver *composite,
        if (!cdev->req)
                return -ENOMEM;
 
-       cdev->req->buf = kmalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL);
+       cdev->req->buf = kzalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL);
        if (!cdev->req->buf)
                goto fail;
 
index e0ad5aed6ac98ed2797acf7150cef7afef109189..6f5d45ef2e39a8caeab230cd42111477e387195a 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
+#include <linux/etherdevice.h>
 
 #include "u_ether.h"
 
@@ -863,19 +864,23 @@ int gether_register_netdev(struct net_device *net)
 {
        struct eth_dev *dev;
        struct usb_gadget *g;
-       struct sockaddr sa;
        int status;
 
        if (!net->dev.parent)
                return -EINVAL;
        dev = netdev_priv(net);
        g = dev->gadget;
+
+       net->addr_assign_type = NET_ADDR_RANDOM;
+       eth_hw_addr_set(net, dev->dev_mac);
+
        status = register_netdev(net);
        if (status < 0) {
                dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
                return status;
        } else {
                INFO(dev, "HOST MAC %pM\n", dev->host_mac);
+               INFO(dev, "MAC %pM\n", dev->dev_mac);
 
                /* two kinds of host-initiated state changes:
                 *  - iff DATA transfer is active, carrier is "on"
@@ -883,15 +888,6 @@ int gether_register_netdev(struct net_device *net)
                 */
                netif_carrier_off(net);
        }
-       sa.sa_family = net->type;
-       memcpy(sa.sa_data, dev->dev_mac, ETH_ALEN);
-       rtnl_lock();
-       status = dev_set_mac_address(net, &sa, NULL);
-       rtnl_unlock();
-       if (status)
-               pr_warn("cannot set self ethernet address: %d\n", status);
-       else
-               INFO(dev, "MAC %pM\n", dev->dev_mac);
 
        return status;
 }
index e1d566c9918ae576d55fc322f317d2992193b7f1..6bcbad382580203e0aafbe012c202c3587297a3a 100644 (file)
@@ -137,7 +137,7 @@ static int dbgp_enable_ep_req(struct usb_ep *ep)
                goto fail_1;
        }
 
-       req->buf = kmalloc(DBGP_REQ_LEN, GFP_KERNEL);
+       req->buf = kzalloc(DBGP_REQ_LEN, GFP_KERNEL);
        if (!req->buf) {
                err = -ENOMEM;
                stp = 2;
@@ -345,6 +345,19 @@ static int dbgp_setup(struct usb_gadget *gadget,
        void *data = NULL;
        u16 len = 0;
 
+       if (length > DBGP_REQ_LEN) {
+               if (ctrl->bRequestType & USB_DIR_IN) {
+                       /* Cast away the const, we are going to overwrite on purpose. */
+                       __le16 *temp = (__le16 *)&ctrl->wLength;
+
+                       *temp = cpu_to_le16(DBGP_REQ_LEN);
+                       length = DBGP_REQ_LEN;
+               } else {
+                       return err;
+               }
+       }
+
+
        if (request == USB_REQ_GET_DESCRIPTOR) {
                switch (value>>8) {
                case USB_DT_DEVICE:
index 78be947502329b2b7c041f7ff32bd179d6ac6cd5..3b58f4fc0a806ef8759c35ebdb3a4c3a6ea3ab08 100644 (file)
@@ -110,6 +110,8 @@ enum ep0_state {
 /* enough for the whole queue: most events invalidate others */
 #define        N_EVENT                 5
 
+#define RBUF_SIZE              256
+
 struct dev_data {
        spinlock_t                      lock;
        refcount_t                      count;
@@ -144,7 +146,7 @@ struct dev_data {
        struct dentry                   *dentry;
 
        /* except this scratch i/o buffer for ep0 */
-       u8                              rbuf [256];
+       u8                              rbuf[RBUF_SIZE];
 };
 
 static inline void get_dev (struct dev_data *data)
@@ -1331,6 +1333,18 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
        u16                             w_value = le16_to_cpu(ctrl->wValue);
        u16                             w_length = le16_to_cpu(ctrl->wLength);
 
+       if (w_length > RBUF_SIZE) {
+               if (ctrl->bRequestType & USB_DIR_IN) {
+                       /* Cast away the const, we are going to overwrite on purpose. */
+                       __le16 *temp = (__le16 *)&ctrl->wLength;
+
+                       *temp = cpu_to_le16(RBUF_SIZE);
+                       w_length = RBUF_SIZE;
+               } else {
+                       return value;
+               }
+       }
+
        spin_lock (&dev->lock);
        dev->setup_abort = 0;
        if (dev->state == STATE_DEV_UNCONNECTED) {
index af946c42b6f0a0be25b948a0d0f6b5daf7e1a97b..df3522dab31b5eaac4ab8c24759fc1e4e963e882 100644 (file)
@@ -717,6 +717,7 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci,
                        continue;
 
                retval = xhci_disable_slot(xhci, i);
+               xhci_free_virt_device(xhci, i);
                if (retval)
                        xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n",
                                 i, retval);
index 1edef7527c1197520e53f9ddbe5fb72ed8e054ee..edbfa82c656592d13ad6c09ce45387a2cce8613a 100644 (file)
@@ -781,7 +781,7 @@ int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 
        ret = xhci_check_bandwidth(hcd, udev);
        if (!ret)
-               INIT_LIST_HEAD(&mtk->bw_ep_chk_list);
+               list_del_init(&mtk->bw_ep_chk_list);
 
        return ret;
 }
index 92adf61078644fa0d36fdf19d58f0159face2372..3af01788323117e9851566ca55ba3395b9cf2c16 100644 (file)
@@ -71,6 +71,8 @@
 #define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_4           0x161e
 #define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_5           0x15d6
 #define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6           0x15d7
+#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_7           0x161c
+#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_8           0x161f
 
 #define PCI_DEVICE_ID_ASMEDIA_1042_XHCI                        0x1042
 #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI               0x1142
@@ -330,7 +332,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
            pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_3 ||
            pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_4 ||
            pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_5 ||
-           pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6))
+           pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6 ||
+           pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_7 ||
+           pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_8))
                xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
 
        if (xhci->quirks & XHCI_RESET_ON_RESUME)
index 311597bba80e2a4d469277042cbde721e93af335..d0b6806275e01a92e21fa79e575fc4eb3fea72a7 100644 (file)
@@ -366,7 +366,9 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
 /* Must be called with xhci->lock held, releases and aquires lock back */
 static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
 {
-       u32 temp_32;
+       struct xhci_segment *new_seg    = xhci->cmd_ring->deq_seg;
+       union xhci_trb *new_deq         = xhci->cmd_ring->dequeue;
+       u64 crcr;
        int ret;
 
        xhci_dbg(xhci, "Abort command ring\n");
@@ -375,13 +377,18 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
 
        /*
         * The control bits like command stop, abort are located in lower
-        * dword of the command ring control register. Limit the write
-        * to the lower dword to avoid corrupting the command ring pointer
-        * in case if the command ring is stopped by the time upper dword
-        * is written.
+        * dword of the command ring control register.
+        * Some controllers require all 64 bits to be written to abort the ring.
+        * Make sure the upper dword is valid, pointing to the next command,
+        * avoiding corrupting the command ring pointer in case the command ring
+        * is stopped by the time the upper dword is written.
         */
-       temp_32 = readl(&xhci->op_regs->cmd_ring);
-       writel(temp_32 | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
+       next_trb(xhci, NULL, &new_seg, &new_deq);
+       if (trb_is_link(new_deq))
+               next_trb(xhci, NULL, &new_seg, &new_deq);
+
+       crcr = xhci_trb_virt_to_dma(new_seg, new_deq);
+       xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
 
        /* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
         * completion of the Command Abort operation. If CRR is not negated in 5
@@ -1518,7 +1525,6 @@ static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
        if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
                /* Delete default control endpoint resources */
                xhci_free_device_endpoint_resources(xhci, virt_dev, true);
-       xhci_free_virt_device(xhci, slot_id);
 }
 
 static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
index 902f410874e8eac93552b3821dc02dc0dad3f41f..f5b1bcc875dedce9834820ae9d7ea492019707e6 100644 (file)
@@ -3934,7 +3934,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
        struct xhci_slot_ctx *slot_ctx;
        int i, ret;
 
-#ifndef CONFIG_USB_DEFAULT_PERSIST
        /*
         * We called pm_runtime_get_noresume when the device was attached.
         * Decrement the counter here to allow controller to runtime suspend
@@ -3942,7 +3941,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
         */
        if (xhci->quirks & XHCI_RESET_ON_RESUME)
                pm_runtime_put_noidle(hcd->self.controller);
-#endif
 
        ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
        /* If the host is halted due to driver unload, we still need to free the
@@ -3961,9 +3959,8 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
                del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
        }
        virt_dev->udev = NULL;
-       ret = xhci_disable_slot(xhci, udev->slot_id);
-       if (ret)
-               xhci_free_virt_device(xhci, udev->slot_id);
+       xhci_disable_slot(xhci, udev->slot_id);
+       xhci_free_virt_device(xhci, udev->slot_id);
 }
 
 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
@@ -3973,7 +3970,7 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
        u32 state;
        int ret = 0;
 
-       command = xhci_alloc_command(xhci, false, GFP_KERNEL);
+       command = xhci_alloc_command(xhci, true, GFP_KERNEL);
        if (!command)
                return -ENOMEM;
 
@@ -3998,6 +3995,15 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
        }
        xhci_ring_cmd_db(xhci);
        spin_unlock_irqrestore(&xhci->lock, flags);
+
+       wait_for_completion(command->completion);
+
+       if (command->status != COMP_SUCCESS)
+               xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n",
+                         slot_id, command->status);
+
+       xhci_free_command(xhci, command);
+
        return ret;
 }
 
@@ -4094,23 +4100,20 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
 
        xhci_debugfs_create_slot(xhci, slot_id);
 
-#ifndef CONFIG_USB_DEFAULT_PERSIST
        /*
         * If resetting upon resume, we can't put the controller into runtime
         * suspend if there is a device attached.
         */
        if (xhci->quirks & XHCI_RESET_ON_RESUME)
                pm_runtime_get_noresume(hcd->self.controller);
-#endif
 
        /* Is this a LS or FS device under a HS hub? */
        /* Hub or peripherial? */
        return 1;
 
 disable_slot:
-       ret = xhci_disable_slot(xhci, udev->slot_id);
-       if (ret)
-               xhci_free_virt_device(xhci, udev->slot_id);
+       xhci_disable_slot(xhci, udev->slot_id);
+       xhci_free_virt_device(xhci, udev->slot_id);
 
        return 0;
 }
@@ -4240,6 +4243,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
 
                mutex_unlock(&xhci->mutex);
                ret = xhci_disable_slot(xhci, udev->slot_id);
+               xhci_free_virt_device(xhci, udev->slot_id);
                if (!ret)
                        xhci_alloc_dev(hcd, udev);
                kfree(command->completion);
index 7705328034cabc59ead943031ca76993530337fb..8a60c0d56863eef53e3d383e34fd100ac0c5c941 100644 (file)
@@ -1635,6 +1635,8 @@ static int cp2105_gpioconf_init(struct usb_serial *serial)
 
        /*  2 banks of GPIO - One for the pins taken from each serial port */
        if (intf_num == 0) {
+               priv->gc.ngpio = 2;
+
                if (mode.eci == CP210X_PIN_MODE_MODEM) {
                        /* mark all GPIOs of this interface as reserved */
                        priv->gpio_altfunc = 0xff;
@@ -1645,8 +1647,9 @@ static int cp2105_gpioconf_init(struct usb_serial *serial)
                priv->gpio_pushpull = (u8)((le16_to_cpu(config.gpio_mode) &
                                                CP210X_ECI_GPIO_MODE_MASK) >>
                                                CP210X_ECI_GPIO_MODE_OFFSET);
-               priv->gc.ngpio = 2;
        } else if (intf_num == 1) {
+               priv->gc.ngpio = 3;
+
                if (mode.sci == CP210X_PIN_MODE_MODEM) {
                        /* mark all GPIOs of this interface as reserved */
                        priv->gpio_altfunc = 0xff;
@@ -1657,7 +1660,6 @@ static int cp2105_gpioconf_init(struct usb_serial *serial)
                priv->gpio_pushpull = (u8)((le16_to_cpu(config.gpio_mode) &
                                                CP210X_SCI_GPIO_MODE_MASK) >>
                                                CP210X_SCI_GPIO_MODE_OFFSET);
-               priv->gc.ngpio = 3;
        } else {
                return -ENODEV;
        }
index 546fce4617a8548fb755a152132a36d3ba344893..42420bfc983c2d5edb4879a8814dcb34d76fe061 100644 (file)
@@ -1219,6 +1219,14 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = NCTRL(2) | RSVD(3) },
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1063, 0xff),    /* Telit LN920 (ECM) */
          .driver_info = NCTRL(0) | RSVD(1) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1070, 0xff),    /* Telit FN990 (rmnet) */
+         .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1071, 0xff),    /* Telit FN990 (MBIM) */
+         .driver_info = NCTRL(0) | RSVD(1) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1072, 0xff),    /* Telit FN990 (RNDIS) */
+         .driver_info = NCTRL(2) | RSVD(3) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff),    /* Telit FN990 (ECM) */
+         .driver_info = NCTRL(0) | RSVD(1) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
index 7f2f3ff1b39112e72b683873c2a031fdb155c644..59d4fa2443f2b44449c53a8cb4ccd250e0664189 100644 (file)
@@ -324,6 +324,7 @@ struct tcpm_port {
 
        bool attached;
        bool connected;
+       bool registered;
        bool pd_supported;
        enum typec_port_type port_type;
 
@@ -4110,11 +4111,7 @@ static void run_state_machine(struct tcpm_port *port)
                                       tcpm_try_src(port) ? SRC_TRY
                                                          : SNK_ATTACHED,
                                       0);
-               else
-                       /* Wait for VBUS, but not forever */
-                       tcpm_set_state(port, PORT_RESET, PD_T_PS_SOURCE_ON);
                break;
-
        case SRC_TRY:
                port->try_src_count++;
                tcpm_set_cc(port, tcpm_rp_cc(port));
@@ -6295,7 +6292,8 @@ static enum hrtimer_restart state_machine_timer_handler(struct hrtimer *timer)
 {
        struct tcpm_port *port = container_of(timer, struct tcpm_port, state_machine_timer);
 
-       kthread_queue_work(port->wq, &port->state_machine);
+       if (port->registered)
+               kthread_queue_work(port->wq, &port->state_machine);
        return HRTIMER_NORESTART;
 }
 
@@ -6303,7 +6301,8 @@ static enum hrtimer_restart vdm_state_machine_timer_handler(struct hrtimer *time
 {
        struct tcpm_port *port = container_of(timer, struct tcpm_port, vdm_state_machine_timer);
 
-       kthread_queue_work(port->wq, &port->vdm_state_machine);
+       if (port->registered)
+               kthread_queue_work(port->wq, &port->vdm_state_machine);
        return HRTIMER_NORESTART;
 }
 
@@ -6311,7 +6310,8 @@ static enum hrtimer_restart enable_frs_timer_handler(struct hrtimer *timer)
 {
        struct tcpm_port *port = container_of(timer, struct tcpm_port, enable_frs_timer);
 
-       kthread_queue_work(port->wq, &port->enable_frs);
+       if (port->registered)
+               kthread_queue_work(port->wq, &port->enable_frs);
        return HRTIMER_NORESTART;
 }
 
@@ -6319,7 +6319,8 @@ static enum hrtimer_restart send_discover_timer_handler(struct hrtimer *timer)
 {
        struct tcpm_port *port = container_of(timer, struct tcpm_port, send_discover_timer);
 
-       kthread_queue_work(port->wq, &port->send_discover_work);
+       if (port->registered)
+               kthread_queue_work(port->wq, &port->send_discover_work);
        return HRTIMER_NORESTART;
 }
 
@@ -6407,6 +6408,7 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
        typec_port_register_altmodes(port->typec_port,
                                     &tcpm_altmode_ops, port,
                                     port->port_altmode, ALTMODE_DISCOVERY_MAX);
+       port->registered = true;
 
        mutex_lock(&port->lock);
        tcpm_init(port);
@@ -6428,6 +6430,9 @@ void tcpm_unregister_port(struct tcpm_port *port)
 {
        int i;
 
+       port->registered = false;
+       kthread_destroy_worker(port->wq);
+
        hrtimer_cancel(&port->send_discover_timer);
        hrtimer_cancel(&port->enable_frs_timer);
        hrtimer_cancel(&port->vdm_state_machine_timer);
@@ -6439,7 +6444,6 @@ void tcpm_unregister_port(struct tcpm_port *port)
        typec_unregister_port(port->typec_port);
        usb_role_switch_put(port->role_sw);
        tcpm_debugfs_exit(port);
-       kthread_destroy_worker(port->wq);
 }
 EXPORT_SYMBOL_GPL(tcpm_unregister_port);
 
index 7332a74a4b00c504014eb1d39b4873017dcab1a3..09bbe53c3ac4ec193717af888f58a0ba1347e636 100644 (file)
@@ -404,7 +404,8 @@ static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *m
                goto msg_err;
 
        while (mdev->id_table[i].device) {
-               supported_classes |= BIT(mdev->id_table[i].device);
+               if (mdev->id_table[i].device <= 63)
+                       supported_classes |= BIT_ULL(mdev->id_table[i].device);
                i++;
        }
 
index c9204c62f339c33ab484f60166d03816ecbf15d8..eddcb64a910acf2e3beafed805581ba10d1a699e 100644 (file)
@@ -655,7 +655,8 @@ static void vduse_vdpa_get_config(struct vdpa_device *vdpa, unsigned int offset,
 {
        struct vduse_dev *dev = vdpa_to_vduse(vdpa);
 
-       if (len > dev->config_size - offset)
+       if (offset > dev->config_size ||
+           len > dev->config_size - offset)
                return;
 
        memcpy(buf, dev->config + offset, len);
@@ -975,7 +976,8 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
                        break;
 
                ret = -EINVAL;
-               if (config.length == 0 ||
+               if (config.offset > dev->config_size ||
+                   config.length == 0 ||
                    config.length > dev->config_size - config.offset)
                        break;
 
index 56cd551e0e04dfb5840cddc5a227cffdee1b904f..362f91ec884585c929b0f1cfb22913a6b9c94793 100644 (file)
@@ -98,7 +98,8 @@ static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev,
                        version = cpu_to_le16(0x0201);
 
                if (igd_opregion_shift_copy(buf, &off,
-                                           &version + (pos - OPREGION_VERSION),
+                                           (u8 *)&version +
+                                           (pos - OPREGION_VERSION),
                                            &pos, &remaining, bytes))
                        return -EFAULT;
        }
@@ -121,7 +122,7 @@ static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev,
                                          OPREGION_SIZE : 0);
 
                if (igd_opregion_shift_copy(buf, &off,
-                                           &rvda + (pos - OPREGION_RVDA),
+                                           (u8 *)&rvda + (pos - OPREGION_RVDA),
                                            &pos, &remaining, bytes))
                        return -EFAULT;
        }
index 82fb75464f923d47a225f8262595af0e51b51753..735d1d344af9d48277508e565221182c695858f0 100644 (file)
@@ -232,7 +232,7 @@ static inline bool vfio_iommu_driver_allowed(struct vfio_container *container,
 }
 #endif /* CONFIG_VFIO_NOIOMMU */
 
-/**
+/*
  * IOMMU driver registration
  */
 int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
@@ -285,7 +285,7 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
                                     unsigned long action, void *data);
 static void vfio_group_get(struct vfio_group *group);
 
-/**
+/*
  * Container objects - containers are created when /dev/vfio/vfio is
  * opened, but their lifecycle extends until the last user is done, so
  * it's freed via kref.  Must support container/group/device being
@@ -309,7 +309,7 @@ static void vfio_container_put(struct vfio_container *container)
        kref_put(&container->kref, vfio_container_release);
 }
 
-/**
+/*
  * Group objects - create, release, get, put, search
  */
 static struct vfio_group *
@@ -488,7 +488,7 @@ static struct vfio_group *vfio_group_get_from_dev(struct device *dev)
        return group;
 }
 
-/**
+/*
  * Device objects - create, release, get, put, search
  */
 /* Device reference always implies a group reference */
@@ -595,7 +595,7 @@ static int vfio_dev_viable(struct device *dev, void *data)
        return ret;
 }
 
-/**
+/*
  * Async device support
  */
 static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
@@ -689,7 +689,7 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
        return NOTIFY_OK;
 }
 
-/**
+/*
  * VFIO driver API
  */
 void vfio_init_group_dev(struct vfio_device *device, struct device *dev,
@@ -831,7 +831,7 @@ int vfio_register_emulated_iommu_dev(struct vfio_device *device)
 }
 EXPORT_SYMBOL_GPL(vfio_register_emulated_iommu_dev);
 
-/**
+/*
  * Get a reference to the vfio_device for a device.  Even if the
  * caller thinks they own the device, they could be racing with a
  * release call path, so we can't trust drvdata for the shortcut.
@@ -965,7 +965,7 @@ void vfio_unregister_group_dev(struct vfio_device *device)
 }
 EXPORT_SYMBOL_GPL(vfio_unregister_group_dev);
 
-/**
+/*
  * VFIO base fd, /dev/vfio/vfio
  */
 static long vfio_ioctl_check_extension(struct vfio_container *container,
@@ -1183,7 +1183,7 @@ static const struct file_operations vfio_fops = {
        .compat_ioctl   = compat_ptr_ioctl,
 };
 
-/**
+/*
  * VFIO Group fd, /dev/vfio/$GROUP
  */
 static void __vfio_group_unset_container(struct vfio_group *group)
@@ -1536,7 +1536,7 @@ static const struct file_operations vfio_group_fops = {
        .release        = vfio_group_fops_release,
 };
 
-/**
+/*
  * VFIO Device fd
  */
 static int vfio_device_fops_release(struct inode *inode, struct file *filep)
@@ -1611,7 +1611,7 @@ static const struct file_operations vfio_device_fops = {
        .mmap           = vfio_device_fops_mmap,
 };
 
-/**
+/*
  * External user API, exported by symbols to be linked dynamically.
  *
  * The protocol includes:
@@ -1659,7 +1659,7 @@ struct vfio_group *vfio_group_get_external_user(struct file *filep)
 }
 EXPORT_SYMBOL_GPL(vfio_group_get_external_user);
 
-/**
+/*
  * External user API, exported by symbols to be linked dynamically.
  * The external user passes in a device pointer
  * to verify that:
@@ -1725,7 +1725,7 @@ long vfio_external_check_extension(struct vfio_group *group, unsigned long arg)
 }
 EXPORT_SYMBOL_GPL(vfio_external_check_extension);
 
-/**
+/*
  * Sub-module support
  */
 /*
@@ -2272,7 +2272,7 @@ struct iommu_domain *vfio_group_iommu_domain(struct vfio_group *group)
 }
 EXPORT_SYMBOL_GPL(vfio_group_iommu_domain);
 
-/**
+/*
  * Module/class support
  */
 static char *vfio_devnode(struct device *dev, umode_t *mode)
index 29cced1cd27784a708df783b2f81e2340c86f145..e3c4f059b21a202b36e8a18930432b546bac9bc2 100644 (file)
@@ -197,7 +197,7 @@ static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
        struct vdpa_device *vdpa = v->vdpa;
        long size = vdpa->config->get_config_size(vdpa);
 
-       if (c->len == 0)
+       if (c->len == 0 || c->off > size)
                return -EINVAL;
 
        if (c->len > size - c->off)
index ef9c57ce090663d5f35c0f80dd425edcaa9a4c44..9a49ea6b5112fb49621f1aa792cd68f6a0f2eda4 100644 (file)
@@ -366,11 +366,17 @@ static void vgacon_init(struct vc_data *c, int init)
        struct uni_pagedir *p;
 
        /*
-        * We cannot be loaded as a module, therefore init is always 1,
-        * but vgacon_init can be called more than once, and init will
-        * not be 1.
+        * We cannot be loaded as a module, therefore init will be 1
+        * if we are the default console, however if we are a fallback
+        * console, for example if fbcon has failed registration, then
+        * init will be 0, so we need to make sure our boot parameters
+        * have been copied to the console structure for vgacon_resize
+        * ultimately called by vc_resize.  Any subsequent calls to
+        * vgacon_init init will have init set to 0 too.
         */
        c->vc_can_do_color = vga_can_do_color;
+       c->vc_scan_lines = vga_scan_lines;
+       c->vc_font.height = c->vc_cell_height = vga_video_font_height;
 
        /* set dimensions manually if init != 0 since vc_resize() will fail */
        if (init) {
@@ -379,8 +385,6 @@ static void vgacon_init(struct vc_data *c, int init)
        } else
                vc_resize(c, vga_video_num_columns, vga_video_num_lines);
 
-       c->vc_scan_lines = vga_scan_lines;
-       c->vc_font.height = c->vc_cell_height = vga_video_font_height;
        c->vc_complement_mask = 0x7700;
        if (vga_512_chars)
                c->vc_hi_font_mask = 0x0800;
index 6d2614e34470f463bb553e7e9e80f7bb0a0cef4a..028b05d4454604d5aee636895f8fcfee77a2b46a 100644 (file)
@@ -268,7 +268,7 @@ size_t virtio_max_dma_size(struct virtio_device *vdev)
        size_t max_segment_size = SIZE_MAX;
 
        if (vring_use_dma_api(vdev))
-               max_segment_size = dma_max_mapping_size(&vdev->dev);
+               max_segment_size = dma_max_mapping_size(vdev->dev.parent);
 
        return max_segment_size;
 }
index a78704ae36186649ff2d0805cacc53700d339da0..46d9295d9a6e4a58a2e6d7658e270de31c48eed8 100644 (file)
@@ -1251,6 +1251,12 @@ int bind_evtchn_to_irq(evtchn_port_t evtchn)
 }
 EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
 
+int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn)
+{
+       return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip, NULL);
+}
+EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi);
+
 static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 {
        struct evtchn_bind_ipi bind_ipi;
index cb6ad61eec3bf5f30d5d6477b0bb7635f131b4bc..afe4b803f84b46d319e4885fdbc6ea8a1919cf40 100644 (file)
@@ -514,8 +514,9 @@ static void afs_add_open_mmap(struct afs_vnode *vnode)
        if (atomic_inc_return(&vnode->cb_nr_mmap) == 1) {
                down_write(&vnode->volume->cell->fs_open_mmaps_lock);
 
-               list_add_tail(&vnode->cb_mmap_link,
-                             &vnode->volume->cell->fs_open_mmaps);
+               if (list_empty(&vnode->cb_mmap_link))
+                       list_add_tail(&vnode->cb_mmap_link,
+                                     &vnode->volume->cell->fs_open_mmaps);
 
                up_write(&vnode->volume->cell->fs_open_mmaps_lock);
        }
index d110def8aa8eb993212c00ed161089eba89a7545..34c68724c98bec4070dd56de816c07ea78c2db99 100644 (file)
@@ -667,6 +667,7 @@ static void afs_i_init_once(void *_vnode)
        INIT_LIST_HEAD(&vnode->pending_locks);
        INIT_LIST_HEAD(&vnode->granted_locks);
        INIT_DELAYED_WORK(&vnode->lock_work, afs_lock_work);
+       INIT_LIST_HEAD(&vnode->cb_mmap_link);
        seqlock_init(&vnode->cb_lock);
 }
 
index 9c81cf611d659fcbdb61c1307c1b260836dfdba6..f6f1cbffef9e8c8cc302c2b2c5b8d55a6172fe3b 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -181,8 +181,9 @@ struct poll_iocb {
        struct file             *file;
        struct wait_queue_head  *head;
        __poll_t                events;
-       bool                    done;
        bool                    cancelled;
+       bool                    work_scheduled;
+       bool                    work_need_resched;
        struct wait_queue_entry wait;
        struct work_struct      work;
 };
@@ -1619,6 +1620,51 @@ static void aio_poll_put_work(struct work_struct *work)
        iocb_put(iocb);
 }
 
+/*
+ * Safely lock the waitqueue which the request is on, synchronizing with the
+ * case where the ->poll() provider decides to free its waitqueue early.
+ *
+ * Returns true on success, meaning that req->head->lock was locked, req->wait
+ * is on req->head, and an RCU read lock was taken.  Returns false if the
+ * request was already removed from its waitqueue (which might no longer exist).
+ */
+static bool poll_iocb_lock_wq(struct poll_iocb *req)
+{
+       wait_queue_head_t *head;
+
+       /*
+        * While we hold the waitqueue lock and the waitqueue is nonempty,
+        * wake_up_pollfree() will wait for us.  However, taking the waitqueue
+        * lock in the first place can race with the waitqueue being freed.
+        *
+        * We solve this as eventpoll does: by taking advantage of the fact that
+        * all users of wake_up_pollfree() will RCU-delay the actual free.  If
+        * we enter rcu_read_lock() and see that the pointer to the queue is
+        * non-NULL, we can then lock it without the memory being freed out from
+        * under us, then check whether the request is still on the queue.
+        *
+        * Keep holding rcu_read_lock() as long as we hold the queue lock, in
+        * case the caller deletes the entry from the queue, leaving it empty.
+        * In that case, only RCU prevents the queue memory from being freed.
+        */
+       rcu_read_lock();
+       head = smp_load_acquire(&req->head);
+       if (head) {
+               spin_lock(&head->lock);
+               if (!list_empty(&req->wait.entry))
+                       return true;
+               spin_unlock(&head->lock);
+       }
+       rcu_read_unlock();
+       return false;
+}
+
+static void poll_iocb_unlock_wq(struct poll_iocb *req)
+{
+       spin_unlock(&req->head->lock);
+       rcu_read_unlock();
+}
+
 static void aio_poll_complete_work(struct work_struct *work)
 {
        struct poll_iocb *req = container_of(work, struct poll_iocb, work);
@@ -1638,14 +1684,27 @@ static void aio_poll_complete_work(struct work_struct *work)
         * avoid further branches in the fast path.
         */
        spin_lock_irq(&ctx->ctx_lock);
-       if (!mask && !READ_ONCE(req->cancelled)) {
-               add_wait_queue(req->head, &req->wait);
-               spin_unlock_irq(&ctx->ctx_lock);
-               return;
-       }
+       if (poll_iocb_lock_wq(req)) {
+               if (!mask && !READ_ONCE(req->cancelled)) {
+                       /*
+                        * The request isn't actually ready to be completed yet.
+                        * Reschedule completion if another wakeup came in.
+                        */
+                       if (req->work_need_resched) {
+                               schedule_work(&req->work);
+                               req->work_need_resched = false;
+                       } else {
+                               req->work_scheduled = false;
+                       }
+                       poll_iocb_unlock_wq(req);
+                       spin_unlock_irq(&ctx->ctx_lock);
+                       return;
+               }
+               list_del_init(&req->wait.entry);
+               poll_iocb_unlock_wq(req);
+       } /* else, POLLFREE has freed the waitqueue, so we must complete */
        list_del_init(&iocb->ki_list);
        iocb->ki_res.res = mangle_poll(mask);
-       req->done = true;
        spin_unlock_irq(&ctx->ctx_lock);
 
        iocb_put(iocb);
@@ -1657,13 +1716,14 @@ static int aio_poll_cancel(struct kiocb *iocb)
        struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
        struct poll_iocb *req = &aiocb->poll;
 
-       spin_lock(&req->head->lock);
-       WRITE_ONCE(req->cancelled, true);
-       if (!list_empty(&req->wait.entry)) {
-               list_del_init(&req->wait.entry);
-               schedule_work(&aiocb->poll.work);
-       }
-       spin_unlock(&req->head->lock);
+       if (poll_iocb_lock_wq(req)) {
+               WRITE_ONCE(req->cancelled, true);
+               if (!req->work_scheduled) {
+                       schedule_work(&aiocb->poll.work);
+                       req->work_scheduled = true;
+               }
+               poll_iocb_unlock_wq(req);
+       } /* else, the request was force-cancelled by POLLFREE already */
 
        return 0;
 }
@@ -1680,21 +1740,27 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
        if (mask && !(mask & req->events))
                return 0;
 
-       list_del_init(&req->wait.entry);
-
-       if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
+       /*
+        * Complete the request inline if possible.  This requires that three
+        * conditions be met:
+        *   1. An event mask must have been passed.  If a plain wakeup was done
+        *      instead, then mask == 0 and we have to call vfs_poll() to get
+        *      the events, so inline completion isn't possible.
+        *   2. The completion work must not have already been scheduled.
+        *   3. ctx_lock must not be busy.  We have to use trylock because we
+        *      already hold the waitqueue lock, so this inverts the normal
+        *      locking order.  Use irqsave/irqrestore because not all
+        *      filesystems (e.g. fuse) call this function with IRQs disabled,
+        *      yet IRQs have to be disabled before ctx_lock is obtained.
+        */
+       if (mask && !req->work_scheduled &&
+           spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
                struct kioctx *ctx = iocb->ki_ctx;
 
-               /*
-                * Try to complete the iocb inline if we can. Use
-                * irqsave/irqrestore because not all filesystems (e.g. fuse)
-                * call this function with IRQs disabled and because IRQs
-                * have to be disabled before ctx_lock is obtained.
-                */
+               list_del_init(&req->wait.entry);
                list_del(&iocb->ki_list);
                iocb->ki_res.res = mangle_poll(mask);
-               req->done = true;
-               if (iocb->ki_eventfd && eventfd_signal_allowed()) {
+               if (iocb->ki_eventfd && !eventfd_signal_allowed()) {
                        iocb = NULL;
                        INIT_WORK(&req->work, aio_poll_put_work);
                        schedule_work(&req->work);
@@ -1703,7 +1769,43 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
                if (iocb)
                        iocb_put(iocb);
        } else {
-               schedule_work(&req->work);
+               /*
+                * Schedule the completion work if needed.  If it was already
+                * scheduled, record that another wakeup came in.
+                *
+                * Don't remove the request from the waitqueue here, as it might
+                * not actually be complete yet (we won't know until vfs_poll()
+                * is called), and we must not miss any wakeups.  POLLFREE is an
+                * exception to this; see below.
+                */
+               if (req->work_scheduled) {
+                       req->work_need_resched = true;
+               } else {
+                       schedule_work(&req->work);
+                       req->work_scheduled = true;
+               }
+
+               /*
+                * If the waitqueue is being freed early but we can't complete
+                * the request inline, we have to tear down the request as best
+                * we can.  That means immediately removing the request from its
+                * waitqueue and preventing all further accesses to the
+                * waitqueue via the request.  We also need to schedule the
+                * completion work (done above).  Also mark the request as
+                * cancelled, to potentially skip an unneeded call to ->poll().
+                */
+               if (mask & POLLFREE) {
+                       WRITE_ONCE(req->cancelled, true);
+                       list_del_init(&req->wait.entry);
+
+                       /*
+                        * Careful: this *must* be the last step, since as soon
+                        * as req->head is NULL'ed out, the request can be
+                        * completed and freed, since aio_poll_complete_work()
+                        * will no longer need to take the waitqueue lock.
+                        */
+                       smp_store_release(&req->head, NULL);
+               }
        }
        return 1;
 }
@@ -1711,6 +1813,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 struct aio_poll_table {
        struct poll_table_struct        pt;
        struct aio_kiocb                *iocb;
+       bool                            queued;
        int                             error;
 };
 
@@ -1721,11 +1824,12 @@ aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
        struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);
 
        /* multiple wait queues per file are not supported */
-       if (unlikely(pt->iocb->poll.head)) {
+       if (unlikely(pt->queued)) {
                pt->error = -EINVAL;
                return;
        }
 
+       pt->queued = true;
        pt->error = 0;
        pt->iocb->poll.head = head;
        add_wait_queue(head, &pt->iocb->poll.wait);
@@ -1750,12 +1854,14 @@ static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
        req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
 
        req->head = NULL;
-       req->done = false;
        req->cancelled = false;
+       req->work_scheduled = false;
+       req->work_need_resched = false;
 
        apt.pt._qproc = aio_poll_queue_proc;
        apt.pt._key = req->events;
        apt.iocb = aiocb;
+       apt.queued = false;
        apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
 
        /* initialized the list so that we can do list_empty checks */
@@ -1764,23 +1870,35 @@ static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
 
        mask = vfs_poll(req->file, &apt.pt) & req->events;
        spin_lock_irq(&ctx->ctx_lock);
-       if (likely(req->head)) {
-               spin_lock(&req->head->lock);
-               if (unlikely(list_empty(&req->wait.entry))) {
-                       if (apt.error)
+       if (likely(apt.queued)) {
+               bool on_queue = poll_iocb_lock_wq(req);
+
+               if (!on_queue || req->work_scheduled) {
+                       /*
+                        * aio_poll_wake() already either scheduled the async
+                        * completion work, or completed the request inline.
+                        */
+                       if (apt.error) /* unsupported case: multiple queues */
                                cancel = true;
                        apt.error = 0;
                        mask = 0;
                }
                if (mask || apt.error) {
+                       /* Steal to complete synchronously. */
                        list_del_init(&req->wait.entry);
                } else if (cancel) {
+                       /* Cancel if possible (may be too late though). */
                        WRITE_ONCE(req->cancelled, true);
-               } else if (!req->done) { /* actually waiting for an event */
+               } else if (on_queue) {
+                       /*
+                        * Actually waiting for an event, so add the request to
+                        * active_reqs so that it can be cancelled if needed.
+                        */
                        list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
                        aiocb->ki_cancel = aio_poll_cancel;
                }
-               spin_unlock(&req->head->lock);
+               if (on_queue)
+                       poll_iocb_unlock_wq(req);
        }
        if (mask) { /* no async, we'd stolen it */
                aiocb->ki_res.res = mangle_poll(mask);
index c3983bdaf4b886d1daa60c49c769c04c4702835e..f704339c6b8652a0c3cc1dded80c20b2426a7fb2 100644 (file)
@@ -463,8 +463,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
                BUG_ON(ret < 0);
                rcu_assign_pointer(root->node, cow);
 
-               btrfs_free_tree_block(trans, root, buf, parent_start,
-                                     last_ref);
+               btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
+                                     parent_start, last_ref);
                free_extent_buffer(buf);
                add_root_to_dirty_list(root);
        } else {
@@ -485,8 +485,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
                                return ret;
                        }
                }
-               btrfs_free_tree_block(trans, root, buf, parent_start,
-                                     last_ref);
+               btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
+                                     parent_start, last_ref);
        }
        if (unlock_orig)
                btrfs_tree_unlock(buf);
@@ -927,7 +927,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
                free_extent_buffer(mid);
 
                root_sub_used(root, mid->len);
-               btrfs_free_tree_block(trans, root, mid, 0, 1);
+               btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
                /* once for the root ptr */
                free_extent_buffer_stale(mid);
                return 0;
@@ -986,7 +986,8 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
                        btrfs_tree_unlock(right);
                        del_ptr(root, path, level + 1, pslot + 1);
                        root_sub_used(root, right->len);
-                       btrfs_free_tree_block(trans, root, right, 0, 1);
+                       btrfs_free_tree_block(trans, btrfs_root_id(root), right,
+                                             0, 1);
                        free_extent_buffer_stale(right);
                        right = NULL;
                } else {
@@ -1031,7 +1032,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
                btrfs_tree_unlock(mid);
                del_ptr(root, path, level + 1, pslot);
                root_sub_used(root, mid->len);
-               btrfs_free_tree_block(trans, root, mid, 0, 1);
+               btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
                free_extent_buffer_stale(mid);
                mid = NULL;
        } else {
@@ -4032,7 +4033,7 @@ static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
        root_sub_used(root, leaf->len);
 
        atomic_inc(&leaf->refs);
-       btrfs_free_tree_block(trans, root, leaf, 0, 1);
+       btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1);
        free_extent_buffer_stale(leaf);
 }
 /*
index 7553e9dc5f93830ffcaa1deeb407c13c5c4effc5..5fe5eccb3c874b5e978c2bb38149bddf77a1d5b7 100644 (file)
@@ -2257,6 +2257,11 @@ static inline bool btrfs_root_dead(const struct btrfs_root *root)
        return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0;
 }
 
+static inline u64 btrfs_root_id(const struct btrfs_root *root)
+{
+       return root->root_key.objectid;
+}
+
 /* struct btrfs_root_backup */
 BTRFS_SETGET_STACK_FUNCS(backup_tree_root, struct btrfs_root_backup,
                   tree_root, 64);
@@ -2719,7 +2724,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
                                             u64 empty_size,
                                             enum btrfs_lock_nesting nest);
 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
-                          struct btrfs_root *root,
+                          u64 root_id,
                           struct extent_buffer *buf,
                           u64 parent, int last_ref);
 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
index 2059d1504149a6326cfc2fc2bc34d9b9f1d8eb67..40c4d6ba3fb9a79d639f5b8d8de2cb018c8904be 100644 (file)
@@ -143,10 +143,13 @@ int btrfs_check_data_free_space(struct btrfs_inode *inode,
 
        /* Use new btrfs_qgroup_reserve_data to reserve precious data space. */
        ret = btrfs_qgroup_reserve_data(inode, reserved, start, len);
-       if (ret < 0)
+       if (ret < 0) {
                btrfs_free_reserved_data_space_noquota(fs_info, len);
-       else
+               extent_changeset_free(*reserved);
+               *reserved = NULL;
+       } else {
                ret = 0;
+       }
        return ret;
 }
 
@@ -452,8 +455,11 @@ int btrfs_delalloc_reserve_space(struct btrfs_inode *inode,
        if (ret < 0)
                return ret;
        ret = btrfs_delalloc_reserve_metadata(inode, len);
-       if (ret < 0)
+       if (ret < 0) {
                btrfs_free_reserved_data_space(inode, *reserved, start, len);
+               extent_changeset_free(*reserved);
+               *reserved = NULL;
+       }
        return ret;
 }
 
index 514ead6e93b6f2fc5dd687e365cedd1de4f8997f..b3f2e2232326ccca6185dc310628b273ddcd48f1 100644 (file)
@@ -1732,6 +1732,14 @@ again:
        }
        return root;
 fail:
+       /*
+        * If our caller provided us an anonymous device, then it's his
+        * responsability to free it in case we fail. So we have to set our
+        * root's anon_dev to 0 to avoid a double free, once by btrfs_put_root()
+        * and once again by our caller.
+        */
+       if (anon_dev)
+               root->anon_dev = 0;
        btrfs_put_root(root);
        return ERR_PTR(ret);
 }
index 3fd736a02c1e6d4ad5d2e26edda031416f33c8f3..25ef6e3fd3069f7b113be393fac05bc83c9c8295 100644 (file)
@@ -3275,20 +3275,20 @@ out_delayed_unlock:
 }
 
 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
-                          struct btrfs_root *root,
+                          u64 root_id,
                           struct extent_buffer *buf,
                           u64 parent, int last_ref)
 {
-       struct btrfs_fs_info *fs_info = root->fs_info;
+       struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_ref generic_ref = { 0 };
        int ret;
 
        btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF,
                               buf->start, buf->len, parent);
        btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf),
-                           root->root_key.objectid, 0, false);
+                           root_id, 0, false);
 
-       if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
+       if (root_id != BTRFS_TREE_LOG_OBJECTID) {
                btrfs_ref_tree_mod(fs_info, &generic_ref);
                ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL);
                BUG_ON(ret); /* -ENOMEM */
@@ -3298,7 +3298,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
                struct btrfs_block_group *cache;
                bool must_pin = false;
 
-               if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
+               if (root_id != BTRFS_TREE_LOG_OBJECTID) {
                        ret = check_ref_cleanup(trans, buf->start);
                        if (!ret) {
                                btrfs_redirty_list_add(trans->transaction, buf);
@@ -5472,7 +5472,8 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
                        goto owner_mismatch;
        }
 
-       btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
+       btrfs_free_tree_block(trans, btrfs_root_id(root), eb, parent,
+                             wc->refs[level] == 1);
 out:
        wc->refs[level] = 0;
        wc->flags[level] = 0;
@@ -6051,6 +6052,9 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
        int dev_ret = 0;
        int ret = 0;
 
+       if (range->start == U64_MAX)
+               return -EINVAL;
+
        /*
         * Check range overflow if range->len is set.
         * The default range->len is U64_MAX.
index 4e03a6d3aa324be2d0ff259e256e5d9956b35fbf..9234d96a7fd5c5e4647d3a29edd2b3a2f5b85767 100644 (file)
@@ -4313,6 +4313,20 @@ static void set_btree_ioerr(struct page *page, struct extent_buffer *eb)
        if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
                return;
 
+       /*
+        * A read may stumble upon this buffer later, make sure that it gets an
+        * error and knows there was an error.
+        */
+       clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
+
+       /*
+        * We need to set the mapping with the io error as well because a write
+        * error will flip the file system readonly, and then syncfs() will
+        * return a 0 because we are readonly if we don't modify the err seq for
+        * the superblock.
+        */
+       mapping_set_error(page->mapping, -EIO);
+
        /*
         * If we error out, we should add back the dirty_metadata_bytes
         * to make it consistent.
@@ -6597,6 +6611,14 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
        if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
                return 0;
 
+       /*
+        * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
+        * operation, which could potentially still be in flight.  In this case
+        * we simply want to return an error.
+        */
+       if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
+               return -EIO;
+
        if (eb->fs_info->sectorsize < PAGE_SIZE)
                return read_extent_buffer_subpage(eb, wait, mirror_num);
 
index a33bca94d133ecb6022df27da5e2655f58c93fc3..3abec44c62559df0b926230f6776bbf2a0e2e88b 100644 (file)
@@ -1256,8 +1256,8 @@ int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
        btrfs_tree_lock(free_space_root->node);
        btrfs_clean_tree_block(free_space_root->node);
        btrfs_tree_unlock(free_space_root->node);
-       btrfs_free_tree_block(trans, free_space_root, free_space_root->node,
-                             0, 1);
+       btrfs_free_tree_block(trans, btrfs_root_id(free_space_root),
+                             free_space_root->node, 0, 1);
 
        btrfs_put_root(free_space_root);
 
index 92138ac2a4e2aceee153fbfc101ffd5acff42aa1..edfecfe62b4b6f5dec5cdfe811474f3abd3d60ec 100644 (file)
@@ -617,11 +617,13 @@ static noinline int create_subvol(struct user_namespace *mnt_userns,
                 * Since we don't abort the transaction in this case, free the
                 * tree block so that we don't leak space and leave the
                 * filesystem in an inconsistent state (an extent item in the
-                * extent tree without backreferences). Also no need to have
-                * the tree block locked since it is not in any tree at this
-                * point, so no other task can find it and use it.
+                * extent tree with a backreference for a root that does not
+                * exists).
                 */
-               btrfs_free_tree_block(trans, root, leaf, 0, 1);
+               btrfs_tree_lock(leaf);
+               btrfs_clean_tree_block(leaf);
+               btrfs_tree_unlock(leaf);
+               btrfs_free_tree_block(trans, objectid, leaf, 0, 1);
                free_extent_buffer(leaf);
                goto fail;
        }
@@ -3187,10 +3189,8 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
                return -EPERM;
 
        vol_args = memdup_user(arg, sizeof(*vol_args));
-       if (IS_ERR(vol_args)) {
-               ret = PTR_ERR(vol_args);
-               goto out;
-       }
+       if (IS_ERR(vol_args))
+               return PTR_ERR(vol_args);
 
        if (vol_args->flags & ~BTRFS_DEVICE_REMOVE_ARGS_MASK) {
                ret = -EOPNOTSUPP;
index db680f5be745a0fd80cd8678b2190dfff8bdaa5c..6c037f1252b776279e8e2fe294264bc8091bb4dd 100644 (file)
@@ -1219,7 +1219,8 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
        btrfs_tree_lock(quota_root->node);
        btrfs_clean_tree_block(quota_root->node);
        btrfs_tree_unlock(quota_root->node);
-       btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);
+       btrfs_free_tree_block(trans, btrfs_root_id(quota_root),
+                             quota_root->node, 0, 1);
 
        btrfs_put_root(quota_root);
 
index 12ceb14a114168946166c3be3f2ee7cdc96e5a43..d20166336557697f7b3d48716d63727a37aa15fa 100644 (file)
@@ -334,7 +334,8 @@ int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
        key.offset = ref_id;
 again:
        ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
-       BUG_ON(ret < 0);
+       if (ret < 0)
+               goto out;
        if (ret == 0) {
                leaf = path->nodes[0];
                ref = btrfs_item_ptr(leaf, path->slots[0],
index 8ab33caf016f315129565ac66568e858e52dde56..6993dcdba6f1ad6dd466675b96fd7e5bfe459c4b 100644 (file)
@@ -1181,6 +1181,7 @@ again:
                                             parent_objectid, victim_name,
                                             victim_name_len);
                        if (ret < 0) {
+                               kfree(victim_name);
                                return ret;
                        } else if (!ret) {
                                ret = -ENOENT;
@@ -2908,6 +2909,8 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
                                                     path->nodes[*level]->len);
                                        if (ret)
                                                return ret;
+                                       btrfs_redirty_list_add(trans->transaction,
+                                                              next);
                                } else {
                                        if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
                                                clear_extent_buffer_dirty(next);
@@ -2988,6 +2991,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
                                                next->start, next->len);
                                if (ret)
                                        goto out;
+                               btrfs_redirty_list_add(trans->transaction, next);
                        } else {
                                if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
                                        clear_extent_buffer_dirty(next);
@@ -3438,8 +3442,6 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
                          EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
        extent_io_tree_release(&log->log_csum_range);
 
-       if (trans && log->node)
-               btrfs_redirty_list_add(trans->transaction, log->node);
        btrfs_put_root(log);
 }
 
@@ -3976,6 +3978,7 @@ search:
                        goto done;
                }
                if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
+                       ctx->last_dir_item_offset = min_key.offset;
                        ret = overwrite_item(trans, log, dst_path,
                                             path->nodes[0], path->slots[0],
                                             &min_key);
index 0997e3cd74e915c3f056eeff302a7cc1de7dba96..fd0ced829edb8288aad16b9222c7d8bd30801a63 100644 (file)
@@ -1370,8 +1370,10 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
 
        bytenr_orig = btrfs_sb_offset(0);
        ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
-       if (ret)
-               return ERR_PTR(ret);
+       if (ret) {
+               device = ERR_PTR(ret);
+               goto error_bdev_put;
+       }
 
        disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
        if (IS_ERR(disk_super)) {
index 67d932d707984cffd3f6d502fed36c307ad4c113..678a2946951196028da3473dd36471640f481488 100644 (file)
@@ -1860,6 +1860,7 @@ int btrfs_zone_finish(struct btrfs_block_group *block_group)
        block_group->alloc_offset = block_group->zone_capacity;
        block_group->free_space_ctl->free_space = 0;
        btrfs_clear_treelog_bg(block_group);
+       btrfs_clear_data_reloc_bg(block_group);
        spin_unlock(&block_group->lock);
 
        ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
@@ -1942,6 +1943,7 @@ void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 len
        ASSERT(block_group->alloc_offset == block_group->zone_capacity);
        ASSERT(block_group->free_space_ctl->free_space == 0);
        btrfs_clear_treelog_bg(block_group);
+       btrfs_clear_data_reloc_bg(block_group);
        spin_unlock(&block_group->lock);
 
        map = block_group->physical_map;
index b9460b6fb76f7c55999f9be71dcd93d014699a08..c447fa2e2d1feb89ff9333a22203ad81fe41b436 100644 (file)
@@ -4350,7 +4350,7 @@ void ceph_get_fmode(struct ceph_inode_info *ci, int fmode, int count)
 {
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->vfs_inode.i_sb);
        int bits = (fmode << 1) | 1;
-       bool is_opened = false;
+       bool already_opened = false;
        int i;
 
        if (count == 1)
@@ -4358,19 +4358,19 @@ void ceph_get_fmode(struct ceph_inode_info *ci, int fmode, int count)
 
        spin_lock(&ci->i_ceph_lock);
        for (i = 0; i < CEPH_FILE_MODE_BITS; i++) {
-               if (bits & (1 << i))
-                       ci->i_nr_by_mode[i] += count;
-
                /*
-                * If any of the mode ref is larger than 1,
+                * If any of the mode ref is larger than 0,
                 * that means it has been already opened by
                 * others. Just skip checking the PIN ref.
                 */
-               if (i && ci->i_nr_by_mode[i] > 1)
-                       is_opened = true;
+               if (i && ci->i_nr_by_mode[i])
+                       already_opened = true;
+
+               if (bits & (1 << i))
+                       ci->i_nr_by_mode[i] += count;
        }
 
-       if (!is_opened)
+       if (!already_opened)
                percpu_counter_inc(&mdsc->metric.opened_inodes);
        spin_unlock(&ci->i_ceph_lock);
 }
index 02a0a0fd9ccd51c7f4d11b60c875eaa081d0e2a6..c138e8126286cfe52996f982071003fbc95243a6 100644 (file)
@@ -605,13 +605,25 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
        in.cap.realm = cpu_to_le64(ci->i_snap_realm->ino);
        in.cap.flags = CEPH_CAP_FLAG_AUTH;
        in.ctime = in.mtime = in.atime = iinfo.btime;
-       in.mode = cpu_to_le32((u32)mode);
        in.truncate_seq = cpu_to_le32(1);
        in.truncate_size = cpu_to_le64(-1ULL);
        in.xattr_version = cpu_to_le64(1);
        in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid()));
-       in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_mode & S_ISGID ?
-                               dir->i_gid : current_fsgid()));
+       if (dir->i_mode & S_ISGID) {
+               in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_gid));
+
+               /* Directories always inherit the setgid bit. */
+               if (S_ISDIR(mode))
+                       mode |= S_ISGID;
+               else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
+                        !in_group_p(dir->i_gid) &&
+                        !capable_wrt_inode_uidgid(&init_user_ns, dir, CAP_FSETID))
+                       mode &= ~S_ISGID;
+       } else {
+               in.gid = cpu_to_le32(from_kgid(&init_user_ns, current_fsgid()));
+       }
+       in.mode = cpu_to_le32((u32)mode);
+
        in.nlink = cpu_to_le32(1);
        in.max_size = cpu_to_le64(lo->stripe_unit);
 
@@ -847,7 +859,7 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
        ssize_t ret;
        u64 off = iocb->ki_pos;
        u64 len = iov_iter_count(to);
-       u64 i_size;
+       u64 i_size = i_size_read(inode);
 
        dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
             (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
index 250aad330a1062a5cbfef584f8c60819ffb5a929..c30eefc0ac19346e086128e3872a8d1848ea1dc2 100644 (file)
@@ -3683,7 +3683,7 @@ static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
        struct ceph_pagelist *pagelist = recon_state->pagelist;
        struct dentry *dentry;
        char *path;
-       int pathlen, err;
+       int pathlen = 0, err;
        u64 pathbase;
        u64 snap_follows;
 
@@ -3703,7 +3703,6 @@ static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
                }
        } else {
                path = NULL;
-               pathlen = 0;
                pathbase = 0;
        }
 
index 6b705026da1a37d238348e0fefe06f022913bfe9..1060164b984a7459b5d2a05832e0b115490e8db9 100644 (file)
@@ -1562,6 +1562,10 @@ smbd_connected:
        /* fscache server cookies are based on primary channel only */
        if (!CIFS_SERVER_IS_CHAN(tcp_ses))
                cifs_fscache_get_client_cookie(tcp_ses);
+#ifdef CONFIG_CIFS_FSCACHE
+       else
+               tcp_ses->fscache = tcp_ses->primary_server->fscache;
+#endif /* CONFIG_CIFS_FSCACHE */
 
        /* queue echo request delayed work */
        queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval);
@@ -3046,12 +3050,6 @@ static int mount_get_conns(struct mount_ctx *mnt_ctx)
                                cifs_dbg(VFS, "read only mount of RW share\n");
                        /* no need to log a RW mount of a typical RW share */
                }
-               /*
-                * The cookie is initialized from volume info returned above.
-                * Inside cifs_fscache_get_super_cookie it checks
-                * that we do not get super cookie twice.
-                */
-               cifs_fscache_get_super_cookie(tcon);
        }
 
        /*
@@ -3066,6 +3064,13 @@ static int mount_get_conns(struct mount_ctx *mnt_ctx)
            (cifs_sb->ctx->rsize > server->ops->negotiate_rsize(tcon, ctx)))
                cifs_sb->ctx->rsize = server->ops->negotiate_rsize(tcon, ctx);
 
+       /*
+        * The cookie is initialized from volume info returned above.
+        * Inside cifs_fscache_get_super_cookie it checks
+        * that we do not get super cookie twice.
+        */
+       cifs_fscache_get_super_cookie(tcon);
+
 out:
        mnt_ctx->server = server;
        mnt_ctx->ses = ses;
@@ -3426,6 +3431,7 @@ static int connect_dfs_root(struct mount_ctx *mnt_ctx, struct dfs_cache_tgt_list
         */
        mount_put_conns(mnt_ctx);
        mount_get_dfs_conns(mnt_ctx);
+       set_root_ses(mnt_ctx);
 
        full_path = build_unc_path_to_root(ctx, cifs_sb, true);
        if (IS_ERR(full_path))
index 6a179ae753c118bdfc3fa3b78865431821baa60e..e3ed25dc6f3f6c4225273b5fe8839d9be3545433 100644 (file)
@@ -434,6 +434,42 @@ out:
        return rc;
 }
 
+/*
+ * Remove duplicate path delimiters. Windows is supposed to do that
+ * but there are some bugs that prevent rename from working if there are
+ * multiple delimiters.
+ *
+ * Returns a sanitized duplicate of @path. The caller is responsible for
+ * cleaning up the original.
+ */
+#define IS_DELIM(c) ((c) == '/' || (c) == '\\')
+static char *sanitize_path(char *path)
+{
+       char *cursor1 = path, *cursor2 = path;
+
+       /* skip all prepended delimiters */
+       while (IS_DELIM(*cursor1))
+               cursor1++;
+
+       /* copy the first letter */
+       *cursor2 = *cursor1;
+
+       /* copy the remainder... */
+       while (*(cursor1++)) {
+               /* ... skipping all duplicated delimiters */
+               if (IS_DELIM(*cursor1) && IS_DELIM(*cursor2))
+                       continue;
+               *(++cursor2) = *cursor1;
+       }
+
+       /* if the last character is a delimiter, skip it */
+       if (IS_DELIM(*(cursor2 - 1)))
+               cursor2--;
+
+       *(cursor2) = '\0';
+       return kstrdup(path, GFP_KERNEL);
+}
+
 /*
  * Parse a devname into substrings and populate the ctx->UNC and ctx->prepath
  * fields with the result. Returns 0 on success and an error otherwise
@@ -493,7 +529,7 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
        if (!*pos)
                return 0;
 
-       ctx->prepath = kstrdup(pos, GFP_KERNEL);
+       ctx->prepath = sanitize_path(pos);
        if (!ctx->prepath)
                return -ENOMEM;
 
index 7e409a38a2d7c184abf3ab0c1afbea2ccbfe9f6f..003c5f1f4dfb1a058afee9a917818f6c43891d75 100644 (file)
  * Key layout of CIFS server cache index object
  */
 struct cifs_server_key {
-       struct {
-               uint16_t        family;         /* address family */
-               __be16          port;           /* IP port */
-       } hdr;
-       union {
-               struct in_addr  ipv4_addr;
-               struct in6_addr ipv6_addr;
-       };
+       __u64 conn_id;
 } __packed;
 
 /*
@@ -31,42 +24,23 @@ struct cifs_server_key {
  */
 void cifs_fscache_get_client_cookie(struct TCP_Server_Info *server)
 {
-       const struct sockaddr *sa = (struct sockaddr *) &server->dstaddr;
-       const struct sockaddr_in *addr = (struct sockaddr_in *) sa;
-       const struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *) sa;
        struct cifs_server_key key;
-       uint16_t key_len = sizeof(key.hdr);
-
-       memset(&key, 0, sizeof(key));
 
        /*
-        * Should not be a problem as sin_family/sin6_family overlays
-        * sa_family field
+        * Check if the cookie was already initialized so we don't reinitialize it.
+        * In the future, as we integrate with newer fscache features,
+        * we may want to instead add a check if cookie has changed
         */
-       key.hdr.family = sa->sa_family;
-       switch (sa->sa_family) {
-       case AF_INET:
-               key.hdr.port = addr->sin_port;
-               key.ipv4_addr = addr->sin_addr;
-               key_len += sizeof(key.ipv4_addr);
-               break;
-
-       case AF_INET6:
-               key.hdr.port = addr6->sin6_port;
-               key.ipv6_addr = addr6->sin6_addr;
-               key_len += sizeof(key.ipv6_addr);
-               break;
-
-       default:
-               cifs_dbg(VFS, "Unknown network family '%d'\n", sa->sa_family);
-               server->fscache = NULL;
+       if (server->fscache)
                return;
-       }
+
+       memset(&key, 0, sizeof(key));
+       key.conn_id = server->conn_id;
 
        server->fscache =
                fscache_acquire_cookie(cifs_fscache_netfs.primary_index,
                                       &cifs_fscache_server_index_def,
-                                      &key, key_len,
+                                      &key, sizeof(key),
                                       NULL, 0,
                                       server, 0, true);
        cifs_dbg(FYI, "%s: (0x%p/0x%p)\n",
@@ -92,7 +66,7 @@ void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
         * In the future, as we integrate with newer fscache features,
         * we may want to instead add a check if cookie has changed
         */
-       if (tcon->fscache == NULL)
+       if (tcon->fscache)
                return;
 
        sharename = extract_sharename(tcon->treeName);
index 82848412ad85208f08d1fad12bc2871297c8f252..279622e4eb1c290b41d017bda10cb5cda93b5575 100644 (file)
@@ -1356,11 +1356,6 @@ iget_no_retry:
                goto out;
        }
 
-#ifdef CONFIG_CIFS_FSCACHE
-       /* populate tcon->resource_id */
-       tcon->resource_id = CIFS_I(inode)->uniqueid;
-#endif
-
        if (rc && tcon->pipe) {
                cifs_dbg(FYI, "ipc connection - fake read inode\n");
                spin_lock(&inode->i_lock);
@@ -1375,7 +1370,6 @@ iget_no_retry:
                iget_failed(inode);
                inode = ERR_PTR(rc);
        }
-
 out:
        kfree(path);
        free_xid(xid);
index af63548eaf26daf83b550853df91a07c6e0476e9..035dc3e245dca9f569afeceed54f8d0212f62968 100644 (file)
@@ -590,8 +590,8 @@ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
 {
        unsigned int tioffset; /* challenge message target info area */
        unsigned int tilen; /* challenge message target info area length  */
-
        CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr;
+       __u32 server_flags;
 
        if (blob_len < sizeof(CHALLENGE_MESSAGE)) {
                cifs_dbg(VFS, "challenge blob len %d too small\n", blob_len);
@@ -609,12 +609,37 @@ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
                return -EINVAL;
        }
 
+       server_flags = le32_to_cpu(pblob->NegotiateFlags);
+       cifs_dbg(FYI, "%s: negotiate=0x%08x challenge=0x%08x\n", __func__,
+                ses->ntlmssp->client_flags, server_flags);
+
+       if ((ses->ntlmssp->client_flags & (NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN)) &&
+           (!(server_flags & NTLMSSP_NEGOTIATE_56) && !(server_flags & NTLMSSP_NEGOTIATE_128))) {
+               cifs_dbg(VFS, "%s: requested signing/encryption but server did not return either 56-bit or 128-bit session key size\n",
+                        __func__);
+               return -EINVAL;
+       }
+       if (!(server_flags & NTLMSSP_NEGOTIATE_NTLM) && !(server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC)) {
+               cifs_dbg(VFS, "%s: server does not seem to support either NTLMv1 or NTLMv2\n", __func__);
+               return -EINVAL;
+       }
+       if (ses->server->sign && !(server_flags & NTLMSSP_NEGOTIATE_SIGN)) {
+               cifs_dbg(VFS, "%s: forced packet signing but server does not seem to support it\n",
+                        __func__);
+               return -EOPNOTSUPP;
+       }
+       if ((ses->ntlmssp->client_flags & NTLMSSP_NEGOTIATE_KEY_XCH) &&
+           !(server_flags & NTLMSSP_NEGOTIATE_KEY_XCH))
+               pr_warn_once("%s: authentication has been weakened as server does not support key exchange\n",
+                            __func__);
+
+       ses->ntlmssp->server_flags = server_flags;
+
        memcpy(ses->ntlmssp->cryptkey, pblob->Challenge, CIFS_CRYPTO_KEY_SIZE);
-       /* BB we could decode pblob->NegotiateFlags; some may be useful */
        /* In particular we can examine sign flags */
        /* BB spec says that if AvId field of MsvAvTimestamp is populated then
                we must set the MIC field of the AUTHENTICATE_MESSAGE */
-       ses->ntlmssp->server_flags = le32_to_cpu(pblob->NegotiateFlags);
+
        tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset);
        tilen = le16_to_cpu(pblob->TargetInfoArray.Length);
        if (tioffset > blob_len || tioffset + tilen > blob_len) {
@@ -721,13 +746,13 @@ int build_ntlmssp_negotiate_blob(unsigned char **pbuffer,
        flags = NTLMSSP_NEGOTIATE_56 |  NTLMSSP_REQUEST_TARGET |
                NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
                NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC |
-               NTLMSSP_NEGOTIATE_SEAL;
-       if (server->sign)
-               flags |= NTLMSSP_NEGOTIATE_SIGN;
+               NTLMSSP_NEGOTIATE_ALWAYS_SIGN | NTLMSSP_NEGOTIATE_SEAL |
+               NTLMSSP_NEGOTIATE_SIGN;
        if (!server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
                flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
 
        tmp = *pbuffer + sizeof(NEGOTIATE_MESSAGE);
+       ses->ntlmssp->client_flags = flags;
        sec_blob->NegotiateFlags = cpu_to_le32(flags);
 
        /* these fields should be null in negotiate phase MS-NLMP 3.1.5.1.1 */
@@ -779,15 +804,8 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
        memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
        sec_blob->MessageType = NtLmAuthenticate;
 
-       flags = NTLMSSP_NEGOTIATE_56 |
-               NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO |
-               NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
-               NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC |
-               NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED;
-       if (ses->server->sign)
-               flags |= NTLMSSP_NEGOTIATE_SIGN;
-       if (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
-               flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
+       flags = ses->ntlmssp->server_flags | NTLMSSP_REQUEST_TARGET |
+               NTLMSSP_NEGOTIATE_TARGET_INFO | NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED;
 
        tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
        sec_blob->NegotiateFlags = cpu_to_le32(flags);
@@ -834,9 +852,9 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
                                      *pbuffer, &tmp,
                                      nls_cp);
 
-       if (((ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) ||
-               (ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
-                       && !calc_seckey(ses)) {
+       if ((ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) &&
+           (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess) &&
+           !calc_seckey(ses)) {
                memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE);
                sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
                sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
index 8627dacfc4246fb975ed38f2b2dc8090f7c7202c..97d212a9b814454b83091e9a1dc40b76fe908b38 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -841,24 +841,68 @@ void do_close_on_exec(struct files_struct *files)
        spin_unlock(&files->file_lock);
 }
 
+static inline struct file *__fget_files_rcu(struct files_struct *files,
+       unsigned int fd, fmode_t mask, unsigned int refs)
+{
+       for (;;) {
+               struct file *file;
+               struct fdtable *fdt = rcu_dereference_raw(files->fdt);
+               struct file __rcu **fdentry;
+
+               if (unlikely(fd >= fdt->max_fds))
+                       return NULL;
+
+               fdentry = fdt->fd + array_index_nospec(fd, fdt->max_fds);
+               file = rcu_dereference_raw(*fdentry);
+               if (unlikely(!file))
+                       return NULL;
+
+               if (unlikely(file->f_mode & mask))
+                       return NULL;
+
+               /*
+                * Ok, we have a file pointer. However, because we do
+                * this all locklessly under RCU, we may be racing with
+                * that file being closed.
+                *
+                * Such a race can take two forms:
+                *
+                *  (a) the file ref already went down to zero,
+                *      and get_file_rcu_many() fails. Just try
+                *      again:
+                */
+               if (unlikely(!get_file_rcu_many(file, refs)))
+                       continue;
+
+               /*
+                *  (b) the file table entry has changed under us.
+                *       Note that we don't need to re-check the 'fdt->fd'
+                *       pointer having changed, because it always goes
+                *       hand-in-hand with 'fdt'.
+                *
+                * If so, we need to put our refs and try again.
+                */
+               if (unlikely(rcu_dereference_raw(files->fdt) != fdt) ||
+                   unlikely(rcu_dereference_raw(*fdentry) != file)) {
+                       fput_many(file, refs);
+                       continue;
+               }
+
+               /*
+                * Ok, we have a ref to the file, and checked that it
+                * still exists.
+                */
+               return file;
+       }
+}
+
 static struct file *__fget_files(struct files_struct *files, unsigned int fd,
                                 fmode_t mask, unsigned int refs)
 {
        struct file *file;
 
        rcu_read_lock();
-loop:
-       file = files_lookup_fd_rcu(files, fd);
-       if (file) {
-               /* File object ref couldn't be taken.
-                * dup2() atomicity guarantee is the reason
-                * we loop to catch the new file (or NULL pointer)
-                */
-               if (file->f_mode & mask)
-                       file = NULL;
-               else if (!get_file_rcu_many(file, refs))
-                       goto loop;
-       }
+       file = __fget_files_rcu(files, fd, mask, refs);
        rcu_read_unlock();
 
        return file;
index 8dbd6fe664204b8a7bee7b8908fdbe6c3aa312b2..44a7a4288956b60d174340befd7ee3b19bc8c8e3 100644 (file)
@@ -1857,7 +1857,6 @@ void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
 
 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
 {
-       struct gfs2_holder mock_gh = { .gh_gl = gl, .gh_state = state, };
        unsigned long delay = 0;
        unsigned long holdtime;
        unsigned long now = jiffies;
@@ -1890,8 +1889,13 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
         * keep the glock until the last strong holder is done with it.
         */
        if (!find_first_strong_holder(gl)) {
-               if (state == LM_ST_UNLOCKED)
-                       mock_gh.gh_state = LM_ST_EXCLUSIVE;
+               struct gfs2_holder mock_gh = {
+                       .gh_gl = gl,
+                       .gh_state = (state == LM_ST_UNLOCKED) ?
+                                   LM_ST_EXCLUSIVE : state,
+                       .gh_iflags = BIT(HIF_HOLDER)
+               };
+
                demote_incompat_holders(gl, &mock_gh);
        }
        handle_callback(gl, state, delay, true);
index 6424b903e88515f191bc21b7f4b72cd51c0a70f6..89905f4f29bb6de91e181373c4f168431f3e8e74 100644 (file)
@@ -40,37 +40,6 @@ static const struct inode_operations gfs2_file_iops;
 static const struct inode_operations gfs2_dir_iops;
 static const struct inode_operations gfs2_symlink_iops;
 
-static int iget_test(struct inode *inode, void *opaque)
-{
-       u64 no_addr = *(u64 *)opaque;
-
-       return GFS2_I(inode)->i_no_addr == no_addr;
-}
-
-static int iget_set(struct inode *inode, void *opaque)
-{
-       u64 no_addr = *(u64 *)opaque;
-
-       GFS2_I(inode)->i_no_addr = no_addr;
-       inode->i_ino = no_addr;
-       return 0;
-}
-
-static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
-{
-       struct inode *inode;
-
-repeat:
-       inode = iget5_locked(sb, no_addr, iget_test, iget_set, &no_addr);
-       if (!inode)
-               return inode;
-       if (is_bad_inode(inode)) {
-               iput(inode);
-               goto repeat;
-       }
-       return inode;
-}
-
 /**
  * gfs2_set_iop - Sets inode operations
  * @inode: The inode with correct i_mode filled in
@@ -104,6 +73,22 @@ static void gfs2_set_iop(struct inode *inode)
        }
 }
 
+static int iget_test(struct inode *inode, void *opaque)
+{
+       u64 no_addr = *(u64 *)opaque;
+
+       return GFS2_I(inode)->i_no_addr == no_addr;
+}
+
+static int iget_set(struct inode *inode, void *opaque)
+{
+       u64 no_addr = *(u64 *)opaque;
+
+       GFS2_I(inode)->i_no_addr = no_addr;
+       inode->i_ino = no_addr;
+       return 0;
+}
+
 /**
  * gfs2_inode_lookup - Lookup an inode
  * @sb: The super block
@@ -132,12 +117,11 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 {
        struct inode *inode;
        struct gfs2_inode *ip;
-       struct gfs2_glock *io_gl = NULL;
        struct gfs2_holder i_gh;
        int error;
 
        gfs2_holder_mark_uninitialized(&i_gh);
-       inode = gfs2_iget(sb, no_addr);
+       inode = iget5_locked(sb, no_addr, iget_test, iget_set, &no_addr);
        if (!inode)
                return ERR_PTR(-ENOMEM);
 
@@ -145,22 +129,16 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 
        if (inode->i_state & I_NEW) {
                struct gfs2_sbd *sdp = GFS2_SB(inode);
+               struct gfs2_glock *io_gl;
 
                error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
                if (unlikely(error))
                        goto fail;
-               flush_delayed_work(&ip->i_gl->gl_work);
-
-               error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
-               if (unlikely(error))
-                       goto fail;
-               if (blktype != GFS2_BLKST_UNLINKED)
-                       gfs2_cancel_delete_work(io_gl);
 
                if (type == DT_UNKNOWN || blktype != GFS2_BLKST_FREE) {
                        /*
                         * The GL_SKIP flag indicates to skip reading the inode
-                        * block.  We read the inode with gfs2_inode_refresh
+                        * block.  We read the inode when instantiating it
                         * after possibly checking the block type.
                         */
                        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE,
@@ -181,24 +159,31 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
                        }
                }
 
-               glock_set_object(ip->i_gl, ip);
                set_bit(GLF_INSTANTIATE_NEEDED, &ip->i_gl->gl_flags);
-               error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
+
+               error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
                if (unlikely(error))
                        goto fail;
-               glock_set_object(ip->i_iopen_gh.gh_gl, ip);
+               if (blktype != GFS2_BLKST_UNLINKED)
+                       gfs2_cancel_delete_work(io_gl);
+               error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
                gfs2_glock_put(io_gl);
-               io_gl = NULL;
+               if (unlikely(error))
+                       goto fail;
 
                /* Lowest possible timestamp; will be overwritten in gfs2_dinode_in. */
                inode->i_atime.tv_sec = 1LL << (8 * sizeof(inode->i_atime.tv_sec) - 1);
                inode->i_atime.tv_nsec = 0;
 
+               glock_set_object(ip->i_gl, ip);
+
                if (type == DT_UNKNOWN) {
                        /* Inode glock must be locked already */
                        error = gfs2_instantiate(&i_gh);
-                       if (error)
+                       if (error) {
+                               glock_clear_object(ip->i_gl, ip);
                                goto fail;
+                       }
                } else {
                        ip->i_no_formal_ino = no_formal_ino;
                        inode->i_mode = DT2IF(type);
@@ -206,31 +191,23 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 
                if (gfs2_holder_initialized(&i_gh))
                        gfs2_glock_dq_uninit(&i_gh);
+               glock_set_object(ip->i_iopen_gh.gh_gl, ip);
 
                gfs2_set_iop(inode);
+               unlock_new_inode(inode);
        }
 
        if (no_formal_ino && ip->i_no_formal_ino &&
            no_formal_ino != ip->i_no_formal_ino) {
-               error = -ESTALE;
-               if (inode->i_state & I_NEW)
-                       goto fail;
                iput(inode);
-               return ERR_PTR(error);
+               return ERR_PTR(-ESTALE);
        }
 
-       if (inode->i_state & I_NEW)
-               unlock_new_inode(inode);
-
        return inode;
 
 fail:
-       if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
-               glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
+       if (gfs2_holder_initialized(&ip->i_iopen_gh))
                gfs2_glock_dq_uninit(&ip->i_iopen_gh);
-       }
-       if (io_gl)
-               gfs2_glock_put(io_gl);
        if (gfs2_holder_initialized(&i_gh))
                gfs2_glock_dq_uninit(&i_gh);
        iget_failed(inode);
@@ -730,18 +707,19 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
        if (error)
                goto fail_free_inode;
-       flush_delayed_work(&ip->i_gl->gl_work);
 
        error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
        if (error)
                goto fail_free_inode;
        gfs2_cancel_delete_work(io_gl);
 
+       error = insert_inode_locked4(inode, ip->i_no_addr, iget_test, &ip->i_no_addr);
+       BUG_ON(error);
+
        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
        if (error)
                goto fail_gunlock2;
 
-       glock_set_object(ip->i_gl, ip);
        error = gfs2_trans_begin(sdp, blocks, 0);
        if (error)
                goto fail_gunlock2;
@@ -757,9 +735,9 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        if (error)
                goto fail_gunlock2;
 
+       glock_set_object(ip->i_gl, ip);
        glock_set_object(io_gl, ip);
        gfs2_set_iop(inode);
-       insert_inode_hash(inode);
 
        free_vfs_inode = 0; /* After this point, the inode is no longer
                               considered free. Any failures need to undo
@@ -801,17 +779,17 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        gfs2_glock_dq_uninit(ghs + 1);
        gfs2_glock_put(io_gl);
        gfs2_qa_put(dip);
+       unlock_new_inode(inode);
        return error;
 
 fail_gunlock3:
+       glock_clear_object(ip->i_gl, ip);
        glock_clear_object(io_gl, ip);
        gfs2_glock_dq_uninit(&ip->i_iopen_gh);
 fail_gunlock2:
-       glock_clear_object(io_gl, ip);
        gfs2_glock_put(io_gl);
 fail_free_inode:
        if (ip->i_gl) {
-               glock_clear_object(ip->i_gl, ip);
                if (free_vfs_inode) /* else evict will do the put for us */
                        gfs2_glock_put(ip->i_gl);
        }
@@ -829,7 +807,10 @@ fail_gunlock:
                        mark_inode_dirty(inode);
                set_bit(free_vfs_inode ? GIF_FREE_VFS_INODE : GIF_ALLOC_FAILED,
                        &GFS2_I(inode)->i_flags);
-               iput(inode);
+               if (inode->i_state & I_NEW)
+                       iget_failed(inode);
+               else
+                       iput(inode);
        }
        if (gfs2_holder_initialized(ghs + 1))
                gfs2_glock_dq_uninit(ghs + 1);
index 88202de519f6d1755103f435ae6b2eea14e55adb..5c4f582d6549a11a8301fd793488a516fc41aaf0 100644 (file)
@@ -142,6 +142,7 @@ static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
                                        struct io_wqe_acct *acct,
                                        struct io_cb_cancel_data *match);
 static void create_worker_cb(struct callback_head *cb);
+static void io_wq_cancel_tw_create(struct io_wq *wq);
 
 static bool io_worker_get(struct io_worker *worker)
 {
@@ -357,12 +358,22 @@ static bool io_queue_worker_create(struct io_worker *worker,
            test_and_set_bit_lock(0, &worker->create_state))
                goto fail_release;
 
+       atomic_inc(&wq->worker_refs);
        init_task_work(&worker->create_work, func);
        worker->create_index = acct->index;
        if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
-               clear_bit_unlock(0, &worker->create_state);
+               /*
+                * EXIT may have been set after checking it above, check after
+                * adding the task_work and remove any creation item if it is
+                * now set. wq exit does that too, but we can have added this
+                * work item after we canceled in io_wq_exit_workers().
+                */
+               if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
+                       io_wq_cancel_tw_create(wq);
+               io_worker_ref_put(wq);
                return true;
        }
+       io_worker_ref_put(wq);
        clear_bit_unlock(0, &worker->create_state);
 fail_release:
        io_worker_release(worker);
@@ -384,7 +395,9 @@ static void io_wqe_dec_running(struct io_worker *worker)
        if (atomic_dec_and_test(&acct->nr_running) && io_acct_run_queue(acct)) {
                atomic_inc(&acct->nr_running);
                atomic_inc(&wqe->wq->worker_refs);
+               raw_spin_unlock(&wqe->lock);
                io_queue_worker_create(worker, acct, create_worker_cb);
+               raw_spin_lock(&wqe->lock);
        }
 }
 
@@ -714,6 +727,13 @@ static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
 
 static inline bool io_should_retry_thread(long err)
 {
+       /*
+        * Prevent perpetual task_work retry, if the task (or its group) is
+        * exiting.
+        */
+       if (fatal_signal_pending(current))
+               return false;
+
        switch (err) {
        case -EAGAIN:
        case -ERESTARTSYS:
@@ -1191,13 +1211,9 @@ void io_wq_exit_start(struct io_wq *wq)
        set_bit(IO_WQ_BIT_EXIT, &wq->state);
 }
 
-static void io_wq_exit_workers(struct io_wq *wq)
+static void io_wq_cancel_tw_create(struct io_wq *wq)
 {
        struct callback_head *cb;
-       int node;
-
-       if (!wq->task)
-               return;
 
        while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
                struct io_worker *worker;
@@ -1205,6 +1221,16 @@ static void io_wq_exit_workers(struct io_wq *wq)
                worker = container_of(cb, struct io_worker, create_work);
                io_worker_cancel_cb(worker);
        }
+}
+
+static void io_wq_exit_workers(struct io_wq *wq)
+{
+       int node;
+
+       if (!wq->task)
+               return;
+
+       io_wq_cancel_tw_create(wq);
 
        rcu_read_lock();
        for_each_node(node) {
index c4f217613f56e3945c95d22f9f6c421544478f61..fb2a0cb4aaf835d872b9276534c714d3a789745b 100644 (file)
@@ -2891,9 +2891,13 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;
 
        kiocb->ki_pos = READ_ONCE(sqe->off);
-       if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) {
-               req->flags |= REQ_F_CUR_POS;
-               kiocb->ki_pos = file->f_pos;
+       if (kiocb->ki_pos == -1) {
+               if (!(file->f_mode & FMODE_STREAM)) {
+                       req->flags |= REQ_F_CUR_POS;
+                       kiocb->ki_pos = file->f_pos;
+               } else {
+                       kiocb->ki_pos = 0;
+               }
        }
        kiocb->ki_flags = iocb_flags(file);
        ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
@@ -9824,7 +9828,7 @@ static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
 
 /*
  * Find any io_uring ctx that this task has registered or done IO on, and cancel
- * requests. @sqd should be not-null IIF it's an SQPOLL thread cancellation.
+ * requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation.
  */
 static __cold void io_uring_cancel_generic(bool cancel_all,
                                           struct io_sq_data *sqd)
@@ -9866,8 +9870,10 @@ static __cold void io_uring_cancel_generic(bool cancel_all,
                                                             cancel_all);
                }
 
-               prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
+               prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
+               io_run_task_work();
                io_uring_drop_tctx_refs(current);
+
                /*
                 * If we've seen completions, retry without waiting. This
                 * avoids a race where a completion comes in before we did
index 8317f7ca402b4f5864a1e70b7d31152bebc44e37..5052be9261d91efb810757256efb913eb97ebc7b 100644 (file)
@@ -148,7 +148,7 @@ static int ndr_read_int16(struct ndr *n, __u16 *value)
 static int ndr_read_int32(struct ndr *n, __u32 *value)
 {
        if (n->offset + sizeof(__u32) > n->length)
-               return 0;
+               return -EINVAL;
 
        if (value)
                *value = le32_to_cpu(*(__le32 *)ndr_get_field(n));
index 0a5d8450e835fa4a804d7f05ab78ce46f41df9f0..02a44d28bdafc2a0d655e78e6d60826816186522 100644 (file)
@@ -271,9 +271,6 @@ int init_smb3_11_server(struct ksmbd_conn *conn)
        if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
                conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
 
-       if (conn->cipher_type)
-               conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
-
        if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL)
                conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL;
 
index 49c9da37315c8383c3ed6de855dcaa14a5604322..b8b3a4c28b749a9317d57511aae3a49d5739db8f 100644 (file)
@@ -915,6 +915,25 @@ static void decode_encrypt_ctxt(struct ksmbd_conn *conn,
        }
 }
 
+/**
+ * smb3_encryption_negotiated() - checks if server and client agreed on enabling encryption
+ * @conn:      smb connection
+ *
+ * Return:     true if connection should be encrypted, else false
+ */
+static bool smb3_encryption_negotiated(struct ksmbd_conn *conn)
+{
+       if (!conn->ops->generate_encryptionkey)
+               return false;
+
+       /*
+        * SMB 3.0 and 3.0.2 dialects use the SMB2_GLOBAL_CAP_ENCRYPTION flag.
+        * SMB 3.1.1 uses the cipher_type field.
+        */
+       return (conn->vals->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) ||
+           conn->cipher_type;
+}
+
 static void decode_compress_ctxt(struct ksmbd_conn *conn,
                                 struct smb2_compression_capabilities_context *pneg_ctxt)
 {
@@ -1469,8 +1488,7 @@ static int ntlm_authenticate(struct ksmbd_work *work)
                    (req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED))
                        sess->sign = true;
 
-               if (conn->vals->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION &&
-                   conn->ops->generate_encryptionkey &&
+               if (smb3_encryption_negotiated(conn) &&
                    !(req->Flags & SMB2_SESSION_REQ_FLAG_BINDING)) {
                        rc = conn->ops->generate_encryptionkey(sess);
                        if (rc) {
@@ -1559,8 +1577,7 @@ static int krb5_authenticate(struct ksmbd_work *work)
            (req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED))
                sess->sign = true;
 
-       if ((conn->vals->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) &&
-           conn->ops->generate_encryptionkey) {
+       if (smb3_encryption_negotiated(conn)) {
                retval = conn->ops->generate_encryptionkey(sess);
                if (retval) {
                        ksmbd_debug(SMB,
@@ -2962,6 +2979,10 @@ int smb2_open(struct ksmbd_work *work)
                                                            &pntsd_size, &fattr);
                                        posix_acl_release(fattr.cf_acls);
                                        posix_acl_release(fattr.cf_dacls);
+                                       if (rc) {
+                                               kfree(pntsd);
+                                               goto err_out;
+                                       }
 
                                        rc = ksmbd_vfs_set_sd_xattr(conn,
                                                                    user_ns,
index 9320a42dfaf9737629045c4d81908f96993edd39..75c76cbb27ccfd130943f8a9810f5854e4cb09f6 100644 (file)
@@ -354,16 +354,11 @@ static void netfs_rreq_write_to_cache_work(struct work_struct *work)
        netfs_rreq_do_write_to_cache(rreq);
 }
 
-static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq,
-                                     bool was_async)
+static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq)
 {
-       if (was_async) {
-               rreq->work.func = netfs_rreq_write_to_cache_work;
-               if (!queue_work(system_unbound_wq, &rreq->work))
-                       BUG();
-       } else {
-               netfs_rreq_do_write_to_cache(rreq);
-       }
+       rreq->work.func = netfs_rreq_write_to_cache_work;
+       if (!queue_work(system_unbound_wq, &rreq->work))
+               BUG();
 }
 
 /*
@@ -558,7 +553,7 @@ again:
        wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
 
        if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags))
-               return netfs_rreq_write_to_cache(rreq, was_async);
+               return netfs_rreq_write_to_cache(rreq);
 
        netfs_rreq_completed(rreq, was_async);
 }
@@ -960,7 +955,7 @@ int netfs_readpage(struct file *file,
        rreq = netfs_alloc_read_request(ops, netfs_priv, file);
        if (!rreq) {
                if (netfs_priv)
-                       ops->cleanup(netfs_priv, folio_file_mapping(folio));
+                       ops->cleanup(folio_file_mapping(folio), netfs_priv);
                folio_unlock(folio);
                return -ENOMEM;
        }
@@ -1008,8 +1003,8 @@ out:
 }
 EXPORT_SYMBOL(netfs_readpage);
 
-/**
- * netfs_skip_folio_read - prep a folio for writing without reading first
+/*
+ * Prepare a folio for writing without reading first
  * @folio: The folio being prepared
  * @pos: starting position for the write
  * @len: length of write
@@ -1191,7 +1186,7 @@ have_folio:
                goto error;
 have_folio_no_wait:
        if (netfs_priv)
-               ops->cleanup(netfs_priv, mapping);
+               ops->cleanup(mapping, netfs_priv);
        *_folio = folio;
        _leave(" = 0");
        return 0;
@@ -1202,7 +1197,7 @@ error:
        folio_unlock(folio);
        folio_put(folio);
        if (netfs_priv)
-               ops->cleanup(netfs_priv, mapping);
+               ops->cleanup(mapping, netfs_priv);
        _leave(" = %d", ret);
        return ret;
 }
index 4418517f6f120aa0a19b3187d81045f318918650..15dac36ca852e1eb268c8089ecdf43f9dd25d4d6 100644 (file)
@@ -438,22 +438,19 @@ nfsd3_proc_link(struct svc_rqst *rqstp)
 
 static void nfsd3_init_dirlist_pages(struct svc_rqst *rqstp,
                                     struct nfsd3_readdirres *resp,
-                                    int count)
+                                    u32 count)
 {
        struct xdr_buf *buf = &resp->dirlist;
        struct xdr_stream *xdr = &resp->xdr;
 
-       count = min_t(u32, count, svc_max_payload(rqstp));
+       count = clamp(count, (u32)(XDR_UNIT * 2), svc_max_payload(rqstp));
 
        memset(buf, 0, sizeof(*buf));
 
        /* Reserve room for the NULL ptr & eof flag (-2 words) */
        buf->buflen = count - XDR_UNIT * 2;
        buf->pages = rqstp->rq_next_page;
-       while (count > 0) {
-               rqstp->rq_next_page++;
-               count -= PAGE_SIZE;
-       }
+       rqstp->rq_next_page += (buf->buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
        /* This is xdr_init_encode(), but it assumes that
         * the head kvec has already been consumed. */
@@ -462,7 +459,7 @@ static void nfsd3_init_dirlist_pages(struct svc_rqst *rqstp,
        xdr->page_ptr = buf->pages;
        xdr->iov = NULL;
        xdr->p = page_address(*buf->pages);
-       xdr->end = xdr->p + (PAGE_SIZE >> 2);
+       xdr->end = (void *)xdr->p + min_t(u32, buf->buflen, PAGE_SIZE);
        xdr->rqst = NULL;
 }
 
index 6fedc49726bf7c7b1d27ff692c3913fefbb1ccc9..c634483d85d2a312db0b69b61294d35bab845d9d 100644 (file)
@@ -2156,6 +2156,7 @@ static struct notifier_block nfsd4_cld_block = {
 int
 register_cld_notifier(void)
 {
+       WARN_ON(!nfsd_net_id);
        return rpc_pipefs_notifier_register(&nfsd4_cld_block);
 }
 
index bfad94c70b84bcb0b95e55504dfd9c76e5d25d0f..1956d377d1a608e3a43ac87eff0ba770dcaddc62 100644 (file)
@@ -1207,6 +1207,11 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
        return 0;
 }
 
+static bool delegation_hashed(struct nfs4_delegation *dp)
+{
+       return !(list_empty(&dp->dl_perfile));
+}
+
 static bool
 unhash_delegation_locked(struct nfs4_delegation *dp)
 {
@@ -1214,7 +1219,7 @@ unhash_delegation_locked(struct nfs4_delegation *dp)
 
        lockdep_assert_held(&state_lock);
 
-       if (list_empty(&dp->dl_perfile))
+       if (!delegation_hashed(dp))
                return false;
 
        dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
@@ -4598,7 +4603,7 @@ static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
         * queued for a lease break. Don't queue it again.
         */
        spin_lock(&state_lock);
-       if (dp->dl_time == 0) {
+       if (delegation_hashed(dp) && dp->dl_time == 0) {
                dp->dl_time = ktime_get_boottime_seconds();
                list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
        }
index af8531c3854a93204bc2f785068ca1d429628a7e..51a49e0cfe3762e84f324f900b82f890dcfa5425 100644 (file)
@@ -1521,12 +1521,9 @@ static int __init init_nfsd(void)
        int retval;
        printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n");
 
-       retval = register_cld_notifier();
-       if (retval)
-               return retval;
        retval = nfsd4_init_slabs();
        if (retval)
-               goto out_unregister_notifier;
+               return retval;
        retval = nfsd4_init_pnfs();
        if (retval)
                goto out_free_slabs;
@@ -1545,9 +1542,14 @@ static int __init init_nfsd(void)
                goto out_free_exports;
        retval = register_pernet_subsys(&nfsd_net_ops);
        if (retval < 0)
+               goto out_free_filesystem;
+       retval = register_cld_notifier();
+       if (retval)
                goto out_free_all;
        return 0;
 out_free_all:
+       unregister_pernet_subsys(&nfsd_net_ops);
+out_free_filesystem:
        unregister_filesystem(&nfsd_fs_type);
 out_free_exports:
        remove_proc_entry("fs/nfs/exports", NULL);
@@ -1561,13 +1563,12 @@ out_free_pnfs:
        nfsd4_exit_pnfs();
 out_free_slabs:
        nfsd4_free_slabs();
-out_unregister_notifier:
-       unregister_cld_notifier();
        return retval;
 }
 
 static void __exit exit_nfsd(void)
 {
+       unregister_cld_notifier();
        unregister_pernet_subsys(&nfsd_net_ops);
        nfsd_drc_slab_free();
        remove_proc_entry("fs/nfs/exports", NULL);
@@ -1577,7 +1578,6 @@ static void __exit exit_nfsd(void)
        nfsd4_free_slabs();
        nfsd4_exit_pnfs();
        unregister_filesystem(&nfsd_fs_type);
-       unregister_cld_notifier();
 }
 
 MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
index eea5b59b6a6cae47ab51c08848ab21e601464578..de282f3273c501bd51a1991e83a8069123e10532 100644 (file)
@@ -556,17 +556,17 @@ nfsd_proc_rmdir(struct svc_rqst *rqstp)
 
 static void nfsd_init_dirlist_pages(struct svc_rqst *rqstp,
                                    struct nfsd_readdirres *resp,
-                                   int count)
+                                   u32 count)
 {
        struct xdr_buf *buf = &resp->dirlist;
        struct xdr_stream *xdr = &resp->xdr;
 
-       count = min_t(u32, count, PAGE_SIZE);
+       count = clamp(count, (u32)(XDR_UNIT * 2), svc_max_payload(rqstp));
 
        memset(buf, 0, sizeof(*buf));
 
        /* Reserve room for the NULL ptr & eof flag (-2 words) */
-       buf->buflen = count - sizeof(__be32) * 2;
+       buf->buflen = count - XDR_UNIT * 2;
        buf->pages = rqstp->rq_next_page;
        rqstp->rq_next_page++;
 
@@ -577,7 +577,7 @@ static void nfsd_init_dirlist_pages(struct svc_rqst *rqstp,
        xdr->page_ptr = buf->pages;
        xdr->iov = NULL;
        xdr->p = page_address(*buf->pages);
-       xdr->end = xdr->p + (PAGE_SIZE >> 2);
+       xdr->end = (void *)xdr->p + min_t(u32, buf->buflen, PAGE_SIZE);
        xdr->rqst = NULL;
 }
 
index 040e1cf9052826ef08bf269aeb63d11c073d16b4..65ce0e72e7b9588d344ee715cee6838f7e1cb4e1 100644 (file)
 
 void signalfd_cleanup(struct sighand_struct *sighand)
 {
-       wait_queue_head_t *wqh = &sighand->signalfd_wqh;
-       /*
-        * The lockless check can race with remove_wait_queue() in progress,
-        * but in this case its caller should run under rcu_read_lock() and
-        * sighand_cachep is SLAB_TYPESAFE_BY_RCU, we can safely return.
-        */
-       if (likely(!waitqueue_active(wqh)))
-               return;
-
-       /* wait_queue_entry_t->func(POLLFREE) should do remove_wait_queue() */
-       wake_up_poll(wqh, EPOLLHUP | POLLFREE);
+       wake_up_pollfree(&sighand->signalfd_wqh);
 }
 
 struct signalfd_ctx {
index 85ba15a60b13b36887e0bb3901f8125eb1f6b97b..043e4cb839fa235f7c2e05fac3fe558e82e9d4f2 100644 (file)
@@ -72,16 +72,3 @@ void cifs_arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int l
        ctx->y = y;
 }
 EXPORT_SYMBOL_GPL(cifs_arc4_crypt);
-
-static int __init
-init_smbfs_common(void)
-{
-       return 0;
-}
-static void __init
-exit_smbfs_common(void)
-{
-}
-
-module_init(init_smbfs_common)
-module_exit(exit_smbfs_common)
index 925a621b432e3c0c74e4fa329ad9689484bc9518..3616839c5c4b64e1c8a8c389bc3292b8262d7f5e 100644 (file)
@@ -161,6 +161,77 @@ struct tracefs_fs_info {
        struct tracefs_mount_opts mount_opts;
 };
 
+static void change_gid(struct dentry *dentry, kgid_t gid)
+{
+       if (!dentry->d_inode)
+               return;
+       dentry->d_inode->i_gid = gid;
+}
+
+/*
+ * Taken from d_walk, but without the need for handling renames.
+ * Nothing can be renamed while walking the list, as tracefs
+ * does not support renames. This is only called when mounting
+ * or remounting the file system, to set all the files to
+ * the given gid.
+ */
+static void set_gid(struct dentry *parent, kgid_t gid)
+{
+       struct dentry *this_parent;
+       struct list_head *next;
+
+       this_parent = parent;
+       spin_lock(&this_parent->d_lock);
+
+       change_gid(this_parent, gid);
+repeat:
+       next = this_parent->d_subdirs.next;
+resume:
+       while (next != &this_parent->d_subdirs) {
+               struct list_head *tmp = next;
+               struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
+               next = tmp->next;
+
+               spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+
+               change_gid(dentry, gid);
+
+               if (!list_empty(&dentry->d_subdirs)) {
+                       spin_unlock(&this_parent->d_lock);
+                       spin_release(&dentry->d_lock.dep_map, _RET_IP_);
+                       this_parent = dentry;
+                       spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
+                       goto repeat;
+               }
+               spin_unlock(&dentry->d_lock);
+       }
+       /*
+        * All done at this level ... ascend and resume the search.
+        */
+       rcu_read_lock();
+ascend:
+       if (this_parent != parent) {
+               struct dentry *child = this_parent;
+               this_parent = child->d_parent;
+
+               spin_unlock(&child->d_lock);
+               spin_lock(&this_parent->d_lock);
+
+               /* go into the first sibling still alive */
+               do {
+                       next = child->d_child.next;
+                       if (next == &this_parent->d_subdirs)
+                               goto ascend;
+                       child = list_entry(next, struct dentry, d_child);
+               } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
+               rcu_read_unlock();
+               goto resume;
+       }
+       rcu_read_unlock();
+       spin_unlock(&this_parent->d_lock);
+       return;
+}
+
 static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
 {
        substring_t args[MAX_OPT_ARGS];
@@ -193,6 +264,7 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
                        if (!gid_valid(gid))
                                return -EINVAL;
                        opts->gid = gid;
+                       set_gid(tracefs_mount->mnt_root, gid);
                        break;
                case Opt_mode:
                        if (match_octal(&args[0], &option))
@@ -414,6 +486,8 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode,
        inode->i_mode = mode;
        inode->i_fop = fops ? fops : &tracefs_file_operations;
        inode->i_private = data;
+       inode->i_uid = d_inode(dentry->d_parent)->i_uid;
+       inode->i_gid = d_inode(dentry->d_parent)->i_gid;
        d_instantiate(dentry, inode);
        fsnotify_create(dentry->d_parent->d_inode, dentry);
        return end_creating(dentry);
@@ -436,6 +510,8 @@ static struct dentry *__create_dir(const char *name, struct dentry *parent,
        inode->i_mode = S_IFDIR | S_IRWXU | S_IRUSR| S_IRGRP | S_IXUSR | S_IXGRP;
        inode->i_op = ops;
        inode->i_fop = &simple_dir_operations;
+       inode->i_uid = d_inode(dentry->d_parent)->i_uid;
+       inode->i_gid = d_inode(dentry->d_parent)->i_gid;
 
        /* directory inodes start off with i_nlink == 2 (for "." entry) */
        inc_nlink(inode);
index 64b9bf33480659fcbaa0a5c8c5ad259c9a44cd55..6771f357ad2cce9738c4bdbc9720763e1a3f5025 100644 (file)
@@ -3122,7 +3122,6 @@ xfs_rename(
         * appropriately.
         */
        if (flags & RENAME_WHITEOUT) {
-               ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
                error = xfs_rename_alloc_whiteout(mnt_userns, target_dp, &wip);
                if (error)
                        return error;
index e21459f9923a8a8ef47158e1099e8f772abb35dc..778b57b1f020f051aef1806e4bcf4c5ff7b9d7b4 100644 (file)
@@ -1765,7 +1765,10 @@ static int
 xfs_remount_ro(
        struct xfs_mount        *mp)
 {
-       int error;
+       struct xfs_icwalk       icw = {
+               .icw_flags      = XFS_ICWALK_FLAG_SYNC,
+       };
+       int                     error;
 
        /*
         * Cancel background eofb scanning so it cannot race with the final
@@ -1773,8 +1776,13 @@ xfs_remount_ro(
         */
        xfs_blockgc_stop(mp);
 
-       /* Get rid of any leftover CoW reservations... */
-       error = xfs_blockgc_free_space(mp, NULL);
+       /*
+        * Clear out all remaining COW staging extents and speculative post-EOF
+        * preallocations so that we don't leave inodes requiring inactivation
+        * cleanups during reclaim on a read-only mount.  We must process every
+        * cached inode, so this requires a synchronous cache scan.
+        */
+       error = xfs_blockgc_free_space(mp, &icw);
        if (error) {
                xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
                return error;
index 259ee2bda4926478f0feb3105d2404875ce5bbc5..b76dfb310ab650c24adfb2b6f3f329928e61a1fc 100644 (file)
@@ -1787,5 +1787,6 @@ static void __exit zonefs_exit(void)
 MODULE_AUTHOR("Damien Le Moal");
 MODULE_DESCRIPTION("Zone file system for zoned block devices");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_FS("zonefs");
 module_init(zonefs_init);
 module_exit(zonefs_exit);
index 480f9207a4c6bcfdb065983281c2bc5e2b6071e0..88f21780447b0eada7365691fc11d065e685d91e 100644 (file)
@@ -505,6 +505,7 @@ extern int unregister_acpi_notifier(struct notifier_block *);
  */
 
 int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device);
+struct acpi_device *acpi_fetch_acpi_dev(acpi_handle handle);
 acpi_status acpi_bus_get_status_handle(acpi_handle handle,
                                       unsigned long long *sta);
 int acpi_bus_get_status(struct acpi_device *device);
@@ -621,6 +622,22 @@ static inline bool acpi_device_always_present(struct acpi_device *adev)
 }
 #endif
 
+#if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
+bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev);
+int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip);
+#else
+static inline bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev)
+{
+       return false;
+}
+static inline int
+acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip)
+{
+       *skip = false;
+       return 0;
+}
+#endif
+
 #ifdef CONFIG_PM
 void acpi_pm_wakeup_event(struct device *dev);
 acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev,
index 68e4d80c1b3266752f96a4510ad9d5fefe605f32..b5f594754a9e4f86da8d1e3208cf414707cee39f 100644 (file)
@@ -3,7 +3,6 @@
 #define __ACPI_NUMA_H
 
 #ifdef CONFIG_ACPI_NUMA
-#include <linux/kernel.h>
 #include <linux/numa.h>
 
 /* Proximity bitmap length */
index 73ba139143217dda4e4283d09481479a28275f27..7417731472b7a1ea8d7cacb4d618e41338674e08 100644 (file)
@@ -12,7 +12,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20210930
+#define ACPI_CA_VERSION                 0x20211217
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
@@ -454,9 +454,11 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
  * ACPI table load/unload interfaces
  */
 ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION
-                           acpi_install_table(acpi_physical_address address,
-                                              u8 physical))
+                           acpi_install_table(struct acpi_table_header *table))
 
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION
+                           acpi_install_physical_table(acpi_physical_address
+                                                       address))
 ACPI_EXTERNAL_RETURN_STATUS(acpi_status
                            acpi_load_table(struct acpi_table_header *table,
                                            u32 *table_idx))
index 71ca090fd61b695446661b316ff432d30fd0d036..16847c8d9d5f37357011f65ce31f82e7bf937da7 100644 (file)
@@ -24,6 +24,7 @@
  * file. Useful because they make it more difficult to inadvertently type in
  * the wrong signature.
  */
+#define ACPI_SIG_AGDI           "AGDI" /* Arm Generic Diagnostic Dump and Reset Device Interface */
 #define ACPI_SIG_BDAT           "BDAT" /* BIOS Data ACPI Table */
 #define ACPI_SIG_IORT           "IORT" /* IO Remapping Table */
 #define ACPI_SIG_IVRS           "IVRS" /* I/O Virtualization Reporting Structure */
@@ -48,6 +49,7 @@
 #define ACPI_SIG_SDEI           "SDEI" /* Software Delegated Exception Interface Table */
 #define ACPI_SIG_SDEV           "SDEV" /* Secure Devices table */
 #define ACPI_SIG_SVKL           "SVKL" /* Storage Volume Key Location Table */
+#define ACPI_SIG_TDEL           "TDEL" /* TD Event Log Table */
 
 /*
  * All tables must be byte-packed to match the ACPI specification, since
@@ -154,7 +156,7 @@ typedef struct acpi_aest_processor_tlb {
 /* 2R: Processor Generic Resource Substructure */
 
 typedef struct acpi_aest_processor_generic {
-       u8 *resource;
+       u32 resource;
 
 } acpi_aest_processor_generic;
 
@@ -237,6 +239,25 @@ typedef struct acpi_aest_node_interrupt {
 #define ACPI_AEST_NODE_ERROR_RECOVERY       1
 #define ACPI_AEST_XRUPT_RESERVED            2  /* 2 and above are reserved */
 
+/*******************************************************************************
+ * AGDI - Arm Generic Diagnostic Dump and Reset Device Interface
+ *
+ * Conforms to "ACPI for Arm Components 1.1, Platform Design Document"
+ * ARM DEN0093 v1.1
+ *
+ ******************************************************************************/
+struct acpi_table_agdi {
+       struct acpi_table_header header;        /* Common ACPI table header */
+       u8 flags;
+       u8 reserved[3];
+       u32 sdei_event;
+       u32 gsiv;
+};
+
+/* Mask for Flags field above */
+
+#define ACPI_AGDI_SIGNALING_MODE (1)
+
 /*******************************************************************************
  *
  * BDAT - BIOS Data ACPI Table
@@ -1495,12 +1516,10 @@ struct acpi_nhlt_device_specific_config_a {
 
 /* Values for Config Type above */
 
-#define ACPI_NHLT_TYPE_MIC_ARRAY            0x01
-#define ACPI_NHLT_TYPE_GENERIC              0x00
-
-/* Mask for Extension field of array_type */
-
-#define ACPI_NHLT_ARRAY_TYPE_MASK           0x10
+#define ACPI_NHLT_CONFIG_TYPE_GENERIC              0x00
+#define ACPI_NHLT_CONFIG_TYPE_MIC_ARRAY            0x01
+#define ACPI_NHLT_CONFIG_TYPE_RENDER_FEEDBACK      0x03
+#define ACPI_NHLT_CONFIG_TYPE_RESERVED             0x04        /* 4 and above are reserved */
 
 struct acpi_nhlt_device_specific_config_b {
        u32 capabilities_size;
@@ -1511,6 +1530,11 @@ struct acpi_nhlt_device_specific_config_c {
        u8 virtual_slot;
 };
 
+struct acpi_nhlt_render_device_specific_config {
+       u32 capabilities_size;
+       u8 virtual_slot;
+};
+
 struct acpi_nhlt_wave_extensible {
        u16 format_tag;
        u16 channel_count;
@@ -1573,17 +1597,22 @@ struct acpi_nhlt_mic_device_specific_config {
 
 /* Values for array_type_ext above */
 
-#define SMALL_LINEAR_2ELEMENT               0x0A
-#define BIG_LINEAR_2ELEMENT                 0x0B
-#define FIRST_GEOMETRY_LINEAR_4ELEMENT      0x0C
-#define PLANAR_LSHAPED_4ELEMENT             0x0D
-#define SECOND_GEOMETRY_LINEAR_4ELEMENT     0x0E
-#define VENDOR_DEFINED                      0x0F
-#define ARRAY_TYPE_MASK                     0x0F
-#define ARRAY_TYPE_EXT_MASK                 0x10
+#define ACPI_NHLT_ARRAY_TYPE_RESERVED               0x09       // 9 and below are reserved
+#define ACPI_NHLT_SMALL_LINEAR_2ELEMENT             0x0A
+#define ACPI_NHLT_BIG_LINEAR_2ELEMENT               0x0B
+#define ACPI_NHLT_FIRST_GEOMETRY_LINEAR_4ELEMENT    0x0C
+#define ACPI_NHLT_PLANAR_LSHAPED_4ELEMENT           0x0D
+#define ACPI_NHLT_SECOND_GEOMETRY_LINEAR_4ELEMENT   0x0E
+#define ACPI_NHLT_VENDOR_DEFINED                    0x0F
+#define ACPI_NHLT_ARRAY_TYPE_MASK                   0x0F
+#define ACPI_NHLT_ARRAY_TYPE_EXT_MASK               0x10
+
+#define ACPI_NHLT_NO_EXTENSION                      0x0
+#define ACPI_NHLT_MIC_SNR_SENSITIVITY_EXT           (1<<4)
 
-#define NO_EXTENSION                        0x0
-#define MIC_SNR_SENSITIVITY_EXT             0x1
+struct acpi_nhlt_vendor_mic_count {
+       u8 microphone_count;
+};
 
 struct acpi_nhlt_vendor_mic_config {
        u8 type;
@@ -1603,22 +1632,25 @@ struct acpi_nhlt_vendor_mic_config {
 
 /* Values for Type field above */
 
-#define MIC_OMNIDIRECTIONAL                 0
-#define MIC_SUBCARDIOID                     1
-#define MIC_CARDIOID                        2
-#define MIC_SUPER_CARDIOID                  3
-#define MIC_HYPER_CARDIOID                  4
-#define MIC_8_SHAPED                        5
-#define MIC_VENDOR_DEFINED                  7
+#define ACPI_NHLT_MIC_OMNIDIRECTIONAL       0
+#define ACPI_NHLT_MIC_SUBCARDIOID           1
+#define ACPI_NHLT_MIC_CARDIOID              2
+#define ACPI_NHLT_MIC_SUPER_CARDIOID        3
+#define ACPI_NHLT_MIC_HYPER_CARDIOID        4
+#define ACPI_NHLT_MIC_8_SHAPED              5
+#define ACPI_NHLT_MIC_RESERVED6             6  // 6 is reserved
+#define ACPI_NHLT_MIC_VENDOR_DEFINED        7
+#define ACPI_NHLT_MIC_RESERVED              8  // 8 and above are reserved
 
 /* Values for Panel field above */
 
-#define MIC_TOP                             0
-#define MIC_BOTTOM                          1
-#define MIC_LEFT                            2
-#define MIC_RIGHT                           3
-#define MIC_FRONT                           4
-#define MIC_REAR                            5
+#define ACPI_NHLT_MIC_POSITION_TOP          0
+#define ACPI_NHLT_MIC_POSITION_BOTTOM       1
+#define ACPI_NHLT_MIC_POSITION_LEFT         2
+#define ACPI_NHLT_MIC_POSITION_RIGHT        3
+#define ACPI_NHLT_MIC_POSITION_FRONT        4
+#define ACPI_NHLT_MIC_POSITION_BACK         5
+#define ACPI_NHLT_MIC_POSITION_RESERVED     6  // 6 and above are reserved
 
 struct acpi_nhlt_vendor_mic_device_specific_config {
        struct acpi_nhlt_mic_device_specific_config mic_array_device_config;
@@ -1633,8 +1665,9 @@ struct acpi_nhlt_mic_snr_sensitivity_extension {
        u32 sensitivity;
 };
 
+/* Render device with feedback */
+
 struct acpi_nhlt_render_feedback_device_specific_config {
-       struct acpi_nhlt_device_specific_config device_config;
        u8 feedback_virtual_slot;       // render slot in case of capture
        u16 feedback_channels;  // informative only
        u16 feedback_valid_bits_per_sample;
@@ -1650,7 +1683,10 @@ struct acpi_nhlt_linux_specific_data {
        u8 device_id[16];
        u8 device_instance_id;
        u8 device_port_id;
-       u8 filler[18];
+};
+
+struct acpi_nhlt_linux_specific_data_b {
+       u8 specific_data[18];
 };
 
 struct acpi_nhlt_table_terminator {
@@ -2455,6 +2491,22 @@ enum acpi_svkl_format {
        ACPI_SVKL_FORMAT_RESERVED = 1   /* 1 and greater are reserved */
 };
 
+/*******************************************************************************
+ *
+ * TDEL - TD-Event Log
+ *        From: "Guest-Host-Communication Interface (GHCI) for Intel
+ *        Trust Domain Extensions (Intel TDX)".
+ *        September 2020
+ *
+ ******************************************************************************/
+
+struct acpi_table_tdel {
+       struct acpi_table_header header;        /* Common ACPI table header */
+       u32 reserved;
+       u64 log_area_minimum_length;
+       u64 log_area_start_address;
+};
+
 /* Reset to default packing */
 
 #pragma pack()
index ff8b3c913f21779bfbe8aa1b348161dff9266668..69e89d572b9e3077f9e4d79dcfdcd05fa09a522d 100644 (file)
@@ -509,7 +509,6 @@ typedef u64 acpi_integer;
 #define ACPI_TO_POINTER(i)              ACPI_CAST_PTR (void, (acpi_size) (i))
 #define ACPI_TO_INTEGER(p)              ACPI_PTR_DIFF (p, (void *) 0)
 #define ACPI_OFFSET(d, f)               ACPI_PTR_DIFF (&(((d *) 0)->f), (void *) 0)
-#define ACPI_PHYSADDR_TO_PTR(i)         ACPI_TO_POINTER(i)
 #define ACPI_PTR_TO_PHYSADDR(i)         ACPI_TO_INTEGER(i)
 
 /* Optimizations for 4-character (32-bit) acpi_name manipulation */
@@ -536,8 +535,14 @@ typedef u64 acpi_integer;
  * Can be used with access_width of struct acpi_generic_address and access_size of
  * struct acpi_resource_generic_register.
  */
-#define ACPI_ACCESS_BIT_WIDTH(size)     (1 << ((size) + 2))
-#define ACPI_ACCESS_BYTE_WIDTH(size)    (1 << ((size) - 1))
+#define ACPI_ACCESS_BIT_SHIFT          2
+#define ACPI_ACCESS_BYTE_SHIFT         -1
+#define ACPI_ACCESS_BIT_MAX            (31 - ACPI_ACCESS_BIT_SHIFT)
+#define ACPI_ACCESS_BYTE_MAX           (31 - ACPI_ACCESS_BYTE_SHIFT)
+#define ACPI_ACCESS_BIT_DEFAULT                (8 - ACPI_ACCESS_BIT_SHIFT)
+#define ACPI_ACCESS_BYTE_DEFAULT       (8 - ACPI_ACCESS_BYTE_SHIFT)
+#define ACPI_ACCESS_BIT_WIDTH(size)    (1 << ((size) + ACPI_ACCESS_BIT_SHIFT))
+#define ACPI_ACCESS_BYTE_WIDTH(size)   (1 << ((size) + ACPI_ACCESS_BYTE_SHIFT))
 
 /*******************************************************************************
  *
@@ -1098,6 +1103,14 @@ struct acpi_connection_info {
        u8 access_length;
 };
 
+/* Special Context data for PCC Opregion (ACPI 6.3) */
+
+struct acpi_pcc_info {
+       u8 subspace_id;
+       u16 length;
+       u8 *internal_buffer;
+};
+
 typedef
 acpi_status (*acpi_adr_space_setup) (acpi_handle region_handle,
                                     u32 function,
@@ -1215,6 +1228,10 @@ struct acpi_mem_space_context {
        struct acpi_mem_mapping *first_mm;
 };
 
+struct acpi_data_table_space_context {
+       void *pointer;
+};
+
 /*
  * struct acpi_memory_list is used only if the ACPICA local cache is enabled
  */
index 683e124ad517d0ede959931935ad4ffee85b51fb..1940273719285e2b63e2abc9c78d4210fa449a8c 100644 (file)
@@ -2,11 +2,16 @@
 #ifndef __ACPI_PROCESSOR_H
 #define __ACPI_PROCESSOR_H
 
-#include <linux/kernel.h>
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/pm_qos.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
 #include <linux/thermal.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
 #include <asm/acpi.h>
 
 #define ACPI_PROCESSOR_CLASS           "processor"
index b28f8790192a22aba226ada8f6f96a8b12b30c12..93eaba2485e38338bba6b33214a50e2c0da4b6f7 100644 (file)
@@ -1389,6 +1389,12 @@ static inline int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
 }
 #endif
 
+#ifdef CONFIG_ACPI_PCC
+void acpi_init_pcc(void);
+#else
+static inline void acpi_init_pcc(void) { }
+#endif
+
 #ifdef CONFIG_ACPI
 extern void acpi_device_notify(struct device *dev);
 extern void acpi_device_notify_remove(struct device *dev);
index e7a163a3146b622e4890648432106b021cfc27b1..755f38e893be1b141e932403fc12d6ff93086795 100644 (file)
@@ -732,6 +732,7 @@ int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
 struct bpf_trampoline *bpf_trampoline_get(u64 key,
                                          struct bpf_attach_target_info *tgt_info);
 void bpf_trampoline_put(struct bpf_trampoline *tr);
+int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs);
 #define BPF_DISPATCHER_INIT(_name) {                           \
        .mutex = __MUTEX_INITIALIZER(_name.mutex),              \
        .func = &_name##_func,                                  \
@@ -1352,28 +1353,16 @@ extern struct mutex bpf_stats_enabled_mutex;
  * kprobes, tracepoints) to prevent deadlocks on map operations as any of
  * these events can happen inside a region which holds a map bucket lock
  * and can deadlock on it.
- *
- * Use the preemption safe inc/dec variants on RT because migrate disable
- * is preemptible on RT and preemption in the middle of the RMW operation
- * might lead to inconsistent state. Use the raw variants for non RT
- * kernels as migrate_disable() maps to preempt_disable() so the slightly
- * more expensive save operation can be avoided.
  */
 static inline void bpf_disable_instrumentation(void)
 {
        migrate_disable();
-       if (IS_ENABLED(CONFIG_PREEMPT_RT))
-               this_cpu_inc(bpf_prog_active);
-       else
-               __this_cpu_inc(bpf_prog_active);
+       this_cpu_inc(bpf_prog_active);
 }
 
 static inline void bpf_enable_instrumentation(void)
 {
-       if (IS_ENABLED(CONFIG_PREEMPT_RT))
-               this_cpu_dec(bpf_prog_active);
-       else
-               __this_cpu_dec(bpf_prog_active);
+       this_cpu_dec(bpf_prog_active);
        migrate_enable();
 }
 
index 203eef993d763fb92e6b870ec5a8f3e76d169485..0e1b6281fd8f6af0b9358e26bc814c83fc392921 100644 (file)
@@ -245,7 +245,10 @@ struct kfunc_btf_id_set {
        struct module *owner;
 };
 
-struct kfunc_btf_id_list;
+struct kfunc_btf_id_list {
+       struct list_head list;
+       struct mutex mutex;
+};
 
 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
 void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
@@ -254,6 +257,9 @@ void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
                                 struct kfunc_btf_id_set *s);
 bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id,
                              struct module *owner);
+
+extern struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list;
+extern struct kfunc_btf_id_list prog_test_kfunc_list;
 #else
 static inline void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
                                             struct kfunc_btf_id_set *s)
@@ -268,13 +274,13 @@ static inline bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist,
 {
        return false;
 }
+
+static struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list __maybe_unused;
+static struct kfunc_btf_id_list prog_test_kfunc_list __maybe_unused;
 #endif
 
 #define DEFINE_KFUNC_BTF_ID_SET(set, name)                                     \
        struct kfunc_btf_id_set name = { LIST_HEAD_INIT(name.list), (set),     \
                                         THIS_MODULE }
 
-extern struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list;
-extern struct kfunc_btf_id_list prog_test_kfunc_list;
-
 #endif
index 2f909ed084c63e5e2dc429306eade714b93ba868..4ff37cb763ae2a243385ac0580e07574ff7546a8 100644 (file)
@@ -3,7 +3,6 @@
 #define _LINUX_CACHEINFO_H
 
 #include <linux/bitops.h>
-#include <linux/cpu.h>
 #include <linux/cpumask.h>
 #include <linux/smp.h>
 
index 3d5af56337bdb5ea0c88dca48a45e7524bf86420..429dcebe2b9922a3153bc77d257d03b544926c80 100644 (file)
@@ -121,7 +121,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
        asm volatile(__stringify_label(c) ":\n\t"                       \
                     ".pushsection .discard.reachable\n\t"              \
                     ".long " __stringify_label(c) "b - .\n\t"          \
-                    ".popsection\n\t");                                \
+                    ".popsection\n\t" : : "i" (c));                    \
 })
 #define annotate_reachable() __annotate_reachable(__COUNTER__)
 
@@ -129,7 +129,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
        asm volatile(__stringify_label(c) ":\n\t"                       \
                     ".pushsection .discard.unreachable\n\t"            \
                     ".long " __stringify_label(c) "b - .\n\t"          \
-                    ".popsection\n\t");                                \
+                    ".popsection\n\t" : : "i" (c));                    \
 })
 #define annotate_unreachable() __annotate_unreachable(__COUNTER__)
 
index 8eacf67eb212e26fe71da59777df190d48929d6d..039e7e0c7378d68ca4ed278a2927668571ce11ac 100644 (file)
@@ -20,6 +20,7 @@
  */
 
 #include <linux/math.h>
+#include <linux/sched.h>
 
 extern unsigned long loops_per_jiffy;
 
@@ -58,7 +59,18 @@ void calibrate_delay(void);
 void __attribute__((weak)) calibration_delay_done(void);
 void msleep(unsigned int msecs);
 unsigned long msleep_interruptible(unsigned int msecs);
-void usleep_range(unsigned long min, unsigned long max);
+void usleep_range_state(unsigned long min, unsigned long max,
+                       unsigned int state);
+
+static inline void usleep_range(unsigned long min, unsigned long max)
+{
+       usleep_range_state(min, max, TASK_UNINTERRUPTIBLE);
+}
+
+static inline void usleep_idle_range(unsigned long min, unsigned long max)
+{
+       usleep_range_state(min, max, TASK_IDLE);
+}
 
 static inline void ssleep(unsigned int seconds)
 {
index a498ebcf49933d309fff25b724d1a9244b807b3d..15e7c5e15d629545a757752bf61ffd26c8f24f4b 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/klist.h>
 #include <linux/pm.h>
 #include <linux/device/bus.h>
+#include <linux/module.h>
 
 /**
  * enum probe_type - device driver probe type to try
index 24b7ed2677afd26b0ffd1ce80f34469adc7129b0..7f1e88e3e2b5445ac4c3d5c3c8bab6d4d6f15b04 100644 (file)
@@ -6,6 +6,7 @@
 #define __LINUX_FILTER_H__
 
 #include <linux/atomic.h>
+#include <linux/bpf.h>
 #include <linux/refcount.h>
 #include <linux/compat.h>
 #include <linux/skbuff.h>
@@ -26,7 +27,6 @@
 
 #include <asm/byteorder.h>
 #include <uapi/linux/filter.h>
-#include <uapi/linux/bpf.h>
 
 struct sk_buff;
 struct sock;
@@ -640,9 +640,6 @@ static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void
  * This uses migrate_disable/enable() explicitly to document that the
  * invocation of a BPF program does not require reentrancy protection
  * against a BPF program which is invoked from a preempting task.
- *
- * For non RT enabled kernels migrate_disable/enable() maps to
- * preempt_disable/enable(), i.e. it disables also preemption.
  */
 static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
                                          const void *ctx)
index b976c4177299523c6f35cea9b236aba146e70d64..8fcc38467af6e51f5af4b36685dc1a358dc39b70 100644 (file)
@@ -624,7 +624,7 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask);
 
 void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __alloc_size(1);
 void free_pages_exact(void *virt, size_t size);
-__meminit void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __alloc_size(1);
+__meminit void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2);
 
 #define __get_free_page(gfp_mask) \
                __get_free_pages((gfp_mask), 0)
index 9e067f937dbc24a82b1126542a2437078b0101c9..f453be385bd47f16a311b1df46e78df63a903022 100644 (file)
@@ -840,6 +840,11 @@ static inline bool hid_is_using_ll_driver(struct hid_device *hdev,
        return hdev->ll_driver == driver;
 }
 
+static inline bool hid_is_usb(struct hid_device *hdev)
+{
+       return hid_is_using_ll_driver(hdev, &usb_hid_driver);
+}
+
 #define        PM_HINT_FULLON  1<<5
 #define PM_HINT_NORMAL 1<<1
 
index fa2cd8c63dcc985cd6755b52700175346e70f346..24359b4a960537805898618a28e99b6ee1db44a5 100644 (file)
@@ -11,7 +11,7 @@
        asm volatile(__stringify(c) ": nop\n\t"                         \
                     ".pushsection .discard.instr_begin\n\t"            \
                     ".long " __stringify(c) "b - .\n\t"                \
-                    ".popsection\n\t");                                \
+                    ".popsection\n\t" : : "i" (c));                    \
 })
 #define instrumentation_begin() __instrumentation_begin(__COUNTER__)
 
@@ -50,7 +50,7 @@
        asm volatile(__stringify(c) ": nop\n\t"                         \
                     ".pushsection .discard.instr_end\n\t"              \
                     ".long " __stringify(c) "b - .\n\t"                \
-                    ".popsection\n\t");                                \
+                    ".popsection\n\t" : : "i" (c));                    \
 })
 #define instrumentation_end() __instrumentation_end(__COUNTER__)
 #else
index e974caf39d3e3bfa7d09b577b488650edc5ceebf..8c8f7a4d93afb96518c18da10c7e8f5bee62ef2d 100644 (file)
@@ -153,6 +153,8 @@ struct kretprobe {
        struct kretprobe_holder *rph;
 };
 
+#define KRETPROBE_MAX_DATA_SIZE        4096
+
 struct kretprobe_instance {
        union {
                struct freelist_node freelist;
index 8adcf1fa8096f0acbc09f6f5958f94036aff14b6..9dc7cb239d21c2ae7f5e3053c2ece77c89e69f49 100644 (file)
@@ -405,8 +405,8 @@ phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
                                      phys_addr_t end, int nid, bool exact_nid);
 phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
 
-static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
-                                             phys_addr_t align)
+static __always_inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
+                                                      phys_addr_t align)
 {
        return memblock_phys_alloc_range(size, align, 0,
                                         MEMBLOCK_ALLOC_ACCESSIBLE);
index 7239858790353a8cd03cac2a4e397166e88b2b31..a5cc4cdf9cc86fe76bec75022082b0b57f29501d 100644 (file)
@@ -663,6 +663,19 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);
  */
 int mhi_pm_resume(struct mhi_controller *mhi_cntrl);
 
+/**
+ * mhi_pm_resume_force - Force resume MHI from suspended state
+ * @mhi_cntrl: MHI controller
+ *
+ * Resume the device irrespective of its MHI state. As per the MHI spec, devices
+ * have to be in M3 state during resume. But some devices seem to be in a
+ * different MHI state other than M3 but they continue working fine if allowed.
+ * This API is intended to be used for such devices.
+ *
+ * Return: 0 if the resume succeeds, a negative error code otherwise
+ */
+int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl);
+
 /**
  * mhi_download_rddm_image - Download ramdump image from device for
  *                           debugging purpose.
index 3636df90899a2431ecbf0acd66789a851db5a164..fbaab440a4846ecfab2ecaf7ae58f05a4dc974c3 100644 (file)
@@ -9698,7 +9698,10 @@ struct mlx5_ifc_mcam_access_reg_bits {
        u8         regs_84_to_68[0x11];
        u8         tracer_registers[0x4];
 
-       u8         regs_63_to_32[0x20];
+       u8         regs_63_to_46[0x12];
+       u8         mrtc[0x1];
+       u8         regs_44_to_32[0xd];
+
        u8         regs_31_to_0[0x20];
 };
 
index 3ec42495a43a56dbd51fecd166d572a9e586e3e4..6aadcc0ecb5b05e82bffcd6e9c9e51f451ffd053 100644 (file)
@@ -1937,7 +1937,7 @@ enum netdev_ml_priv_type {
  *     @udp_tunnel_nic:        UDP tunnel offload state
  *     @xdp_state:             stores info on attached XDP BPF programs
  *
- *     @nested_level:  Used as as a parameter of spin_lock_nested() of
+ *     @nested_level:  Used as a parameter of spin_lock_nested() of
  *                     dev->addr_list_lock.
  *     @unlink_list:   As netif_addr_lock() can be called recursively,
  *                     keep a list of interfaces to be deleted.
@@ -4404,7 +4404,8 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 {
        spin_lock(&txq->_xmit_lock);
-       txq->xmit_lock_owner = cpu;
+       /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+       WRITE_ONCE(txq->xmit_lock_owner, cpu);
 }
 
 static inline bool __netif_tx_acquire(struct netdev_queue *txq)
@@ -4421,26 +4422,32 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
 {
        spin_lock_bh(&txq->_xmit_lock);
-       txq->xmit_lock_owner = smp_processor_id();
+       /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+       WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
 }
 
 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
 {
        bool ok = spin_trylock(&txq->_xmit_lock);
-       if (likely(ok))
-               txq->xmit_lock_owner = smp_processor_id();
+
+       if (likely(ok)) {
+               /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+               WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+       }
        return ok;
 }
 
 static inline void __netif_tx_unlock(struct netdev_queue *txq)
 {
-       txq->xmit_lock_owner = -1;
+       /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+       WRITE_ONCE(txq->xmit_lock_owner, -1);
        spin_unlock(&txq->_xmit_lock);
 }
 
 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
 {
-       txq->xmit_lock_owner = -1;
+       /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+       WRITE_ONCE(txq->xmit_lock_owner, -1);
        spin_unlock_bh(&txq->_xmit_lock);
 }
 
index 60524645230514eaa4315a8d152dd102cc1177c7..d150a9082b31c0c485269fd6eba0d7ec003df553 100644 (file)
@@ -285,7 +285,6 @@ static inline struct inode *folio_inode(struct folio *folio)
 
 static inline bool page_cache_add_speculative(struct page *page, int count)
 {
-       VM_BUG_ON_PAGE(PageTail(page), page);
        return folio_ref_try_add_rcu((struct folio *)page, count);
 }
 
index b31d3f3312ce594c08e28dabf86efc207af67352..d73a1c08c3e3c030abee511294645d460ddb0570 100644 (file)
@@ -51,9 +51,9 @@
 #define _LINUX_PERCPU_REFCOUNT_H
 
 #include <linux/atomic.h>
-#include <linux/kernel.h>
 #include <linux/percpu.h>
 #include <linux/rcupdate.h>
+#include <linux/types.h>
 #include <linux/gfp.h>
 
 struct percpu_ref;
index 96e43fbb2dd89f151dcc68eed2fe88a7966d0d0c..cbf03a5f9cf5199fa47d674aacea34a7df8e632b 100644 (file)
@@ -538,11 +538,12 @@ struct macsec_ops;
  * @mac_managed_pm: Set true if MAC driver takes of suspending/resuming PHY
  * @state: State of the PHY for management purposes
  * @dev_flags: Device-specific flags used by the PHY driver.
- *             Bits [15:0] are free to use by the PHY driver to communicate
- *                         driver specific behavior.
- *             Bits [23:16] are currently reserved for future use.
- *             Bits [31:24] are reserved for defining generic
- *                          PHY driver behavior.
+ *
+ *      - Bits [15:0] are free to use by the PHY driver to communicate
+ *        driver specific behavior.
+ *      - Bits [23:16] are currently reserved for future use.
+ *      - Bits [31:24] are reserved for defining generic
+ *        PHY driver behavior.
  * @irq: IRQ number of the PHY's interrupt (-1 if none)
  * @phy_timer: The timer for handling the state machine
  * @phylink: Pointer to phylink instance for this PHY
index 222da43b7096d27dbc96c3dc53ce34e20348216b..eddd66d426caf934e0244a6501cde8842328574d 100644 (file)
@@ -129,7 +129,7 @@ static inline bool pm_runtime_suspended(struct device *dev)
  * pm_runtime_active - Check whether or not a device is runtime-active.
  * @dev: Target device.
  *
- * Return %true if runtime PM is enabled for @dev and its runtime PM status is
+ * Return %true if runtime PM is disabled for @dev or its runtime PM status is
  * %RPM_ACTIVE, or %false otherwise.
  *
  * Note that the return value of this function can only be trusted if it is
index bd7a73db2e66cd6fcdc0fa1ced8bb949fb95ecfd..54cf566616aec28429b80236435bc6a8ee6fc9a1 100644 (file)
@@ -499,7 +499,8 @@ struct regulator_irq_data {
  *             best to shut-down regulator(s) or reboot the SOC if error
  *             handling is repeatedly failing. If fatal_cnt is given the IRQ
  *             handling is aborted if it fails for fatal_cnt times and die()
- *             callback (if populated) or BUG() is called to try to prevent
+ *             callback (if populated) is called. If die() is not populated
+ *             poweroff for the system is attempted in order to prevent any
  *             further damage.
  * @reread_ms: The time which is waited before attempting to re-read status
  *             at the worker if IC reading fails. Immediate re-read is done
@@ -516,11 +517,12 @@ struct regulator_irq_data {
  * @data:      Driver private data pointer which will be passed as such to
  *             the renable, map_event and die callbacks in regulator_irq_data.
  * @die:       Protection callback. If IC status reading or recovery actions
- *             fail fatal_cnt times this callback or BUG() is called. This
- *             callback should implement a final protection attempt like
- *             disabling the regulator. If protection succeeded this may
- *             return 0. If anything else is returned the core assumes final
- *             protection failed and calls BUG() as a last resort.
+ *             fail fatal_cnt times this callback is called or system is
+ *             powered off. This callback should implement a final protection
+ *             attempt like disabling the regulator. If protection succeeded
+ *             die() may return 0. If anything else is returned the core
+ *             assumes final protection failed and attempts to perform a
+ *             poweroff as a last resort.
  * @map_event: Driver callback to map IRQ status into regulator devices with
  *             events / errors. NOTE: callback MUST initialize both the
  *             errors and notifs for all rdevs which it signals having
index 6c9f19a33865ab857de84c576718ae95998d7be0..ce3c58286062c4d33e88e9fc1830621625a0bc4d 100644 (file)
 #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-extern void task_cputime(struct task_struct *t,
+extern bool task_cputime(struct task_struct *t,
                         u64 *utime, u64 *stime);
 extern u64 task_gtime(struct task_struct *t);
 #else
-static inline void task_cputime(struct task_struct *t,
+static inline bool task_cputime(struct task_struct *t,
                                u64 *utime, u64 *stime)
 {
        *utime = t->utime;
        *stime = t->stime;
+       return false;
 }
 
 static inline u64 task_gtime(struct task_struct *t)
index bf21591a9e5e653585c26cb3f3f0857256c0eb89..0cda61855d90719e6175d0325598bbbe1b79254b 100644 (file)
@@ -27,9 +27,7 @@ static inline bool siphash_key_is_zero(const siphash_key_t *key)
 }
 
 u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
-#endif
 
 u64 siphash_1u64(const u64 a, const siphash_key_t *key);
 u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
@@ -82,10 +80,9 @@ static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
 static inline u64 siphash(const void *data, size_t len,
                          const siphash_key_t *key)
 {
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-       if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
+       if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+           !IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
                return __siphash_unaligned(data, len, key);
-#endif
        return ___siphash_aligned(data, len, key);
 }
 
@@ -96,10 +93,8 @@ typedef struct {
 
 u32 __hsiphash_aligned(const void *data, size_t len,
                       const hsiphash_key_t *key);
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u32 __hsiphash_unaligned(const void *data, size_t len,
                         const hsiphash_key_t *key);
-#endif
 
 u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
 u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
@@ -135,10 +130,9 @@ static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
 static inline u32 hsiphash(const void *data, size_t len,
                           const hsiphash_key_t *key)
 {
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-       if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
+       if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+           !IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
                return __hsiphash_unaligned(data, len, key);
-#endif
        return ___hsiphash_aligned(data, len, key);
 }
 
index c8cb7e697d479a7649eb0277024d4f89c4dcb548..4507d77d6941f2f0182bb8245890e14a6e5e199e 100644 (file)
@@ -286,6 +286,7 @@ struct nf_bridge_info {
 struct tc_skb_ext {
        __u32 chain;
        __u16 mru;
+       __u16 zone;
        bool post_ct;
 };
 #endif
@@ -1380,7 +1381,7 @@ skb_flow_dissect_ct(const struct sk_buff *skb,
                    struct flow_dissector *flow_dissector,
                    void *target_container,
                    u16 *ctinfo_map, size_t mapsize,
-                   bool post_ct);
+                   bool post_ct, u16 zone);
 void
 skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
                             struct flow_dissector *flow_dissector,
index a1f03461369bd9003e4a570521bc4b062a56d041..cf5999626e28d9df3d5f6508f84550995590f7eb 100644 (file)
@@ -195,7 +195,7 @@ int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
  * @offset:    offset of buffer in user space
  * @pages:     locked pages from userspace
  * @num_pages: number of locked pages
- * @dmabuf:    dmabuf used to for exporting to user space
+ * @refcount:  reference counter
  * @flags:     defined by TEE_SHM_* in tee_drv.h
  * @id:                unique id of a shared memory object on this device, shared
  *             with user space
@@ -214,7 +214,7 @@ struct tee_shm {
        unsigned int offset;
        struct page **pages;
        size_t num_pages;
-       struct dma_buf *dmabuf;
+       refcount_t refcount;
        u32 flags;
        int id;
        u64 sec_world_id;
index 04e87f4b9417c9fde533e2125d4eb36307a06562..a960de68ac69ee12939a95729e36513312e41a68 100644 (file)
@@ -7,9 +7,27 @@
 #include <uapi/linux/udp.h>
 #include <uapi/linux/virtio_net.h>
 
+static inline bool virtio_net_hdr_match_proto(__be16 protocol, __u8 gso_type)
+{
+       switch (gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+       case VIRTIO_NET_HDR_GSO_TCPV4:
+               return protocol == cpu_to_be16(ETH_P_IP);
+       case VIRTIO_NET_HDR_GSO_TCPV6:
+               return protocol == cpu_to_be16(ETH_P_IPV6);
+       case VIRTIO_NET_HDR_GSO_UDP:
+               return protocol == cpu_to_be16(ETH_P_IP) ||
+                      protocol == cpu_to_be16(ETH_P_IPV6);
+       default:
+               return false;
+       }
+}
+
 static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
                                           const struct virtio_net_hdr *hdr)
 {
+       if (skb->protocol)
+               return 0;
+
        switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
        case VIRTIO_NET_HDR_GSO_TCPV4:
        case VIRTIO_NET_HDR_GSO_UDP:
@@ -88,9 +106,12 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
                        if (!skb->protocol) {
                                __be16 protocol = dev_parse_header_protocol(skb);
 
-                               virtio_net_hdr_set_proto(skb, hdr);
-                               if (protocol && protocol != skb->protocol)
+                               if (!protocol)
+                                       virtio_net_hdr_set_proto(skb, hdr);
+                               else if (!virtio_net_hdr_match_proto(protocol, hdr->gso_type))
                                        return -EINVAL;
+                               else
+                                       skb->protocol = protocol;
                        }
 retry:
                        if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
index 2d0df57c99024cb267618c4ddb7440bb63268dfa..851e07da2583fb231e647db74fbc6ce07cb6e2f0 100644 (file)
@@ -217,6 +217,7 @@ void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void
 void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
 void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
 void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
+void __wake_up_pollfree(struct wait_queue_head *wq_head);
 
 #define wake_up(x)                     __wake_up(x, TASK_NORMAL, 1, NULL)
 #define wake_up_nr(x, nr)              __wake_up(x, TASK_NORMAL, nr, NULL)
@@ -245,6 +246,31 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
 #define wake_up_interruptible_sync_poll_locked(x, m)                           \
        __wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
 
+/**
+ * wake_up_pollfree - signal that a polled waitqueue is going away
+ * @wq_head: the wait queue head
+ *
+ * In the very rare cases where a ->poll() implementation uses a waitqueue whose
+ * lifetime is tied to a task rather than to the 'struct file' being polled,
+ * this function must be called before the waitqueue is freed so that
+ * non-blocking polls (e.g. epoll) are notified that the queue is going away.
+ *
+ * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
+ * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
+ */
+static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
+{
+       /*
+        * For performance reasons, we don't always take the queue lock here.
+        * Therefore, we might race with someone removing the last entry from
+        * the queue, and proceed while they still hold the queue lock.
+        * However, rcu_read_lock() is required to be held in such cases, so we
+        * can safely proceed with an RCU-delayed free.
+        */
+       if (waitqueue_active(wq_head))
+               __wake_up_pollfree(wq_head);
+}
+
 #define ___wait_cond_timeout(condition)                                                \
 ({                                                                             \
        bool __cond = (condition);                                              \
index f6af76c87a6c3856e92ac438c6c93c4c6ab9eec2..191c36afa1f4aa543f9b3b57d35630f0e71c2584 100644 (file)
@@ -126,7 +126,7 @@ struct tlb_slave_info {
 struct alb_bond_info {
        struct tlb_client_info  *tx_hashtbl; /* Dynamically allocated */
        u32                     unbalanced_load;
-       int                     tx_rebalance_counter;
+       atomic_t                tx_rebalance_counter;
        int                     lp_counter;
        /* -------- rlb parameters -------- */
        int rlb_enabled;
index 4202c609bb0b09345c0f1c5105adf409a3a89f74..c4898fcbf923bf01f14c6bcc694eb036d75d7195 100644 (file)
@@ -132,6 +132,19 @@ static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
 #ifdef CONFIG_NET_RX_BUSY_POLL
        if (unlikely(READ_ONCE(sk->sk_napi_id) != skb->napi_id))
                WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
+#endif
+       sk_rx_queue_update(sk, skb);
+}
+
+/* Variant of sk_mark_napi_id() for passive flow setup,
+ * as sk->sk_napi_id and sk->sk_rx_queue_mapping content
+ * needs to be set.
+ */
+static inline void sk_mark_napi_id_set(struct sock *sk,
+                                      const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
 #endif
        sk_rx_queue_set(sk, skb);
 }
index 67634675e9197cdbd8225e0e4aa1547d8f09f036..df6622a5fe98f0a9732617bb2a757ef9c9611797 100644 (file)
@@ -79,6 +79,17 @@ static inline void dst_cache_reset(struct dst_cache *dst_cache)
        dst_cache->reset_ts = jiffies;
 }
 
+/**
+ *     dst_cache_reset_now - invalidate the cache contents immediately
+ *     @dst_cache: the cache
+ *
+ *     The caller must be sure there are no concurrent users, as this frees
+ *     all dst_cache users immediately, rather than waiting for the next
+ *     per-cpu usage like dst_cache_reset does. Most callers should use the
+ *     higher speed lazily-freed dst_cache_reset function instead.
+ */
+void dst_cache_reset_now(struct dst_cache *dst_cache);
+
 /**
  *     dst_cache_init - initialize the cache, allocating the required storage
  *     @dst_cache: the cache
index 4b10676c69d1917e4c30e086bf8f00b1e0f37ed4..bd07484ab9dd5f9de0321f63393941b521a0b5fa 100644 (file)
@@ -69,7 +69,7 @@ struct fib_rules_ops {
        int                     (*action)(struct fib_rule *,
                                          struct flowi *, int,
                                          struct fib_lookup_arg *);
-       bool                    (*suppress)(struct fib_rule *,
+       bool                    (*suppress)(struct fib_rule *, int,
                                            struct fib_lookup_arg *);
        int                     (*match)(struct fib_rule *,
                                         struct flowi *, int);
@@ -218,7 +218,9 @@ INDIRECT_CALLABLE_DECLARE(int fib4_rule_action(struct fib_rule *rule,
                            struct fib_lookup_arg *arg));
 
 INDIRECT_CALLABLE_DECLARE(bool fib6_rule_suppress(struct fib_rule *rule,
+                                               int flags,
                                                struct fib_lookup_arg *arg));
 INDIRECT_CALLABLE_DECLARE(bool fib4_rule_suppress(struct fib_rule *rule,
+                                               int flags,
                                                struct fib_lookup_arg *arg));
 #endif
index ab5348e57db1a627cbce2dededb2e9b754d1f2cd..3417ba2d27ad6a1b5612a8855d2788f10d9fdf25 100644 (file)
@@ -438,7 +438,7 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
 #ifdef CONFIG_IP_ROUTE_CLASSID
 static inline int fib_num_tclassid_users(struct net *net)
 {
-       return net->ipv4.fib_num_tclassid_users;
+       return atomic_read(&net->ipv4.fib_num_tclassid_users);
 }
 #else
 static inline int fib_num_tclassid_users(struct net *net)
index cc663c68ddc4ba464fcd2ba71da7a9f31da2fdbf..d24b0a34c8f0cd3571893e0c16b10fa391c0511d 100644 (file)
@@ -276,14 +276,14 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
 /* jiffies until ct expires, 0 if already expired */
 static inline unsigned long nf_ct_expires(const struct nf_conn *ct)
 {
-       s32 timeout = ct->timeout - nfct_time_stamp;
+       s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp;
 
        return timeout > 0 ? timeout : 0;
 }
 
 static inline bool nf_ct_is_expired(const struct nf_conn *ct)
 {
-       return (__s32)(ct->timeout - nfct_time_stamp) <= 0;
+       return (__s32)(READ_ONCE(ct->timeout) - nfct_time_stamp) <= 0;
 }
 
 /* use after obtaining a reference count */
@@ -302,7 +302,7 @@ static inline bool nf_ct_should_gc(const struct nf_conn *ct)
 static inline void nf_ct_offload_timeout(struct nf_conn *ct)
 {
        if (nf_ct_expires(ct) < NF_CT_DAY / 2)
-               ct->timeout = nfct_time_stamp + NF_CT_DAY;
+               WRITE_ONCE(ct->timeout, nfct_time_stamp + NF_CT_DAY);
 }
 
 struct kernel_param;
index 2f65701a43c953bd3a9a9e3d491882cb7bb11859..6c5b2efc4f17d0d17be750d0c1a2e1d169ec063e 100644 (file)
@@ -65,7 +65,7 @@ struct netns_ipv4 {
        bool                    fib_has_custom_local_routes;
        bool                    fib_offload_disabled;
 #ifdef CONFIG_IP_ROUTE_CLASSID
-       int                     fib_num_tclassid_users;
+       atomic_t                fib_num_tclassid_users;
 #endif
        struct hlist_head       *fib_table_hash;
        struct sock             *fibnl;
index bf79f3a890af263dcb52056a56bbdf751e28064f..9e71691c491b7a1a00c5fb4ab55cb409704a3f3a 100644 (file)
@@ -193,4 +193,20 @@ static inline void skb_txtime_consumed(struct sk_buff *skb)
        skb->tstamp = ktime_set(0, 0);
 }
 
+struct tc_skb_cb {
+       struct qdisc_skb_cb qdisc_cb;
+
+       u16 mru;
+       bool post_ct;
+       u16 zone; /* Only valid if post_ct = true */
+};
+
+static inline struct tc_skb_cb *tc_skb_cb(const struct sk_buff *skb)
+{
+       struct tc_skb_cb *cb = (struct tc_skb_cb *)skb->cb;
+
+       BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
+       return cb;
+}
+
 #endif
index 22179b2fda72a0a1c2fddc07eeda40716a443a46..c70e6d2b2fdd656d25a7844bd4b030ca25544e91 100644 (file)
@@ -447,8 +447,6 @@ struct qdisc_skb_cb {
        };
 #define QDISC_CB_PRIV_LEN 20
        unsigned char           data[QDISC_CB_PRIV_LEN];
-       u16                     mru;
-       bool                    post_ct;
 };
 
 typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
index b32906e1ab55527b5418f203d3de05853863f166..d47e9658da28545c1f6afd9db0cf136b3e13d7b6 100644 (file)
@@ -431,7 +431,7 @@ struct sock {
 #ifdef CONFIG_XFRM
        struct xfrm_policy __rcu *sk_policy[2];
 #endif
-       struct dst_entry        *sk_rx_dst;
+       struct dst_entry __rcu  *sk_rx_dst;
        int                     sk_rx_dst_ifindex;
        u32                     sk_rx_dst_cookie;
 
@@ -1913,18 +1913,31 @@ static inline int sk_tx_queue_get(const struct sock *sk)
        return -1;
 }
 
-static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
+static inline void __sk_rx_queue_set(struct sock *sk,
+                                    const struct sk_buff *skb,
+                                    bool force_set)
 {
 #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
        if (skb_rx_queue_recorded(skb)) {
                u16 rx_queue = skb_get_rx_queue(skb);
 
-               if (unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue))
+               if (force_set ||
+                   unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue))
                        WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue);
        }
 #endif
 }
 
+static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
+{
+       __sk_rx_queue_set(sk, skb, true);
+}
+
+static inline void sk_rx_queue_update(struct sock *sk, const struct sk_buff *skb)
+{
+       __sk_rx_queue_set(sk, skb, false);
+}
+
 static inline void sk_rx_queue_clear(struct sock *sk)
 {
 #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
@@ -2430,19 +2443,22 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
  * @sk: socket
  *
  * Use the per task page_frag instead of the per socket one for
- * optimization when we know that we're in the normal context and owns
+ * optimization when we know that we're in process context and own
  * everything that's associated with %current.
  *
- * gfpflags_allow_blocking() isn't enough here as direct reclaim may nest
- * inside other socket operations and end up recursing into sk_page_frag()
- * while it's already in use.
+ * Both direct reclaim and page faults can nest inside other
+ * socket operations and end up recursing into sk_page_frag()
+ * while it's already in use: explicitly avoid task page_frag
+ * usage if the caller is potentially doing any of them.
+ * This assumes that page fault handlers use the GFP_NOFS flags.
  *
  * Return: a per task page_frag if context allows that,
  * otherwise a per socket one.
  */
 static inline struct page_frag *sk_page_frag(struct sock *sk)
 {
-       if (gfpflags_normal_context(sk->sk_allocation))
+       if ((sk->sk_allocation & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC | __GFP_FS)) ==
+           (__GFP_DIRECT_RECLAIM | __GFP_FS))
                return &current->task_frag;
 
        return &sk->sk_frag;
index 31f4c4f9aeea072bec17ef6b0f71a4a3186835ec..ac0893df9c76d4025046fbb1d0ce3f02fac322c2 100644 (file)
@@ -147,7 +147,7 @@ struct snd_soc_acpi_link_adr {
  */
 /* Descriptor for SST ASoC machine driver */
 struct snd_soc_acpi_mach {
-       const u8 id[ACPI_ID_LEN];
+       u8 id[ACPI_ID_LEN];
        const struct snd_soc_acpi_codecs *comp_ids;
        const u32 link_mask;
        const struct snd_soc_acpi_link_adr *links;
index 41b509f410bf9b7f50f2c2e31542f170ee28f38a..f9c520ce4bf4e4cd3128e298d0d06a415514046e 100644 (file)
@@ -29,7 +29,7 @@
 #define POLLRDHUP       0x2000
 #endif
 
-#define POLLFREE       (__force __poll_t)0x4000        /* currently only for epoll */
+#define POLLFREE       (__force __poll_t)0x4000
 
 #define POLL_BUSY_LOOP (__force __poll_t)0x8000
 
index a13e20cc66b45bf0c31e52d0e327b0ce624c6195..0512fde5e6978a83666bac4d92895d1a41b6ea1e 100644 (file)
@@ -196,6 +196,13 @@ struct drm_virtgpu_context_init {
        __u64 ctx_set_params;
 };
 
+/*
+ * Event code that's given when VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK is in
+ * effect.  The event size is sizeof(drm_event), since there is no additional
+ * payload.
+ */
+#define VIRTGPU_EVENT_FENCE_SIGNALED 0x90000000
+
 #define DRM_IOCTL_VIRTGPU_MAP \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
 
index 2199adc6a6c20dfbde35e66da619464ef6b95782..80aa5c41a76364f29746ac2fdf94e81a241e6de5 100644 (file)
@@ -9,6 +9,7 @@
 #define __BIG_ENDIAN_BITFIELD
 #endif
 
+#include <linux/stddef.h>
 #include <linux/types.h>
 #include <linux/swab.h>
 
index 601c904fd5cd92c860d91c01dcd4b21db963d072..cd98982e7523e11c1f92ac6d2b4ab24417967b7f 100644 (file)
@@ -9,6 +9,7 @@
 #define __LITTLE_ENDIAN_BITFIELD
 #endif
 
+#include <linux/stddef.h>
 #include <linux/types.h>
 #include <linux/swab.h>
 
index 5da4ee234e0b7e677d65b0e887c5ca26d3002dcb..c0c2f3ed57298e2fe843b99c02d0714fa12da1ec 100644 (file)
 #define ETH_P_IFE      0xED3E          /* ForCES inter-FE LFB type */
 #define ETH_P_AF_IUCV   0xFBFB         /* IBM af_iucv [ NOT AN OFFICIALLY REGISTERED ID ] */
 
-#define ETH_P_802_3_MIN        0x0600          /* If the value in the ethernet type is less than this value
+#define ETH_P_802_3_MIN        0x0600          /* If the value in the ethernet type is more than this value
                                         * then the frame is Ethernet II. Else it is 802.3 */
 
 /*
index c8cc46f80a1619a5529732c6bf5b3c8083502330..f106a3941cdf35397503a9cbccf5b96bac5b81d1 100644 (file)
@@ -136,19 +136,21 @@ struct mptcp_info {
  * MPTCP_EVENT_REMOVED: token, rem_id
  * An address has been lost by the peer.
  *
- * MPTCP_EVENT_SUB_ESTABLISHED: token, family, saddr4 | saddr6,
- *                              daddr4 | daddr6, sport, dport, backup,
- *                              if_idx [, error]
+ * MPTCP_EVENT_SUB_ESTABLISHED: token, family, loc_id, rem_id,
+ *                              saddr4 | saddr6, daddr4 | daddr6, sport,
+ *                              dport, backup, if_idx [, error]
  * A new subflow has been established. 'error' should not be set.
  *
- * MPTCP_EVENT_SUB_CLOSED: token, family, saddr4 | saddr6, daddr4 | daddr6,
- *                         sport, dport, backup, if_idx [, error]
+ * MPTCP_EVENT_SUB_CLOSED: token, family, loc_id, rem_id, saddr4 | saddr6,
+ *                         daddr4 | daddr6, sport, dport, backup, if_idx
+ *                         [, error]
  * A subflow has been closed. An error (copy of sk_err) could be set if an
  * error has been detected for this subflow.
  *
- * MPTCP_EVENT_SUB_PRIORITY: token, family, saddr4 | saddr6, daddr4 | daddr6,
- *                           sport, dport, backup, if_idx [, error]
- *       The priority of a subflow has changed. 'error' should not be set.
+ * MPTCP_EVENT_SUB_PRIORITY: token, family, loc_id, rem_id, saddr4 | saddr6,
+ *                           daddr4 | daddr6, sport, dport, backup, if_idx
+ *                           [, error]
+ * The priority of a subflow has changed. 'error' should not be set.
  */
 enum mptcp_event_type {
        MPTCP_EVENT_UNSPEC = 0,
index 74ef57b38f9f51f0d41011914d2dae44c0987256..ac5d6a3031db707a9a61d6acd3c65e719b1097bf 100644 (file)
@@ -66,10 +66,17 @@ struct rlimit64 {
 #define _STK_LIM       (8*1024*1024)
 
 /*
- * GPG2 wants 64kB of mlocked memory, to make sure pass phrases
- * and other sensitive information are never written to disk.
+ * Limit the amount of locked memory by some sane default:
+ * root can always increase this limit if needed.
+ *
+ * The main use-cases are (1) preventing sensitive memory
+ * from being swapped; (2) real-time operations; (3) via
+ * IOURING_REGISTER_BUFFERS.
+ *
+ * The first two don't need much. The latter will take as
+ * much as it can get. 8MB is a reasonably sane default.
  */
-#define MLOCK_LIMIT    ((PAGE_SIZE > 64*1024) ? PAGE_SIZE : 64*1024)
+#define MLOCK_LIMIT    (8*1024*1024)
 
 /*
  * Due to binary compatibility, the actual resource numbers
index c204262d9fc24c90f666ddaad4baae36174bf94e..344081e71584b4e6cd0066fff48cd17389a5dd26 100644 (file)
@@ -17,6 +17,7 @@ struct xenbus_device;
 unsigned xen_evtchn_nr_channels(void);
 
 int bind_evtchn_to_irq(evtchn_port_t evtchn);
+int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn);
 int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
                              irq_handler_t handler,
                              unsigned long irqflags, const char *devname,
index 121d37e700a62b53854c06199d9a89850ec39dd4..4cebadb5f30db908e1127550fe1d84a419d81d10 100644 (file)
@@ -718,7 +718,7 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
 {
        int rc = 0;
        struct sk_buff *skb;
-       static unsigned int failed = 0;
+       unsigned int failed = 0;
 
        /* NOTE: kauditd_thread takes care of all our locking, we just use
         *       the netlink info passed to us (e.g. sk and portid) */
@@ -735,32 +735,30 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
                        continue;
                }
 
+retry:
                /* grab an extra skb reference in case of error */
                skb_get(skb);
                rc = netlink_unicast(sk, skb, portid, 0);
                if (rc < 0) {
-                       /* fatal failure for our queue flush attempt? */
+                       /* send failed - try a few times unless fatal error */
                        if (++failed >= retry_limit ||
                            rc == -ECONNREFUSED || rc == -EPERM) {
-                               /* yes - error processing for the queue */
                                sk = NULL;
                                if (err_hook)
                                        (*err_hook)(skb);
-                               if (!skb_hook)
-                                       goto out;
-                               /* keep processing with the skb_hook */
+                               if (rc == -EAGAIN)
+                                       rc = 0;
+                               /* continue to drain the queue */
                                continue;
                        } else
-                               /* no - requeue to preserve ordering */
-                               skb_queue_head(queue, skb);
+                               goto retry;
                } else {
-                       /* it worked - drop the extra reference and continue */
+                       /* skb sent - drop the extra reference and continue */
                        consume_skb(skb);
                        failed = 0;
                }
        }
 
-out:
        return (rc >= 0 ? 0 : rc);
 }
 
@@ -1609,7 +1607,8 @@ static int __net_init audit_net_init(struct net *net)
                audit_panic("cannot initialize netlink socket in namespace");
                return -ENOMEM;
        }
-       aunet->sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
+       /* limit the timeout in case auditd is blocked/stopped */
+       aunet->sk->sk_sndtimeo = HZ / 10;
 
        return 0;
 }
index dbc3ad07e21b66f19fe4308b0e07be89db7e443f..9bdb03767db5701fad75a00831dba9ebb947cfb6 100644 (file)
@@ -6346,11 +6346,6 @@ BTF_ID_LIST_GLOBAL_SINGLE(btf_task_struct_ids, struct, task_struct)
 
 /* BTF ID set registration API for modules */
 
-struct kfunc_btf_id_list {
-       struct list_head list;
-       struct mutex mutex;
-};
-
 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
 
 void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
@@ -6376,8 +6371,6 @@ bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id,
 {
        struct kfunc_btf_id_set *s;
 
-       if (!owner)
-               return false;
        mutex_lock(&klist->mutex);
        list_for_each_entry(s, &klist->list, list) {
                if (s->owner == owner && btf_id_set_contains(s->set, kfunc_id)) {
@@ -6389,8 +6382,6 @@ bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id,
        return false;
 }
 
-#endif
-
 #define DEFINE_KFUNC_BTF_ID_LIST(name)                                         \
        struct kfunc_btf_id_list name = { LIST_HEAD_INIT(name.list),           \
                                          __MUTEX_INITIALIZER(name.mutex) };   \
@@ -6398,3 +6389,5 @@ bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id,
 
 DEFINE_KFUNC_BTF_ID_LIST(bpf_tcp_ca_kfunc_list);
 DEFINE_KFUNC_BTF_ID_LIST(prog_test_kfunc_list);
+
+#endif
index 50efda51515b507b1b6e7949790e7ec78d70b93d..b532f1058d35f3f35bac760e29704b20f79f1252 100644 (file)
@@ -1366,22 +1366,28 @@ static void __reg_bound_offset(struct bpf_reg_state *reg)
        reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
 }
 
+static bool __reg32_bound_s64(s32 a)
+{
+       return a >= 0 && a <= S32_MAX;
+}
+
 static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
 {
        reg->umin_value = reg->u32_min_value;
        reg->umax_value = reg->u32_max_value;
-       /* Attempt to pull 32-bit signed bounds into 64-bit bounds
-        * but must be positive otherwise set to worse case bounds
-        * and refine later from tnum.
+
+       /* Attempt to pull 32-bit signed bounds into 64-bit bounds but must
+        * be positive otherwise set to worse case bounds and refine later
+        * from tnum.
         */
-       if (reg->s32_min_value >= 0 && reg->s32_max_value >= 0)
-               reg->smax_value = reg->s32_max_value;
-       else
-               reg->smax_value = U32_MAX;
-       if (reg->s32_min_value >= 0)
+       if (__reg32_bound_s64(reg->s32_min_value) &&
+           __reg32_bound_s64(reg->s32_max_value)) {
                reg->smin_value = reg->s32_min_value;
-       else
+               reg->smax_value = reg->s32_max_value;
+       } else {
                reg->smin_value = 0;
+               reg->smax_value = U32_MAX;
+       }
 }
 
 static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
@@ -2379,8 +2385,6 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
                 */
                if (insn->src_reg != BPF_REG_FP)
                        return 0;
-               if (BPF_SIZE(insn->code) != BPF_DW)
-                       return 0;
 
                /* dreg = *(u64 *)[fp - off] was a fill from the stack.
                 * that [fp - off] slot contains scalar that needs to be
@@ -2403,8 +2407,6 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
                /* scalars can only be spilled into stack */
                if (insn->dst_reg != BPF_REG_FP)
                        return 0;
-               if (BPF_SIZE(insn->code) != BPF_DW)
-                       return 0;
                spi = (-insn->off - 1) / BPF_REG_SIZE;
                if (spi >= 64) {
                        verbose(env, "BUG spi %d\n", spi);
@@ -4551,9 +4553,16 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
 
        if (insn->imm == BPF_CMPXCHG) {
                /* Check comparison of R0 with memory location */
-               err = check_reg_arg(env, BPF_REG_0, SRC_OP);
+               const u32 aux_reg = BPF_REG_0;
+
+               err = check_reg_arg(env, aux_reg, SRC_OP);
                if (err)
                        return err;
+
+               if (is_pointer_value(env, aux_reg)) {
+                       verbose(env, "R%d leaks addr into mem\n", aux_reg);
+                       return -EACCES;
+               }
        }
 
        if (is_pointer_value(env, insn->src_reg)) {
@@ -4588,13 +4597,19 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
                load_reg = -1;
        }
 
-       /* check whether we can read the memory */
+       /* Check whether we can read the memory, with second call for fetch
+        * case to simulate the register fill.
+        */
        err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
-                              BPF_SIZE(insn->code), BPF_READ, load_reg, true);
+                              BPF_SIZE(insn->code), BPF_READ, -1, true);
+       if (!err && load_reg >= 0)
+               err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
+                                      BPF_SIZE(insn->code), BPF_READ, load_reg,
+                                      true);
        if (err)
                return err;
 
-       /* check whether we can write into the same memory */
+       /* Check whether we can write into the same memory. */
        err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
                               BPF_SIZE(insn->code), BPF_WRITE, -1, true);
        if (err)
@@ -8308,6 +8323,10 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
                                                         insn->dst_reg);
                                }
                                zext_32_to_64(dst_reg);
+
+                               __update_reg_bounds(dst_reg);
+                               __reg_deduce_bounds(dst_reg);
+                               __reg_bound_offset(dst_reg);
                        }
                } else {
                        /* case: R = imm
@@ -8422,7 +8441,7 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
 
        new_range = dst_reg->off;
        if (range_right_open)
-               new_range--;
+               new_range++;
 
        /* Examples for register markings:
         *
index eb53f5ec62c900ff488532c67865cd79c377b35d..256cf6db573cd094ed505775f6405ecdbb03457b 100644 (file)
@@ -6,6 +6,7 @@
 
 #include <linux/buildid.h>
 #include <linux/crash_core.h>
+#include <linux/init.h>
 #include <linux/utsname.h>
 #include <linux/vmalloc.h>
 
@@ -295,6 +296,16 @@ int __init parse_crashkernel_low(char *cmdline,
                                "crashkernel=", suffix_tbl[SUFFIX_LOW]);
 }
 
+/*
+ * Add a dummy early_param handler to mark crashkernel= as a known command line
+ * parameter and suppress incorrect warnings in init/main.c.
+ */
+static int __init parse_crashkernel_dummy(char *arg)
+{
+       return 0;
+}
+early_param("crashkernel", parse_crashkernel_dummy);
+
 Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
                          void *data, size_t data_len)
 {
index e9db0c810554e2c6b209b15de9b04d504e4080d1..21eccc961bba31cad928b2bf6e9bfc325191e64e 100644 (file)
@@ -2086,6 +2086,9 @@ int register_kretprobe(struct kretprobe *rp)
                }
        }
 
+       if (rp->data_size > KRETPROBE_MAX_DATA_SIZE)
+               return -E2BIG;
+
        rp->kp.pre_handler = pre_handler_kretprobe;
        rp->kp.post_handler = NULL;
 
index 0c6a48dfcecb346e5fffbabbe9ed9655ce488a4f..1f25a4d7de27329fb94b8f62c2eeb462c9656c2c 100644 (file)
@@ -1380,7 +1380,7 @@ static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
                 *  - the VCPU on which owner runs is preempted
                 */
                if (!owner->on_cpu || need_resched() ||
-                   rt_mutex_waiter_is_top_waiter(lock, waiter) ||
+                   !rt_mutex_waiter_is_top_waiter(lock, waiter) ||
                    vcpu_is_preempted(task_cpu(owner))) {
                        res = false;
                        break;
index 76f9deeaa942099798f4851fbd85feab7c55d244..77563109c0ea0111d9783a246585fd4d2b5e2531 100644 (file)
@@ -1918,7 +1918,7 @@ static void __init init_uclamp_rq(struct rq *rq)
                };
        }
 
-       rq->uclamp_flags = 0;
+       rq->uclamp_flags = UCLAMP_FLAG_IDLE;
 }
 
 static void __init init_uclamp(void)
@@ -6617,11 +6617,11 @@ static int __init setup_preempt_mode(char *str)
        int mode = sched_dynamic_mode(str);
        if (mode < 0) {
                pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
-               return 1;
+               return 0;
        }
 
        sched_dynamic_update(mode);
-       return 0;
+       return 1;
 }
 __setup("preempt=", setup_preempt_mode);
 
index 872e481d5098c84c6e604ab567cf5d4334d192da..9392aea1804e5d8512363f1a837cd27752ce8338 100644 (file)
@@ -615,7 +615,8 @@ void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
                .sum_exec_runtime = p->se.sum_exec_runtime,
        };
 
-       task_cputime(p, &cputime.utime, &cputime.stime);
+       if (task_cputime(p, &cputime.utime, &cputime.stime))
+               cputime.sum_exec_runtime = task_sched_runtime(p);
        cputime_adjust(&cputime, &p->prev_cputime, ut, st);
 }
 EXPORT_SYMBOL_GPL(task_cputime_adjusted);
@@ -828,19 +829,21 @@ u64 task_gtime(struct task_struct *t)
  * add up the pending nohz execution time since the last
  * cputime snapshot.
  */
-void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
+bool task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
 {
        struct vtime *vtime = &t->vtime;
        unsigned int seq;
        u64 delta;
+       int ret;
 
        if (!vtime_accounting_enabled()) {
                *utime = t->utime;
                *stime = t->stime;
-               return;
+               return false;
        }
 
        do {
+               ret = false;
                seq = read_seqcount_begin(&vtime->seqcount);
 
                *utime = t->utime;
@@ -850,6 +853,7 @@ void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
                if (vtime->state < VTIME_SYS)
                        continue;
 
+               ret = true;
                delta = vtime_delta(vtime);
 
                /*
@@ -861,6 +865,8 @@ void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
                else
                        *utime += vtime->utime + delta;
        } while (read_seqcount_retry(&vtime->seqcount, seq));
+
+       return ret;
 }
 
 static int vtime_state_fetch(struct vtime *vtime, int cpu)
index 76577d1642a5dc6fa6eb15048210414bd0591265..eca38107b32f162adad278aee6e522bdfe04c026 100644 (file)
@@ -238,6 +238,13 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
 }
 EXPORT_SYMBOL_GPL(__wake_up_sync);     /* For internal use only */
 
+void __wake_up_pollfree(struct wait_queue_head *wq_head)
+{
+       __wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
+       /* POLLFREE must have cleared the queue. */
+       WARN_ON_ONCE(waitqueue_active(wq_head));
+}
+
 /*
  * Note: we use "set_current_state()" _after_ the wait-queue add,
  * because we need a memory barrier there on SMP, so that any
index a629b11bf3e0d9e5d02ba7341094c8b313ec3244..dfcee3888b00e14fd88316c19167bbec4c95743c 100644 (file)
@@ -4185,6 +4185,15 @@ do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
                                ss_mode != 0))
                        return -EINVAL;
 
+               /*
+                * Return before taking any locks if no actual
+                * sigaltstack changes were requested.
+                */
+               if (t->sas_ss_sp == (unsigned long)ss_sp &&
+                   t->sas_ss_size == ss_size &&
+                   t->sas_ss_flags == ss_flags)
+                       return 0;
+
                sigaltstack_lock();
                if (ss_mode == SS_DISABLE) {
                        ss_size = 0;
index 322b65d456767ad5858fc1f1a777b8d908768f79..41f470929e9913d842fdaf58afb29173c9fde403 100644 (file)
@@ -595,7 +595,8 @@ void irq_enter_rcu(void)
 {
        __irq_enter_raw();
 
-       if (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET))
+       if (tick_nohz_full_cpu(smp_processor_id()) ||
+           (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)))
                tick_irq_enter();
 
        account_hardirq_enter(current);
index 6bffe5af8cb1134d96749dbd55092d8c2bb54f22..17a283ce2b20fb0903f4fb7e4aaa5b13a9f5244b 100644 (file)
@@ -1375,6 +1375,13 @@ static inline void tick_nohz_irq_enter(void)
        now = ktime_get();
        if (ts->idle_active)
                tick_nohz_stop_idle(ts, now);
+       /*
+        * If all CPUs are idle. We may need to update a stale jiffies value.
+        * Note nohz_full is a special case: a timekeeper is guaranteed to stay
+        * alive but it might be busy looping with interrupts disabled in some
+        * rare case (typically stop machine). So we must make sure we have a
+        * last resort.
+        */
        if (ts->tick_stopped)
                tick_nohz_update_jiffies(now);
 }
index b348749a9fc628e9fa94de8af56ffee3e536096c..dcdcb85121e40ad6319dc768e0a33f9233c15202 100644 (file)
@@ -1306,8 +1306,7 @@ int do_settimeofday64(const struct timespec64 *ts)
        timekeeping_forward_now(tk);
 
        xt = tk_xtime(tk);
-       ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
-       ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;
+       ts_delta = timespec64_sub(*ts, xt);
 
        if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
                ret = -EINVAL;
index e3d2c23c413d42bf153144870ca2cab995b3ecb8..85f1021ad45955026eba845727e53c87693ed2db 100644 (file)
@@ -2054,26 +2054,28 @@ unsigned long msleep_interruptible(unsigned int msecs)
 EXPORT_SYMBOL(msleep_interruptible);
 
 /**
- * usleep_range - Sleep for an approximate time
- * @min: Minimum time in usecs to sleep
- * @max: Maximum time in usecs to sleep
+ * usleep_range_state - Sleep for an approximate time in a given state
+ * @min:       Minimum time in usecs to sleep
+ * @max:       Maximum time in usecs to sleep
+ * @state:     State of the current task that will be while sleeping
  *
  * In non-atomic context where the exact wakeup time is flexible, use
- * usleep_range() instead of udelay().  The sleep improves responsiveness
+ * usleep_range_state() instead of udelay().  The sleep improves responsiveness
  * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
  * power usage by allowing hrtimers to take advantage of an already-
  * scheduled interrupt instead of scheduling a new one just for this sleep.
  */
-void __sched usleep_range(unsigned long min, unsigned long max)
+void __sched usleep_range_state(unsigned long min, unsigned long max,
+                               unsigned int state)
 {
        ktime_t exp = ktime_add_us(ktime_get(), min);
        u64 delta = (u64)(max - min) * NSEC_PER_USEC;
 
        for (;;) {
-               __set_current_state(TASK_UNINTERRUPTIBLE);
+               __set_current_state(state);
                /* Do not return before the requested sleep time has elapsed */
                if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
                        break;
        }
 }
-EXPORT_SYMBOL(usleep_range);
+EXPORT_SYMBOL(usleep_range_state);
index 30bc880c3849cb6f1ff386b515fd7ffebf4baa7d..be5f6b32a01221398693a57069786c975f2a1b60 100644 (file)
@@ -5217,6 +5217,7 @@ int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
 {
        struct ftrace_direct_func *direct;
        struct ftrace_func_entry *entry;
+       struct ftrace_hash *hash;
        int ret = -ENODEV;
 
        mutex_lock(&direct_mutex);
@@ -5225,7 +5226,8 @@ int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
        if (!entry)
                goto out_unlock;
 
-       if (direct_functions->count == 1)
+       hash = direct_ops.func_hash->filter_hash;
+       if (hash->count == 1)
                unregister_ftrace_function(&direct_ops);
 
        ret = ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
@@ -5540,6 +5542,10 @@ int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
        err = unregister_ftrace_function(ops);
        remove_direct_functions_hash(hash, addr);
        mutex_unlock(&direct_mutex);
+
+       /* cleanup for possible another register call */
+       ops->func = NULL;
+       ops->trampoline = 0;
        return err;
 }
 EXPORT_SYMBOL_GPL(unregister_ftrace_direct_multi);
index 9555b8e1d1e386b1699163648507532961f05dcd..319f9c8ca7e7d8d4da9ef77e2c5eab9c78c63568 100644 (file)
@@ -3757,7 +3757,7 @@ static int check_synth_field(struct synth_event *event,
 
        if (strcmp(field->type, hist_field->type) != 0) {
                if (field->size != hist_field->size ||
-                   field->is_signed != hist_field->is_signed)
+                   (!field->is_string && field->is_signed != hist_field->is_signed))
                        return -EINVAL;
        }
 
index 22db3ce95e74f2fffa774656d43204692605b0ae..ca9c13b2ecf4ba44e72b4a0a6d391bc5897dbeaf 100644 (file)
@@ -1237,9 +1237,8 @@ static int __create_synth_event(const char *name, const char *raw_fields)
                                                  argv + consumed, &consumed,
                                                  &field_version);
                        if (IS_ERR(field)) {
-                               argv_free(argv);
                                ret = PTR_ERR(field);
-                               goto err;
+                               goto err_free_arg;
                        }
 
                        /*
@@ -1262,18 +1261,19 @@ static int __create_synth_event(const char *name, const char *raw_fields)
                        if (cmd_version > 1 && n_fields_this_loop >= 1) {
                                synth_err(SYNTH_ERR_INVALID_CMD, errpos(field_str));
                                ret = -EINVAL;
-                               goto err;
+                               goto err_free_arg;
                        }
 
                        fields[n_fields++] = field;
                        if (n_fields == SYNTH_FIELDS_MAX) {
                                synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0);
                                ret = -EINVAL;
-                               goto err;
+                               goto err_free_arg;
                        }
 
                        n_fields_this_loop++;
                }
+               argv_free(argv);
 
                if (consumed < argc) {
                        synth_err(SYNTH_ERR_INVALID_CMD, 0);
@@ -1281,7 +1281,6 @@ static int __create_synth_event(const char *name, const char *raw_fields)
                        goto err;
                }
 
-               argv_free(argv);
        }
 
        if (n_fields == 0) {
@@ -1307,6 +1306,8 @@ static int __create_synth_event(const char *name, const char *raw_fields)
        kfree(saved_fields);
 
        return ret;
+ err_free_arg:
+       argv_free(argv);
  err:
        for (i = 0; i < n_fields; i++)
                free_synth_field(fields[i]);
index 39bb56d2dcbef650f1309a8fb3098cd9aab37812..9628b557184688485b586b3d130c414cc09e6442 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/jhash.h>
 #include <linux/slab.h>
 #include <linux/sort.h>
+#include <linux/kmemleak.h>
 
 #include "tracing_map.h"
 #include "trace.h"
@@ -307,6 +308,7 @@ static void tracing_map_array_free(struct tracing_map_array *a)
        for (i = 0; i < a->n_pages; i++) {
                if (!a->pages[i])
                        break;
+               kmemleak_free(a->pages[i]);
                free_page((unsigned long)a->pages[i]);
        }
 
@@ -342,6 +344,7 @@ static struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
                a->pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
                if (!a->pages[i])
                        goto free;
+               kmemleak_alloc(a->pages[i], PAGE_SIZE, 1, GFP_KERNEL);
        }
  out:
        return a;
index 4f5613dac2273a1b96feb809e2a5f1449932769b..7b32c356ebc5cacba2e82b8f40cceb59a9161e8c 100644 (file)
@@ -264,15 +264,16 @@ void dec_ucount(struct ucounts *ucounts, enum ucount_type type)
 long inc_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v)
 {
        struct ucounts *iter;
+       long max = LONG_MAX;
        long ret = 0;
 
        for (iter = ucounts; iter; iter = iter->ns->ucounts) {
-               long max = READ_ONCE(iter->ns->ucount_max[type]);
                long new = atomic_long_add_return(v, &iter->ucount[type]);
                if (new < 0 || new > max)
                        ret = LONG_MAX;
                else if (iter == ucounts)
                        ret = new;
+               max = READ_ONCE(iter->ns->ucount_max[type]);
        }
        return ret;
 }
@@ -312,15 +313,16 @@ long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum ucount_type type)
 {
        /* Caller must hold a reference to ucounts */
        struct ucounts *iter;
+       long max = LONG_MAX;
        long dec, ret = 0;
 
        for (iter = ucounts; iter; iter = iter->ns->ucounts) {
-               long max = READ_ONCE(iter->ns->ucount_max[type]);
                long new = atomic_long_add_return(1, &iter->ucount[type]);
                if (new < 0 || new > max)
                        goto unwind;
                if (iter == ucounts)
                        ret = new;
+               max = READ_ONCE(iter->ns->ucount_max[type]);
                /*
                 * Grab an extra ucount reference for the caller when
                 * the rlimit count was previously 0.
@@ -339,15 +341,16 @@ unwind:
        return 0;
 }
 
-bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long max)
+bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long rlimit)
 {
        struct ucounts *iter;
-       if (get_ucounts_value(ucounts, type) > max)
-               return true;
+       long max = rlimit;
+       if (rlimit > LONG_MAX)
+               max = LONG_MAX;
        for (iter = ucounts; iter; iter = iter->ns->ucounts) {
-               max = READ_ONCE(iter->ns->ucount_max[type]);
                if (get_ucounts_value(iter, type) > max)
                        return true;
+               max = READ_ONCE(iter->ns->ucount_max[type]);
        }
        return false;
 }
index 5c12bde10996cf97b5f075d318089b1be73f71d7..5e14e32056add21469af2c9c77d17cfb25e2976f 100644 (file)
@@ -316,6 +316,7 @@ config DEBUG_INFO_BTF
        bool "Generate BTF typeinfo"
        depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED
        depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
+       depends on BPF_SYSCALL
        help
          Generate deduplicated BTF type information from DWARF debug info.
          Turning this on expects presence of pahole tool, which will convert
index a90112ee72a1fee70ddab19281d354734b9b1bec..72b9068ab57bffbb308e1c449218aa205f5a8ffd 100644 (file)
@@ -49,6 +49,7 @@
        SIPROUND; \
        return (v0 ^ v1) ^ (v2 ^ v3);
 
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
 {
        const u8 *end = data + len - (len % sizeof(u64));
@@ -80,8 +81,8 @@ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
        POSTAMBLE
 }
 EXPORT_SYMBOL(__siphash_aligned);
+#endif
 
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
 {
        const u8 *end = data + len - (len % sizeof(u64));
@@ -113,7 +114,6 @@ u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
        POSTAMBLE
 }
 EXPORT_SYMBOL(__siphash_unaligned);
-#endif
 
 /**
  * siphash_1u64 - compute 64-bit siphash PRF value of a u64
@@ -250,6 +250,7 @@ EXPORT_SYMBOL(siphash_3u32);
        HSIPROUND; \
        return (v0 ^ v1) ^ (v2 ^ v3);
 
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
 {
        const u8 *end = data + len - (len % sizeof(u64));
@@ -280,8 +281,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
        HPOSTAMBLE
 }
 EXPORT_SYMBOL(__hsiphash_aligned);
+#endif
 
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u32 __hsiphash_unaligned(const void *data, size_t len,
                         const hsiphash_key_t *key)
 {
@@ -313,7 +314,6 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
        HPOSTAMBLE
 }
 EXPORT_SYMBOL(__hsiphash_unaligned);
-#endif
 
 /**
  * hsiphash_1u32 - compute 64-bit hsiphash PRF value of a u32
@@ -418,6 +418,7 @@ EXPORT_SYMBOL(hsiphash_4u32);
        HSIPROUND; \
        return v1 ^ v3;
 
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
 {
        const u8 *end = data + len - (len % sizeof(u32));
@@ -438,8 +439,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
        HPOSTAMBLE
 }
 EXPORT_SYMBOL(__hsiphash_aligned);
+#endif
 
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u32 __hsiphash_unaligned(const void *data, size_t len,
                         const hsiphash_key_t *key)
 {
@@ -461,7 +462,6 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
        HPOSTAMBLE
 }
 EXPORT_SYMBOL(__hsiphash_unaligned);
-#endif
 
 /**
  * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
index 28edafc820adfc9b434cbc76fbb774992a45ff5b..356f4f2c779e5fda55abcd8bc5b652d85e5af054 100644 (file)
@@ -428,7 +428,7 @@ config THP_SWAP
 # UP and nommu archs use km based percpu allocator
 #
 config NEED_PER_CPU_KM
-       depends on !SMP
+       depends on !SMP || !MMU
        bool
        default y
 
index 1eead47610112257ca01c5ff306c64716959af8f..eae96dfe0261cfd70c38ee67c4e998fc4cd09a64 100644 (file)
@@ -945,6 +945,13 @@ void bdi_unregister(struct backing_dev_info *bdi)
        wb_shutdown(&bdi->wb);
        cgwb_bdi_unregister(bdi);
 
+       /*
+        * If this BDI's min ratio has been set, use bdi_set_min_ratio() to
+        * update the global bdi_min_ratio.
+        */
+       if (bdi->min_ratio)
+               bdi_set_min_ratio(bdi, 0);
+
        if (bdi->dev) {
                bdi_debug_unregister(bdi);
                device_unregister(bdi->dev);
index c381b3c525d0bd23739012b081a311a10ec00fd6..e924978952025b003711a6260db375a8bb21996f 100644 (file)
@@ -282,7 +282,6 @@ int damon_set_targets(struct damon_ctx *ctx,
        for (i = 0; i < nr_ids; i++) {
                t = damon_new_target(ids[i]);
                if (!t) {
-                       pr_err("Failed to alloc damon_target\n");
                        /* The caller should do cleanup of the ids itself */
                        damon_for_each_target_safe(t, next, ctx)
                                damon_destroy_target(t);
@@ -312,16 +311,10 @@ int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
                    unsigned long aggr_int, unsigned long primitive_upd_int,
                    unsigned long min_nr_reg, unsigned long max_nr_reg)
 {
-       if (min_nr_reg < 3) {
-               pr_err("min_nr_regions (%lu) must be at least 3\n",
-                               min_nr_reg);
+       if (min_nr_reg < 3)
                return -EINVAL;
-       }
-       if (min_nr_reg > max_nr_reg) {
-               pr_err("invalid nr_regions.  min (%lu) > max (%lu)\n",
-                               min_nr_reg, max_nr_reg);
+       if (min_nr_reg > max_nr_reg)
                return -EINVAL;
-       }
 
        ctx->sample_interval = sample_int;
        ctx->aggr_interval = aggr_int;
@@ -980,10 +973,11 @@ static unsigned long damos_wmark_wait_us(struct damos *scheme)
 
 static void kdamond_usleep(unsigned long usecs)
 {
-       if (usecs > 100 * 1000)
-               schedule_timeout_interruptible(usecs_to_jiffies(usecs));
+       /* See Documentation/timers/timers-howto.rst for the thresholds */
+       if (usecs > 20 * USEC_PER_MSEC)
+               schedule_timeout_idle(usecs_to_jiffies(usecs));
        else
-               usleep_range(usecs, usecs + 1);
+               usleep_idle_range(usecs, usecs + 1);
 }
 
 /* Returns negative error code if it's not activated but should return */
@@ -1038,7 +1032,7 @@ static int kdamond_fn(void *data)
                                ctx->callback.after_sampling(ctx))
                        done = true;
 
-               usleep_range(ctx->sample_interval, ctx->sample_interval + 1);
+               kdamond_usleep(ctx->sample_interval);
 
                if (ctx->primitive.check_accesses)
                        max_nr_accesses = ctx->primitive.check_accesses(ctx);
index 9b520bb4a3e70d4074f353c79a084593e79cf74a..4fbd729edc9e7cd297bb05579493ace53e7f569c 100644 (file)
@@ -210,10 +210,8 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
                                &wmarks.low, &parsed);
                if (ret != 18)
                        break;
-               if (!damos_action_valid(action)) {
-                       pr_err("wrong action %d\n", action);
+               if (!damos_action_valid(action))
                        goto fail;
-               }
 
                pos += parsed;
                scheme = damon_new_scheme(min_sz, max_sz, min_nr_a, max_nr_a,
@@ -652,10 +650,12 @@ static void dbgfs_before_terminate(struct damon_ctx *ctx)
        if (!targetid_is_pid(ctx))
                return;
 
+       mutex_lock(&ctx->kdamond_lock);
        damon_for_each_target_safe(t, next, ctx) {
                put_pid((struct pid *)t->id);
                damon_destroy_target(t);
        }
+       mutex_unlock(&ctx->kdamond_lock);
 }
 
 static struct damon_ctx *dbgfs_new_ctx(void)
index ecfd0b2ed222d065d97e1061704ef0fa96e5ad51..6a1b9272ea123a1f2991ad632daf17ef0b173c76 100644 (file)
@@ -135,7 +135,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
                                struct damon_addr_range *three_regions,
                                unsigned long *expected, int nr_expected)
 {
-       struct damon_ctx *ctx = damon_new_ctx();
        struct damon_target *t;
        struct damon_region *r;
        int i;
@@ -145,7 +144,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
                r = damon_new_region(regions[i * 2], regions[i * 2 + 1]);
                damon_add_region(r, t);
        }
-       damon_add_target(ctx, t);
 
        damon_va_apply_three_regions(t, three_regions);
 
@@ -154,8 +152,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
                KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]);
                KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]);
        }
-
-       damon_destroy_ctx(ctx);
 }
 
 /*
@@ -252,60 +248,59 @@ static void damon_test_apply_three_regions4(struct kunit *test)
                        new_three_regions, expected, ARRAY_SIZE(expected));
 }
 
-static void damon_test_split_evenly(struct kunit *test)
+static void damon_test_split_evenly_fail(struct kunit *test,
+               unsigned long start, unsigned long end, unsigned int nr_pieces)
 {
-       struct damon_ctx *c = damon_new_ctx();
-       struct damon_target *t;
-       struct damon_region *r;
-       unsigned long i;
-
-       KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5),
-                       -EINVAL);
-
-       t = damon_new_target(42);
-       r = damon_new_region(0, 100);
-       KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 0), -EINVAL);
+       struct damon_target *t = damon_new_target(42);
+       struct damon_region *r = damon_new_region(start, end);
 
        damon_add_region(r, t);
-       KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 10), 0);
-       KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 10u);
+       KUNIT_EXPECT_EQ(test,
+                       damon_va_evenly_split_region(t, r, nr_pieces), -EINVAL);
+       KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u);
 
-       i = 0;
        damon_for_each_region(r, t) {
-               KUNIT_EXPECT_EQ(test, r->ar.start, i++ * 10);
-               KUNIT_EXPECT_EQ(test, r->ar.end, i * 10);
+               KUNIT_EXPECT_EQ(test, r->ar.start, start);
+               KUNIT_EXPECT_EQ(test, r->ar.end, end);
        }
+
        damon_free_target(t);
+}
+
+static void damon_test_split_evenly_succ(struct kunit *test,
+       unsigned long start, unsigned long end, unsigned int nr_pieces)
+{
+       struct damon_target *t = damon_new_target(42);
+       struct damon_region *r = damon_new_region(start, end);
+       unsigned long expected_width = (end - start) / nr_pieces;
+       unsigned long i = 0;
 
-       t = damon_new_target(42);
-       r = damon_new_region(5, 59);
        damon_add_region(r, t);
-       KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 5), 0);
-       KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u);
+       KUNIT_EXPECT_EQ(test,
+                       damon_va_evenly_split_region(t, r, nr_pieces), 0);
+       KUNIT_EXPECT_EQ(test, damon_nr_regions(t), nr_pieces);
 
-       i = 0;
        damon_for_each_region(r, t) {
-               if (i == 4)
+               if (i == nr_pieces - 1)
                        break;
-               KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i++);
-               KUNIT_EXPECT_EQ(test, r->ar.end, 5 + 10 * i);
+               KUNIT_EXPECT_EQ(test,
+                               r->ar.start, start + i++ * expected_width);
+               KUNIT_EXPECT_EQ(test, r->ar.end, start + i * expected_width);
        }
-       KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i);
-       KUNIT_EXPECT_EQ(test, r->ar.end, 59ul);
+       KUNIT_EXPECT_EQ(test, r->ar.start, start + i * expected_width);
+       KUNIT_EXPECT_EQ(test, r->ar.end, end);
        damon_free_target(t);
+}
 
-       t = damon_new_target(42);
-       r = damon_new_region(5, 6);
-       damon_add_region(r, t);
-       KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 2), -EINVAL);
-       KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u);
+static void damon_test_split_evenly(struct kunit *test)
+{
+       KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5),
+                       -EINVAL);
 
-       damon_for_each_region(r, t) {
-               KUNIT_EXPECT_EQ(test, r->ar.start, 5ul);
-               KUNIT_EXPECT_EQ(test, r->ar.end, 6ul);
-       }
-       damon_free_target(t);
-       damon_destroy_ctx(c);
+       damon_test_split_evenly_fail(test, 0, 100, 0);
+       damon_test_split_evenly_succ(test, 0, 100, 10);
+       damon_test_split_evenly_succ(test, 5, 59, 5);
+       damon_test_split_evenly_fail(test, 5, 6, 2);
 }
 
 static struct kunit_case damon_test_cases[] = {
index 35fe49080ee99636f045d3f8c785319947eadf4f..20a9a9d69eb19371e26b84cdc840e75e31b479d6 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/page_idle.h>
 #include <linux/pagewalk.h>
+#include <linux/sched/mm.h>
 
 #include "prmtv-common.h"
 
@@ -626,7 +627,6 @@ int damon_va_apply_scheme(struct damon_ctx *ctx, struct damon_target *t,
        case DAMOS_STAT:
                return 0;
        default:
-               pr_warn("Wrong action %d\n", scheme->action);
                return -EINVAL;
        }
 
index daa0e23a6ee666b4fd7b7ab3b95ebd7ea156569e..39c4c46c61337e24b76cfeff9fe0dbc79f85be7d 100644 (file)
@@ -3253,8 +3253,6 @@ static struct page *next_uptodate_page(struct page *page,
                        goto skip;
                if (!PageUptodate(page) || PageReadahead(page))
                        goto skip;
-               if (PageHWPoison(page))
-                       goto skip;
                if (!trylock_page(page))
                        goto skip;
                if (page->mapping != mapping)
index abcd1785c629c4ebf049419bc7ffc33dec1a5bd0..a1baa198519a2da44a0406c7b02c7c48d3118a39 100644 (file)
@@ -2973,7 +2973,7 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid)
        struct huge_bootmem_page *m = NULL; /* initialize for clang */
        int nr_nodes, node;
 
-       if (nid >= nr_online_nodes)
+       if (nid != NUMA_NO_NODE && nid >= nr_online_nodes)
                return 0;
        /* do node specific alloc */
        if (nid != NUMA_NO_NODE) {
index 09945784df9e656171b0dc40bb023f7ced26fad3..a19154a8d1964bd72391df9e344d26a23e1bd656 100644 (file)
@@ -683,6 +683,7 @@ static const struct file_operations objects_fops = {
        .open = open_objects,
        .read = seq_read,
        .llseek = seq_lseek,
+       .release = seq_release,
 };
 
 static int __init kfence_debugfs_init(void)
index 6863a834ed42573de49771e680a6329edec9ec7f..2ed5f2a0879d3b5a95274ade98d601534ecb1975 100644 (file)
@@ -776,24 +776,6 @@ void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
        rcu_read_unlock();
 }
 
-/*
- * mod_objcg_mlstate() may be called with irq enabled, so
- * mod_memcg_lruvec_state() should be used.
- */
-static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
-                                    struct pglist_data *pgdat,
-                                    enum node_stat_item idx, int nr)
-{
-       struct mem_cgroup *memcg;
-       struct lruvec *lruvec;
-
-       rcu_read_lock();
-       memcg = obj_cgroup_memcg(objcg);
-       lruvec = mem_cgroup_lruvec(memcg, pgdat);
-       mod_memcg_lruvec_state(lruvec, idx, nr);
-       rcu_read_unlock();
-}
-
 /**
  * __count_memcg_events - account VM events in a cgroup
  * @memcg: the memory cgroup
@@ -2137,41 +2119,6 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
 }
 #endif
 
-/*
- * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
- * sequence used in this case to access content from object stock is slow.
- * To optimize for user context access, there are now two object stocks for
- * task context and interrupt context access respectively.
- *
- * The task context object stock can be accessed by disabling preemption only
- * which is cheap in non-preempt kernel. The interrupt context object stock
- * can only be accessed after disabling interrupt. User context code can
- * access interrupt object stock, but not vice versa.
- */
-static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
-{
-       struct memcg_stock_pcp *stock;
-
-       if (likely(in_task())) {
-               *pflags = 0UL;
-               preempt_disable();
-               stock = this_cpu_ptr(&memcg_stock);
-               return &stock->task_obj;
-       }
-
-       local_irq_save(*pflags);
-       stock = this_cpu_ptr(&memcg_stock);
-       return &stock->irq_obj;
-}
-
-static inline void put_obj_stock(unsigned long flags)
-{
-       if (likely(in_task()))
-               preempt_enable();
-       else
-               local_irq_restore(flags);
-}
-
 /**
  * consume_stock: Try to consume stocked charge on this cpu.
  * @memcg: memcg to consume from.
@@ -2816,6 +2763,59 @@ retry:
  */
 #define OBJCGS_CLEAR_MASK      (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
 
+/*
+ * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
+ * sequence used in this case to access content from object stock is slow.
+ * To optimize for user context access, there are now two object stocks for
+ * task context and interrupt context access respectively.
+ *
+ * The task context object stock can be accessed by disabling preemption only
+ * which is cheap in non-preempt kernel. The interrupt context object stock
+ * can only be accessed after disabling interrupt. User context code can
+ * access interrupt object stock, but not vice versa.
+ */
+static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
+{
+       struct memcg_stock_pcp *stock;
+
+       if (likely(in_task())) {
+               *pflags = 0UL;
+               preempt_disable();
+               stock = this_cpu_ptr(&memcg_stock);
+               return &stock->task_obj;
+       }
+
+       local_irq_save(*pflags);
+       stock = this_cpu_ptr(&memcg_stock);
+       return &stock->irq_obj;
+}
+
+static inline void put_obj_stock(unsigned long flags)
+{
+       if (likely(in_task()))
+               preempt_enable();
+       else
+               local_irq_restore(flags);
+}
+
+/*
+ * mod_objcg_mlstate() may be called with irq enabled, so
+ * mod_memcg_lruvec_state() should be used.
+ */
+static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
+                                    struct pglist_data *pgdat,
+                                    enum node_stat_item idx, int nr)
+{
+       struct mem_cgroup *memcg;
+       struct lruvec *lruvec;
+
+       rcu_read_lock();
+       memcg = obj_cgroup_memcg(objcg);
+       lruvec = mem_cgroup_lruvec(memcg, pgdat);
+       mod_memcg_lruvec_state(lruvec, idx, nr);
+       rcu_read_unlock();
+}
+
 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
                                 gfp_t gfp, bool new_page)
 {
index 07c875fdeaf0ca82e86bb8db8b32164a2d4357e0..3a274468f193e8d95499a51d76f75279f206993c 100644 (file)
@@ -1470,17 +1470,12 @@ static int memory_failure_hugetlb(unsigned long pfn, int flags)
        if (!(flags & MF_COUNT_INCREASED)) {
                res = get_hwpoison_page(p, flags);
                if (!res) {
-                       /*
-                        * Check "filter hit" and "race with other subpage."
-                        */
                        lock_page(head);
-                       if (PageHWPoison(head)) {
-                               if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
-                                   || (p != head && TestSetPageHWPoison(head))) {
+                       if (hwpoison_filter(p)) {
+                               if (TestClearPageHWPoison(head))
                                        num_poisoned_pages_dec();
-                                       unlock_page(head);
-                                       return 0;
-                               }
+                               unlock_page(head);
+                               return 0;
                        }
                        unlock_page(head);
                        res = MF_FAILED;
@@ -2239,6 +2234,7 @@ retry:
        } else if (ret == 0) {
                if (soft_offline_free_page(page) && try_again) {
                        try_again = false;
+                       flags &= ~MF_COUNT_INCREASED;
                        goto retry;
                }
        }
index 852041f6be418c317db8a40c94cf21d501ba9cb3..2a9627dc784c31072f39527f0a52953f72257aff 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/memblock.h>
 #include <linux/compaction.h>
 #include <linux/rmap.h>
+#include <linux/module.h>
 
 #include <asm/tlbflush.h>
 
index 10e9c87260edea7af123620da62d12945696c3a4..f6248affaf38c9b7ca535387bd6e13c5a967b06a 100644 (file)
@@ -2140,8 +2140,7 @@ struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
                         * memory with both reclaim and compact as well.
                         */
                        if (!page && (gfp & __GFP_DIRECT_RECLAIM))
-                               page = __alloc_pages_node(hpage_node,
-                                                               gfp, order);
+                               page = __alloc_pages(gfp, order, hpage_node, nmask);
 
                        goto out;
                }
index a8626825a8299377d617219dc34ca60e34873d10..abe7db581d686607277e3112647e893fa6ccad4c 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5081,6 +5081,7 @@ struct loc_track {
        unsigned long max;
        unsigned long count;
        struct location *loc;
+       loff_t idx;
 };
 
 static struct dentry *slab_debugfs_root;
@@ -6052,11 +6053,11 @@ __initcall(slab_sysfs_init);
 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
 static int slab_debugfs_show(struct seq_file *seq, void *v)
 {
-
-       struct location *l;
-       unsigned int idx = *(unsigned int *)v;
        struct loc_track *t = seq->private;
+       struct location *l;
+       unsigned long idx;
 
+       idx = (unsigned long) t->idx;
        if (idx < t->count) {
                l = &t->loc[idx];
 
@@ -6105,16 +6106,18 @@ static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
 {
        struct loc_track *t = seq->private;
 
-       v = ppos;
-       ++*ppos;
+       t->idx = ++(*ppos);
        if (*ppos <= t->count)
-               return v;
+               return ppos;
 
        return NULL;
 }
 
 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
 {
+       struct loc_track *t = seq->private;
+
+       t->idx = *ppos;
        return ppos;
 }
 
index 16f706c55d925900bd6932177fba221ea3f8f56f..2b553184058372e09e2d1b34576c65606bc6c65f 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/swap_slots.h>
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
+#include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/mutex.h>
 #include <linux/mm.h>
index 2f34bbdde0e8fe8ea6c6ffc9a4d866380d26d132..cfca99e295b80ef5c9128184c452f28d3639faae 100644 (file)
@@ -85,8 +85,10 @@ static void ax25_kill_by_device(struct net_device *dev)
 again:
        ax25_for_each(s, &ax25_list) {
                if (s->ax25_dev == ax25_dev) {
-                       s->ax25_dev = NULL;
                        spin_unlock_bh(&ax25_list_lock);
+                       lock_sock(s->sk);
+                       s->ax25_dev = NULL;
+                       release_sock(s->sk);
                        ax25_disconnect(s, ENETUNREACH);
                        spin_lock_bh(&ax25_list_lock);
 
index db4ab2c2ce18b02b0ac29a8b86b09e85e1a6f969..891cfcf45644b71c75af68d23974804398108d0d 100644 (file)
@@ -337,7 +337,7 @@ static int old_deviceless(struct net *net, void __user *uarg)
 
                args[2] = get_bridge_ifindices(net, indices, args[2]);
 
-               ret = copy_to_user(uarg, indices,
+               ret = copy_to_user((void __user *)args[1], indices,
                                   array_size(args[2], sizeof(int)))
                        ? -EFAULT : args[2];
 
index 15ac064b5562d7b99f885610a3d12733b63aa325..c4708e2487fb67e09df99ff0cb2f17cb51795b5d 100644 (file)
@@ -3941,8 +3941,8 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
                return skb;
 
        /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
-       qdisc_skb_cb(skb)->mru = 0;
-       qdisc_skb_cb(skb)->post_ct = false;
+       tc_skb_cb(skb)->mru = 0;
+       tc_skb_cb(skb)->post_ct = false;
        mini_qdisc_bstats_cpu_update(miniq, skb);
 
        switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) {
@@ -4210,7 +4210,10 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
        if (dev->flags & IFF_UP) {
                int cpu = smp_processor_id(); /* ok because BHs are off */
 
-               if (txq->xmit_lock_owner != cpu) {
+               /* Other cpus might concurrently change txq->xmit_lock_owner
+                * to -1 or to their cpu id, but not to our id.
+                */
+               if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
                        if (dev_xmit_recursion())
                                goto recursion_alert;
 
@@ -5100,8 +5103,8 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
        }
 
        qdisc_skb_cb(skb)->pkt_len = skb->len;
-       qdisc_skb_cb(skb)->mru = 0;
-       qdisc_skb_cb(skb)->post_ct = false;
+       tc_skb_cb(skb)->mru = 0;
+       tc_skb_cb(skb)->post_ct = false;
        skb->tc_at_ingress = 1;
        mini_qdisc_bstats_cpu_update(miniq, skb);
 
index 5ad72dbfcd0797ae045734b83fbbdc090ae3ff53..c06c9ba6e8c5ea00a3999700a6724a404c1f05f9 100644 (file)
@@ -4110,14 +4110,6 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
                return err;
        }
 
-       if (info->attrs[DEVLINK_ATTR_NETNS_PID] ||
-           info->attrs[DEVLINK_ATTR_NETNS_FD] ||
-           info->attrs[DEVLINK_ATTR_NETNS_ID]) {
-               dest_net = devlink_netns_get(skb, info);
-               if (IS_ERR(dest_net))
-                       return PTR_ERR(dest_net);
-       }
-
        if (info->attrs[DEVLINK_ATTR_RELOAD_ACTION])
                action = nla_get_u8(info->attrs[DEVLINK_ATTR_RELOAD_ACTION]);
        else
@@ -4160,6 +4152,14 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
                        return -EINVAL;
                }
        }
+       if (info->attrs[DEVLINK_ATTR_NETNS_PID] ||
+           info->attrs[DEVLINK_ATTR_NETNS_FD] ||
+           info->attrs[DEVLINK_ATTR_NETNS_ID]) {
+               dest_net = devlink_netns_get(skb, info);
+               if (IS_ERR(dest_net))
+                       return PTR_ERR(dest_net);
+       }
+
        err = devlink_reload(devlink, dest_net, action, limit, &actions_performed, info->extack);
 
        if (dest_net)
index be74ab4551c204a0ddf2cfc17c886d0f36f5be98..0ccfd5fa5cb9b5f608ab6715a718cd57ee4eecab 100644 (file)
@@ -162,3 +162,22 @@ void dst_cache_destroy(struct dst_cache *dst_cache)
        free_percpu(dst_cache->cache);
 }
 EXPORT_SYMBOL_GPL(dst_cache_destroy);
+
+void dst_cache_reset_now(struct dst_cache *dst_cache)
+{
+       int i;
+
+       if (!dst_cache->cache)
+               return;
+
+       dst_cache->reset_ts = jiffies;
+       for_each_possible_cpu(i) {
+               struct dst_cache_pcpu *idst = per_cpu_ptr(dst_cache->cache, i);
+               struct dst_entry *dst = idst->dst;
+
+               idst->cookie = 0;
+               idst->dst = NULL;
+               dst_release(dst);
+       }
+}
+EXPORT_SYMBOL_GPL(dst_cache_reset_now);
index 79df7cd9dbc16d5bd91394bce15ba5e3fd8244c9..1bb567a3b329cd06534f3e0fa27a463e06e538cc 100644 (file)
@@ -323,7 +323,7 @@ jumped:
                if (!err && ops->suppress && INDIRECT_CALL_MT(ops->suppress,
                                                              fib6_rule_suppress,
                                                              fib4_rule_suppress,
-                                                             rule, arg))
+                                                             rule, flags, arg))
                        continue;
 
                if (err != -EAGAIN) {
index 3255f57f5131af315f0148909e69ff4c91e66b94..1b094c481f1d0469de929f6091328c36ef25bc2d 100644 (file)
@@ -238,7 +238,7 @@ void
 skb_flow_dissect_ct(const struct sk_buff *skb,
                    struct flow_dissector *flow_dissector,
                    void *target_container, u16 *ctinfo_map,
-                   size_t mapsize, bool post_ct)
+                   size_t mapsize, bool post_ct, u16 zone)
 {
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
        struct flow_dissector_key_ct *key;
@@ -260,6 +260,7 @@ skb_flow_dissect_ct(const struct sk_buff *skb,
        if (!ct) {
                key->ct_state = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
                                TCA_FLOWER_KEY_CT_FLAGS_INVALID;
+               key->ct_zone = zone;
                return;
        }
 
index 72ba027c34cfea6f38a9e78927c35048ebfe7a7f..dda12fbd177ba6ad2798ea2b07733fa3f03441ab 100644 (file)
@@ -763,11 +763,10 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
 
        ASSERT_RTNL();
 
-       n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
+       n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
        if (!n)
                goto out;
 
-       n->protocol = 0;
        write_pnet(&n->net, net);
        memcpy(n->key, pkey, key_len);
        n->dev = dev;
index ba2f38246f07e5ba5a4f97922b4be33bdb8ad6d6..909db87d7383d25232e1f8ea7b7426b291a5d1c8 100644 (file)
@@ -832,7 +832,7 @@ void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
               ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);
 
        if (dev)
-               printk("%sdev name=%s feat=0x%pNF\n",
+               printk("%sdev name=%s feat=%pNF\n",
                       level, dev->name, &dev->features);
        if (sk)
                printk("%ssk family=%hu type=%u proto=%u\n",
index 1ae52ac943f626c2845ad892d83550b5c0afbd65..8eb671c827f90f1f3d2514163fc82998c9906cb6 100644 (file)
@@ -1124,6 +1124,8 @@ void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
 
 void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
 {
+       psock_set_prog(&psock->progs.stream_parser, NULL);
+
        if (!psock->saved_data_ready)
                return;
 
@@ -1212,6 +1214,9 @@ void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
 
 void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
 {
+       psock_set_prog(&psock->progs.stream_verdict, NULL);
+       psock_set_prog(&psock->progs.skb_verdict, NULL);
+
        if (!psock->saved_data_ready)
                return;
 
index f39ef79ced679dd19b2b3746363a7b9e6ef7b047..4ca4b11f4e5ffde88c1a8d7edb1ad21cf24e726d 100644 (file)
@@ -167,8 +167,11 @@ static void sock_map_del_link(struct sock *sk,
                write_lock_bh(&sk->sk_callback_lock);
                if (strp_stop)
                        sk_psock_stop_strp(sk, psock);
-               else
+               if (verdict_stop)
                        sk_psock_stop_verdict(sk, psock);
+
+               if (psock->psock_update_sk_prot)
+                       psock->psock_update_sk_prot(sk, psock, false);
                write_unlock_bh(&sk->sk_callback_lock);
        }
 }
@@ -282,6 +285,12 @@ static int sock_map_link(struct bpf_map *map, struct sock *sk)
 
        if (msg_parser)
                psock_set_prog(&psock->progs.msg_parser, msg_parser);
+       if (stream_parser)
+               psock_set_prog(&psock->progs.stream_parser, stream_parser);
+       if (stream_verdict)
+               psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
+       if (skb_verdict)
+               psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
 
        ret = sock_map_init_proto(sk, psock);
        if (ret < 0)
@@ -292,14 +301,10 @@ static int sock_map_link(struct bpf_map *map, struct sock *sk)
                ret = sk_psock_init_strp(sk, psock);
                if (ret)
                        goto out_unlock_drop;
-               psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
-               psock_set_prog(&psock->progs.stream_parser, stream_parser);
                sk_psock_start_strp(sk, psock);
        } else if (!stream_parser && stream_verdict && !psock->saved_data_ready) {
-               psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
                sk_psock_start_verdict(sk,psock);
        } else if (!stream_verdict && skb_verdict && !psock->saved_data_ready) {
-               psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
                sk_psock_start_verdict(sk, psock);
        }
        write_unlock_bh(&sk->sk_callback_lock);
index de1c849a0a70558c87886c6d0006cd8d0f38c310..4ed74d509d6ac75071b7958ea29a6318c4f41069 100644 (file)
@@ -47,9 +47,13 @@ static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
        void *injection;
        __be32 *prefix;
        u32 rew_op = 0;
+       u64 qos_class;
 
        ocelot_xmit_get_vlan_info(skb, dp, &vlan_tci, &tag_type);
 
+       qos_class = netdev_get_num_tc(netdev) ?
+                   netdev_get_prio_tc_map(netdev, skb->priority) : skb->priority;
+
        injection = skb_push(skb, OCELOT_TAG_LEN);
        prefix = skb_push(skb, OCELOT_SHORT_PREFIX_LEN);
 
@@ -57,7 +61,7 @@ static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
        memset(injection, 0, OCELOT_TAG_LEN);
        ocelot_ifh_set_bypass(injection, 1);
        ocelot_ifh_set_src(injection, ds->num_ports);
-       ocelot_ifh_set_qos_class(injection, skb->priority);
+       ocelot_ifh_set_qos_class(injection, qos_class);
        ocelot_ifh_set_vlan_tci(injection, vlan_tci);
        ocelot_ifh_set_tag_type(injection, tag_type);
 
index 38b44c0291b11af2c6d59c532c0c4ad4bba2ce27..96f4180aabd2e58f14d2a6e45780ee853b36fe15 100644 (file)
@@ -40,7 +40,8 @@ int ethnl_ops_begin(struct net_device *dev)
        if (dev->dev.parent)
                pm_runtime_get_sync(dev->dev.parent);
 
-       if (!netif_device_present(dev)) {
+       if (!netif_device_present(dev) ||
+           dev->reg_state == NETREG_UNREGISTERING) {
                ret = -ENODEV;
                goto err;
        }
index 0189e3cd4a7df2dc2ea7121182ee290e0164df90..6b5956500436187d5e9b801ebb6f8f52ba0db090 100644 (file)
@@ -154,7 +154,7 @@ void inet_sock_destruct(struct sock *sk)
 
        kfree(rcu_dereference_protected(inet->inet_opt, 1));
        dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
-       dst_release(sk->sk_rx_dst);
+       dst_release(rcu_dereference_protected(sk->sk_rx_dst, 1));
        sk_refcnt_debug_dec(sk);
 }
 EXPORT_SYMBOL(inet_sock_destruct);
index 9fe13e4f5d08a5cf9cd9ff15033b9f6e0dc9e492..4d61ddd8a0ecfc4cc47b4802eb5a573beb84ee44 100644 (file)
@@ -1582,7 +1582,7 @@ static int __net_init fib_net_init(struct net *net)
        int error;
 
 #ifdef CONFIG_IP_ROUTE_CLASSID
-       net->ipv4.fib_num_tclassid_users = 0;
+       atomic_set(&net->ipv4.fib_num_tclassid_users, 0);
 #endif
        error = ip_fib_net_init(net);
        if (error < 0)
index ce54a30c2ef1e8e79c8922be5eee35055fa51178..d279cb8ac1584487885f66819634b421c01bf819 100644 (file)
@@ -141,6 +141,7 @@ INDIRECT_CALLABLE_SCOPE int fib4_rule_action(struct fib_rule *rule,
 }
 
 INDIRECT_CALLABLE_SCOPE bool fib4_rule_suppress(struct fib_rule *rule,
+                                               int flags,
                                                struct fib_lookup_arg *arg)
 {
        struct fib_result *result = (struct fib_result *) arg->result;
@@ -263,7 +264,7 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
        if (tb[FRA_FLOW]) {
                rule4->tclassid = nla_get_u32(tb[FRA_FLOW]);
                if (rule4->tclassid)
-                       net->ipv4.fib_num_tclassid_users++;
+                       atomic_inc(&net->ipv4.fib_num_tclassid_users);
        }
 #endif
 
@@ -295,7 +296,7 @@ static int fib4_rule_delete(struct fib_rule *rule)
 
 #ifdef CONFIG_IP_ROUTE_CLASSID
        if (((struct fib4_rule *)rule)->tclassid)
-               net->ipv4.fib_num_tclassid_users--;
+               atomic_dec(&net->ipv4.fib_num_tclassid_users);
 #endif
        net->ipv4.fib_has_custom_rules = true;
 
index 3364cb9c67e018fea2b2e370046de5252581b996..fde7797b580694bb3924c5c6e9560cf04fd67387 100644 (file)
@@ -220,7 +220,7 @@ void fib_nh_release(struct net *net, struct fib_nh *fib_nh)
 {
 #ifdef CONFIG_IP_ROUTE_CLASSID
        if (fib_nh->nh_tclassid)
-               net->ipv4.fib_num_tclassid_users--;
+               atomic_dec(&net->ipv4.fib_num_tclassid_users);
 #endif
        fib_nh_common_release(&fib_nh->nh_common);
 }
@@ -632,7 +632,7 @@ int fib_nh_init(struct net *net, struct fib_nh *nh,
 #ifdef CONFIG_IP_ROUTE_CLASSID
        nh->nh_tclassid = cfg->fc_flow;
        if (nh->nh_tclassid)
-               net->ipv4.fib_num_tclassid_users++;
+               atomic_inc(&net->ipv4.fib_num_tclassid_users);
 #endif
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
        nh->fib_nh_weight = nh_weight;
index f7fea3a7c5e64b92ca9c6b56293628923649e58c..62a67fdc344cd21505a84c905c1e2c05cc0ff866 100644 (file)
@@ -721,7 +721,7 @@ static struct request_sock *inet_reqsk_clone(struct request_sock *req,
 
        sk_node_init(&nreq_sk->sk_node);
        nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
        nreq_sk->sk_rx_queue_mapping = req_sk->sk_rx_queue_mapping;
 #endif
        nreq_sk->sk_incoming_cpu = req_sk->sk_incoming_cpu;
index c8fa6e7f7d1241691e048c868878b388e04b6f80..581b5b2d72a5bcc9c1ddf2b3664ef9ff669f63a5 100644 (file)
@@ -261,6 +261,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
        r->idiag_state = sk->sk_state;
        r->idiag_timer = 0;
        r->idiag_retrans = 0;
+       r->idiag_expires = 0;
 
        if (inet_diag_msg_attrs_fill(sk, skb, r, ext,
                                     sk_user_ns(NETLINK_CB(cb->skb).sk),
@@ -314,9 +315,6 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
                r->idiag_retrans = icsk->icsk_probes_out;
                r->idiag_expires =
                        jiffies_delta_to_msecs(sk->sk_timer.expires - jiffies);
-       } else {
-               r->idiag_timer = 0;
-               r->idiag_expires = 0;
        }
 
        if ((ext & (1 << (INET_DIAG_INFO - 1))) && handler->idiag_info_size) {
index bbb3d39c69afc2d5a42c6ace8d473657861da61f..2bb28bfd83bf621b5d9f3fb7ce5695195697b43a 100644 (file)
@@ -3012,8 +3012,7 @@ int tcp_disconnect(struct sock *sk, int flags)
        icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
        memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
        __sk_dst_reset(sk);
-       dst_release(sk->sk_rx_dst);
-       sk->sk_rx_dst = NULL;
+       dst_release(xchg((__force struct dst_entry **)&sk->sk_rx_dst, NULL));
        tcp_saved_syn_free(tp);
        tp->compressed_ack = 0;
        tp->segs_in = 0;
index 246ab7b5e857eb9e802c4805075e89c98cf00636..0ce46849ec3d4595699dd54229919b2d66b70257 100644 (file)
@@ -5787,7 +5787,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
        trace_tcp_probe(sk, skb);
 
        tcp_mstamp_refresh(tp);
-       if (unlikely(!sk->sk_rx_dst))
+       if (unlikely(!rcu_access_pointer(sk->sk_rx_dst)))
                inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
        /*
         *      Header prediction.
index 13d868c43284584ee0c58ddfd411bb52c8b0c830..084df223b5dff8089a615a4a8d392b620fc0a28a 100644 (file)
@@ -1701,7 +1701,10 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
        struct sock *rsk;
 
        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
-               struct dst_entry *dst = sk->sk_rx_dst;
+               struct dst_entry *dst;
+
+               dst = rcu_dereference_protected(sk->sk_rx_dst,
+                                               lockdep_sock_is_held(sk));
 
                sock_rps_save_rxhash(sk, skb);
                sk_mark_napi_id(sk, skb);
@@ -1709,8 +1712,8 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
                        if (sk->sk_rx_dst_ifindex != skb->skb_iif ||
                            !INDIRECT_CALL_1(dst->ops->check, ipv4_dst_check,
                                             dst, 0)) {
+                               RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
                                dst_release(dst);
-                               sk->sk_rx_dst = NULL;
                        }
                }
                tcp_rcv_established(sk, skb);
@@ -1786,7 +1789,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)
                skb->sk = sk;
                skb->destructor = sock_edemux;
                if (sk_fullsock(sk)) {
-                       struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
+                       struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);
 
                        if (dst)
                                dst = dst_check(dst, 0);
@@ -2201,7 +2204,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
        struct dst_entry *dst = skb_dst(skb);
 
        if (dst && dst_hold_safe(dst)) {
-               sk->sk_rx_dst = dst;
+               rcu_assign_pointer(sk->sk_rx_dst, dst);
                sk->sk_rx_dst_ifindex = skb->skb_iif;
        }
 }
index cf913a66df17023bbab8b42e313ce646858c268a..7c2d3ac2363acebcfd92d7a4886c052c8aa120b9 100644 (file)
@@ -829,8 +829,8 @@ int tcp_child_process(struct sock *parent, struct sock *child,
        int ret = 0;
        int state = child->sk_state;
 
-       /* record NAPI ID of child */
-       sk_mark_napi_id(child, skb);
+       /* record sk_napi_id and sk_rx_queue_mapping of child. */
+       sk_mark_napi_id_set(child, skb);
 
        tcp_segs_in(tcp_sk(child), skb);
        if (!sock_owned_by_user(child)) {
index 8bcecdd6aeda8f6af5ba541138082fdc1ef59c93..15c6b450b8dba44d5f344a554eea6b991c1ea5f1 100644 (file)
@@ -916,7 +916,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
                        kfree_skb(skb);
                        return -EINVAL;
                }
-               if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
+               if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
                        kfree_skb(skb);
                        return -EINVAL;
                }
@@ -2250,7 +2250,7 @@ bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
        struct dst_entry *old;
 
        if (dst_hold_safe(dst)) {
-               old = xchg(&sk->sk_rx_dst, dst);
+               old = xchg((__force struct dst_entry **)&sk->sk_rx_dst, dst);
                dst_release(old);
                return old != dst;
        }
@@ -2440,7 +2440,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                struct dst_entry *dst = skb_dst(skb);
                int ret;
 
-               if (unlikely(sk->sk_rx_dst != dst))
+               if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
                        udp_sk_rx_dst_set(sk, dst);
 
                ret = udp_unicast_rcv_skb(sk, skb, uh);
@@ -2599,7 +2599,7 @@ int udp_v4_early_demux(struct sk_buff *skb)
 
        skb->sk = sk;
        skb->destructor = sock_efree;
-       dst = READ_ONCE(sk->sk_rx_dst);
+       dst = rcu_dereference(sk->sk_rx_dst);
 
        if (dst)
                dst = dst_check(dst, 0);
index 40f3e4f9f33a238ae9d748d278aea37769a83f57..dcedfe29d9d932a3a85491021557005228860ffe 100644 (file)
@@ -267,6 +267,7 @@ INDIRECT_CALLABLE_SCOPE int fib6_rule_action(struct fib_rule *rule,
 }
 
 INDIRECT_CALLABLE_SCOPE bool fib6_rule_suppress(struct fib_rule *rule,
+                                               int flags,
                                                struct fib_lookup_arg *arg)
 {
        struct fib6_result *res = arg->result;
@@ -294,8 +295,7 @@ INDIRECT_CALLABLE_SCOPE bool fib6_rule_suppress(struct fib_rule *rule,
        return false;
 
 suppress_route:
-       if (!(arg->flags & FIB_LOOKUP_NOREF))
-               ip6_rt_put(rt);
+       ip6_rt_put_flags(rt, flags);
        return true;
 }
 
index 1b9827ff8ccf48e61e233e39d671aa67c8fff0ab..1cbd49d5788dd4cfb1b3224bed49df90f75b5d20 100644 (file)
@@ -248,9 +248,9 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
                 * memcmp() alone below is sufficient, right?
                 */
                 if ((first_word & htonl(0xF00FFFFF)) ||
-                   !ipv6_addr_equal(&iph->saddr, &iph2->saddr) ||
-                   !ipv6_addr_equal(&iph->daddr, &iph2->daddr) ||
-                   *(u16 *)&iph->nexthdr != *(u16 *)&iph2->nexthdr) {
+                    !ipv6_addr_equal(&iph->saddr, &iph2->saddr) ||
+                    !ipv6_addr_equal(&iph->daddr, &iph2->daddr) ||
+                    *(u16 *)&iph->nexthdr != *(u16 *)&iph2->nexthdr) {
 not_same_flow:
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
index 3adc5d9211ad695bb1cdf66405be3dfe1a539c76..d64855010948db23eb5ebe5ce0bc4dfff2634afb 100644 (file)
@@ -161,6 +161,14 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
                hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb));
 
                memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+
+               /* the control block has been erased, so we have to set the
+                * iif once again.
+                * We read the receiving interface index directly from the
+                * skb->skb_iif as it is done in the IPv4 receiving path (i.e.:
+                * ip_rcv_core(...)).
+                */
+               IP6CB(skb)->iif = skb->skb_iif;
        }
 
        hdr->nexthdr = NEXTHDR_ROUTING;
index 1b57ee36d6682e04085aa271c6c5c09e6e3a7b7e..8a3618a30632a8fab997edff82065a194dcaac1b 100644 (file)
@@ -1933,7 +1933,6 @@ static int __net_init sit_init_net(struct net *net)
        return 0;
 
 err_reg_dev:
-       ipip6_dev_free(sitn->fb_tunnel_dev);
        free_netdev(sitn->fb_tunnel_dev);
 err_alloc_dev:
        return err;
index 551fce49841d7f53a111b0435855634cece2b40a..680e6481b9672040ccb41fd08ab80166575bef50 100644 (file)
@@ -107,7 +107,7 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
        if (dst && dst_hold_safe(dst)) {
                const struct rt6_info *rt = (const struct rt6_info *)dst;
 
-               sk->sk_rx_dst = dst;
+               rcu_assign_pointer(sk->sk_rx_dst, dst);
                sk->sk_rx_dst_ifindex = skb->skb_iif;
                sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
        }
@@ -1505,7 +1505,10 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
                opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
 
        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
-               struct dst_entry *dst = sk->sk_rx_dst;
+               struct dst_entry *dst;
+
+               dst = rcu_dereference_protected(sk->sk_rx_dst,
+                                               lockdep_sock_is_held(sk));
 
                sock_rps_save_rxhash(sk, skb);
                sk_mark_napi_id(sk, skb);
@@ -1513,8 +1516,8 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
                        if (sk->sk_rx_dst_ifindex != skb->skb_iif ||
                            INDIRECT_CALL_1(dst->ops->check, ip6_dst_check,
                                            dst, sk->sk_rx_dst_cookie) == NULL) {
+                               RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
                                dst_release(dst);
-                               sk->sk_rx_dst = NULL;
                        }
                }
 
@@ -1874,7 +1877,7 @@ INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb)
                skb->sk = sk;
                skb->destructor = sock_edemux;
                if (sk_fullsock(sk)) {
-                       struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
+                       struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);
 
                        if (dst)
                                dst = dst_check(dst, sk->sk_rx_dst_cookie);
index e43b31d25fb61c7875f3bb8a93eb74da244d912a..a2caca6ccf114546f9e4ea854ad67208b2f3873e 100644 (file)
@@ -956,7 +956,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                struct dst_entry *dst = skb_dst(skb);
                int ret;
 
-               if (unlikely(sk->sk_rx_dst != dst))
+               if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
                        udp6_sk_rx_dst_set(sk, dst);
 
                if (!uh->check && !udp_sk(sk)->no_check6_rx) {
@@ -1070,7 +1070,7 @@ INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb)
 
        skb->sk = sk;
        skb->destructor = sock_efree;
-       dst = READ_ONCE(sk->sk_rx_dst);
+       dst = rcu_dereference(sk->sk_rx_dst);
 
        if (dst)
                dst = dst_check(dst, sk->sk_rx_dst_cookie);
index 470ff0ce3dc7634a52c738a87bff3d23aae7b520..7d2925bb966e03e164298581cab09d3a6eb802d9 100644 (file)
@@ -9,7 +9,7 @@
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
  * Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 
 /**
@@ -191,7 +191,8 @@ static void ieee80211_add_addbaext(struct ieee80211_sub_if_data *sdata,
        sband = ieee80211_get_sband(sdata);
        if (!sband)
                return;
-       he_cap = ieee80211_get_he_iftype_cap(sband, sdata->vif.type);
+       he_cap = ieee80211_get_he_iftype_cap(sband,
+                                            ieee80211_vif_type_p2p(&sdata->vif));
        if (!he_cap)
                return;
 
index 430a585875388f841001c3dfc6a14453179c7618..74a878f213d3ef352d0d934f288664ed3a1a263f 100644 (file)
@@ -9,7 +9,7 @@
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
  * Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2020 Intel Corporation
+ * Copyright (C) 2018 - 2021 Intel Corporation
  */
 
 #include <linux/ieee80211.h>
@@ -106,7 +106,7 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
        mgmt->u.action.u.addba_req.start_seq_num =
                                        cpu_to_le16(start_seq_num << 4);
 
-       ieee80211_tx_skb(sdata, skb);
+       ieee80211_tx_skb_tid(sdata, skb, tid);
 }
 
 void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
@@ -213,6 +213,8 @@ ieee80211_agg_start_txq(struct sta_info *sta, int tid, bool enable)
        struct ieee80211_txq *txq = sta->sta.txq[tid];
        struct txq_info *txqi;
 
+       lockdep_assert_held(&sta->ampdu_mlme.mtx);
+
        if (!txq)
                return;
 
@@ -290,7 +292,6 @@ static void ieee80211_remove_tid_tx(struct sta_info *sta, int tid)
        ieee80211_assign_tid_tx(sta, tid, NULL);
 
        ieee80211_agg_splice_finish(sta->sdata, tid);
-       ieee80211_agg_start_txq(sta, tid, false);
 
        kfree_rcu(tid_tx, rcu_head);
 }
@@ -480,8 +481,7 @@ static void ieee80211_send_addba_with_timeout(struct sta_info *sta,
 
        /* send AddBA request */
        ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
-                                    tid_tx->dialog_token,
-                                    sta->tid_seq[tid] >> 4,
+                                    tid_tx->dialog_token, tid_tx->ssn,
                                     buf_size, tid_tx->timeout);
 
        WARN_ON(test_and_set_bit(HT_AGG_STATE_SENT_ADDBA, &tid_tx->state));
@@ -523,6 +523,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
 
        params.ssn = sta->tid_seq[tid] >> 4;
        ret = drv_ampdu_action(local, sdata, &params);
+       tid_tx->ssn = params.ssn;
        if (ret == IEEE80211_AMPDU_TX_START_DELAY_ADDBA) {
                return;
        } else if (ret == IEEE80211_AMPDU_TX_START_IMMEDIATE) {
@@ -889,6 +890,7 @@ void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        bool send_delba = false;
+       bool start_txq = false;
 
        ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n",
               sta->sta.addr, tid);
@@ -906,10 +908,14 @@ void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
                send_delba = true;
 
        ieee80211_remove_tid_tx(sta, tid);
+       start_txq = true;
 
  unlock_sta:
        spin_unlock_bh(&sta->lock);
 
+       if (start_txq)
+               ieee80211_agg_start_txq(sta, tid, false);
+
        if (send_delba)
                ieee80211_send_delba(sdata, sta->sta.addr, tid,
                        WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
index bd3d3195097faf2726614a6c32adc567b57455d3..2d0dd69f9753ced49e03a40628e4c88550a64208 100644 (file)
@@ -1264,7 +1264,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
        return 0;
 
 error:
+       mutex_lock(&local->mtx);
        ieee80211_vif_release_channel(sdata);
+       mutex_unlock(&local->mtx);
+
        return err;
 }
 
index cd3731cbf6c6814dbc266b404774ae733bd05d8e..c336267f4599c430812d67ba1fb97cd5404a7481 100644 (file)
@@ -1219,8 +1219,11 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
 {
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif);
 
-       if (local->in_reconfig)
+       /* In reconfig don't transmit now, but mark for waking later */
+       if (local->in_reconfig) {
+               set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txq->flags);
                return;
+       }
 
        if (!check_sdata_in_driver(sdata))
                return;
index 54ab0e1ef6ca5828ac56596bce0bb096f31311e6..37f7d975f3dac66bb5a8056203c7e3a58aeff57c 100644 (file)
@@ -2452,11 +2452,18 @@ static void ieee80211_sta_tx_wmm_ac_notify(struct ieee80211_sub_if_data *sdata,
                                           u16 tx_time)
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-       u16 tid = ieee80211_get_tid(hdr);
-       int ac = ieee80211_ac_from_tid(tid);
-       struct ieee80211_sta_tx_tspec *tx_tspec = &ifmgd->tx_tspec[ac];
+       u16 tid;
+       int ac;
+       struct ieee80211_sta_tx_tspec *tx_tspec;
        unsigned long now = jiffies;
 
+       if (!ieee80211_is_data_qos(hdr->frame_control))
+               return;
+
+       tid = ieee80211_get_tid(hdr);
+       ac = ieee80211_ac_from_tid(tid);
+       tx_tspec = &ifmgd->tx_tspec[ac];
+
        if (likely(!tx_tspec->admitted_time))
                return;
 
index 9541a4c30aca7c071d254c987839f723728312eb..0544563ede522493bba3053650932c81110dff85 100644 (file)
@@ -2944,6 +2944,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
        if (!fwd_skb)
                goto out;
 
+       fwd_skb->dev = sdata->dev;
        fwd_hdr =  (struct ieee80211_hdr *) fwd_skb->data;
        fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY);
        info = IEEE80211_SKB_CB(fwd_skb);
index 51b49f0d3ad48ac1a5466d58618286eaeeee7dae..537535a88990cf86877713b4cf46d086f6cc2fc0 100644 (file)
@@ -644,13 +644,13 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
        /* check if STA exists already */
        if (sta_info_get_bss(sdata, sta->sta.addr)) {
                err = -EEXIST;
-               goto out_err;
+               goto out_cleanup;
        }
 
        sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL);
        if (!sinfo) {
                err = -ENOMEM;
-               goto out_err;
+               goto out_cleanup;
        }
 
        local->num_sta++;
@@ -667,6 +667,15 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
 
        list_add_tail_rcu(&sta->list, &local->sta_list);
 
+       /* update channel context before notifying the driver about state
+        * change, this enables driver using the updated channel context right away.
+        */
+       if (sta->sta_state >= IEEE80211_STA_ASSOC) {
+               ieee80211_recalc_min_chandef(sta->sdata);
+               if (!sta->sta.support_p2p_ps)
+                       ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
+       }
+
        /* notify driver */
        err = sta_info_insert_drv_state(local, sdata, sta);
        if (err)
@@ -674,12 +683,6 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
 
        set_sta_flag(sta, WLAN_STA_INSERTED);
 
-       if (sta->sta_state >= IEEE80211_STA_ASSOC) {
-               ieee80211_recalc_min_chandef(sta->sdata);
-               if (!sta->sta.support_p2p_ps)
-                       ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
-       }
-
        /* accept BA sessions now */
        clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
 
@@ -706,8 +709,8 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
  out_drop_sta:
        local->num_sta--;
        synchronize_net();
+ out_cleanup:
        cleanup_single_sta(sta);
- out_err:
        mutex_unlock(&local->sta_mtx);
        kfree(sinfo);
        rcu_read_lock();
index ba279678200843aeb4f5ccde641271a901d37765..379fd367197f9e035504536a34ffc115b00d5b63 100644 (file)
@@ -176,6 +176,7 @@ struct sta_info;
  * @failed_bar_ssn: ssn of the last failed BAR tx attempt
  * @bar_pending: BAR needs to be re-sent
  * @amsdu: support A-MSDU withing A-MDPU
+ * @ssn: starting sequence number of the session
  *
  * This structure's lifetime is managed by RCU, assignments to
  * the array holding it must hold the aggregation mutex.
@@ -199,6 +200,7 @@ struct tid_ampdu_tx {
        u8 stop_initiator;
        bool tx_stop;
        u16 buf_size;
+       u16 ssn;
 
        u16 failed_bar_ssn;
        bool bar_pending;
index 278945e3e08acc846782411b02307c09705d839b..86a54df3aabdd2e697bcbb206815ba7cf382b180 100644 (file)
@@ -1822,15 +1822,15 @@ static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx)
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
        ieee80211_tx_result res = TX_CONTINUE;
 
+       if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
+               CALL_TXH(ieee80211_tx_h_rate_ctrl);
+
        if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) {
                __skb_queue_tail(&tx->skbs, tx->skb);
                tx->skb = NULL;
                goto txh_done;
        }
 
-       if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
-               CALL_TXH(ieee80211_tx_h_rate_ctrl);
-
        CALL_TXH(ieee80211_tx_h_michael_mic_add);
        CALL_TXH(ieee80211_tx_h_sequence);
        CALL_TXH(ieee80211_tx_h_fragment);
@@ -4191,11 +4191,11 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
 
        ieee80211_aggr_check(sdata, sta, skb);
 
+       sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift);
+
        if (sta) {
                struct ieee80211_fast_tx *fast_tx;
 
-               sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift);
-
                fast_tx = rcu_dereference(sta->fast_tx);
 
                if (fast_tx &&
index 43df2f0c5db9c52b2484196946d2008b22ff3cf0..0e4e1956bcea1f874542f27862b27116d9441c77 100644 (file)
@@ -943,7 +943,12 @@ static void ieee80211_parse_extension_element(u32 *crc,
                                              struct ieee802_11_elems *elems)
 {
        const void *data = elem->data + 1;
-       u8 len = elem->datalen - 1;
+       u8 len;
+
+       if (!elem->datalen)
+               return;
+
+       len = elem->datalen - 1;
 
        switch (elem->data[0]) {
        case WLAN_EID_EXT_HE_MU_EDCA:
@@ -2063,7 +2068,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
                chandef.chan = chan;
 
        skb = ieee80211_probereq_get(&local->hw, src, ssid, ssid_len,
-                                    100 + ie_len);
+                                    local->scan_ies_len + ie_len);
        if (!skb)
                return NULL;
 
@@ -2646,6 +2651,13 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                mutex_unlock(&local->sta_mtx);
        }
 
+       /*
+        * If this is for hw restart things are still running.
+        * We may want to change that later, however.
+        */
+       if (local->open_count && (!suspended || reconfig_due_to_wowlan))
+               drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART);
+
        if (local->in_reconfig) {
                local->in_reconfig = false;
                barrier();
@@ -2664,13 +2676,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                                        IEEE80211_QUEUE_STOP_REASON_SUSPEND,
                                        false);
 
-       /*
-        * If this is for hw restart things are still running.
-        * We may want to change that later, however.
-        */
-       if (local->open_count && (!suspended || reconfig_due_to_wowlan))
-               drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART);
-
        if (!suspended)
                return 0;
 
index 46c44823edb7dd03a10804f737ae281ebb04b1ba..cdf09c2a7007a764f61c0cbc6ee977afe9bcd8e4 100644 (file)
@@ -952,7 +952,7 @@ static int mctp_route_add(struct mctp_dev *mdev, mctp_eid_t daddr_start,
 }
 
 static int mctp_route_remove(struct mctp_dev *mdev, mctp_eid_t daddr_start,
-                            unsigned int daddr_extent)
+                            unsigned int daddr_extent, unsigned char type)
 {
        struct net *net = dev_net(mdev->dev);
        struct mctp_route *rt, *tmp;
@@ -969,7 +969,8 @@ static int mctp_route_remove(struct mctp_dev *mdev, mctp_eid_t daddr_start,
 
        list_for_each_entry_safe(rt, tmp, &net->mctp.routes, list) {
                if (rt->dev == mdev &&
-                   rt->min == daddr_start && rt->max == daddr_end) {
+                   rt->min == daddr_start && rt->max == daddr_end &&
+                   rt->type == type) {
                        list_del_rcu(&rt->list);
                        /* TODO: immediate RTM_DELROUTE */
                        mctp_route_release(rt);
@@ -987,7 +988,7 @@ int mctp_route_add_local(struct mctp_dev *mdev, mctp_eid_t addr)
 
 int mctp_route_remove_local(struct mctp_dev *mdev, mctp_eid_t addr)
 {
-       return mctp_route_remove(mdev, addr, 0);
+       return mctp_route_remove(mdev, addr, 0, RTN_LOCAL);
 }
 
 /* removes all entries for a given device */
@@ -1195,7 +1196,7 @@ static int mctp_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
        if (rtm->rtm_type != RTN_UNICAST)
                return -EINVAL;
 
-       rc = mctp_route_remove(mdev, daddr_start, rtm->rtm_dst_len);
+       rc = mctp_route_remove(mdev, daddr_start, rtm->rtm_dst_len, RTN_UNICAST);
        return rc;
 }
 
index cc6b8803aa9d0a7bec28f01daef8d458bd90fe46..7b7918702592a738452ec34159f83f57c33b5f20 100644 (file)
@@ -12,7 +12,7 @@
 static netdev_tx_t mctp_test_dev_tx(struct sk_buff *skb,
                                    struct net_device *ndev)
 {
-       kfree(skb);
+       kfree_skb(skb);
        return NETDEV_TX_OK;
 }
 
index ffeb2df8be7ae8da3d00b71bfdc90694db7f7f29..0c7bde1c14a6a879a3263fae3390720c796a3610 100644 (file)
@@ -409,7 +409,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
                goto err;
 
        /* Find the output device */
-       out_dev = rcu_dereference(nh->nh_dev);
+       out_dev = nh->nh_dev;
        if (!mpls_output_possible(out_dev))
                goto tx_err;
 
@@ -698,7 +698,7 @@ static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt,
            (dev->addr_len != nh->nh_via_alen))
                goto errout;
 
-       RCU_INIT_POINTER(nh->nh_dev, dev);
+       nh->nh_dev = dev;
 
        if (!(dev->flags & IFF_UP)) {
                nh->nh_flags |= RTNH_F_DEAD;
@@ -1491,26 +1491,53 @@ static void mpls_dev_destroy_rcu(struct rcu_head *head)
        kfree(mdev);
 }
 
-static void mpls_ifdown(struct net_device *dev, int event)
+static int mpls_ifdown(struct net_device *dev, int event)
 {
        struct mpls_route __rcu **platform_label;
        struct net *net = dev_net(dev);
-       u8 alive, deleted;
        unsigned index;
 
        platform_label = rtnl_dereference(net->mpls.platform_label);
        for (index = 0; index < net->mpls.platform_labels; index++) {
                struct mpls_route *rt = rtnl_dereference(platform_label[index]);
+               bool nh_del = false;
+               u8 alive = 0;
 
                if (!rt)
                        continue;
 
-               alive = 0;
-               deleted = 0;
+               if (event == NETDEV_UNREGISTER) {
+                       u8 deleted = 0;
+
+                       for_nexthops(rt) {
+                               if (!nh->nh_dev || nh->nh_dev == dev)
+                                       deleted++;
+                               if (nh->nh_dev == dev)
+                                       nh_del = true;
+                       } endfor_nexthops(rt);
+
+                       /* if there are no more nexthops, delete the route */
+                       if (deleted == rt->rt_nhn) {
+                               mpls_route_update(net, index, NULL, NULL);
+                               continue;
+                       }
+
+                       if (nh_del) {
+                               size_t size = sizeof(*rt) + rt->rt_nhn *
+                                       rt->rt_nh_size;
+                               struct mpls_route *orig = rt;
+
+                               rt = kmalloc(size, GFP_KERNEL);
+                               if (!rt)
+                                       return -ENOMEM;
+                               memcpy(rt, orig, size);
+                       }
+               }
+
                change_nexthops(rt) {
                        unsigned int nh_flags = nh->nh_flags;
 
-                       if (rtnl_dereference(nh->nh_dev) != dev)
+                       if (nh->nh_dev != dev)
                                goto next;
 
                        switch (event) {
@@ -1523,23 +1550,22 @@ static void mpls_ifdown(struct net_device *dev, int event)
                                break;
                        }
                        if (event == NETDEV_UNREGISTER)
-                               RCU_INIT_POINTER(nh->nh_dev, NULL);
+                               nh->nh_dev = NULL;
 
                        if (nh->nh_flags != nh_flags)
                                WRITE_ONCE(nh->nh_flags, nh_flags);
 next:
                        if (!(nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)))
                                alive++;
-                       if (!rtnl_dereference(nh->nh_dev))
-                               deleted++;
                } endfor_nexthops(rt);
 
                WRITE_ONCE(rt->rt_nhn_alive, alive);
 
-               /* if there are no more nexthops, delete the route */
-               if (event == NETDEV_UNREGISTER && deleted == rt->rt_nhn)
-                       mpls_route_update(net, index, NULL, NULL);
+               if (nh_del)
+                       mpls_route_update(net, index, rt, NULL);
        }
+
+       return 0;
 }
 
 static void mpls_ifup(struct net_device *dev, unsigned int flags)
@@ -1559,14 +1585,12 @@ static void mpls_ifup(struct net_device *dev, unsigned int flags)
                alive = 0;
                change_nexthops(rt) {
                        unsigned int nh_flags = nh->nh_flags;
-                       struct net_device *nh_dev =
-                               rtnl_dereference(nh->nh_dev);
 
                        if (!(nh_flags & flags)) {
                                alive++;
                                continue;
                        }
-                       if (nh_dev != dev)
+                       if (nh->nh_dev != dev)
                                continue;
                        alive++;
                        nh_flags &= ~flags;
@@ -1597,8 +1621,12 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
                return NOTIFY_OK;
 
        switch (event) {
+               int err;
+
        case NETDEV_DOWN:
-               mpls_ifdown(dev, event);
+               err = mpls_ifdown(dev, event);
+               if (err)
+                       return notifier_from_errno(err);
                break;
        case NETDEV_UP:
                flags = dev_get_flags(dev);
@@ -1609,13 +1637,18 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
                break;
        case NETDEV_CHANGE:
                flags = dev_get_flags(dev);
-               if (flags & (IFF_RUNNING | IFF_LOWER_UP))
+               if (flags & (IFF_RUNNING | IFF_LOWER_UP)) {
                        mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
-               else
-                       mpls_ifdown(dev, event);
+               } else {
+                       err = mpls_ifdown(dev, event);
+                       if (err)
+                               return notifier_from_errno(err);
+               }
                break;
        case NETDEV_UNREGISTER:
-               mpls_ifdown(dev, event);
+               err = mpls_ifdown(dev, event);
+               if (err)
+                       return notifier_from_errno(err);
                mdev = mpls_dev_get(dev);
                if (mdev) {
                        mpls_dev_sysctl_unregister(dev, mdev);
@@ -1626,8 +1659,6 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
        case NETDEV_CHANGENAME:
                mdev = mpls_dev_get(dev);
                if (mdev) {
-                       int err;
-
                        mpls_dev_sysctl_unregister(dev, mdev);
                        err = mpls_dev_sysctl_register(dev, mdev);
                        if (err)
@@ -1994,7 +2025,7 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
                    nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
                                nh->nh_via_alen))
                        goto nla_put_failure;
-               dev = rtnl_dereference(nh->nh_dev);
+               dev = nh->nh_dev;
                if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
                        goto nla_put_failure;
                if (nh->nh_flags & RTNH_F_LINKDOWN)
@@ -2012,7 +2043,7 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
                        goto nla_put_failure;
 
                for_nexthops(rt) {
-                       dev = rtnl_dereference(nh->nh_dev);
+                       dev = nh->nh_dev;
                        if (!dev)
                                continue;
 
@@ -2123,18 +2154,14 @@ static int mpls_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
 static bool mpls_rt_uses_dev(struct mpls_route *rt,
                             const struct net_device *dev)
 {
-       struct net_device *nh_dev;
-
        if (rt->rt_nhn == 1) {
                struct mpls_nh *nh = rt->rt_nh;
 
-               nh_dev = rtnl_dereference(nh->nh_dev);
-               if (dev == nh_dev)
+               if (nh->nh_dev == dev)
                        return true;
        } else {
                for_nexthops(rt) {
-                       nh_dev = rtnl_dereference(nh->nh_dev);
-                       if (nh_dev == dev)
+                       if (nh->nh_dev == dev)
                                return true;
                } endfor_nexthops(rt);
        }
@@ -2222,7 +2249,7 @@ static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
                size_t nhsize = 0;
 
                for_nexthops(rt) {
-                       if (!rtnl_dereference(nh->nh_dev))
+                       if (!nh->nh_dev)
                                continue;
                        nhsize += nla_total_size(sizeof(struct rtnexthop));
                        /* RTA_VIA */
@@ -2468,7 +2495,7 @@ static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
            nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
                        nh->nh_via_alen))
                goto nla_put_failure;
-       dev = rtnl_dereference(nh->nh_dev);
+       dev = nh->nh_dev;
        if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
                goto nla_put_failure;
 
@@ -2507,7 +2534,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
                rt0 = mpls_rt_alloc(1, lo->addr_len, 0);
                if (IS_ERR(rt0))
                        goto nort0;
-               RCU_INIT_POINTER(rt0->rt_nh->nh_dev, lo);
+               rt0->rt_nh->nh_dev = lo;
                rt0->rt_protocol = RTPROT_KERNEL;
                rt0->rt_payload_type = MPT_IPV4;
                rt0->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
@@ -2521,7 +2548,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
                rt2 = mpls_rt_alloc(1, lo->addr_len, 0);
                if (IS_ERR(rt2))
                        goto nort2;
-               RCU_INIT_POINTER(rt2->rt_nh->nh_dev, lo);
+               rt2->rt_nh->nh_dev = lo;
                rt2->rt_protocol = RTPROT_KERNEL;
                rt2->rt_payload_type = MPT_IPV6;
                rt2->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
index 838cdfc10e47d6efd51a7f89c674458f845e6a44..893df00b77b62ec8a6c5a85dd37679a2210fce73 100644 (file)
@@ -87,7 +87,7 @@ enum mpls_payload_type {
 };
 
 struct mpls_nh { /* next hop label forwarding entry */
-       struct net_device __rcu *nh_dev;
+       struct net_device       *nh_dev;
 
        /* nh_flags is accessed under RCU in the packet path; it is
         * modified handling netdev events with rtnl lock held
index 7b96be1e9f14a11ddcfe41f609fdc1730e33106e..f523051f5aef3c2ccb9bdcb87c07de916ee65d1c 100644 (file)
@@ -700,6 +700,9 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
 
        msk_owned_by_me(msk);
 
+       if (sk->sk_state == TCP_LISTEN)
+               return;
+
        if (!rm_list->nr)
                return;
 
index c82a76d2d0bfeb3761d7eee60e6c0936cc8d14bd..54613f5b7521719df36014e9901351cac8328d87 100644 (file)
@@ -1524,7 +1524,7 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
                        int ret = 0;
 
                        prev_ssk = ssk;
-                       mptcp_flush_join_list(msk);
+                       __mptcp_flush_join_list(msk);
                        ssk = mptcp_subflow_get_send(msk);
 
                        /* First check. If the ssk has changed since
@@ -2879,7 +2879,7 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
                 */
                if (WARN_ON_ONCE(!new_mptcp_sock)) {
                        tcp_sk(newsk)->is_mptcp = 0;
-                       return newsk;
+                       goto out;
                }
 
                /* acquire the 2nd reference for the owning socket */
@@ -2891,6 +2891,8 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
                                MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
        }
 
+out:
+       newsk->sk_kern_sock = kern;
        return newsk;
 }
 
index 0f1e661c2032b1977021db8684101dbc82d8f9fc..f8efd478ac97f7ef359348cd7215867995f9e0eb 100644 (file)
@@ -525,7 +525,6 @@ static bool mptcp_supported_sockopt(int level, int optname)
                case TCP_NODELAY:
                case TCP_THIN_LINEAR_TIMEOUTS:
                case TCP_CONGESTION:
-               case TCP_ULP:
                case TCP_CORK:
                case TCP_KEEPIDLE:
                case TCP_KEEPINTVL:
index 770a63103c7a4240b8559a97f707588d569beba8..4712a90a1820ce00146ca0080aa6ce46c826331c 100644 (file)
@@ -684,7 +684,7 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
 
        tstamp = nf_conn_tstamp_find(ct);
        if (tstamp) {
-               s32 timeout = ct->timeout - nfct_time_stamp;
+               s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp;
 
                tstamp->stop = ktime_get_real_ns();
                if (timeout < 0)
@@ -1036,7 +1036,7 @@ static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
        }
 
        /* We want the clashing entry to go away real soon: 1 second timeout. */
-       loser_ct->timeout = nfct_time_stamp + HZ;
+       WRITE_ONCE(loser_ct->timeout, nfct_time_stamp + HZ);
 
        /* IPS_NAT_CLASH removes the entry automatically on the first
         * reply.  Also prevents UDP tracker from moving the entry to
@@ -1560,7 +1560,7 @@ __nf_conntrack_alloc(struct net *net,
        /* save hash for reusing when confirming */
        *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
        ct->status = 0;
-       ct->timeout = 0;
+       WRITE_ONCE(ct->timeout, 0);
        write_pnet(&ct->ct_net, net);
        memset(&ct->__nfct_init_offset, 0,
               offsetof(struct nf_conn, proto) -
index c7708bde057cb4e054ab0e22cf332058fdf19e5d..ec4164c32d27034284e7415c3668b77a8f9e0dbe 100644 (file)
@@ -1195,8 +1195,6 @@ restart:
                }
                hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[cb->args[0]],
                                           hnnode) {
-                       if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
-                               continue;
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        if (nf_ct_is_expired(ct)) {
                                if (i < ARRAY_SIZE(nf_ct_evict) &&
@@ -1208,6 +1206,9 @@ restart:
                        if (!net_eq(net, nf_ct_net(ct)))
                                continue;
 
+                       if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
+                               continue;
+
                        if (cb->args[1]) {
                                if (ct != last)
                                        continue;
@@ -1998,7 +1999,7 @@ static int ctnetlink_change_timeout(struct nf_conn *ct,
 
        if (timeout > INT_MAX)
                timeout = INT_MAX;
-       ct->timeout = nfct_time_stamp + (u32)timeout;
+       WRITE_ONCE(ct->timeout, nfct_time_stamp + (u32)timeout);
 
        if (test_bit(IPS_DYING_BIT, &ct->status))
                return -ETIME;
index 87a7388b6c8942273eb12447d959c8565cb3b606..ed37bb9b4e5886cd7681606ad9d423ec5575f9be 100644 (file)
@@ -201,8 +201,8 @@ static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
        if (timeout < 0)
                timeout = 0;
 
-       if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
-               ct->timeout = nfct_time_stamp + timeout;
+       if (nf_flow_timeout_delta(READ_ONCE(ct->timeout)) > (__s32)timeout)
+               WRITE_ONCE(ct->timeout, nfct_time_stamp + timeout);
 }
 
 static void flow_offload_fixup_ct_state(struct nf_conn *ct)
index c0851fec11d46532f87423e1ab38988bf4418172..c20772822637206c9d17a8003d6c149207326bac 100644 (file)
@@ -4481,9 +4481,9 @@ struct nft_set_elem_catchall {
 static void nft_set_catchall_destroy(const struct nft_ctx *ctx,
                                     struct nft_set *set)
 {
-       struct nft_set_elem_catchall *catchall;
+       struct nft_set_elem_catchall *next, *catchall;
 
-       list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
+       list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
                list_del_rcu(&catchall->list);
                nft_set_elem_destroy(set, catchall->elem, true);
                kfree_rcu(catchall);
index 691ef4cffdd907cf09d3a7e680ebe83ea5562ee0..7f83f9697fc14d4b661ef9d837ad9cf77b34d13b 100644 (file)
@@ -556,7 +556,8 @@ __build_packet_message(struct nfnl_log_net *log,
                goto nla_put_failure;
 
        if (indev && skb->dev &&
-           skb->mac_header != skb->network_header) {
+           skb_mac_header_was_set(skb) &&
+           skb_mac_header_len(skb) != 0) {
                struct nfulnl_msg_packet_hw phw;
                int len;
 
index 4acc4b8e9fe5a0fde7305afda92e6ac01c59ac02..f0b9e21a24524beccdd7797f680dbd339f95758a 100644 (file)
@@ -387,7 +387,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
        struct net_device *indev;
        struct net_device *outdev;
        struct nf_conn *ct = NULL;
-       enum ip_conntrack_info ctinfo;
+       enum ip_conntrack_info ctinfo = 0;
        struct nfnl_ct_hook *nfnl_ct;
        bool csum_verify;
        char *secdata = NULL;
@@ -560,7 +560,8 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
                goto nla_put_failure;
 
        if (indev && entskb->dev &&
-           skb_mac_header_was_set(entskb)) {
+           skb_mac_header_was_set(entskb) &&
+           skb_mac_header_len(entskb) != 0) {
                struct nfqnl_msg_packet_hw phw;
                int len;
 
index af4ee874a067c69c295492584ab87c72b2caf23b..dbe1f2e7dd9ed620b1ab0c99c214178e6dbc6a16 100644 (file)
@@ -236,7 +236,7 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
 
        tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
        if (!tcph)
-               return;
+               goto err;
 
        opt = (u8 *)tcph;
        for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
@@ -251,16 +251,16 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
                        continue;
 
                if (i + optl > tcphdr_len || priv->len + priv->offset > optl)
-                       return;
+                       goto err;
 
                if (skb_ensure_writable(pkt->skb,
                                        nft_thoff(pkt) + i + priv->len))
-                       return;
+                       goto err;
 
                tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff,
                                              &tcphdr_len);
                if (!tcph)
-                       return;
+                       goto err;
 
                offset = i + priv->offset;
 
@@ -303,6 +303,9 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
 
                return;
        }
+       return;
+err:
+       regs->verdict.code = NFT_BREAK;
 }
 
 static void nft_exthdr_sctp_eval(const struct nft_expr *expr,
index e517663e0cd175f0421b4878bbe4f3f9815acd65..6f4116e729581fd1f3bb418b124908cf38159afc 100644 (file)
@@ -886,7 +886,7 @@ static int nft_pipapo_avx2_lookup_8b_6(unsigned long *map, unsigned long *fill,
                        NFT_PIPAPO_AVX2_BUCKET_LOAD8(4,  lt, 4, pkt[4], bsize);
 
                        NFT_PIPAPO_AVX2_AND(5, 0, 1);
-                       NFT_PIPAPO_AVX2_BUCKET_LOAD8(6,  lt, 6, pkt[5], bsize);
+                       NFT_PIPAPO_AVX2_BUCKET_LOAD8(6,  lt, 5, pkt[5], bsize);
                        NFT_PIPAPO_AVX2_AND(7, 2, 3);
 
                        /* Stall */
index 4c575324a98528bec4188acf27eecc2f98ae5e0a..9eba2e6483851db2313a093d3ac17deb4c45b7b6 100644 (file)
@@ -1852,6 +1852,11 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;
 
+       if (len == 0) {
+               pr_warn_once("Zero length message leads to an empty skb\n");
+               return -ENODATA;
+       }
+
        err = scm_send(sock, msg, &scm, true);
        if (err < 0)
                return err;
index 334f63c9529efaf6170c82a0ec5bd7805d5f44da..f184b0db79d4026418ffa4fbf80bd6b38bdd3d0a 100644 (file)
@@ -636,8 +636,10 @@ static int nfc_genl_dump_devices_done(struct netlink_callback *cb)
 {
        struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
 
-       nfc_device_iter_exit(iter);
-       kfree(iter);
+       if (iter) {
+               nfc_device_iter_exit(iter);
+               kfree(iter);
+       }
 
        return 0;
 }
@@ -1392,8 +1394,10 @@ static int nfc_genl_dump_ses_done(struct netlink_callback *cb)
 {
        struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
 
-       nfc_device_iter_exit(iter);
-       kfree(iter);
+       if (iter) {
+               nfc_device_iter_exit(iter);
+               kfree(iter);
+       }
 
        return 0;
 }
index 9713035b89e3ab2a20f9826621063a66c2f4c994..6d262d9aa10ea1ad48a8bed44b4f65cdb16920db 100644 (file)
@@ -34,6 +34,7 @@
 #include <net/mpls.h>
 #include <net/ndisc.h>
 #include <net/nsh.h>
+#include <net/netfilter/nf_conntrack_zones.h>
 
 #include "conntrack.h"
 #include "datapath.h"
@@ -860,6 +861,7 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
 #endif
        bool post_ct = false;
        int res, err;
+       u16 zone = 0;
 
        /* Extract metadata from packet. */
        if (tun_info) {
@@ -898,6 +900,7 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
                key->recirc_id = tc_ext ? tc_ext->chain : 0;
                OVS_CB(skb)->mru = tc_ext ? tc_ext->mru : 0;
                post_ct = tc_ext ? tc_ext->post_ct : false;
+               zone = post_ct ? tc_ext->zone : 0;
        } else {
                key->recirc_id = 0;
        }
@@ -906,8 +909,11 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
 #endif
 
        err = key_extract(skb, key);
-       if (!err)
+       if (!err) {
                ovs_ct_fill_key(skb, key, post_ct);   /* Must be after key_extract(). */
+               if (post_ct && !skb_get_nfct(skb))
+                       key->ct_zone = zone;
+       }
        return err;
 }
 
index 46943a18a10d5413db57955dbd24302af7ef1d97..76c2dca7f0a594b859ec791f422fe514f1470df0 100644 (file)
@@ -4492,9 +4492,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
        }
 
 out_free_pg_vec:
-       bitmap_free(rx_owner_map);
-       if (pg_vec)
+       if (pg_vec) {
+               bitmap_free(rx_owner_map);
                free_pg_vec(pg_vec, order, req->tp_block_nr);
+       }
 out:
        return err;
 }
index a1525916885ae9741972e32b16363319edc1aa31..65d463ad87707782a44152014fa760798554145f 100644 (file)
@@ -868,6 +868,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
 
        err = pep_accept_conn(newsk, skb);
        if (err) {
+               __sock_put(sk);
                sock_put(newsk);
                newsk = NULL;
                goto drop;
@@ -946,6 +947,8 @@ static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
                        ret =  -EBUSY;
                else if (sk->sk_state == TCP_ESTABLISHED)
                        ret = -EISCONN;
+               else if (!pn->pn_sk.sobject)
+                       ret = -EADDRNOTAVAIL;
                else
                        ret = pep_sock_enable(sk, NULL, 0);
                release_sock(sk);
index a3bc4b54d4910567c915f41cd6bc44f279eaadf3..b4cc699c5fad37736a31619db2b0ac41babd164e 100644 (file)
@@ -253,6 +253,7 @@ static struct rds_connection *__rds_conn_create(struct net *net,
                                 * should end up here, but if it
                                 * does, reset/destroy the connection.
                                 */
+                               kfree(conn->c_path);
                                kmem_cache_free(rds_conn_slab, conn);
                                conn = ERR_PTR(-EOPNOTSUPP);
                                goto out;
index abf19c0e3ba0bfcf0396df2d78b937e288b84ab0..5327d130c4b5691e788bbbcb990a349d714ad8d4 100644 (file)
@@ -500,7 +500,7 @@ void rds_tcp_tune(struct socket *sock)
                sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
        }
        if (rtn->rcvbuf_size > 0) {
-               sk->sk_sndbuf = rtn->rcvbuf_size;
+               sk->sk_rcvbuf = rtn->rcvbuf_size;
                sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
        }
        release_sock(sk);
index dbea0bfee48e9e4df50fda68ade4624761f44d33..8120138dac01810854c8376acdec90e3c13dd4a8 100644 (file)
@@ -135,16 +135,20 @@ struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle)
        return bundle;
 }
 
+static void rxrpc_free_bundle(struct rxrpc_bundle *bundle)
+{
+       rxrpc_put_peer(bundle->params.peer);
+       kfree(bundle);
+}
+
 void rxrpc_put_bundle(struct rxrpc_bundle *bundle)
 {
        unsigned int d = bundle->debug_id;
        unsigned int u = atomic_dec_return(&bundle->usage);
 
        _debug("PUT B=%x %u", d, u);
-       if (u == 0) {
-               rxrpc_put_peer(bundle->params.peer);
-               kfree(bundle);
-       }
+       if (u == 0)
+               rxrpc_free_bundle(bundle);
 }
 
 /*
@@ -328,7 +332,7 @@ static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *c
        return candidate;
 
 found_bundle_free:
-       kfree(candidate);
+       rxrpc_free_bundle(candidate);
 found_bundle:
        rxrpc_get_bundle(bundle);
        spin_unlock(&local->client_bundles_lock);
index 68396d05205252177ea9525b43972f1598480e12..0298fe2ad6d323b377b46d5dc70c8d472d899966 100644 (file)
@@ -299,6 +299,12 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx,
        return peer;
 }
 
+static void rxrpc_free_peer(struct rxrpc_peer *peer)
+{
+       rxrpc_put_local(peer->local);
+       kfree_rcu(peer, rcu);
+}
+
 /*
  * Set up a new incoming peer.  There shouldn't be any other matching peers
  * since we've already done a search in the list from the non-reentrant context
@@ -365,7 +371,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
                spin_unlock_bh(&rxnet->peer_hash_lock);
 
                if (peer)
-                       kfree(candidate);
+                       rxrpc_free_peer(candidate);
                else
                        peer = candidate;
        }
@@ -420,8 +426,7 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
        list_del_init(&peer->keepalive_link);
        spin_unlock_bh(&rxnet->peer_hash_lock);
 
-       rxrpc_put_local(peer->local);
-       kfree_rcu(peer, rcu);
+       rxrpc_free_peer(peer);
 }
 
 /*
@@ -457,8 +462,7 @@ void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
        if (n == 0) {
                hash_del_rcu(&peer->hash_link);
                list_del_init(&peer->keepalive_link);
-               rxrpc_put_local(peer->local);
-               kfree_rcu(peer, rcu);
+               rxrpc_free_peer(peer);
        }
 }
 
index 90866ae45573a7ec3ff783c070555e56df126c3c..ab3591408419ff54ec249266da3a364bb62fad94 100644 (file)
@@ -690,10 +690,10 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
                                   u8 family, u16 zone, bool *defrag)
 {
        enum ip_conntrack_info ctinfo;
-       struct qdisc_skb_cb cb;
        struct nf_conn *ct;
        int err = 0;
        bool frag;
+       u16 mru;
 
        /* Previously seen (loopback)? Ignore. */
        ct = nf_ct_get(skb, &ctinfo);
@@ -708,7 +708,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
                return err;
 
        skb_get(skb);
-       cb = *qdisc_skb_cb(skb);
+       mru = tc_skb_cb(skb)->mru;
 
        if (family == NFPROTO_IPV4) {
                enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
@@ -722,7 +722,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
 
                if (!err) {
                        *defrag = true;
-                       cb.mru = IPCB(skb)->frag_max_size;
+                       mru = IPCB(skb)->frag_max_size;
                }
        } else { /* NFPROTO_IPV6 */
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
@@ -735,7 +735,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
 
                if (!err) {
                        *defrag = true;
-                       cb.mru = IP6CB(skb)->frag_max_size;
+                       mru = IP6CB(skb)->frag_max_size;
                }
 #else
                err = -EOPNOTSUPP;
@@ -744,7 +744,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
        }
 
        if (err != -EINPROGRESS)
-               *qdisc_skb_cb(skb) = cb;
+               tc_skb_cb(skb)->mru = mru;
        skb_clear_hash(skb);
        skb->ignore_df = 1;
        return err;
@@ -963,7 +963,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
        tcf_action_update_bstats(&c->common, skb);
 
        if (clear) {
-               qdisc_skb_cb(skb)->post_ct = false;
+               tc_skb_cb(skb)->post_ct = false;
                ct = nf_ct_get(skb, &ctinfo);
                if (ct) {
                        nf_conntrack_put(&ct->ct_general);
@@ -1048,7 +1048,8 @@ do_nat:
 out_push:
        skb_push_rcsum(skb, nh_ofs);
 
-       qdisc_skb_cb(skb)->post_ct = true;
+       tc_skb_cb(skb)->post_ct = true;
+       tc_skb_cb(skb)->zone = p->zone;
 out_clear:
        if (defrag)
                qdisc_skb_cb(skb)->pkt_len = skb->len;
index 2ef8f5a6205a9d92c2195444cb0469702be93da5..35c74bdde848e2d3e100ffd6589946ce9d3d10eb 100644 (file)
@@ -1617,12 +1617,15 @@ int tcf_classify(struct sk_buff *skb,
 
        /* If we missed on some chain */
        if (ret == TC_ACT_UNSPEC && last_executed_chain) {
+               struct tc_skb_cb *cb = tc_skb_cb(skb);
+
                ext = tc_skb_ext_alloc(skb);
                if (WARN_ON_ONCE(!ext))
                        return TC_ACT_SHOT;
                ext->chain = last_executed_chain;
-               ext->mru = qdisc_skb_cb(skb)->mru;
-               ext->post_ct = qdisc_skb_cb(skb)->post_ct;
+               ext->mru = cb->mru;
+               ext->post_ct = cb->post_ct;
+               ext->zone = cb->zone;
        }
 
        return ret;
@@ -3687,6 +3690,7 @@ int tc_setup_flow_action(struct flow_action *flow_action,
                                entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
                                break;
                        default:
+                               err = -EOPNOTSUPP;
                                goto err_out_locked;
                        }
                } else if (is_tcf_skbedit_ptype(act)) {
index aab13ba1176729cca0e4f010c82a9b467353ce44..ef54ed3958742881eb42f17ee3598fb3fec2e02c 100644 (file)
@@ -19,6 +19,7 @@
 
 #include <net/sch_generic.h>
 #include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
 #include <net/ip.h>
 #include <net/flow_dissector.h>
 #include <net/geneve.h>
@@ -309,7 +310,8 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                       struct tcf_result *res)
 {
        struct cls_fl_head *head = rcu_dereference_bh(tp->root);
-       bool post_ct = qdisc_skb_cb(skb)->post_ct;
+       bool post_ct = tc_skb_cb(skb)->post_ct;
+       u16 zone = tc_skb_cb(skb)->zone;
        struct fl_flow_key skb_key;
        struct fl_flow_mask *mask;
        struct cls_fl_filter *f;
@@ -327,7 +329,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
                                    fl_ct_info_to_flower_map,
                                    ARRAY_SIZE(fl_ct_info_to_flower_map),
-                                   post_ct);
+                                   post_ct, zone);
                skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
                skb_flow_dissect(skb, &mask->dissector, &skb_key,
                                 FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);
index 3c2300d144681869a37ada0d20966f9b5b145653..857aaebd49f4315502928fb1f75d2c85eb63eb51 100644 (file)
@@ -2736,7 +2736,7 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
        q->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data),
                           GFP_KERNEL);
        if (!q->tins)
-               goto nomem;
+               return -ENOMEM;
 
        for (i = 0; i < CAKE_MAX_TINS; i++) {
                struct cake_tin_data *b = q->tins + i;
@@ -2766,10 +2766,6 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
        q->min_netlen = ~0;
        q->min_adjlen = ~0;
        return 0;
-
-nomem:
-       cake_destroy(sch);
-       return -ENOMEM;
 }
 
 static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
index e007fc75ef2feffdda6435031d548ff38fb1a790..d73393493553389ac77fa7d16c500ad6c7f49404 100644 (file)
@@ -666,9 +666,9 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
                }
        }
        for (i = q->nbands; i < oldbands; i++) {
-               qdisc_tree_flush_backlog(q->classes[i].qdisc);
-               if (i >= q->nstrict)
+               if (i >= q->nstrict && q->classes[i].qdisc->q.qlen)
                        list_del(&q->classes[i].alist);
+               qdisc_tree_flush_backlog(q->classes[i].qdisc);
        }
        q->nstrict = nstrict;
        memcpy(q->prio2band, priomap, sizeof(priomap));
index 830f3559f727ad471b595b2f5b04788827da89b0..d6aba6edd16e5eab120a57c316fcb06a5d5f3442 100644 (file)
@@ -531,6 +531,7 @@ static void fq_pie_destroy(struct Qdisc *sch)
        struct fq_pie_sched_data *q = qdisc_priv(sch);
 
        tcf_block_put(q->block);
+       q->p_params.tupdate = 0;
        del_timer_sync(&q->adapt_timer);
        kvfree(q->flows);
 }
index 8c06381391d6feecaa792f6046c65b8951473f42..5ded4c8672a64dc806b7fb755e809b2383eaab10 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 #include <net/netlink.h>
 #include <net/sch_generic.h>
+#include <net/pkt_sched.h>
 #include <net/dst.h>
 #include <net/ip.h>
 #include <net/ip6_fib.h>
@@ -137,7 +138,7 @@ err:
 
 int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
 {
-       u16 mru = qdisc_skb_cb(skb)->mru;
+       u16 mru = tc_skb_cb(skb)->mru;
        int err;
 
        if (mru && skb->len > mru + skb->dev->hard_header_len)
index 230072f9ec48e3bf3cd43f9aa7fafa4a096ac402..1c9289f56dc471d33b08f6138bee3bea8aabe26f 100644 (file)
@@ -194,7 +194,9 @@ static int smc_release(struct socket *sock)
        /* cleanup for a dangling non-blocking connect */
        if (smc->connect_nonblock && sk->sk_state == SMC_INIT)
                tcp_abort(smc->clcsock->sk, ECONNABORTED);
-       flush_work(&smc->connect_work);
+
+       if (cancel_work_sync(&smc->connect_work))
+               sock_put(&smc->sk); /* sock_hold in smc_connect for passive closing */
 
        if (sk->sk_state == SMC_LISTEN)
                /* smc_close_non_accepted() is called and acquires
index 3715d2f5ad555ce365738e73a99ed8c7ca6a2cfd..292e4d904ab6e4afbba2cef421bce27cae5af364 100644 (file)
@@ -195,6 +195,7 @@ int smc_close_active(struct smc_sock *smc)
        int old_state;
        long timeout;
        int rc = 0;
+       int rc1 = 0;
 
        timeout = current->flags & PF_EXITING ?
                  0 : sock_flag(sk, SOCK_LINGER) ?
@@ -232,8 +233,11 @@ again:
                        /* actively shutdown clcsock before peer close it,
                         * prevent peer from entering TIME_WAIT state.
                         */
-                       if (smc->clcsock && smc->clcsock->sk)
-                               rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
+                       if (smc->clcsock && smc->clcsock->sk) {
+                               rc1 = kernel_sock_shutdown(smc->clcsock,
+                                                          SHUT_RDWR);
+                               rc = rc ? rc : rc1;
+                       }
                } else {
                        /* peer event has changed the state */
                        goto again;
index bb52c8b5f148af8536246953434a50a22cce2800..387d28b2f8dd055a3b3ceea6024ffb231f115877 100644 (file)
@@ -625,18 +625,17 @@ int smcd_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb)
 void smc_lgr_cleanup_early(struct smc_connection *conn)
 {
        struct smc_link_group *lgr = conn->lgr;
-       struct list_head *lgr_list;
        spinlock_t *lgr_lock;
 
        if (!lgr)
                return;
 
        smc_conn_free(conn);
-       lgr_list = smc_lgr_list_head(lgr, &lgr_lock);
+       smc_lgr_list_head(lgr, &lgr_lock);
        spin_lock_bh(lgr_lock);
        /* do not use this link group for new connections */
-       if (!list_empty(lgr_list))
-               list_del_init(lgr_list);
+       if (!list_empty(&lgr->list))
+               list_del_init(&lgr->list);
        spin_unlock_bh(lgr_lock);
        __smc_lgr_terminate(lgr, true);
 }
index b4d9419a015b1b984119ffd85c546ab6d5f3fdcc..d293614d5fc651906621a7eb9c5de5d22fbee747 100644 (file)
@@ -524,7 +524,7 @@ static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
                return -EEXIST;
 
        /* Allocate a new AEAD */
-       tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+       tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
        if (unlikely(!tmp))
                return -ENOMEM;
 
@@ -1474,7 +1474,7 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
                return -EEXIST;
 
        /* Allocate crypto */
-       c = kzalloc(sizeof(*c), GFP_KERNEL);
+       c = kzalloc(sizeof(*c), GFP_ATOMIC);
        if (!c)
                return -ENOMEM;
 
@@ -1488,7 +1488,7 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
        }
 
        /* Allocate statistic structure */
-       c->stats = alloc_percpu(struct tipc_crypto_stats);
+       c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC);
        if (!c->stats) {
                if (c->wq)
                        destroy_workqueue(c->wq);
@@ -2461,7 +2461,7 @@ static void tipc_crypto_work_tx(struct work_struct *work)
        }
 
        /* Lets duplicate it first */
-       skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_KERNEL);
+       skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_ATOMIC);
        rcu_read_unlock();
 
        /* Now, generate new key, initiate & distribute it */
index d3e7ff90889e35da299bf4d9127b6f766845c6e1..dfe623a4e72f48bb12a68abcfb3081c45a7716f5 100644 (file)
@@ -521,7 +521,7 @@ static int tls_do_encryption(struct sock *sk,
        memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
               prot->iv_size + prot->salt_size);
 
-       xor_iv_with_seq(prot, rec->iv_data, tls_ctx->tx.rec_seq);
+       xor_iv_with_seq(prot, rec->iv_data + iv_offset, tls_ctx->tx.rec_seq);
 
        sge->offset += prot->prepend_size;
        sge->length -= prot->prepend_size;
@@ -1499,7 +1499,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
        else
                memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
 
-       xor_iv_with_seq(prot, iv, tls_ctx->rx.rec_seq);
+       xor_iv_with_seq(prot, iv + iv_offset, tls_ctx->rx.rec_seq);
 
        /* Prepare AAD */
        tls_make_aad(aad, rxm->full_len - prot->overhead_size +
index 59ee1be5a6dd3631155d4fe47b5f9fca7d0fa716..ec2c2afbf0d060b4f6335a7616c8deba0a95fde6 100644 (file)
@@ -1299,7 +1299,8 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
        space_available = virtio_transport_space_update(sk, pkt);
 
        /* Update CID in case it has changed after a transport reset event */
-       vsk->local_addr.svm_cid = dst.svm_cid;
+       if (vsk->local_addr.svm_cid != VMADDR_CID_ANY)
+               vsk->local_addr.svm_cid = dst.svm_cid;
 
        if (space_available)
                sk->sk_write_space(sk);
index df87c7f3a04921d43ae0f536444101bc4ddfdab6..f8f01a3e020ba9371af6c304d52e385487ce3ff9 100644 (file)
@@ -133,6 +133,7 @@ static u32 reg_is_indoor_portid;
 
 static void restore_regulatory_settings(bool reset_user, bool cached);
 static void print_regdomain(const struct ieee80211_regdomain *rd);
+static void reg_process_hint(struct regulatory_request *reg_request);
 
 static const struct ieee80211_regdomain *get_cfg80211_regdom(void)
 {
@@ -1098,6 +1099,8 @@ int reg_reload_regdb(void)
        const struct firmware *fw;
        void *db;
        int err;
+       const struct ieee80211_regdomain *current_regdomain;
+       struct regulatory_request *request;
 
        err = request_firmware(&fw, "regulatory.db", &reg_pdev->dev);
        if (err)
@@ -1118,8 +1121,26 @@ int reg_reload_regdb(void)
        if (!IS_ERR_OR_NULL(regdb))
                kfree(regdb);
        regdb = db;
-       rtnl_unlock();
 
+       /* reset regulatory domain */
+       current_regdomain = get_cfg80211_regdom();
+
+       request = kzalloc(sizeof(*request), GFP_KERNEL);
+       if (!request) {
+               err = -ENOMEM;
+               goto out_unlock;
+       }
+
+       request->wiphy_idx = WIPHY_IDX_INVALID;
+       request->alpha2[0] = current_regdomain->alpha2[0];
+       request->alpha2[1] = current_regdomain->alpha2[1];
+       request->initiator = NL80211_REGDOM_SET_BY_CORE;
+       request->user_reg_hint_type = NL80211_USER_REG_HINT_USER;
+
+       reg_process_hint(request);
+
+out_unlock:
+       rtnl_unlock();
  out:
        release_firmware(fw);
        return err;
@@ -2338,6 +2359,7 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
        struct cfg80211_chan_def chandef = {};
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        enum nl80211_iftype iftype;
+       bool ret;
 
        wdev_lock(wdev);
        iftype = wdev->iftype;
@@ -2387,7 +2409,11 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_P2P_GO:
        case NL80211_IFTYPE_ADHOC:
-               return cfg80211_reg_can_beacon_relax(wiphy, &chandef, iftype);
+               wiphy_lock(wiphy);
+               ret = cfg80211_reg_can_beacon_relax(wiphy, &chandef, iftype);
+               wiphy_unlock(wiphy);
+
+               return ret;
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_P2P_CLIENT:
                return cfg80211_chandef_usable(wiphy, &chandef,
index f16074eb53c72a7040421d23028d5762e0c5658d..7a466ea962c57f013454bde2892dfa38db7dd86b 100644 (file)
@@ -677,8 +677,6 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
        struct xdp_sock *xs = xdp_sk(sk);
        struct xsk_buff_pool *pool;
 
-       sock_poll_wait(file, sock, wait);
-
        if (unlikely(!xsk_is_bound(xs)))
                return mask;
 
@@ -690,6 +688,8 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
                else
                        /* Poll needs to drive Tx also in copy mode */
                        __xsk_sendmsg(sk);
+       } else {
+               sock_poll_wait(file, sock, wait);
        }
 
        if (xs->rx && !xskq_prod_is_empty(xs->rx))
index b9198e2eef282a39e4980db23d6f4c7b6ee3813b..faf8cdb79c5f48ae87d41008b079e607b9bcb68a 100644 (file)
@@ -4,6 +4,7 @@ obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct.o
 obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct-too.o
 obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct-modify.o
 obj-$(CONFIG_SAMPLE_FTRACE_DIRECT_MULTI) += ftrace-direct-multi.o
+obj-$(CONFIG_SAMPLE_FTRACE_DIRECT_MULTI) += ftrace-direct-multi-modify.o
 
 CFLAGS_sample-trace-array.o := -I$(src)
 obj-$(CONFIG_SAMPLE_TRACE_ARRAY) += sample-trace-array.o
diff --git a/samples/ftrace/ftrace-direct-multi-modify.c b/samples/ftrace/ftrace-direct-multi-modify.c
new file mode 100644 (file)
index 0000000..91bc42a
--- /dev/null
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/ftrace.h>
+#include <asm/asm-offsets.h>
+
+void my_direct_func1(unsigned long ip)
+{
+       trace_printk("my direct func1 ip %lx\n", ip);
+}
+
+void my_direct_func2(unsigned long ip)
+{
+       trace_printk("my direct func2 ip %lx\n", ip);
+}
+
+extern void my_tramp1(void *);
+extern void my_tramp2(void *);
+
+#ifdef CONFIG_X86_64
+
+asm (
+"      .pushsection    .text, \"ax\", @progbits\n"
+"      .type           my_tramp1, @function\n"
+"      .globl          my_tramp1\n"
+"   my_tramp1:"
+"      pushq %rbp\n"
+"      movq %rsp, %rbp\n"
+"      pushq %rdi\n"
+"      movq 8(%rbp), %rdi\n"
+"      call my_direct_func1\n"
+"      popq %rdi\n"
+"      leave\n"
+"      ret\n"
+"      .size           my_tramp1, .-my_tramp1\n"
+"      .type           my_tramp2, @function\n"
+"\n"
+"      .globl          my_tramp2\n"
+"   my_tramp2:"
+"      pushq %rbp\n"
+"      movq %rsp, %rbp\n"
+"      pushq %rdi\n"
+"      movq 8(%rbp), %rdi\n"
+"      call my_direct_func2\n"
+"      popq %rdi\n"
+"      leave\n"
+"      ret\n"
+"      .size           my_tramp2, .-my_tramp2\n"
+"      .popsection\n"
+);
+
+#endif /* CONFIG_X86_64 */
+
+#ifdef CONFIG_S390
+
+asm (
+"       .pushsection    .text, \"ax\", @progbits\n"
+"       .type           my_tramp1, @function\n"
+"       .globl          my_tramp1\n"
+"   my_tramp1:"
+"       lgr             %r1,%r15\n"
+"       stmg            %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+"       stg             %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+"       aghi            %r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
+"       stg             %r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
+"       lgr             %r2,%r0\n"
+"       brasl           %r14,my_direct_func1\n"
+"       aghi            %r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
+"       lmg             %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+"       lg              %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+"       lgr             %r1,%r0\n"
+"       br              %r1\n"
+"       .size           my_tramp1, .-my_tramp1\n"
+"\n"
+"       .type           my_tramp2, @function\n"
+"       .globl          my_tramp2\n"
+"   my_tramp2:"
+"       lgr             %r1,%r15\n"
+"       stmg            %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+"       stg             %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+"       aghi            %r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
+"       stg             %r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
+"       lgr             %r2,%r0\n"
+"       brasl           %r14,my_direct_func2\n"
+"       aghi            %r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
+"       lmg             %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+"       lg              %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+"       lgr             %r1,%r0\n"
+"       br              %r1\n"
+"       .size           my_tramp2, .-my_tramp2\n"
+"       .popsection\n"
+);
+
+#endif /* CONFIG_S390 */
+
+static unsigned long my_tramp = (unsigned long)my_tramp1;
+static unsigned long tramps[2] = {
+       (unsigned long)my_tramp1,
+       (unsigned long)my_tramp2,
+};
+
+static struct ftrace_ops direct;
+
+static int simple_thread(void *arg)
+{
+       static int t;
+       int ret = 0;
+
+       while (!kthread_should_stop()) {
+               set_current_state(TASK_INTERRUPTIBLE);
+               schedule_timeout(2 * HZ);
+
+               if (ret)
+                       continue;
+               t ^= 1;
+               ret = modify_ftrace_direct_multi(&direct, tramps[t]);
+               if (!ret)
+                       my_tramp = tramps[t];
+               WARN_ON_ONCE(ret);
+       }
+
+       return 0;
+}
+
+static struct task_struct *simple_tsk;
+
+static int __init ftrace_direct_multi_init(void)
+{
+       int ret;
+
+       ftrace_set_filter_ip(&direct, (unsigned long) wake_up_process, 0, 0);
+       ftrace_set_filter_ip(&direct, (unsigned long) schedule, 0, 0);
+
+       ret = register_ftrace_direct_multi(&direct, my_tramp);
+
+       if (!ret)
+               simple_tsk = kthread_run(simple_thread, NULL, "event-sample-fn");
+       return ret;
+}
+
+static void __exit ftrace_direct_multi_exit(void)
+{
+       kthread_stop(simple_tsk);
+       unregister_ftrace_direct_multi(&direct, my_tramp);
+}
+
+module_init(ftrace_direct_multi_init);
+module_exit(ftrace_direct_multi_exit);
+
+MODULE_AUTHOR("Jiri Olsa");
+MODULE_DESCRIPTION("Example use case of using modify_ftrace_direct_multi()");
+MODULE_LICENSE("GPL");
index 7d631aaa0ae118bc41e9649d811beaf4ee4fe6ec..52a000b057a575995f5a0dab27d70c1e1e0d463f 100755 (executable)
@@ -219,7 +219,7 @@ if ($arch eq "x86_64") {
 
 } elsif ($arch eq "s390" && $bits == 64) {
     if ($cc =~ /-DCC_USING_HOTPATCH/) {
-       $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*brcl\\s*0,[0-9a-f]+ <([^\+]*)>\$";
+       $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*(bcrl\\s*0,|jgnop\\s*)[0-9a-f]+ <([^\+]*)>\$";
        $mcount_adjust = 0;
     }
     $alignment = 8;
index 62d30c0a30c291753bdf1df23d7909394b204286..1afc06ffd969fc9479cea666d7741b6eb37b5a70 100644 (file)
@@ -611,10 +611,11 @@ static int bad_option(struct superblock_security_struct *sbsec, char flag,
        return 0;
 }
 
-static int parse_sid(struct super_block *sb, const char *s, u32 *sid)
+static int parse_sid(struct super_block *sb, const char *s, u32 *sid,
+                    gfp_t gfp)
 {
        int rc = security_context_str_to_sid(&selinux_state, s,
-                                            sid, GFP_KERNEL);
+                                            sid, gfp);
        if (rc)
                pr_warn("SELinux: security_context_str_to_sid"
                       "(%s) failed for (dev %s, type %s) errno=%d\n",
@@ -685,7 +686,8 @@ static int selinux_set_mnt_opts(struct super_block *sb,
         */
        if (opts) {
                if (opts->fscontext) {
-                       rc = parse_sid(sb, opts->fscontext, &fscontext_sid);
+                       rc = parse_sid(sb, opts->fscontext, &fscontext_sid,
+                                       GFP_KERNEL);
                        if (rc)
                                goto out;
                        if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid,
@@ -694,7 +696,8 @@ static int selinux_set_mnt_opts(struct super_block *sb,
                        sbsec->flags |= FSCONTEXT_MNT;
                }
                if (opts->context) {
-                       rc = parse_sid(sb, opts->context, &context_sid);
+                       rc = parse_sid(sb, opts->context, &context_sid,
+                                       GFP_KERNEL);
                        if (rc)
                                goto out;
                        if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid,
@@ -703,7 +706,8 @@ static int selinux_set_mnt_opts(struct super_block *sb,
                        sbsec->flags |= CONTEXT_MNT;
                }
                if (opts->rootcontext) {
-                       rc = parse_sid(sb, opts->rootcontext, &rootcontext_sid);
+                       rc = parse_sid(sb, opts->rootcontext, &rootcontext_sid,
+                                       GFP_KERNEL);
                        if (rc)
                                goto out;
                        if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid,
@@ -712,7 +716,8 @@ static int selinux_set_mnt_opts(struct super_block *sb,
                        sbsec->flags |= ROOTCONTEXT_MNT;
                }
                if (opts->defcontext) {
-                       rc = parse_sid(sb, opts->defcontext, &defcontext_sid);
+                       rc = parse_sid(sb, opts->defcontext, &defcontext_sid,
+                                       GFP_KERNEL);
                        if (rc)
                                goto out;
                        if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid,
@@ -2702,14 +2707,14 @@ static int selinux_sb_mnt_opts_compat(struct super_block *sb, void *mnt_opts)
                return (sbsec->flags & SE_MNTMASK) ? 1 : 0;
 
        if (opts->fscontext) {
-               rc = parse_sid(sb, opts->fscontext, &sid);
+               rc = parse_sid(sb, opts->fscontext, &sid, GFP_NOWAIT);
                if (rc)
                        return 1;
                if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid, sid))
                        return 1;
        }
        if (opts->context) {
-               rc = parse_sid(sb, opts->context, &sid);
+               rc = parse_sid(sb, opts->context, &sid, GFP_NOWAIT);
                if (rc)
                        return 1;
                if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid, sid))
@@ -2719,14 +2724,14 @@ static int selinux_sb_mnt_opts_compat(struct super_block *sb, void *mnt_opts)
                struct inode_security_struct *root_isec;
 
                root_isec = backing_inode_security(sb->s_root);
-               rc = parse_sid(sb, opts->rootcontext, &sid);
+               rc = parse_sid(sb, opts->rootcontext, &sid, GFP_NOWAIT);
                if (rc)
                        return 1;
                if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid, sid))
                        return 1;
        }
        if (opts->defcontext) {
-               rc = parse_sid(sb, opts->defcontext, &sid);
+               rc = parse_sid(sb, opts->defcontext, &sid, GFP_NOWAIT);
                if (rc)
                        return 1;
                if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid, sid))
@@ -2749,14 +2754,14 @@ static int selinux_sb_remount(struct super_block *sb, void *mnt_opts)
                return 0;
 
        if (opts->fscontext) {
-               rc = parse_sid(sb, opts->fscontext, &sid);
+               rc = parse_sid(sb, opts->fscontext, &sid, GFP_KERNEL);
                if (rc)
                        return rc;
                if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid, sid))
                        goto out_bad_option;
        }
        if (opts->context) {
-               rc = parse_sid(sb, opts->context, &sid);
+               rc = parse_sid(sb, opts->context, &sid, GFP_KERNEL);
                if (rc)
                        return rc;
                if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid, sid))
@@ -2765,14 +2770,14 @@ static int selinux_sb_remount(struct super_block *sb, void *mnt_opts)
        if (opts->rootcontext) {
                struct inode_security_struct *root_isec;
                root_isec = backing_inode_security(sb->s_root);
-               rc = parse_sid(sb, opts->rootcontext, &sid);
+               rc = parse_sid(sb, opts->rootcontext, &sid, GFP_KERNEL);
                if (rc)
                        return rc;
                if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid, sid))
                        goto out_bad_option;
        }
        if (opts->defcontext) {
-               rc = parse_sid(sb, opts->defcontext, &sid);
+               rc = parse_sid(sb, opts->defcontext, &sid, GFP_KERNEL);
                if (rc)
                        return rc;
                if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid, sid))
index 1da2e3722b1269188932a8f78d42f1cf8f4232ce..6799b1122c9d88c4ae8e617490eff5c6b81deb83 100644 (file)
@@ -1051,10 +1051,11 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
                return false;
        if (!domain)
                return true;
+       if (READ_ONCE(domain->flags[TOMOYO_DIF_QUOTA_WARNED]))
+               return false;
        list_for_each_entry_rcu(ptr, &domain->acl_info_list, list,
                                srcu_read_lock_held(&tomoyo_ss)) {
                u16 perm;
-               u8 i;
 
                if (ptr->is_deleted)
                        continue;
@@ -1065,23 +1066,23 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
                 */
                switch (ptr->type) {
                case TOMOYO_TYPE_PATH_ACL:
-                       data_race(perm = container_of(ptr, struct tomoyo_path_acl, head)->perm);
+                       perm = data_race(container_of(ptr, struct tomoyo_path_acl, head)->perm);
                        break;
                case TOMOYO_TYPE_PATH2_ACL:
-                       data_race(perm = container_of(ptr, struct tomoyo_path2_acl, head)->perm);
+                       perm = data_race(container_of(ptr, struct tomoyo_path2_acl, head)->perm);
                        break;
                case TOMOYO_TYPE_PATH_NUMBER_ACL:
-                       data_race(perm = container_of(ptr, struct tomoyo_path_number_acl, head)
+                       perm = data_race(container_of(ptr, struct tomoyo_path_number_acl, head)
                                  ->perm);
                        break;
                case TOMOYO_TYPE_MKDEV_ACL:
-                       data_race(perm = container_of(ptr, struct tomoyo_mkdev_acl, head)->perm);
+                       perm = data_race(container_of(ptr, struct tomoyo_mkdev_acl, head)->perm);
                        break;
                case TOMOYO_TYPE_INET_ACL:
-                       data_race(perm = container_of(ptr, struct tomoyo_inet_acl, head)->perm);
+                       perm = data_race(container_of(ptr, struct tomoyo_inet_acl, head)->perm);
                        break;
                case TOMOYO_TYPE_UNIX_ACL:
-                       data_race(perm = container_of(ptr, struct tomoyo_unix_acl, head)->perm);
+                       perm = data_race(container_of(ptr, struct tomoyo_unix_acl, head)->perm);
                        break;
                case TOMOYO_TYPE_MANUAL_TASK_ACL:
                        perm = 0;
@@ -1089,21 +1090,17 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
                default:
                        perm = 1;
                }
-               for (i = 0; i < 16; i++)
-                       if (perm & (1 << i))
-                               count++;
+               count += hweight16(perm);
        }
        if (count < tomoyo_profile(domain->ns, domain->profile)->
            pref[TOMOYO_PREF_MAX_LEARNING_ENTRY])
                return true;
-       if (!domain->flags[TOMOYO_DIF_QUOTA_WARNED]) {
-               domain->flags[TOMOYO_DIF_QUOTA_WARNED] = true;
-               /* r->granted = false; */
-               tomoyo_write_log(r, "%s", tomoyo_dif[TOMOYO_DIF_QUOTA_WARNED]);
+       WRITE_ONCE(domain->flags[TOMOYO_DIF_QUOTA_WARNED], true);
+       /* r->granted = false; */
+       tomoyo_write_log(r, "%s", tomoyo_dif[TOMOYO_DIF_QUOTA_WARNED]);
 #ifndef CONFIG_SECURITY_TOMOYO_INSECURE_BUILTIN_SETTING
-               pr_warn("WARNING: Domain '%s' has too many ACLs to hold. Stopped learning mode.\n",
-                       domain->domainname->name);
+       pr_warn("WARNING: Domain '%s' has too many ACLs to hold. Stopped learning mode.\n",
+               domain->domainname->name);
 #endif
-       }
        return false;
 }
index 470dabc60aa0e23e9daea43314dc637283903fa5..edff063e088d28266677da124443ae6960588282 100644 (file)
@@ -264,6 +264,7 @@ static int copy_ctl_value_to_user(void __user *userdata,
                                  struct snd_ctl_elem_value *data,
                                  int type, int count)
 {
+       struct snd_ctl_elem_value32 __user *data32 = userdata;
        int i, size;
 
        if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
@@ -280,6 +281,8 @@ static int copy_ctl_value_to_user(void __user *userdata,
                if (copy_to_user(valuep, data->value.bytes.data, size))
                        return -EFAULT;
        }
+       if (copy_to_user(&data32->id, &data->id, sizeof(data32->id)))
+               return -EFAULT;
        return 0;
 }
 
index 32350c6aba849d41af1586a0583f6b1f87b51d35..537df1e98f8acf6c6cf7dd04651046e02332187f 100644 (file)
@@ -509,6 +509,10 @@ int snd_jack_new(struct snd_card *card, const char *id, int type,
                return -ENOMEM;
 
        jack->id = kstrdup(id, GFP_KERNEL);
+       if (jack->id == NULL) {
+               kfree(jack);
+               return -ENOMEM;
+       }
 
        /* don't creat input device for phantom jack */
        if (!phantom_jack) {
index 82a818734a5f769268872f0f2d895f616d5a8c4e..20a0a4771b9a830f5750f84230a49ee71215a08e 100644 (file)
@@ -147,7 +147,7 @@ snd_pcm_hw_param_value_min(const struct snd_pcm_hw_params *params,
  *
  * Return the maximum value for field PAR.
  */
-static unsigned int
+static int
 snd_pcm_hw_param_value_max(const struct snd_pcm_hw_params *params,
                           snd_pcm_hw_param_t var, int *dir)
 {
@@ -682,18 +682,24 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
                                   struct snd_pcm_hw_params *oss_params,
                                   struct snd_pcm_hw_params *slave_params)
 {
-       size_t s;
-       size_t oss_buffer_size, oss_period_size, oss_periods;
-       size_t min_period_size, max_period_size;
+       ssize_t s;
+       ssize_t oss_buffer_size;
+       ssize_t oss_period_size, oss_periods;
+       ssize_t min_period_size, max_period_size;
        struct snd_pcm_runtime *runtime = substream->runtime;
        size_t oss_frame_size;
 
        oss_frame_size = snd_pcm_format_physical_width(params_format(oss_params)) *
                         params_channels(oss_params) / 8;
 
+       oss_buffer_size = snd_pcm_hw_param_value_max(slave_params,
+                                                    SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+                                                    NULL);
+       if (oss_buffer_size <= 0)
+               return -EINVAL;
        oss_buffer_size = snd_pcm_plug_client_size(substream,
-                                                  snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, NULL)) * oss_frame_size;
-       if (!oss_buffer_size)
+                                                  oss_buffer_size * oss_frame_size);
+       if (oss_buffer_size <= 0)
                return -EINVAL;
        oss_buffer_size = rounddown_pow_of_two(oss_buffer_size);
        if (atomic_read(&substream->mmap_count)) {
@@ -730,7 +736,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
 
        min_period_size = snd_pcm_plug_client_size(substream,
                                                   snd_pcm_hw_param_value_min(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL));
-       if (min_period_size) {
+       if (min_period_size > 0) {
                min_period_size *= oss_frame_size;
                min_period_size = roundup_pow_of_two(min_period_size);
                if (oss_period_size < min_period_size)
@@ -739,7 +745,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
 
        max_period_size = snd_pcm_plug_client_size(substream,
                                                   snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL));
-       if (max_period_size) {
+       if (max_period_size > 0) {
                max_period_size *= oss_frame_size;
                max_period_size = rounddown_pow_of_two(max_period_size);
                if (oss_period_size > max_period_size)
@@ -752,7 +758,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
                oss_periods = substream->oss.setup.periods;
 
        s = snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIODS, NULL);
-       if (runtime->oss.maxfrags && s > runtime->oss.maxfrags)
+       if (s > 0 && runtime->oss.maxfrags && s > runtime->oss.maxfrags)
                s = runtime->oss.maxfrags;
        if (oss_periods > s)
                oss_periods = s;
@@ -878,8 +884,15 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
                err = -EINVAL;
                goto failure;
        }
-       choose_rate(substream, sparams, runtime->oss.rate);
-       snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_CHANNELS, runtime->oss.channels, NULL);
+
+       err = choose_rate(substream, sparams, runtime->oss.rate);
+       if (err < 0)
+               goto failure;
+       err = snd_pcm_hw_param_near(substream, sparams,
+                                   SNDRV_PCM_HW_PARAM_CHANNELS,
+                                   runtime->oss.channels, NULL);
+       if (err < 0)
+               goto failure;
 
        format = snd_pcm_oss_format_from(runtime->oss.format);
 
@@ -1956,7 +1969,7 @@ static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsign
        if (runtime->oss.subdivision || runtime->oss.fragshift)
                return -EINVAL;
        fragshift = val & 0xffff;
-       if (fragshift >= 31)
+       if (fragshift >= 25) /* should be large enough */
                return -EINVAL;
        runtime->oss.fragshift = fragshift;
        runtime->oss.maxfrags = (val >> 16) & 0xffff;
index 6f30231bdb88454c0d6c279f557965c1083608c4..befa9809ff001ec514a0512d560ce079041d86cc 100644 (file)
@@ -447,6 +447,7 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file)
                err = -ENOMEM;
                goto __error;
        }
+       rawmidi_file->user_pversion = 0;
        init_waitqueue_entry(&wait, current);
        add_wait_queue(&rmidi->open_wait, &wait);
        while (1) {
index e1b69c65c3c88954e203aa82f691eb30af11dcf3..e2b7be67f0e303813837c252fe2ac76ef4be6390 100644 (file)
@@ -397,7 +397,7 @@ void snd_opl3_note_on(void *p, int note, int vel, struct snd_midi_channel *chan)
        }
        if (instr_4op) {
                vp2 = &opl3->voices[voice + 3];
-               if (vp->state > 0) {
+               if (vp2->state > 0) {
                        opl3_reg = reg_side | (OPL3_REG_KEYON_BLOCK +
                                               voice_offset + 3);
                        reg_val = vp->keyon_reg & ~OPL3_KEYON_BIT;
index 10a0bffc3cf6c205b4596d48728c442b286f9365..4208fa8a4db5bc10787d797b04ead71572b6f503 100644 (file)
@@ -252,6 +252,11 @@ static const struct config_entry config_table[] = {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
                .device = 0x02c8,
        },
+       {
+               .flags = FLAG_SOF,
+               .device = 0x02c8,
+               .codec_hid = "ESSX8336",
+       },
 /* Cometlake-H */
        {
                .flags = FLAG_SOF,
@@ -276,6 +281,11 @@ static const struct config_entry config_table[] = {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
                .device = 0x06c8,
        },
+       {
+               .flags = FLAG_SOF,
+               .device = 0x06c8,
+               .codec_hid = "ESSX8336",
+       },
 #endif
 
 /* Icelake */
index c0123bc31c0ddf53a8f30458bc9062b4584731f5..b7758dbe23714a98752f572899a178efd3f7e06c 100644 (file)
@@ -132,8 +132,6 @@ static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
                return AE_NOT_FOUND;
        }
 
-       info->handle = handle;
-
        /*
         * On some Intel platforms, multiple children of the HDAS
         * device can be found, but only one of them is the SoundWire
@@ -144,6 +142,9 @@ static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
        if (FIELD_GET(GENMASK(31, 28), adr) != SDW_LINK_TYPE)
                return AE_OK; /* keep going */
 
+       /* found the correct SoundWire controller */
+       info->handle = handle;
+
        /* device found, stop namespace walk */
        return AE_CTRL_TERMINATE;
 }
@@ -164,8 +165,14 @@ int sdw_intel_acpi_scan(acpi_handle *parent_handle,
        acpi_status status;
 
        info->handle = NULL;
+       /*
+        * In the HDAS ACPI scope, 'SNDW' may be either the child of
+        * 'HDAS' or the grandchild of 'HDAS'. So let's go through
+        * the ACPI from 'HDAS' at max depth of 2 to find the 'SNDW'
+        * device.
+        */
        status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
-                                    parent_handle, 1,
+                                    parent_handle, 2,
                                     sdw_intel_acpi_cb,
                                     NULL, info, NULL);
        if (ACPI_FAILURE(status) || info->handle == NULL)
index fe51163f2d82df9347be08a94a486503705779cd..1b46b599a5cff282dbdb02ea22202b941711d724 100644 (file)
@@ -335,7 +335,10 @@ enum {
                                        ((pci)->device == 0x0c0c) || \
                                        ((pci)->device == 0x0d0c) || \
                                        ((pci)->device == 0x160c) || \
-                                       ((pci)->device == 0x490d))
+                                       ((pci)->device == 0x490d) || \
+                                       ((pci)->device == 0x4f90) || \
+                                       ((pci)->device == 0x4f91) || \
+                                       ((pci)->device == 0x4f92))
 
 #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
 
@@ -2473,6 +2476,13 @@ static const struct pci_device_id azx_ids[] = {
        /* DG1 */
        { PCI_DEVICE(0x8086, 0x490d),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       /* DG2 */
+       { PCI_DEVICE(0x8086, 0x4f90),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       { PCI_DEVICE(0x8086, 0x4f91),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       { PCI_DEVICE(0x8086, 0x4f92),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        /* Alderlake-S */
        { PCI_DEVICE(0x8086, 0x7ad0),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
index ea8ab8b43337896ba3ebee6a89a94d63fedff2df..d22c96eb2f8fb7324f44335835dd36bd8576e234 100644 (file)
@@ -438,6 +438,15 @@ int snd_hda_codec_set_pin_target(struct hda_codec *codec, hda_nid_t nid,
 #define for_each_hda_codec_node(nid, codec) \
        for ((nid) = (codec)->core.start_nid; (nid) < (codec)->core.end_nid; (nid)++)
 
+/* Set the codec power_state flag to indicate to allow unsol event handling;
+ * see hda_codec_unsol_event() in hda_bind.c.  Calling this might confuse the
+ * state tracking, so use with care.
+ */
+static inline void snd_hda_codec_allow_unsol_events(struct hda_codec *codec)
+{
+       codec->core.dev.power.power_state = PMSG_ON;
+}
+
 /*
  * get widget capabilities
  */
index 31ff11ab868e1ac3c1aff15d20a52728884e2788..039b9f2f8e94700f097cc58dc3a02ace0fae2dcf 100644 (file)
@@ -750,6 +750,11 @@ static void cs42l42_resume(struct sub_codec *cs42l42)
        if (cs42l42->full_scale_vol)
                cs8409_i2c_write(cs42l42, 0x2001, 0x01);
 
+       /* we have to explicitly allow unsol event handling even during the
+        * resume phase so that the jack event is processed properly
+        */
+       snd_hda_codec_allow_unsol_events(cs42l42->codec);
+
        cs42l42_enable_jack_detect(cs42l42);
 }
 
index 65d2c55399195a4c6b89afe3f62f1debbe8fbeed..ffcde7409d2a58fbcfc1bb12e84b3f320f3c1329 100644 (file)
@@ -2947,7 +2947,8 @@ static int parse_intel_hdmi(struct hda_codec *codec)
 
 /* Intel Haswell and onwards; audio component with eld notifier */
 static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
-                                const int *port_map, int port_num, int dev_num)
+                                const int *port_map, int port_num, int dev_num,
+                                bool send_silent_stream)
 {
        struct hdmi_spec *spec;
        int err;
@@ -2980,7 +2981,7 @@ static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
         * Enable silent stream feature, if it is enabled via
         * module param or Kconfig option
         */
-       if (enable_silent_stream)
+       if (send_silent_stream)
                spec->send_silent_stream = true;
 
        return parse_intel_hdmi(codec);
@@ -2988,12 +2989,18 @@ static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
 
 static int patch_i915_hsw_hdmi(struct hda_codec *codec)
 {
-       return intel_hsw_common_init(codec, 0x08, NULL, 0, 3);
+       return intel_hsw_common_init(codec, 0x08, NULL, 0, 3,
+                                    enable_silent_stream);
 }
 
 static int patch_i915_glk_hdmi(struct hda_codec *codec)
 {
-       return intel_hsw_common_init(codec, 0x0b, NULL, 0, 3);
+       /*
+        * Silent stream calls audio component .get_power() from
+        * .pin_eld_notify(). On GLK this will deadlock in i915 due
+        * to the audio vs. CDCLK workaround.
+        */
+       return intel_hsw_common_init(codec, 0x0b, NULL, 0, 3, false);
 }
 
 static int patch_i915_icl_hdmi(struct hda_codec *codec)
@@ -3004,7 +3011,8 @@ static int patch_i915_icl_hdmi(struct hda_codec *codec)
         */
        static const int map[] = {0x0, 0x4, 0x6, 0x8, 0xa, 0xb};
 
-       return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 3);
+       return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 3,
+                                    enable_silent_stream);
 }
 
 static int patch_i915_tgl_hdmi(struct hda_codec *codec)
@@ -3016,7 +3024,8 @@ static int patch_i915_tgl_hdmi(struct hda_codec *codec)
        static const int map[] = {0x4, 0x6, 0x8, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
        int ret;
 
-       ret = intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 4);
+       ret = intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 4,
+                                   enable_silent_stream);
        if (!ret) {
                struct hdmi_spec *spec = codec->spec;
 
@@ -4380,10 +4389,11 @@ HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI",     patch_i915_icl_hdmi),
 HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI",  patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x80862814, "DG1 HDMI",        patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x80862815, "Alderlake HDMI",  patch_i915_tgl_hdmi),
-HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x80862816, "Rocketlake HDMI", patch_i915_tgl_hdmi),
+HDA_CODEC_ENTRY(0x80862819, "DG2 HDMI",        patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi),
 HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI",        patch_i915_icl_hdmi),
+HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
 HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI",        patch_i915_byt_hdmi),
 HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI",   patch_i915_byt_hdmi),
index 9ce7457533c966799c82448642790092da2b2142..28255e752c4a1da5f548df8d222bf452dd17bc55 100644 (file)
@@ -6503,22 +6503,26 @@ static void alc287_fixup_legion_15imhg05_speakers(struct hda_codec *codec,
 /* for alc285_fixup_ideapad_s740_coef() */
 #include "ideapad_s740_helper.c"
 
-static void alc256_fixup_tongfang_reset_persistent_settings(struct hda_codec *codec,
-                                                           const struct hda_fixup *fix,
-                                                           int action)
+static const struct coef_fw alc256_fixup_set_coef_defaults_coefs[] = {
+       WRITE_COEF(0x10, 0x0020), WRITE_COEF(0x24, 0x0000),
+       WRITE_COEF(0x26, 0x0000), WRITE_COEF(0x29, 0x3000),
+       WRITE_COEF(0x37, 0xfe05), WRITE_COEF(0x45, 0x5089),
+       {}
+};
+
+static void alc256_fixup_set_coef_defaults(struct hda_codec *codec,
+                                          const struct hda_fixup *fix,
+                                          int action)
 {
        /*
-       * A certain other OS sets these coeffs to different values. On at least one TongFang
-       * barebone these settings might survive even a cold reboot. So to restore a clean slate the
-       * values are explicitly reset to default here. Without this, the external microphone is
-       * always in a plugged-in state, while the internal microphone is always in an unplugged
-       * state, breaking the ability to use the internal microphone.
-       */
-       alc_write_coef_idx(codec, 0x24, 0x0000);
-       alc_write_coef_idx(codec, 0x26, 0x0000);
-       alc_write_coef_idx(codec, 0x29, 0x3000);
-       alc_write_coef_idx(codec, 0x37, 0xfe05);
-       alc_write_coef_idx(codec, 0x45, 0x5089);
+        * A certain other OS sets these coeffs to different values. On at least
+        * one TongFang barebone these settings might survive even a cold
+        * reboot. So to restore a clean slate the values are explicitly reset
+        * to default here. Without this, the external microphone is always in a
+        * plugged-in state, while the internal microphone is always in an
+        * unplugged state, breaking the ability to use the internal microphone.
+        */
+       alc_process_coef_fw(codec, alc256_fixup_set_coef_defaults_coefs);
 }
 
 static const struct coef_fw alc233_fixup_no_audio_jack_coefs[] = {
@@ -6542,6 +6546,23 @@ static void alc233_fixup_no_audio_jack(struct hda_codec *codec,
        alc_process_coef_fw(codec, alc233_fixup_no_audio_jack_coefs);
 }
 
+static void alc256_fixup_mic_no_presence_and_resume(struct hda_codec *codec,
+                                                   const struct hda_fixup *fix,
+                                                   int action)
+{
+       /*
+        * The Clevo NJ51CU comes either with the ALC293 or the ALC256 codec,
+        * but uses the 0x8686 subproduct id in both cases. The ALC256 codec
+        * needs an additional quirk for sound working after suspend and resume.
+        */
+       if (codec->core.vendor_id == 0x10ec0256) {
+               alc_update_coef_idx(codec, 0x10, 1<<9, 0);
+               snd_hda_codec_set_pincfg(codec, 0x19, 0x04a11120);
+       } else {
+               snd_hda_codec_set_pincfg(codec, 0x1a, 0x04a1113c);
+       }
+}
+
 enum {
        ALC269_FIXUP_GPIO2,
        ALC269_FIXUP_SONY_VAIO,
@@ -6759,9 +6780,10 @@ enum {
        ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE,
        ALC287_FIXUP_YOGA7_14ITL_SPEAKERS,
        ALC287_FIXUP_13S_GEN2_SPEAKERS,
-       ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS,
+       ALC256_FIXUP_SET_COEF_DEFAULTS,
        ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
        ALC233_FIXUP_NO_AUDIO_JACK,
+       ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -8465,9 +8487,9 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_HEADSET_MODE,
        },
-       [ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS] = {
+       [ALC256_FIXUP_SET_COEF_DEFAULTS] = {
                .type = HDA_FIXUP_FUNC,
-               .v.func = alc256_fixup_tongfang_reset_persistent_settings,
+               .v.func = alc256_fixup_set_coef_defaults,
        },
        [ALC245_FIXUP_HP_GPIO_LED] = {
                .type = HDA_FIXUP_FUNC,
@@ -8486,6 +8508,12 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc233_fixup_no_audio_jack,
        },
+       [ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc256_fixup_mic_no_presence_and_resume,
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -8656,6 +8684,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
        SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
+       SND_PCI_QUIRK(0x103c, 0x860f, "HP ZBook 15 G6", ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x861f, "HP Elite Dragonfly G1", ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
@@ -8701,6 +8730,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x103c, 0x88d0, "HP Pavilion 15-eh1xxx (mainboard 88D0)", ALC287_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
        SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
        SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -8825,7 +8855,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x8562, "Clevo NH[57][0-9]RZ[Q]", ALC269_FIXUP_DMIC),
        SND_PCI_QUIRK(0x1558, 0x8668, "Clevo NP50B[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x8680, "Clevo NJ50LU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
-       SND_PCI_QUIRK(0x1558, 0x8686, "Clevo NH50[CZ]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1558, 0x8686, "Clevo NH50[CZ]U", ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME),
        SND_PCI_QUIRK(0x1558, 0x8a20, "Clevo NH55DCQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x8a51, "Clevo NH70RCQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x8d50, "Clevo NH55RCQ-M", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
@@ -8929,7 +8959,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
        SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
        SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
-       SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS),
+       SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_SET_COEF_DEFAULTS),
        SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
        SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
@@ -9119,6 +9149,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"},
        {.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"},
        {.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"},
+       {.id = ALC285_FIXUP_HP_GPIO_AMP_INIT, .name = "alc285-hp-amp-init"},
        {}
 };
 #define ALC225_STANDARD_PINS \
@@ -10231,6 +10262,27 @@ static void alc671_fixup_hp_headset_mic2(struct hda_codec *codec,
        }
 }
 
+static void alc897_hp_automute_hook(struct hda_codec *codec,
+                                        struct hda_jack_callback *jack)
+{
+       struct alc_spec *spec = codec->spec;
+       int vref;
+
+       snd_hda_gen_hp_automute(codec, jack);
+       vref = spec->gen.hp_jack_present ? (PIN_HP | AC_PINCTL_VREF_100) : PIN_HP;
+       snd_hda_codec_write(codec, 0x1b, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
+                           vref);
+}
+
+static void alc897_fixup_lenovo_headset_mic(struct hda_codec *codec,
+                                    const struct hda_fixup *fix, int action)
+{
+       struct alc_spec *spec = codec->spec;
+       if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+               spec->gen.hp_automute_hook = alc897_hp_automute_hook;
+       }
+}
+
 static const struct coef_fw alc668_coefs[] = {
        WRITE_COEF(0x01, 0xbebe), WRITE_COEF(0x02, 0xaaaa), WRITE_COEF(0x03,    0x0),
        WRITE_COEF(0x04, 0x0180), WRITE_COEF(0x06,    0x0), WRITE_COEF(0x07, 0x0f80),
@@ -10311,6 +10363,8 @@ enum {
        ALC668_FIXUP_ASUS_NO_HEADSET_MIC,
        ALC668_FIXUP_HEADSET_MIC,
        ALC668_FIXUP_MIC_DET_COEF,
+       ALC897_FIXUP_LENOVO_HEADSET_MIC,
+       ALC897_FIXUP_HEADSET_MIC_PIN,
 };
 
 static const struct hda_fixup alc662_fixups[] = {
@@ -10717,6 +10771,19 @@ static const struct hda_fixup alc662_fixups[] = {
                        {}
                },
        },
+       [ALC897_FIXUP_LENOVO_HEADSET_MIC] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc897_fixup_lenovo_headset_mic,
+       },
+       [ALC897_FIXUP_HEADSET_MIC_PIN] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1a, 0x03a11050 },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MIC
+       },
 };
 
 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -10761,6 +10828,10 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
        SND_PCI_QUIRK(0x14cd, 0x5003, "USI", ALC662_FIXUP_USI_HEADSET_MODE),
        SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC662_FIXUP_LENOVO_MULTI_CODECS),
+       SND_PCI_QUIRK(0x17aa, 0x32ca, "Lenovo ThinkCentre M80", ALC897_FIXUP_HEADSET_MIC_PIN),
+       SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN),
+       SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN),
+       SND_PCI_QUIRK(0x17aa, 0x32f7, "Lenovo ThinkCentre M90", ALC897_FIXUP_HEADSET_MIC_PIN),
        SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
        SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
        SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO),
index 957eeb6fb8e379d7fa8b821120d52ac5d4d0726c..7e9a9a9d8ddd94fc9624b6a213fcb1e6acbd9296 100644 (file)
@@ -146,10 +146,11 @@ static int snd_acp6x_probe(struct pci_dev *pci,
 {
        struct acp6x_dev_data *adata;
        struct platform_device_info pdevinfo[ACP6x_DEVS];
-       int ret, index;
+       int index = 0;
        int val = 0x00;
        u32 addr;
        unsigned int irqflags;
+       int ret;
 
        irqflags = IRQF_SHARED;
        /* Yellow Carp device check */
index 90a921f726c3bdb398ff15bd7e52a673d76b5d8d..3fa99741779af3d90216bf004349435f6b0d4c12 100644 (file)
@@ -42,34 +42,6 @@ static const struct spi_device_id cs35l41_id_spi[] = {
 
 MODULE_DEVICE_TABLE(spi, cs35l41_id_spi);
 
-static void cs35l41_spi_otp_setup(struct cs35l41_private *cs35l41,
-                                 bool is_pre_setup, unsigned int *freq)
-{
-       struct spi_device *spi;
-       u32 orig_spi_freq;
-
-       spi = to_spi_device(cs35l41->dev);
-
-       if (!spi) {
-               dev_err(cs35l41->dev, "%s: No SPI device\n", __func__);
-               return;
-       }
-
-       if (is_pre_setup) {
-               orig_spi_freq = spi->max_speed_hz;
-               if (orig_spi_freq > CS35L41_SPI_MAX_FREQ_OTP) {
-                       spi->max_speed_hz = CS35L41_SPI_MAX_FREQ_OTP;
-                       spi_setup(spi);
-               }
-               *freq = orig_spi_freq;
-       } else {
-               if (spi->max_speed_hz != *freq) {
-                       spi->max_speed_hz = *freq;
-                       spi_setup(spi);
-               }
-       }
-}
-
 static int cs35l41_spi_probe(struct spi_device *spi)
 {
        const struct regmap_config *regmap_config = &cs35l41_regmap_spi;
@@ -81,6 +53,9 @@ static int cs35l41_spi_probe(struct spi_device *spi)
        if (!cs35l41)
                return -ENOMEM;
 
+       spi->max_speed_hz = CS35L41_SPI_MAX_FREQ;
+       spi_setup(spi);
+
        spi_set_drvdata(spi, cs35l41);
        cs35l41->regmap = devm_regmap_init_spi(spi, regmap_config);
        if (IS_ERR(cs35l41->regmap)) {
@@ -91,7 +66,6 @@ static int cs35l41_spi_probe(struct spi_device *spi)
 
        cs35l41->dev = &spi->dev;
        cs35l41->irq = spi->irq;
-       cs35l41->otp_setup = cs35l41_spi_otp_setup;
 
        return cs35l41_probe(cs35l41, pdata);
 }
index 9d0530dde996728892a178d1dd6ba52b89557df3..9c4d481f7614c6a61edb065cfd45b7603afc9c0f 100644 (file)
@@ -302,7 +302,6 @@ static int cs35l41_otp_unpack(void *data)
        const struct cs35l41_otp_packed_element_t *otp_map;
        struct cs35l41_private *cs35l41 = data;
        int bit_offset, word_offset, ret, i;
-       unsigned int orig_spi_freq;
        unsigned int bit_sum = 8;
        u32 otp_val, otp_id_reg;
        u32 *otp_mem;
@@ -326,9 +325,6 @@ static int cs35l41_otp_unpack(void *data)
                goto err_otp_unpack;
        }
 
-       if (cs35l41->otp_setup)
-               cs35l41->otp_setup(cs35l41, true, &orig_spi_freq);
-
        ret = regmap_bulk_read(cs35l41->regmap, CS35L41_OTP_MEM0, otp_mem,
                               CS35L41_OTP_SIZE_WORDS);
        if (ret < 0) {
@@ -336,9 +332,6 @@ static int cs35l41_otp_unpack(void *data)
                goto err_otp_unpack;
        }
 
-       if (cs35l41->otp_setup)
-               cs35l41->otp_setup(cs35l41, false, &orig_spi_freq);
-
        otp_map = otp_map_match->map;
 
        bit_offset = otp_map_match->bit_offset;
index 6cffe8a55beb1b9789f16f06c0ef0f94250f401c..48485b08a6f1fdb5dde5c14d20fba978d90268a7 100644 (file)
 #define CS35L41_FS2_WINDOW_MASK                0x00FFF800
 #define CS35L41_FS2_WINDOW_SHIFT       12
 
-#define CS35L41_SPI_MAX_FREQ_OTP       4000000
+#define CS35L41_SPI_MAX_FREQ           4000000
 
 #define CS35L41_RX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
 #define CS35L41_TX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
@@ -764,8 +764,6 @@ struct cs35l41_private {
        int irq;
        /* GPIO for /RST */
        struct gpio_desc *reset_gpio;
-       void (*otp_setup)(struct cs35l41_private *cs35l41, bool is_pre_setup,
-                         unsigned int *freq);
 };
 
 int cs35l41_probe(struct cs35l41_private *cs35l41,
index 943d7d933e81b3325e3ae09a25df7c1ada8e422d..03f24edfe4f6492b86fe8fc97a3ae362b6083e50 100644 (file)
@@ -539,3 +539,4 @@ module_platform_driver(rk817_codec_driver);
 MODULE_DESCRIPTION("ASoC RK817 codec driver");
 MODULE_AUTHOR("binyuan <kevan.lan@rock-chips.com>");
 MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:rk817-codec");
index 04cb747c2b125741970b58a9ac1df71dbc308765..b34a8542077dcfde3e20fcccb77f2a22cdcb39c7 100644 (file)
@@ -929,6 +929,8 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
        unsigned int val, count;
 
        if (jack_insert) {
+               snd_soc_dapm_mutex_lock(dapm);
+
                snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1,
                        RT5682_PWR_VREF2 | RT5682_PWR_MB,
                        RT5682_PWR_VREF2 | RT5682_PWR_MB);
@@ -979,6 +981,8 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
                snd_soc_component_update_bits(component, RT5682_MICBIAS_2,
                        RT5682_PWR_CLK25M_MASK | RT5682_PWR_CLK1M_MASK,
                        RT5682_PWR_CLK25M_PU | RT5682_PWR_CLK1M_PU);
+
+               snd_soc_dapm_mutex_unlock(dapm);
        } else {
                rt5682_enable_push_button_irq(component, false);
                snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
@@ -2858,6 +2862,8 @@ int rt5682_register_dai_clks(struct rt5682_priv *rt5682)
 
        for (i = 0; i < RT5682_DAI_NUM_CLKS; ++i) {
                struct clk_init_data init = { };
+               struct clk_parent_data parent_data;
+               const struct clk_hw *parent;
 
                dai_clk_hw = &rt5682->dai_clks_hw[i];
 
@@ -2865,17 +2871,17 @@ int rt5682_register_dai_clks(struct rt5682_priv *rt5682)
                case RT5682_DAI_WCLK_IDX:
                        /* Make MCLK the parent of WCLK */
                        if (rt5682->mclk) {
-                               init.parent_data = &(struct clk_parent_data){
+                               parent_data = (struct clk_parent_data){
                                        .fw_name = "mclk",
                                };
+                               init.parent_data = &parent_data;
                                init.num_parents = 1;
                        }
                        break;
                case RT5682_DAI_BCLK_IDX:
                        /* Make WCLK the parent of BCLK */
-                       init.parent_hws = &(const struct clk_hw *){
-                               &rt5682->dai_clks_hw[RT5682_DAI_WCLK_IDX]
-                       };
+                       parent = &rt5682->dai_clks_hw[RT5682_DAI_WCLK_IDX];
+                       init.parent_hws = &parent;
                        init.num_parents = 1;
                        break;
                default:
index 470957fcad6b6bdf69a1565a94ee4c4c00acdc6d..d49a4f68566d21e4de7e43595ceb99be4872d9ac 100644 (file)
@@ -2693,6 +2693,8 @@ static int rt5682s_register_dai_clks(struct snd_soc_component *component)
 
        for (i = 0; i < RT5682S_DAI_NUM_CLKS; ++i) {
                struct clk_init_data init = { };
+               struct clk_parent_data parent_data;
+               const struct clk_hw *parent;
 
                dai_clk_hw = &rt5682s->dai_clks_hw[i];
 
@@ -2700,17 +2702,17 @@ static int rt5682s_register_dai_clks(struct snd_soc_component *component)
                case RT5682S_DAI_WCLK_IDX:
                        /* Make MCLK the parent of WCLK */
                        if (rt5682s->mclk) {
-                               init.parent_data = &(struct clk_parent_data){
+                               parent_data = (struct clk_parent_data){
                                        .fw_name = "mclk",
                                };
+                               init.parent_data = &parent_data;
                                init.num_parents = 1;
                        }
                        break;
                case RT5682S_DAI_BCLK_IDX:
                        /* Make WCLK the parent of BCLK */
-                       init.parent_hws = &(const struct clk_hw *){
-                               &rt5682s->dai_clks_hw[RT5682S_DAI_WCLK_IDX]
-                       };
+                       parent = &rt5682s->dai_clks_hw[RT5682S_DAI_WCLK_IDX];
+                       init.parent_hws = &parent;
                        init.num_parents = 1;
                        break;
                default:
index 172e79cbe0daf5e27df04a0200f73aa420890837..6549e7fef3e323ed31cab060f6f6d2b5212220b3 100644 (file)
@@ -291,11 +291,11 @@ static int tas2770_set_samplerate(struct tas2770_priv *tas2770, int samplerate)
                ramp_rate_val = TAS2770_TDM_CFG_REG0_SMP_44_1KHZ |
                                TAS2770_TDM_CFG_REG0_31_88_2_96KHZ;
                break;
-       case 19200:
+       case 192000:
                ramp_rate_val = TAS2770_TDM_CFG_REG0_SMP_48KHZ |
                                TAS2770_TDM_CFG_REG0_31_176_4_192KHZ;
                break;
-       case 17640:
+       case 176400:
                ramp_rate_val = TAS2770_TDM_CFG_REG0_SMP_44_1KHZ |
                                TAS2770_TDM_CFG_REG0_31_176_4_192KHZ;
                break;
index 4f568abd59e24a2f102c5e0eee37869046dee3bc..e63c6b723d76c44aa8c756ccf7c9f28b405f14f3 100644 (file)
@@ -3256,6 +3256,9 @@ static int wcd934x_compander_set(struct snd_kcontrol *kc,
        int value = ucontrol->value.integer.value[0];
        int sel;
 
+       if (wcd->comp_enabled[comp] == value)
+               return 0;
+
        wcd->comp_enabled[comp] = value;
        sel = value ? WCD934X_HPH_GAIN_SRC_SEL_COMPANDER :
                WCD934X_HPH_GAIN_SRC_SEL_REGISTER;
@@ -3279,10 +3282,10 @@ static int wcd934x_compander_set(struct snd_kcontrol *kc,
        case COMPANDER_8:
                break;
        default:
-               break;
+               return 0;
        }
 
-       return 0;
+       return 1;
 }
 
 static int wcd934x_rx_hph_mode_get(struct snd_kcontrol *kc,
@@ -3326,6 +3329,31 @@ static int slim_rx_mux_get(struct snd_kcontrol *kc,
        return 0;
 }
 
+static int slim_rx_mux_to_dai_id(int mux)
+{
+       int aif_id;
+
+       switch (mux) {
+       case 1:
+               aif_id = AIF1_PB;
+               break;
+       case 2:
+               aif_id = AIF2_PB;
+               break;
+       case 3:
+               aif_id = AIF3_PB;
+               break;
+       case 4:
+               aif_id = AIF4_PB;
+               break;
+       default:
+               aif_id = -1;
+               break;
+       }
+
+       return aif_id;
+}
+
 static int slim_rx_mux_put(struct snd_kcontrol *kc,
                           struct snd_ctl_elem_value *ucontrol)
 {
@@ -3333,43 +3361,59 @@ static int slim_rx_mux_put(struct snd_kcontrol *kc,
        struct wcd934x_codec *wcd = dev_get_drvdata(w->dapm->dev);
        struct soc_enum *e = (struct soc_enum *)kc->private_value;
        struct snd_soc_dapm_update *update = NULL;
+       struct wcd934x_slim_ch *ch, *c;
        u32 port_id = w->shift;
+       bool found = false;
+       int mux_idx;
+       int prev_mux_idx = wcd->rx_port_value[port_id];
+       int aif_id;
 
-       if (wcd->rx_port_value[port_id] == ucontrol->value.enumerated.item[0])
-               return 0;
+       mux_idx = ucontrol->value.enumerated.item[0];
 
-       wcd->rx_port_value[port_id] = ucontrol->value.enumerated.item[0];
+       if (mux_idx == prev_mux_idx)
+               return 0;
 
-       switch (wcd->rx_port_value[port_id]) {
+       switch(mux_idx) {
        case 0:
-               list_del_init(&wcd->rx_chs[port_id].list);
-               break;
-       case 1:
-               list_add_tail(&wcd->rx_chs[port_id].list,
-                             &wcd->dai[AIF1_PB].slim_ch_list);
-               break;
-       case 2:
-               list_add_tail(&wcd->rx_chs[port_id].list,
-                             &wcd->dai[AIF2_PB].slim_ch_list);
-               break;
-       case 3:
-               list_add_tail(&wcd->rx_chs[port_id].list,
-                             &wcd->dai[AIF3_PB].slim_ch_list);
+               aif_id = slim_rx_mux_to_dai_id(prev_mux_idx);
+               if (aif_id < 0)
+                       return 0;
+
+               list_for_each_entry_safe(ch, c, &wcd->dai[aif_id].slim_ch_list, list) {
+                       if (ch->port == port_id + WCD934X_RX_START) {
+                               found = true;
+                               list_del_init(&ch->list);
+                               break;
+                       }
+               }
+               if (!found)
+                       return 0;
+
                break;
-       case 4:
-               list_add_tail(&wcd->rx_chs[port_id].list,
-                             &wcd->dai[AIF4_PB].slim_ch_list);
+       case 1 ... 4:
+               aif_id = slim_rx_mux_to_dai_id(mux_idx);
+               if (aif_id < 0)
+                       return 0;
+
+               if (list_empty(&wcd->rx_chs[port_id].list)) {
+                       list_add_tail(&wcd->rx_chs[port_id].list,
+                                     &wcd->dai[aif_id].slim_ch_list);
+               } else {
+                       dev_err(wcd->dev ,"SLIM_RX%d PORT is busy\n", port_id);
+                       return 0;
+               }
                break;
+
        default:
-               dev_err(wcd->dev, "Unknown AIF %d\n",
-                       wcd->rx_port_value[port_id]);
+               dev_err(wcd->dev, "Unknown AIF %d\n", mux_idx);
                goto err;
        }
 
+       wcd->rx_port_value[port_id] = mux_idx;
        snd_soc_dapm_mux_update_power(w->dapm, kc, wcd->rx_port_value[port_id],
                                      e, update);
 
-       return 0;
+       return 1;
 err:
        return -EINVAL;
 }
@@ -3815,6 +3859,7 @@ static int slim_tx_mixer_put(struct snd_kcontrol *kc,
        struct soc_mixer_control *mixer =
                        (struct soc_mixer_control *)kc->private_value;
        int enable = ucontrol->value.integer.value[0];
+       struct wcd934x_slim_ch *ch, *c;
        int dai_id = widget->shift;
        int port_id = mixer->shift;
 
@@ -3822,17 +3867,32 @@ static int slim_tx_mixer_put(struct snd_kcontrol *kc,
        if (enable == wcd->tx_port_value[port_id])
                return 0;
 
-       wcd->tx_port_value[port_id] = enable;
-
-       if (enable)
-               list_add_tail(&wcd->tx_chs[port_id].list,
-                             &wcd->dai[dai_id].slim_ch_list);
-       else
-               list_del_init(&wcd->tx_chs[port_id].list);
+       if (enable) {
+               if (list_empty(&wcd->tx_chs[port_id].list)) {
+                       list_add_tail(&wcd->tx_chs[port_id].list,
+                                     &wcd->dai[dai_id].slim_ch_list);
+               } else {
+                       dev_err(wcd->dev ,"SLIM_TX%d PORT is busy\n", port_id);
+                       return 0;
+               }
+        } else {
+               bool found = false;
+
+               list_for_each_entry_safe(ch, c, &wcd->dai[dai_id].slim_ch_list, list) {
+                       if (ch->port == port_id) {
+                               found = true;
+                               list_del_init(&wcd->tx_chs[port_id].list);
+                               break;
+                       }
+               }
+               if (!found)
+                       return 0;
+        }
 
+       wcd->tx_port_value[port_id] = enable;
        snd_soc_dapm_mixer_update_power(widget->dapm, kc, enable, update);
 
-       return 0;
+       return 1;
 }
 
 static const struct snd_kcontrol_new aif1_slim_cap_mixer[] = {
index 2da4a5fa7a18d85a5181f589b0be4adb9cca3030..564b78f3cdd0a1d8c7e769c1d2db2b9417cae488 100644 (file)
@@ -772,7 +772,8 @@ static int wsa881x_put_pa_gain(struct snd_kcontrol *kc,
 
                usleep_range(1000, 1010);
        }
-       return 0;
+
+       return 1;
 }
 
 static int wsa881x_get_port(struct snd_kcontrol *kcontrol,
@@ -816,15 +817,22 @@ static int wsa881x_set_port(struct snd_kcontrol *kcontrol,
                (struct soc_mixer_control *)kcontrol->private_value;
        int portidx = mixer->reg;
 
-       if (ucontrol->value.integer.value[0])
+       if (ucontrol->value.integer.value[0]) {
+               if (data->port_enable[portidx])
+                       return 0;
+
                data->port_enable[portidx] = true;
-       else
+       } else {
+               if (!data->port_enable[portidx])
+                       return 0;
+
                data->port_enable[portidx] = false;
+       }
 
        if (portidx == WSA881X_PORT_BOOST) /* Boost Switch */
                wsa881x_boost_ctrl(comp, data->port_enable[portidx]);
 
-       return 0;
+       return 1;
 }
 
 static const char * const smart_boost_lvl_text[] = {
index b4eb0c97edf1c486e1608a54ae8e348823609c6d..4eebc79d4b486df60b07490e6f1cb19a8401f181 100644 (file)
@@ -81,6 +81,12 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cml_machines[] = {
                .sof_fw_filename = "sof-cml.ri",
                .sof_tplg_filename = "sof-cml-da7219-max98390.tplg",
        },
+       {
+               .id = "ESSX8336",
+               .drv_name = "sof-essx8336",
+               .sof_fw_filename = "sof-cml.ri",
+               .sof_tplg_filename = "sof-cml-es8336.tplg",
+       },
        {},
 };
 EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_cml_machines);
index 9322245521463712fccb693448f0fa416aa7e12d..67729de41a73ef5981a0bb757a71817cfd41fd17 100644 (file)
@@ -18,7 +18,6 @@
 #define AIU_RST_SOFT_I2S_FAST          BIT(0)
 
 #define AIU_I2S_DAC_CFG_MSB_FIRST      BIT(2)
-#define AIU_I2S_MISC_HOLD_EN           BIT(2)
 #define AIU_CLK_CTRL_I2S_DIV_EN                BIT(0)
 #define AIU_CLK_CTRL_I2S_DIV           GENMASK(3, 2)
 #define AIU_CLK_CTRL_AOCLK_INVERT      BIT(6)
@@ -36,37 +35,6 @@ static void aiu_encoder_i2s_divider_enable(struct snd_soc_component *component,
                                      enable ? AIU_CLK_CTRL_I2S_DIV_EN : 0);
 }
 
-static void aiu_encoder_i2s_hold(struct snd_soc_component *component,
-                                bool enable)
-{
-       snd_soc_component_update_bits(component, AIU_I2S_MISC,
-                                     AIU_I2S_MISC_HOLD_EN,
-                                     enable ? AIU_I2S_MISC_HOLD_EN : 0);
-}
-
-static int aiu_encoder_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
-                                  struct snd_soc_dai *dai)
-{
-       struct snd_soc_component *component = dai->component;
-
-       switch (cmd) {
-       case SNDRV_PCM_TRIGGER_START:
-       case SNDRV_PCM_TRIGGER_RESUME:
-       case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-               aiu_encoder_i2s_hold(component, false);
-               return 0;
-
-       case SNDRV_PCM_TRIGGER_STOP:
-       case SNDRV_PCM_TRIGGER_SUSPEND:
-       case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-               aiu_encoder_i2s_hold(component, true);
-               return 0;
-
-       default:
-               return -EINVAL;
-       }
-}
-
 static int aiu_encoder_i2s_setup_desc(struct snd_soc_component *component,
                                      struct snd_pcm_hw_params *params)
 {
@@ -353,7 +321,6 @@ static void aiu_encoder_i2s_shutdown(struct snd_pcm_substream *substream,
 }
 
 const struct snd_soc_dai_ops aiu_encoder_i2s_dai_ops = {
-       .trigger        = aiu_encoder_i2s_trigger,
        .hw_params      = aiu_encoder_i2s_hw_params,
        .hw_free        = aiu_encoder_i2s_hw_free,
        .set_fmt        = aiu_encoder_i2s_set_fmt,
index 2388a2d0b3a6c5206d0521fb17006aa2848c1b85..57e6e7160d2f25e8bbf39024a2be5abf7e537cd3 100644 (file)
@@ -20,6 +20,8 @@
 #define AIU_MEM_I2S_CONTROL_MODE_16BIT BIT(6)
 #define AIU_MEM_I2S_BUF_CNTL_INIT      BIT(0)
 #define AIU_RST_SOFT_I2S_FAST          BIT(0)
+#define AIU_I2S_MISC_HOLD_EN           BIT(2)
+#define AIU_I2S_MISC_FORCE_LEFT_RIGHT  BIT(4)
 
 #define AIU_FIFO_I2S_BLOCK             256
 
@@ -90,6 +92,10 @@ static int aiu_fifo_i2s_hw_params(struct snd_pcm_substream *substream,
        unsigned int val;
        int ret;
 
+       snd_soc_component_update_bits(component, AIU_I2S_MISC,
+                                     AIU_I2S_MISC_HOLD_EN,
+                                     AIU_I2S_MISC_HOLD_EN);
+
        ret = aiu_fifo_hw_params(substream, params, dai);
        if (ret)
                return ret;
@@ -117,6 +123,19 @@ static int aiu_fifo_i2s_hw_params(struct snd_pcm_substream *substream,
        snd_soc_component_update_bits(component, AIU_MEM_I2S_MASKS,
                                      AIU_MEM_I2S_MASKS_IRQ_BLOCK, val);
 
+       /*
+        * Most (all?) supported SoCs have this bit set by default. The vendor
+        * driver however sets it manually (depending on the version either
+        * while un-setting AIU_I2S_MISC_HOLD_EN or right before that). Follow
+        * the same approach for consistency with the vendor driver.
+        */
+       snd_soc_component_update_bits(component, AIU_I2S_MISC,
+                                     AIU_I2S_MISC_FORCE_LEFT_RIGHT,
+                                     AIU_I2S_MISC_FORCE_LEFT_RIGHT);
+
+       snd_soc_component_update_bits(component, AIU_I2S_MISC,
+                                     AIU_I2S_MISC_HOLD_EN, 0);
+
        return 0;
 }
 
index 4ad23267cace58e9b35dc38d6d56b7a8288a1f4b..d67ff4cdabd5af8bb3238bc9b8524f794395e2a4 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <linux/bitfield.h>
 #include <linux/clk.h>
+#include <linux/dma-mapping.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
 #include <sound/soc-dai.h>
@@ -179,6 +180,11 @@ int aiu_fifo_pcm_new(struct snd_soc_pcm_runtime *rtd,
        struct snd_card *card = rtd->card->snd_card;
        struct aiu_fifo *fifo = dai->playback_dma_data;
        size_t size = fifo->pcm->buffer_bytes_max;
+       int ret;
+
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        snd_pcm_set_managed_buffer_all(rtd->pcm, SNDRV_DMA_TYPE_DEV,
                                       card->dev, size, size);
index cd74681e811e1fb6fae0d3e8593fd0de39c1a456..928fd23e2c27271a49a2376ffdf86ad3e85fa82b 100644 (file)
@@ -498,14 +498,16 @@ static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol,
        struct session_data *session = &data->sessions[session_id];
 
        if (ucontrol->value.integer.value[0]) {
+               if (session->port_id == be_id)
+                       return 0;
+
                session->port_id = be_id;
                snd_soc_dapm_mixer_update_power(dapm, kcontrol, 1, update);
        } else {
-               if (session->port_id == be_id) {
-                       session->port_id = -1;
+               if (session->port_id == -1 || session->port_id != be_id)
                        return 0;
-               }
 
+               session->port_id = -1;
                snd_soc_dapm_mixer_update_power(dapm, kcontrol, 0, update);
        }
 
index 17b9b287853a164ab2b2310ceb1186f3d441ef6b..5f9cb5c4c7f090780506e01b8a719d619bb797b1 100644 (file)
@@ -95,6 +95,7 @@ struct rk_i2s_tdm_dev {
        spinlock_t lock; /* xfer lock */
        bool has_playback;
        bool has_capture;
+       struct snd_soc_dai_driver *dai;
 };
 
 static int to_ch_num(unsigned int val)
@@ -1310,19 +1311,14 @@ static const struct of_device_id rockchip_i2s_tdm_match[] = {
        {},
 };
 
-static struct snd_soc_dai_driver i2s_tdm_dai = {
+static const struct snd_soc_dai_driver i2s_tdm_dai = {
        .probe = rockchip_i2s_tdm_dai_probe,
-       .playback = {
-               .stream_name  = "Playback",
-       },
-       .capture = {
-               .stream_name  = "Capture",
-       },
        .ops = &rockchip_i2s_tdm_dai_ops,
 };
 
-static void rockchip_i2s_tdm_init_dai(struct rk_i2s_tdm_dev *i2s_tdm)
+static int rockchip_i2s_tdm_init_dai(struct rk_i2s_tdm_dev *i2s_tdm)
 {
+       struct snd_soc_dai_driver *dai;
        struct property *dma_names;
        const char *dma_name;
        u64 formats = (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE |
@@ -1337,19 +1333,33 @@ static void rockchip_i2s_tdm_init_dai(struct rk_i2s_tdm_dev *i2s_tdm)
                        i2s_tdm->has_capture = true;
        }
 
+       dai = devm_kmemdup(i2s_tdm->dev, &i2s_tdm_dai,
+                          sizeof(*dai), GFP_KERNEL);
+       if (!dai)
+               return -ENOMEM;
+
        if (i2s_tdm->has_playback) {
-               i2s_tdm_dai.playback.channels_min = 2;
-               i2s_tdm_dai.playback.channels_max = 8;
-               i2s_tdm_dai.playback.rates = SNDRV_PCM_RATE_8000_192000;
-               i2s_tdm_dai.playback.formats = formats;
+               dai->playback.stream_name  = "Playback";
+               dai->playback.channels_min = 2;
+               dai->playback.channels_max = 8;
+               dai->playback.rates = SNDRV_PCM_RATE_8000_192000;
+               dai->playback.formats = formats;
        }
 
        if (i2s_tdm->has_capture) {
-               i2s_tdm_dai.capture.channels_min = 2;
-               i2s_tdm_dai.capture.channels_max = 8;
-               i2s_tdm_dai.capture.rates = SNDRV_PCM_RATE_8000_192000;
-               i2s_tdm_dai.capture.formats = formats;
+               dai->capture.stream_name  = "Capture";
+               dai->capture.channels_min = 2;
+               dai->capture.channels_max = 8;
+               dai->capture.rates = SNDRV_PCM_RATE_8000_192000;
+               dai->capture.formats = formats;
        }
+
+       if (i2s_tdm->clk_trcm != TRCM_TXRX)
+               dai->symmetric_rate = 1;
+
+       i2s_tdm->dai = dai;
+
+       return 0;
 }
 
 static int rockchip_i2s_tdm_path_check(struct rk_i2s_tdm_dev *i2s_tdm,
@@ -1541,8 +1551,6 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev)
        spin_lock_init(&i2s_tdm->lock);
        i2s_tdm->soc_data = (struct rk_i2s_soc_data *)of_id->data;
 
-       rockchip_i2s_tdm_init_dai(i2s_tdm);
-
        i2s_tdm->frame_width = 64;
 
        i2s_tdm->clk_trcm = TRCM_TXRX;
@@ -1555,8 +1563,10 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev)
                }
                i2s_tdm->clk_trcm = TRCM_RX;
        }
-       if (i2s_tdm->clk_trcm != TRCM_TXRX)
-               i2s_tdm_dai.symmetric_rate = 1;
+
+       ret = rockchip_i2s_tdm_init_dai(i2s_tdm);
+       if (ret)
+               return ret;
 
        i2s_tdm->grf = syscon_regmap_lookup_by_phandle(node, "rockchip,grf");
        if (IS_ERR(i2s_tdm->grf))
@@ -1678,7 +1688,7 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev)
 
        ret = devm_snd_soc_register_component(&pdev->dev,
                                              &rockchip_i2s_tdm_component,
-                                             &i2s_tdm_dai, 1);
+                                             i2s_tdm->dai, 1);
 
        if (ret) {
                dev_err(&pdev->dev, "Could not register DAI\n");
index 2ae99b49d3f5f6b72c5990f05432bf06c588d888..cbd7ea48837b246ddcface067b0eff746fec59da 100644 (file)
@@ -20,8 +20,10 @@ static bool snd_soc_acpi_id_present(struct snd_soc_acpi_mach *machine)
 
        if (comp_ids) {
                for (i = 0; i < comp_ids->num_codecs; i++) {
-                       if (acpi_dev_present(comp_ids->codecs[i], NULL, -1))
+                       if (acpi_dev_present(comp_ids->codecs[i], NULL, -1)) {
+                               strscpy(machine->id, comp_ids->codecs[i], ACPI_ID_LEN);
                                return true;
+                       }
                }
        }
 
index 6744318de612e567ceab15b22df953686222a50e..13cd96e6724a49a999ec743f9a5c24f9d11536f9 100644 (file)
@@ -22,6 +22,7 @@
 
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
 #define IDISP_VID_INTEL        0x80860000
+#define CODEC_PROBE_RETRIES 3
 
 /* load the legacy HDA codec driver */
 static int request_codec_module(struct hda_codec *codec)
@@ -121,12 +122,15 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address,
        u32 hda_cmd = (address << 28) | (AC_NODE_ROOT << 20) |
                (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
        u32 resp = -1;
-       int ret;
+       int ret, retry = 0;
+
+       do {
+               mutex_lock(&hbus->core.cmd_mutex);
+               snd_hdac_bus_send_cmd(&hbus->core, hda_cmd);
+               snd_hdac_bus_get_response(&hbus->core, address, &resp);
+               mutex_unlock(&hbus->core.cmd_mutex);
+       } while (resp == -1 && retry++ < CODEC_PROBE_RETRIES);
 
-       mutex_lock(&hbus->core.cmd_mutex);
-       snd_hdac_bus_send_cmd(&hbus->core, hda_cmd);
-       snd_hdac_bus_get_response(&hbus->core, address, &resp);
-       mutex_unlock(&hbus->core.cmd_mutex);
        if (resp == -1)
                return -EIO;
        dev_dbg(sdev->dev, "HDA codec #%d probed OK: response: %x\n",
index 568d351b7a4e97cb5f9beb3a612c8cf709e56dae..2c0d4d06ab364125a4a3a987c1000385905a7042 100644 (file)
@@ -58,6 +58,13 @@ int hda_ctrl_dai_widget_setup(struct snd_soc_dapm_widget *w)
                return -EINVAL;
        }
 
+       /* DAI already configured, reset it before reconfiguring it */
+       if (sof_dai->configured) {
+               ret = hda_ctrl_dai_widget_free(w);
+               if (ret < 0)
+                       return ret;
+       }
+
        config = &sof_dai->dai_config[sof_dai->current_config];
 
        /*
index f2ea34df9741da6177ccdcfb3b2254a92a32eb7b..fd46210f17303b41f83d7e8db79d18cf76598509 100644 (file)
@@ -112,8 +112,12 @@ static const struct pci_device_id sof_pci_ids[] = {
                .driver_data = (unsigned long)&adls_desc},
        { PCI_DEVICE(0x8086, 0x51c8), /* ADL-P */
                .driver_data = (unsigned long)&adl_desc},
+       { PCI_DEVICE(0x8086, 0x51cd), /* ADL-P */
+               .driver_data = (unsigned long)&adl_desc},
        { PCI_DEVICE(0x8086, 0x51cc), /* ADL-M */
                .driver_data = (unsigned long)&adl_desc},
+       { PCI_DEVICE(0x8086, 0x54c8), /* ADL-N */
+               .driver_data = (unsigned long)&adl_desc},
        { 0, }
 };
 MODULE_DEVICE_TABLE(pci, sof_pci_ids);
index 8ee9a77bd83d375832de31094be44878093ecfdd..a74c980ee77539cd53dd8006eaea1487b8ecc653 100644 (file)
@@ -26,51 +26,162 @@ static const struct reg_default tegra186_dspk_reg_defaults[] = {
        { TEGRA186_DSPK_CODEC_CTRL,  0x03000000 },
 };
 
-static int tegra186_dspk_get_control(struct snd_kcontrol *kcontrol,
+static int tegra186_dspk_get_fifo_th(struct snd_kcontrol *kcontrol,
                                     struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
        struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
 
-       if (strstr(kcontrol->id.name, "FIFO Threshold"))
-               ucontrol->value.integer.value[0] = dspk->rx_fifo_th;
-       else if (strstr(kcontrol->id.name, "OSR Value"))
-               ucontrol->value.integer.value[0] = dspk->osr_val;
-       else if (strstr(kcontrol->id.name, "LR Polarity Select"))
-               ucontrol->value.integer.value[0] = dspk->lrsel;
-       else if (strstr(kcontrol->id.name, "Channel Select"))
-               ucontrol->value.integer.value[0] = dspk->ch_sel;
-       else if (strstr(kcontrol->id.name, "Mono To Stereo"))
-               ucontrol->value.integer.value[0] = dspk->mono_to_stereo;
-       else if (strstr(kcontrol->id.name, "Stereo To Mono"))
-               ucontrol->value.integer.value[0] = dspk->stereo_to_mono;
+       ucontrol->value.integer.value[0] = dspk->rx_fifo_th;
 
        return 0;
 }
 
-static int tegra186_dspk_put_control(struct snd_kcontrol *kcontrol,
+static int tegra186_dspk_put_fifo_th(struct snd_kcontrol *kcontrol,
                                     struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
        struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
-       int val = ucontrol->value.integer.value[0];
-
-       if (strstr(kcontrol->id.name, "FIFO Threshold"))
-               dspk->rx_fifo_th = val;
-       else if (strstr(kcontrol->id.name, "OSR Value"))
-               dspk->osr_val = val;
-       else if (strstr(kcontrol->id.name, "LR Polarity Select"))
-               dspk->lrsel = val;
-       else if (strstr(kcontrol->id.name, "Channel Select"))
-               dspk->ch_sel = val;
-       else if (strstr(kcontrol->id.name, "Mono To Stereo"))
-               dspk->mono_to_stereo = val;
-       else if (strstr(kcontrol->id.name, "Stereo To Mono"))
-               dspk->stereo_to_mono = val;
+       int value = ucontrol->value.integer.value[0];
+
+       if (value == dspk->rx_fifo_th)
+               return 0;
+
+       dspk->rx_fifo_th = value;
+
+       return 1;
+}
+
+static int tegra186_dspk_get_osr_val(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+
+       ucontrol->value.enumerated.item[0] = dspk->osr_val;
 
        return 0;
 }
 
+static int tegra186_dspk_put_osr_val(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == dspk->osr_val)
+               return 0;
+
+       dspk->osr_val = value;
+
+       return 1;
+}
+
+static int tegra186_dspk_get_pol_sel(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+
+       ucontrol->value.enumerated.item[0] = dspk->lrsel;
+
+       return 0;
+}
+
+static int tegra186_dspk_put_pol_sel(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == dspk->lrsel)
+               return 0;
+
+       dspk->lrsel = value;
+
+       return 1;
+}
+
+static int tegra186_dspk_get_ch_sel(struct snd_kcontrol *kcontrol,
+                                   struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+
+       ucontrol->value.enumerated.item[0] = dspk->ch_sel;
+
+       return 0;
+}
+
+static int tegra186_dspk_put_ch_sel(struct snd_kcontrol *kcontrol,
+                                   struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == dspk->ch_sel)
+               return 0;
+
+       dspk->ch_sel = value;
+
+       return 1;
+}
+
+static int tegra186_dspk_get_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+
+       ucontrol->value.enumerated.item[0] = dspk->mono_to_stereo;
+
+       return 0;
+}
+
+static int tegra186_dspk_put_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == dspk->mono_to_stereo)
+               return 0;
+
+       dspk->mono_to_stereo = value;
+
+       return 1;
+}
+
+static int tegra186_dspk_get_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+
+       ucontrol->value.enumerated.item[0] = dspk->stereo_to_mono;
+
+       return 0;
+}
+
+static int tegra186_dspk_put_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+       struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == dspk->stereo_to_mono)
+               return 0;
+
+       dspk->stereo_to_mono = value;
+
+       return 1;
+}
+
 static int __maybe_unused tegra186_dspk_runtime_suspend(struct device *dev)
 {
        struct tegra186_dspk *dspk = dev_get_drvdata(dev);
@@ -279,17 +390,19 @@ static const struct soc_enum tegra186_dspk_lrsel_enum =
 static const struct snd_kcontrol_new tegrat186_dspk_controls[] = {
        SOC_SINGLE_EXT("FIFO Threshold", SND_SOC_NOPM, 0,
                       TEGRA186_DSPK_RX_FIFO_DEPTH - 1, 0,
-                      tegra186_dspk_get_control, tegra186_dspk_put_control),
+                      tegra186_dspk_get_fifo_th, tegra186_dspk_put_fifo_th),
        SOC_ENUM_EXT("OSR Value", tegra186_dspk_osr_enum,
-                    tegra186_dspk_get_control, tegra186_dspk_put_control),
+                    tegra186_dspk_get_osr_val, tegra186_dspk_put_osr_val),
        SOC_ENUM_EXT("LR Polarity Select", tegra186_dspk_lrsel_enum,
-                    tegra186_dspk_get_control, tegra186_dspk_put_control),
+                    tegra186_dspk_get_pol_sel, tegra186_dspk_put_pol_sel),
        SOC_ENUM_EXT("Channel Select", tegra186_dspk_ch_sel_enum,
-                    tegra186_dspk_get_control, tegra186_dspk_put_control),
+                    tegra186_dspk_get_ch_sel, tegra186_dspk_put_ch_sel),
        SOC_ENUM_EXT("Mono To Stereo", tegra186_dspk_mono_conv_enum,
-                    tegra186_dspk_get_control, tegra186_dspk_put_control),
+                    tegra186_dspk_get_mono_to_stereo,
+                    tegra186_dspk_put_mono_to_stereo),
        SOC_ENUM_EXT("Stereo To Mono", tegra186_dspk_stereo_conv_enum,
-                    tegra186_dspk_get_control, tegra186_dspk_put_control),
+                    tegra186_dspk_get_stereo_to_mono,
+                    tegra186_dspk_put_stereo_to_mono),
 };
 
 static const struct snd_soc_component_driver tegra186_dspk_cmpnt = {
index bcccdf3ddc528b2d2ccea759390c0564c1ea6c86..1a2e868a6220932f48f6eab1150523e1c9ee37dd 100644 (file)
@@ -424,46 +424,122 @@ static const struct snd_soc_dai_ops tegra_admaif_dai_ops = {
        .trigger        = tegra_admaif_trigger,
 };
 
-static int tegra_admaif_get_control(struct snd_kcontrol *kcontrol,
-                                   struct snd_ctl_elem_value *ucontrol)
+static int tegra210_admaif_pget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
+       struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+
+       ucontrol->value.enumerated.item[0] =
+               admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg];
+
+       return 0;
+}
+
+static int tegra210_admaif_pput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
+       struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg])
+               return 0;
+
+       admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg] = value;
+
+       return 1;
+}
+
+static int tegra210_admaif_cget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
+       struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+
+       ucontrol->value.enumerated.item[0] =
+               admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg];
+
+       return 0;
+}
+
+static int tegra210_admaif_cput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
        struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg])
+               return 0;
+
+       admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg] = value;
+
+       return 1;
+}
+
+static int tegra210_admaif_pget_stereo_to_mono(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
-       long *uctl_val = &ucontrol->value.integer.value[0];
+       struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
 
-       if (strstr(kcontrol->id.name, "Playback Mono To Stereo"))
-               *uctl_val = admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg];
-       else if (strstr(kcontrol->id.name, "Capture Mono To Stereo"))
-               *uctl_val = admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg];
-       else if (strstr(kcontrol->id.name, "Playback Stereo To Mono"))
-               *uctl_val = admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg];
-       else if (strstr(kcontrol->id.name, "Capture Stereo To Mono"))
-               *uctl_val = admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg];
+       ucontrol->value.enumerated.item[0] =
+               admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg];
 
        return 0;
 }
 
-static int tegra_admaif_put_control(struct snd_kcontrol *kcontrol,
-                                   struct snd_ctl_elem_value *ucontrol)
+static int tegra210_admaif_pput_stereo_to_mono(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
        struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg])
+               return 0;
+
+       admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg] = value;
+
+       return 1;
+}
+
+static int tegra210_admaif_cget_stereo_to_mono(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
-       int value = ucontrol->value.integer.value[0];
+       struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
 
-       if (strstr(kcontrol->id.name, "Playback Mono To Stereo"))
-               admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg] = value;
-       else if (strstr(kcontrol->id.name, "Capture Mono To Stereo"))
-               admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg] = value;
-       else if (strstr(kcontrol->id.name, "Playback Stereo To Mono"))
-               admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg] = value;
-       else if (strstr(kcontrol->id.name, "Capture Stereo To Mono"))
-               admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg] = value;
+       ucontrol->value.enumerated.item[0] =
+               admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg];
 
        return 0;
 }
 
+static int tegra210_admaif_cput_stereo_to_mono(struct snd_kcontrol *kcontrol,
+       struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
+       struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg])
+               return 0;
+
+       admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg] = value;
+
+       return 1;
+}
+
 static int tegra_admaif_dai_probe(struct snd_soc_dai *dai)
 {
        struct tegra_admaif *admaif = snd_soc_dai_get_drvdata(dai);
@@ -559,17 +635,21 @@ static const char * const tegra_admaif_mono_conv_text[] = {
 }
 
 #define TEGRA_ADMAIF_CIF_CTRL(reg)                                            \
-       NV_SOC_ENUM_EXT("ADMAIF" #reg " Playback Mono To Stereo", reg - 1,\
-                       tegra_admaif_get_control, tegra_admaif_put_control,    \
+       NV_SOC_ENUM_EXT("ADMAIF" #reg " Playback Mono To Stereo", reg - 1,     \
+                       tegra210_admaif_pget_mono_to_stereo,                   \
+                       tegra210_admaif_pput_mono_to_stereo,                   \
                        tegra_admaif_mono_conv_text),                          \
-       NV_SOC_ENUM_EXT("ADMAIF" #reg " Playback Stereo To Mono", reg - 1,\
-                       tegra_admaif_get_control, tegra_admaif_put_control,    \
+       NV_SOC_ENUM_EXT("ADMAIF" #reg " Playback Stereo To Mono", reg - 1,     \
+                       tegra210_admaif_pget_stereo_to_mono,                   \
+                       tegra210_admaif_pput_stereo_to_mono,                   \
                        tegra_admaif_stereo_conv_text),                        \
-       NV_SOC_ENUM_EXT("ADMAIF" #reg " Capture Mono To Stereo", reg - 1, \
-                       tegra_admaif_get_control, tegra_admaif_put_control,    \
+       NV_SOC_ENUM_EXT("ADMAIF" #reg " Capture Mono To Stereo", reg - 1,      \
+                       tegra210_admaif_cget_mono_to_stereo,                   \
+                       tegra210_admaif_cput_mono_to_stereo,                   \
                        tegra_admaif_mono_conv_text),                          \
-       NV_SOC_ENUM_EXT("ADMAIF" #reg " Capture Stereo To Mono", reg - 1, \
-                       tegra_admaif_get_control, tegra_admaif_put_control,    \
+       NV_SOC_ENUM_EXT("ADMAIF" #reg " Capture Stereo To Mono", reg - 1,      \
+                       tegra210_admaif_cget_stereo_to_mono,                   \
+                       tegra210_admaif_cput_stereo_to_mono,                   \
                        tegra_admaif_stereo_conv_text)
 
 static struct snd_kcontrol_new tegra210_admaif_controls[] = {
index d7c7849c2f92c999b8463b7715284e6555fb74b7..3785cade2d9a94b21a6e31cfdddd2019f616d351 100644 (file)
@@ -193,6 +193,9 @@ static int tegra210_adx_put_byte_map(struct snd_kcontrol *kcontrol,
        struct soc_mixer_control *mc =
                (struct soc_mixer_control *)kcontrol->private_value;;
 
+       if (value == bytes_map[mc->reg])
+               return 0;
+
        if (value >= 0 && value <= 255) {
                /* update byte map and enable slot */
                bytes_map[mc->reg] = value;
@@ -511,8 +514,8 @@ static int tegra210_adx_platform_remove(struct platform_device *pdev)
 static const struct dev_pm_ops tegra210_adx_pm_ops = {
        SET_RUNTIME_PM_OPS(tegra210_adx_runtime_suspend,
                           tegra210_adx_runtime_resume, NULL)
-       SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
-                                    pm_runtime_force_resume)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
 };
 
 static struct platform_driver tegra210_adx_driver = {
index a1989eae2b525ad818245c56c588b8da0aa55997..388b815443c7d1dff33617667a592e91ab176153 100644 (file)
@@ -62,6 +62,7 @@ static int tegra_ahub_put_value_enum(struct snd_kcontrol *kctl,
        unsigned int *item = uctl->value.enumerated.item;
        unsigned int value = e->values[item[0]];
        unsigned int i, bit_pos, reg_idx = 0, reg_val = 0;
+       int change = 0;
 
        if (item[0] >= e->items)
                return -EINVAL;
@@ -86,12 +87,14 @@ static int tegra_ahub_put_value_enum(struct snd_kcontrol *kctl,
 
                /* Update widget power if state has changed */
                if (snd_soc_component_test_bits(cmpnt, update[i].reg,
-                                               update[i].mask, update[i].val))
-                       snd_soc_dapm_mux_update_power(dapm, kctl, item[0], e,
-                                                     &update[i]);
+                                               update[i].mask,
+                                               update[i].val))
+                       change |= snd_soc_dapm_mux_update_power(dapm, kctl,
+                                                               item[0], e,
+                                                               &update[i]);
        }
 
-       return 0;
+       return change;
 }
 
 static struct snd_soc_dai_driver tegra210_ahub_dais[] = {
index af9bddfc312073d70ded76d3deffb2d5bf637fe7..d064cc67fea66654499d9e73e629463de38807e0 100644 (file)
@@ -222,6 +222,9 @@ static int tegra210_amx_put_byte_map(struct snd_kcontrol *kcontrol,
        int reg = mc->reg;
        int value = ucontrol->value.integer.value[0];
 
+       if (value == bytes_map[reg])
+               return 0;
+
        if (value >= 0 && value <= 255) {
                /* Update byte map and enable slot */
                bytes_map[reg] = value;
@@ -580,8 +583,8 @@ static int tegra210_amx_platform_remove(struct platform_device *pdev)
 static const struct dev_pm_ops tegra210_amx_pm_ops = {
        SET_RUNTIME_PM_OPS(tegra210_amx_runtime_suspend,
                           tegra210_amx_runtime_resume, NULL)
-       SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
-                                    pm_runtime_force_resume)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
 };
 
 static struct platform_driver tegra210_amx_driver = {
index b096478cd2ef0f65eb30239b1e781c0831ec8fad..db95794530f4678b322dabc1ab13dedb552acfea 100644 (file)
@@ -156,51 +156,162 @@ static int tegra210_dmic_hw_params(struct snd_pcm_substream *substream,
        return 0;
 }
 
-static int tegra210_dmic_get_control(struct snd_kcontrol *kcontrol,
+static int tegra210_dmic_get_boost_gain(struct snd_kcontrol *kcontrol,
+                                       struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+
+       ucontrol->value.integer.value[0] = dmic->boost_gain;
+
+       return 0;
+}
+
+static int tegra210_dmic_put_boost_gain(struct snd_kcontrol *kcontrol,
+                                       struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+       int value = ucontrol->value.integer.value[0];
+
+       if (value == dmic->boost_gain)
+               return 0;
+
+       dmic->boost_gain = value;
+
+       return 1;
+}
+
+static int tegra210_dmic_get_ch_select(struct snd_kcontrol *kcontrol,
+                                      struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+
+       ucontrol->value.enumerated.item[0] = dmic->ch_select;
+
+       return 0;
+}
+
+static int tegra210_dmic_put_ch_select(struct snd_kcontrol *kcontrol,
+                                      struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == dmic->ch_select)
+               return 0;
+
+       dmic->ch_select = value;
+
+       return 1;
+}
+
+static int tegra210_dmic_get_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+
+       ucontrol->value.enumerated.item[0] = dmic->mono_to_stereo;
+
+       return 0;
+}
+
+static int tegra210_dmic_put_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == dmic->mono_to_stereo)
+               return 0;
+
+       dmic->mono_to_stereo = value;
+
+       return 1;
+}
+
+static int tegra210_dmic_get_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+
+       ucontrol->value.enumerated.item[0] = dmic->stereo_to_mono;
+
+       return 0;
+}
+
+static int tegra210_dmic_put_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == dmic->stereo_to_mono)
+               return 0;
+
+       dmic->stereo_to_mono = value;
+
+       return 1;
+}
+
+static int tegra210_dmic_get_osr_val(struct snd_kcontrol *kcontrol,
                                     struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
 
-       if (strstr(kcontrol->id.name, "Boost Gain Volume"))
-               ucontrol->value.integer.value[0] = dmic->boost_gain;
-       else if (strstr(kcontrol->id.name, "Channel Select"))
-               ucontrol->value.integer.value[0] = dmic->ch_select;
-       else if (strstr(kcontrol->id.name, "Mono To Stereo"))
-               ucontrol->value.integer.value[0] = dmic->mono_to_stereo;
-       else if (strstr(kcontrol->id.name, "Stereo To Mono"))
-               ucontrol->value.integer.value[0] = dmic->stereo_to_mono;
-       else if (strstr(kcontrol->id.name, "OSR Value"))
-               ucontrol->value.integer.value[0] = dmic->osr_val;
-       else if (strstr(kcontrol->id.name, "LR Polarity Select"))
-               ucontrol->value.integer.value[0] = dmic->lrsel;
+       ucontrol->value.enumerated.item[0] = dmic->osr_val;
 
        return 0;
 }
 
-static int tegra210_dmic_put_control(struct snd_kcontrol *kcontrol,
+static int tegra210_dmic_put_osr_val(struct snd_kcontrol *kcontrol,
                                     struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
-       int value = ucontrol->value.integer.value[0];
+       unsigned int value = ucontrol->value.enumerated.item[0];
 
-       if (strstr(kcontrol->id.name, "Boost Gain Volume"))
-               dmic->boost_gain = value;
-       else if (strstr(kcontrol->id.name, "Channel Select"))
-               dmic->ch_select = ucontrol->value.integer.value[0];
-       else if (strstr(kcontrol->id.name, "Mono To Stereo"))
-               dmic->mono_to_stereo = value;
-       else if (strstr(kcontrol->id.name, "Stereo To Mono"))
-               dmic->stereo_to_mono = value;
-       else if (strstr(kcontrol->id.name, "OSR Value"))
-               dmic->osr_val = value;
-       else if (strstr(kcontrol->id.name, "LR Polarity Select"))
-               dmic->lrsel = value;
+       if (value == dmic->osr_val)
+               return 0;
+
+       dmic->osr_val = value;
+
+       return 1;
+}
+
+static int tegra210_dmic_get_pol_sel(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+
+       ucontrol->value.enumerated.item[0] = dmic->lrsel;
 
        return 0;
 }
 
+static int tegra210_dmic_put_pol_sel(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == dmic->lrsel)
+               return 0;
+
+       dmic->lrsel = value;
+
+       return 1;
+}
+
 static const struct snd_soc_dai_ops tegra210_dmic_dai_ops = {
        .hw_params      = tegra210_dmic_hw_params,
 };
@@ -287,19 +398,22 @@ static const struct soc_enum tegra210_dmic_lrsel_enum =
 
 static const struct snd_kcontrol_new tegra210_dmic_controls[] = {
        SOC_SINGLE_EXT("Boost Gain Volume", 0, 0, MAX_BOOST_GAIN, 0,
-                      tegra210_dmic_get_control, tegra210_dmic_put_control),
+                      tegra210_dmic_get_boost_gain,
+                      tegra210_dmic_put_boost_gain),
        SOC_ENUM_EXT("Channel Select", tegra210_dmic_ch_enum,
-                    tegra210_dmic_get_control, tegra210_dmic_put_control),
+                    tegra210_dmic_get_ch_select, tegra210_dmic_put_ch_select),
        SOC_ENUM_EXT("Mono To Stereo",
-                    tegra210_dmic_mono_conv_enum, tegra210_dmic_get_control,
-                    tegra210_dmic_put_control),
+                    tegra210_dmic_mono_conv_enum,
+                    tegra210_dmic_get_mono_to_stereo,
+                    tegra210_dmic_put_mono_to_stereo),
        SOC_ENUM_EXT("Stereo To Mono",
-                    tegra210_dmic_stereo_conv_enum, tegra210_dmic_get_control,
-                    tegra210_dmic_put_control),
+                    tegra210_dmic_stereo_conv_enum,
+                    tegra210_dmic_get_stereo_to_mono,
+                    tegra210_dmic_put_stereo_to_mono),
        SOC_ENUM_EXT("OSR Value", tegra210_dmic_osr_enum,
-                    tegra210_dmic_get_control, tegra210_dmic_put_control),
+                    tegra210_dmic_get_osr_val, tegra210_dmic_put_osr_val),
        SOC_ENUM_EXT("LR Polarity Select", tegra210_dmic_lrsel_enum,
-                    tegra210_dmic_get_control, tegra210_dmic_put_control),
+                    tegra210_dmic_get_pol_sel, tegra210_dmic_put_pol_sel),
 };
 
 static const struct snd_soc_component_driver tegra210_dmic_compnt = {
index 45f31ccb49d89ee2fc195e64e1e89a180c45f862..9552bbb939dd1e9343ad989a5bafb036a75db0e0 100644 (file)
@@ -302,85 +302,235 @@ static int tegra210_i2s_set_tdm_slot(struct snd_soc_dai *dai,
        return 0;
 }
 
-static int tegra210_i2s_set_dai_bclk_ratio(struct snd_soc_dai *dai,
-                                          unsigned int ratio)
+static int tegra210_i2s_get_loopback(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
 {
-       struct tegra210_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
 
-       i2s->bclk_ratio = ratio;
+       ucontrol->value.integer.value[0] = i2s->loopback;
 
        return 0;
 }
 
-static int tegra210_i2s_get_control(struct snd_kcontrol *kcontrol,
-                                   struct snd_ctl_elem_value *ucontrol)
+static int tegra210_i2s_put_loopback(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+       int value = ucontrol->value.integer.value[0];
+
+       if (value == i2s->loopback)
+               return 0;
+
+       i2s->loopback = value;
+
+       regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL, I2S_CTRL_LPBK_MASK,
+                          i2s->loopback << I2S_CTRL_LPBK_SHIFT);
+
+       return 1;
+}
+
+static int tegra210_i2s_get_fsync_width(struct snd_kcontrol *kcontrol,
+                                       struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
-       long *uctl_val = &ucontrol->value.integer.value[0];
-
-       if (strstr(kcontrol->id.name, "Loopback"))
-               *uctl_val = i2s->loopback;
-       else if (strstr(kcontrol->id.name, "FSYNC Width"))
-               *uctl_val = i2s->fsync_width;
-       else if (strstr(kcontrol->id.name, "Capture Stereo To Mono"))
-               *uctl_val = i2s->stereo_to_mono[I2S_TX_PATH];
-       else if (strstr(kcontrol->id.name, "Capture Mono To Stereo"))
-               *uctl_val = i2s->mono_to_stereo[I2S_TX_PATH];
-       else if (strstr(kcontrol->id.name, "Playback Stereo To Mono"))
-               *uctl_val = i2s->stereo_to_mono[I2S_RX_PATH];
-       else if (strstr(kcontrol->id.name, "Playback Mono To Stereo"))
-               *uctl_val = i2s->mono_to_stereo[I2S_RX_PATH];
-       else if (strstr(kcontrol->id.name, "Playback FIFO Threshold"))
-               *uctl_val = i2s->rx_fifo_th;
-       else if (strstr(kcontrol->id.name, "BCLK Ratio"))
-               *uctl_val = i2s->bclk_ratio;
+
+       ucontrol->value.integer.value[0] = i2s->fsync_width;
 
        return 0;
 }
 
-static int tegra210_i2s_put_control(struct snd_kcontrol *kcontrol,
-                                   struct snd_ctl_elem_value *ucontrol)
+static int tegra210_i2s_put_fsync_width(struct snd_kcontrol *kcontrol,
+                                       struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
        int value = ucontrol->value.integer.value[0];
 
-       if (strstr(kcontrol->id.name, "Loopback")) {
-               i2s->loopback = value;
+       if (value == i2s->fsync_width)
+               return 0;
 
-               regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL,
-                                  I2S_CTRL_LPBK_MASK,
-                                  i2s->loopback << I2S_CTRL_LPBK_SHIFT);
+       i2s->fsync_width = value;
 
-       } else if (strstr(kcontrol->id.name, "FSYNC Width")) {
-               /*
-                * Frame sync width is used only for FSYNC modes and not
-                * applicable for LRCK modes. Reset value for this field is "0",
-                * which means the width is one bit clock wide.
-                * The width requirement may depend on the codec and in such
-                * cases mixer control is used to update custom values. A value
-                * of "N" here means, width is "N + 1" bit clock wide.
-                */
-               i2s->fsync_width = value;
-
-               regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL,
-                                  I2S_CTRL_FSYNC_WIDTH_MASK,
-                                  i2s->fsync_width << I2S_FSYNC_WIDTH_SHIFT);
-
-       } else if (strstr(kcontrol->id.name, "Capture Stereo To Mono")) {
-               i2s->stereo_to_mono[I2S_TX_PATH] = value;
-       } else if (strstr(kcontrol->id.name, "Capture Mono To Stereo")) {
-               i2s->mono_to_stereo[I2S_TX_PATH] = value;
-       } else if (strstr(kcontrol->id.name, "Playback Stereo To Mono")) {
-               i2s->stereo_to_mono[I2S_RX_PATH] = value;
-       } else if (strstr(kcontrol->id.name, "Playback Mono To Stereo")) {
-               i2s->mono_to_stereo[I2S_RX_PATH] = value;
-       } else if (strstr(kcontrol->id.name, "Playback FIFO Threshold")) {
-               i2s->rx_fifo_th = value;
-       } else if (strstr(kcontrol->id.name, "BCLK Ratio")) {
-               i2s->bclk_ratio = value;
-       }
+       /*
+        * Frame sync width is used only for FSYNC modes and not
+        * applicable for LRCK modes. Reset value for this field is "0",
+        * which means the width is one bit clock wide.
+        * The width requirement may depend on the codec and in such
+        * cases mixer control is used to update custom values. A value
+        * of "N" here means, width is "N + 1" bit clock wide.
+        */
+       regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL,
+                          I2S_CTRL_FSYNC_WIDTH_MASK,
+                          i2s->fsync_width << I2S_FSYNC_WIDTH_SHIFT);
+
+       return 1;
+}
+
+static int tegra210_i2s_cget_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+       ucontrol->value.enumerated.item[0] = i2s->stereo_to_mono[I2S_TX_PATH];
+
+       return 0;
+}
+
+static int tegra210_i2s_cput_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == i2s->stereo_to_mono[I2S_TX_PATH])
+               return 0;
+
+       i2s->stereo_to_mono[I2S_TX_PATH] = value;
+
+       return 1;
+}
+
+static int tegra210_i2s_cget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+       ucontrol->value.enumerated.item[0] = i2s->mono_to_stereo[I2S_TX_PATH];
+
+       return 0;
+}
+
+static int tegra210_i2s_cput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == i2s->mono_to_stereo[I2S_TX_PATH])
+               return 0;
+
+       i2s->mono_to_stereo[I2S_TX_PATH] = value;
+
+       return 1;
+}
+
+static int tegra210_i2s_pget_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+       ucontrol->value.enumerated.item[0] = i2s->stereo_to_mono[I2S_RX_PATH];
+
+       return 0;
+}
+
+static int tegra210_i2s_pput_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == i2s->stereo_to_mono[I2S_RX_PATH])
+               return 0;
+
+       i2s->stereo_to_mono[I2S_RX_PATH] = value;
+
+       return 1;
+}
+
+static int tegra210_i2s_pget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+       ucontrol->value.enumerated.item[0] = i2s->mono_to_stereo[I2S_RX_PATH];
+
+       return 0;
+}
+
+static int tegra210_i2s_pput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                           struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == i2s->mono_to_stereo[I2S_RX_PATH])
+               return 0;
+
+       i2s->mono_to_stereo[I2S_RX_PATH] = value;
+
+       return 1;
+}
+
+static int tegra210_i2s_pget_fifo_th(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+       ucontrol->value.integer.value[0] = i2s->rx_fifo_th;
+
+       return 0;
+}
+
+static int tegra210_i2s_pput_fifo_th(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+       int value = ucontrol->value.integer.value[0];
+
+       if (value == i2s->rx_fifo_th)
+               return 0;
+
+       i2s->rx_fifo_th = value;
+
+       return 1;
+}
+
+static int tegra210_i2s_get_bclk_ratio(struct snd_kcontrol *kcontrol,
+                                      struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+       ucontrol->value.integer.value[0] = i2s->bclk_ratio;
+
+       return 0;
+}
+
+static int tegra210_i2s_put_bclk_ratio(struct snd_kcontrol *kcontrol,
+                                      struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+       int value = ucontrol->value.integer.value[0];
+
+       if (value == i2s->bclk_ratio)
+               return 0;
+
+       i2s->bclk_ratio = value;
+
+       return 1;
+}
+
+static int tegra210_i2s_set_dai_bclk_ratio(struct snd_soc_dai *dai,
+                                          unsigned int ratio)
+{
+       struct tegra210_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+
+       i2s->bclk_ratio = ratio;
 
        return 0;
 }
@@ -598,22 +748,28 @@ static const struct soc_enum tegra210_i2s_stereo_conv_enum =
                        tegra210_i2s_stereo_conv_text);
 
 static const struct snd_kcontrol_new tegra210_i2s_controls[] = {
-       SOC_SINGLE_EXT("Loopback", 0, 0, 1, 0, tegra210_i2s_get_control,
-                      tegra210_i2s_put_control),
-       SOC_SINGLE_EXT("FSYNC Width", 0, 0, 255, 0, tegra210_i2s_get_control,
-                      tegra210_i2s_put_control),
+       SOC_SINGLE_EXT("Loopback", 0, 0, 1, 0, tegra210_i2s_get_loopback,
+                      tegra210_i2s_put_loopback),
+       SOC_SINGLE_EXT("FSYNC Width", 0, 0, 255, 0,
+                      tegra210_i2s_get_fsync_width,
+                      tegra210_i2s_put_fsync_width),
        SOC_ENUM_EXT("Capture Stereo To Mono", tegra210_i2s_stereo_conv_enum,
-                    tegra210_i2s_get_control, tegra210_i2s_put_control),
+                    tegra210_i2s_cget_stereo_to_mono,
+                    tegra210_i2s_cput_stereo_to_mono),
        SOC_ENUM_EXT("Capture Mono To Stereo", tegra210_i2s_mono_conv_enum,
-                    tegra210_i2s_get_control, tegra210_i2s_put_control),
+                    tegra210_i2s_cget_mono_to_stereo,
+                    tegra210_i2s_cput_mono_to_stereo),
        SOC_ENUM_EXT("Playback Stereo To Mono", tegra210_i2s_stereo_conv_enum,
-                    tegra210_i2s_get_control, tegra210_i2s_put_control),
+                    tegra210_i2s_pget_mono_to_stereo,
+                    tegra210_i2s_pput_mono_to_stereo),
        SOC_ENUM_EXT("Playback Mono To Stereo", tegra210_i2s_mono_conv_enum,
-                    tegra210_i2s_get_control, tegra210_i2s_put_control),
+                    tegra210_i2s_pget_stereo_to_mono,
+                    tegra210_i2s_pput_stereo_to_mono),
        SOC_SINGLE_EXT("Playback FIFO Threshold", 0, 0, I2S_RX_FIFO_DEPTH - 1,
-                      0, tegra210_i2s_get_control, tegra210_i2s_put_control),
-       SOC_SINGLE_EXT("BCLK Ratio", 0, 0, INT_MAX, 0, tegra210_i2s_get_control,
-                      tegra210_i2s_put_control),
+                      0, tegra210_i2s_pget_fifo_th, tegra210_i2s_pput_fifo_th),
+       SOC_SINGLE_EXT("BCLK Ratio", 0, 0, INT_MAX, 0,
+                      tegra210_i2s_get_bclk_ratio,
+                      tegra210_i2s_put_bclk_ratio),
 };
 
 static const struct snd_soc_dapm_widget tegra210_i2s_widgets[] = {
index 55e61776c565ad68cb24f86e59953e4bb804c69c..16e679a9565825c41bc3c2a0646aa67439efbfc0 100644 (file)
@@ -192,24 +192,24 @@ static int tegra210_mixer_get_gain(struct snd_kcontrol *kcontrol,
        return 0;
 }
 
-static int tegra210_mixer_put_gain(struct snd_kcontrol *kcontrol,
-                                  struct snd_ctl_elem_value *ucontrol)
+static int tegra210_mixer_apply_gain(struct snd_kcontrol *kcontrol,
+                                    struct snd_ctl_elem_value *ucontrol,
+                                    bool instant_gain)
 {
        struct soc_mixer_control *mc =
                (struct soc_mixer_control *)kcontrol->private_value;
        struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_mixer *mixer = snd_soc_component_get_drvdata(cmpnt);
        unsigned int reg = mc->reg, id;
-       bool instant_gain = false;
        int err;
 
-       if (strstr(kcontrol->id.name, "Instant Gain Volume"))
-               instant_gain = true;
-
        /* Save gain value for specific MIXER input */
        id = (reg - TEGRA210_MIXER_GAIN_CFG_RAM_ADDR_0) /
             TEGRA210_MIXER_GAIN_CFG_RAM_ADDR_STRIDE;
 
+       if (mixer->gain_value[id] == ucontrol->value.integer.value[0])
+               return 0;
+
        mixer->gain_value[id] = ucontrol->value.integer.value[0];
 
        err = tegra210_mixer_configure_gain(cmpnt, id, instant_gain);
@@ -221,6 +221,18 @@ static int tegra210_mixer_put_gain(struct snd_kcontrol *kcontrol,
        return 1;
 }
 
+static int tegra210_mixer_put_gain(struct snd_kcontrol *kcontrol,
+                                  struct snd_ctl_elem_value *ucontrol)
+{
+       return tegra210_mixer_apply_gain(kcontrol, ucontrol, false);
+}
+
+static int tegra210_mixer_put_instant_gain(struct snd_kcontrol *kcontrol,
+                                          struct snd_ctl_elem_value *ucontrol)
+{
+       return tegra210_mixer_apply_gain(kcontrol, ucontrol, true);
+}
+
 static int tegra210_mixer_set_audio_cif(struct tegra210_mixer *mixer,
                                        struct snd_pcm_hw_params *params,
                                        unsigned int reg,
@@ -388,7 +400,7 @@ ADDER_CTRL_DECL(adder5, TEGRA210_MIXER_TX5_ADDER_CONFIG);
        SOC_SINGLE_EXT("RX" #id " Instant Gain Volume",         \
                       MIXER_GAIN_CFG_RAM_ADDR((id) - 1), 0,    \
                       0x20000, 0, tegra210_mixer_get_gain,     \
-                      tegra210_mixer_put_gain),
+                      tegra210_mixer_put_instant_gain),
 
 /* Volume controls for all MIXER inputs */
 static const struct snd_kcontrol_new tegra210_mixer_gain_ctls[] = {
@@ -654,8 +666,8 @@ static int tegra210_mixer_platform_remove(struct platform_device *pdev)
 static const struct dev_pm_ops tegra210_mixer_pm_ops = {
        SET_RUNTIME_PM_OPS(tegra210_mixer_runtime_suspend,
                           tegra210_mixer_runtime_resume, NULL)
-       SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
-                                    pm_runtime_force_resume)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
 };
 
 static struct platform_driver tegra210_mixer_driver = {
index 7b9c7006e4197e5fc3537b448de461285b019a2a..acf59328dcb6dedd5c9f324b7d0b1248152a08d2 100644 (file)
@@ -136,7 +136,7 @@ static int tegra210_mvc_put_mute(struct snd_kcontrol *kcontrol,
        struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_mvc *mvc = snd_soc_component_get_drvdata(cmpnt);
        unsigned int value;
-       u8 mute_mask;
+       u8 new_mask, old_mask;
        int err;
 
        pm_runtime_get_sync(cmpnt->dev);
@@ -148,15 +148,23 @@ static int tegra210_mvc_put_mute(struct snd_kcontrol *kcontrol,
        if (err < 0)
                goto end;
 
-       mute_mask = ucontrol->value.integer.value[0];
+       regmap_read(mvc->regmap, TEGRA210_MVC_CTRL, &value);
+
+       old_mask = (value >> TEGRA210_MVC_MUTE_SHIFT) & TEGRA210_MUTE_MASK_EN;
+       new_mask = ucontrol->value.integer.value[0];
+
+       if (new_mask == old_mask) {
+               err = 0;
+               goto end;
+       }
 
        err = regmap_update_bits(mvc->regmap, mc->reg,
                                 TEGRA210_MVC_MUTE_MASK,
-                                mute_mask << TEGRA210_MVC_MUTE_SHIFT);
+                                new_mask << TEGRA210_MVC_MUTE_SHIFT);
        if (err < 0)
                goto end;
 
-       return 1;
+       err = 1;
 
 end:
        pm_runtime_put(cmpnt->dev);
@@ -195,7 +203,7 @@ static int tegra210_mvc_put_vol(struct snd_kcontrol *kcontrol,
        unsigned int reg = mc->reg;
        unsigned int value;
        u8 chan;
-       int err;
+       int err, old_volume;
 
        pm_runtime_get_sync(cmpnt->dev);
 
@@ -207,10 +215,16 @@ static int tegra210_mvc_put_vol(struct snd_kcontrol *kcontrol,
                goto end;
 
        chan = (reg - TEGRA210_MVC_TARGET_VOL) / REG_SIZE;
+       old_volume = mvc->volume[chan];
 
        tegra210_mvc_conv_vol(mvc, chan,
                              ucontrol->value.integer.value[0]);
 
+       if (mvc->volume[chan] == old_volume) {
+               err = 0;
+               goto end;
+       }
+
        /* Configure init volume same as target volume */
        regmap_write(mvc->regmap,
                TEGRA210_MVC_REG_OFFSET(TEGRA210_MVC_INIT_VOL, chan),
@@ -222,7 +236,7 @@ static int tegra210_mvc_put_vol(struct snd_kcontrol *kcontrol,
                           TEGRA210_MVC_VOLUME_SWITCH_MASK,
                           TEGRA210_MVC_VOLUME_SWITCH_TRIGGER);
 
-       return 1;
+       err = 1;
 
 end:
        pm_runtime_put(cmpnt->dev);
@@ -275,7 +289,7 @@ static int tegra210_mvc_get_curve_type(struct snd_kcontrol *kcontrol,
        struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_mvc *mvc = snd_soc_component_get_drvdata(cmpnt);
 
-       ucontrol->value.integer.value[0] = mvc->curve_type;
+       ucontrol->value.enumerated.item[0] = mvc->curve_type;
 
        return 0;
 }
@@ -285,7 +299,7 @@ static int tegra210_mvc_put_curve_type(struct snd_kcontrol *kcontrol,
 {
        struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_mvc *mvc = snd_soc_component_get_drvdata(cmpnt);
-       int value;
+       unsigned int value;
 
        regmap_read(mvc->regmap, TEGRA210_MVC_ENABLE, &value);
        if (value & TEGRA210_MVC_EN) {
@@ -294,10 +308,10 @@ static int tegra210_mvc_put_curve_type(struct snd_kcontrol *kcontrol,
                return -EINVAL;
        }
 
-       if (mvc->curve_type == ucontrol->value.integer.value[0])
+       if (mvc->curve_type == ucontrol->value.enumerated.item[0])
                return 0;
 
-       mvc->curve_type = ucontrol->value.integer.value[0];
+       mvc->curve_type = ucontrol->value.enumerated.item[0];
 
        tegra210_mvc_reset_vol_settings(mvc, cmpnt->dev);
 
@@ -625,8 +639,8 @@ static int tegra210_mvc_platform_remove(struct platform_device *pdev)
 static const struct dev_pm_ops tegra210_mvc_pm_ops = {
        SET_RUNTIME_PM_OPS(tegra210_mvc_runtime_suspend,
                           tegra210_mvc_runtime_resume, NULL)
-       SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
-                                    pm_runtime_force_resume)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
 };
 
 static struct platform_driver tegra210_mvc_driver = {
index dc477ee1b82cd06041bed5ca4940a21d856f1398..368f077e7bee750e92b36b8dd10a30f0abb2a72e 100644 (file)
@@ -3244,46 +3244,107 @@ static int tegra210_sfc_init(struct snd_soc_dapm_widget *w,
        return tegra210_sfc_write_coeff_ram(cmpnt);
 }
 
-static int tegra210_sfc_get_control(struct snd_kcontrol *kcontrol,
+static int tegra210_sfc_iget_stereo_to_mono(struct snd_kcontrol *kcontrol,
                                    struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
 
-       if (strstr(kcontrol->id.name, "Input Stereo To Mono"))
-               ucontrol->value.integer.value[0] =
-                       sfc->stereo_to_mono[SFC_RX_PATH];
-       else if (strstr(kcontrol->id.name, "Input Mono To Stereo"))
-               ucontrol->value.integer.value[0] =
-                       sfc->mono_to_stereo[SFC_RX_PATH];
-       else if (strstr(kcontrol->id.name, "Output Stereo To Mono"))
-               ucontrol->value.integer.value[0] =
-                       sfc->stereo_to_mono[SFC_TX_PATH];
-       else if (strstr(kcontrol->id.name, "Output Mono To Stereo"))
-               ucontrol->value.integer.value[0] =
-                       sfc->mono_to_stereo[SFC_TX_PATH];
+       ucontrol->value.enumerated.item[0] = sfc->stereo_to_mono[SFC_RX_PATH];
 
        return 0;
 }
 
-static int tegra210_sfc_put_control(struct snd_kcontrol *kcontrol,
+static int tegra210_sfc_iput_stereo_to_mono(struct snd_kcontrol *kcontrol,
                                    struct snd_ctl_elem_value *ucontrol)
 {
        struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
        struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
-       int value = ucontrol->value.integer.value[0];
-
-       if (strstr(kcontrol->id.name, "Input Stereo To Mono"))
-               sfc->stereo_to_mono[SFC_RX_PATH] = value;
-       else if (strstr(kcontrol->id.name, "Input Mono To Stereo"))
-               sfc->mono_to_stereo[SFC_RX_PATH] = value;
-       else if (strstr(kcontrol->id.name, "Output Stereo To Mono"))
-               sfc->stereo_to_mono[SFC_TX_PATH] = value;
-       else if (strstr(kcontrol->id.name, "Output Mono To Stereo"))
-               sfc->mono_to_stereo[SFC_TX_PATH] = value;
-       else
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == sfc->stereo_to_mono[SFC_RX_PATH])
+               return 0;
+
+       sfc->stereo_to_mono[SFC_RX_PATH] = value;
+
+       return 1;
+}
+
+static int tegra210_sfc_iget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                   struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+
+       ucontrol->value.enumerated.item[0] = sfc->mono_to_stereo[SFC_RX_PATH];
+
+       return 0;
+}
+
+static int tegra210_sfc_iput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                   struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == sfc->mono_to_stereo[SFC_RX_PATH])
                return 0;
 
+       sfc->mono_to_stereo[SFC_RX_PATH] = value;
+
+       return 1;
+}
+
+static int tegra210_sfc_oget_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                   struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+
+       ucontrol->value.enumerated.item[0] = sfc->stereo_to_mono[SFC_TX_PATH];
+
+       return 0;
+}
+
+static int tegra210_sfc_oput_stereo_to_mono(struct snd_kcontrol *kcontrol,
+                                   struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == sfc->stereo_to_mono[SFC_TX_PATH])
+               return 0;
+
+       sfc->stereo_to_mono[SFC_TX_PATH] = value;
+
+       return 1;
+}
+
+static int tegra210_sfc_oget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                   struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+
+       ucontrol->value.enumerated.item[0] = sfc->mono_to_stereo[SFC_TX_PATH];
+
+       return 0;
+}
+
+static int tegra210_sfc_oput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+                                   struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+       struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+       unsigned int value = ucontrol->value.enumerated.item[0];
+
+       if (value == sfc->mono_to_stereo[SFC_TX_PATH])
+               return 0;
+
+       sfc->mono_to_stereo[SFC_TX_PATH] = value;
+
        return 1;
 }
 
@@ -3384,13 +3445,17 @@ static const struct soc_enum tegra210_sfc_mono_conv_enum =
 
 static const struct snd_kcontrol_new tegra210_sfc_controls[] = {
        SOC_ENUM_EXT("Input Stereo To Mono", tegra210_sfc_stereo_conv_enum,
-               tegra210_sfc_get_control, tegra210_sfc_put_control),
+                    tegra210_sfc_iget_stereo_to_mono,
+                    tegra210_sfc_iput_stereo_to_mono),
        SOC_ENUM_EXT("Input Mono To Stereo", tegra210_sfc_mono_conv_enum,
-               tegra210_sfc_get_control, tegra210_sfc_put_control),
+                    tegra210_sfc_iget_mono_to_stereo,
+                    tegra210_sfc_iput_mono_to_stereo),
        SOC_ENUM_EXT("Output Stereo To Mono", tegra210_sfc_stereo_conv_enum,
-               tegra210_sfc_get_control, tegra210_sfc_put_control),
+                    tegra210_sfc_oget_stereo_to_mono,
+                    tegra210_sfc_oput_stereo_to_mono),
        SOC_ENUM_EXT("Output Mono To Stereo", tegra210_sfc_mono_conv_enum,
-               tegra210_sfc_get_control, tegra210_sfc_put_control),
+                    tegra210_sfc_oget_mono_to_stereo,
+                    tegra210_sfc_oput_mono_to_stereo),
 };
 
 static const struct snd_soc_component_driver tegra210_sfc_cmpnt = {
@@ -3529,8 +3594,8 @@ static int tegra210_sfc_platform_remove(struct platform_device *pdev)
 static const struct dev_pm_ops tegra210_sfc_pm_ops = {
        SET_RUNTIME_PM_OPS(tegra210_sfc_runtime_suspend,
                           tegra210_sfc_runtime_resume, NULL)
-       SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
-                                    pm_runtime_force_resume)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
 };
 
 static struct platform_driver tegra210_sfc_driver = {
index b95438c3dbf7ebf06d26a98293bc4acbe565b559..a73404879aa18f58a55b0cfd95c04d75c8dc3528 100644 (file)
@@ -116,16 +116,24 @@ static const struct snd_kcontrol_new tegra_machine_controls[] = {
        SOC_DAPM_PIN_SWITCH("Headset Mic"),
        SOC_DAPM_PIN_SWITCH("Internal Mic 1"),
        SOC_DAPM_PIN_SWITCH("Internal Mic 2"),
+       SOC_DAPM_PIN_SWITCH("Headphones"),
+       SOC_DAPM_PIN_SWITCH("Mic Jack"),
 };
 
 int tegra_asoc_machine_init(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_soc_card *card = rtd->card;
        struct tegra_machine *machine = snd_soc_card_get_drvdata(card);
+       const char *jack_name;
        int err;
 
        if (machine->gpiod_hp_det && machine->asoc->add_hp_jack) {
-               err = snd_soc_card_jack_new(card, "Headphones Jack",
+               if (machine->asoc->hp_jack_name)
+                       jack_name = machine->asoc->hp_jack_name;
+               else
+                       jack_name = "Headphones Jack";
+
+               err = snd_soc_card_jack_new(card, jack_name,
                                            SND_JACK_HEADPHONE,
                                            &tegra_machine_hp_jack,
                                            tegra_machine_hp_jack_pins,
@@ -658,6 +666,7 @@ static struct snd_soc_card snd_soc_tegra_max98090 = {
 static const struct tegra_asoc_data tegra_max98090_data = {
        .mclk_rate = tegra_machine_mclk_rate_12mhz,
        .card = &snd_soc_tegra_max98090,
+       .hp_jack_name = "Headphones",
        .add_common_dapm_widgets = true,
        .add_common_controls = true,
        .add_common_snd_ops = true,
index d6a8d13205516cca949e1fb75cf7d3dc6df86387..6f795d7dff7c1d6e154dfcc5607b48007efeb41b 100644 (file)
@@ -14,6 +14,7 @@ struct snd_soc_pcm_runtime;
 struct tegra_asoc_data {
        unsigned int (*mclk_rate)(unsigned int srate);
        const char *codec_dev_name;
+       const char *hp_jack_name;
        struct snd_soc_card *card;
        unsigned int mclk_id;
        bool hp_jack_gpio_active_low;
index d489c1de3baec1b84d87bf28ca6a849e0e1ab37c..823b6b8de942d70a58eee846e0dca680899af630 100644 (file)
@@ -3016,11 +3016,11 @@ static const struct snd_djm_ctl snd_djm_ctls_750mk2[] = {
 
 
 static const struct snd_djm_device snd_djm_devices[] = {
-       SND_DJM_DEVICE(250mk2),
-       SND_DJM_DEVICE(750),
-       SND_DJM_DEVICE(750mk2),
-       SND_DJM_DEVICE(850),
-       SND_DJM_DEVICE(900nxs2)
+       [SND_DJM_250MK2_IDX] = SND_DJM_DEVICE(250mk2),
+       [SND_DJM_750_IDX] = SND_DJM_DEVICE(750),
+       [SND_DJM_850_IDX] = SND_DJM_DEVICE(850),
+       [SND_DJM_900NXS2_IDX] = SND_DJM_DEVICE(900nxs2),
+       [SND_DJM_750MK2_IDX] = SND_DJM_DEVICE(750mk2),
 };
 
 
index a59cb0ee609cd9e23b480a8371e64613135f0f01..73409e27be01f0834cb79fa6c8f3bc3ff65f4241 100644 (file)
@@ -83,6 +83,7 @@ struct btf_id {
                int      cnt;
        };
        int              addr_cnt;
+       bool             is_set;
        Elf64_Addr       addr[ADDR_CNT];
 };
 
@@ -451,8 +452,10 @@ static int symbols_collect(struct object *obj)
                         * in symbol's size, together with 'cnt' field hence
                         * that - 1.
                         */
-                       if (id)
+                       if (id) {
                                id->cnt = sym.st_size / sizeof(int) - 1;
+                               id->is_set = true;
+                       }
                } else {
                        pr_err("FAILED unsupported prefix %s\n", prefix);
                        return -1;
@@ -568,9 +571,8 @@ static int id_patch(struct object *obj, struct btf_id *id)
        int *ptr = data->d_buf;
        int i;
 
-       if (!id->id) {
+       if (!id->id && !id->is_set)
                pr_err("WARN: resolve_btfids: unresolved symbol %s\n", id->name);
-       }
 
        for (i = 0; i < id->addr_cnt; i++) {
                unsigned long addr = id->addr[i];
index 45a9a59828c3c09d420e8e149930363cec9668c2..ae61f464043a11fbe78d7fcebb5f2266a1b291a7 100644 (file)
@@ -48,7 +48,6 @@ FEATURE_TESTS_BASIC :=                  \
         numa_num_possible_cpus          \
         libperl                         \
         libpython                       \
-        libpython-version               \
         libslang                        \
         libslang-include-subdir         \
         libtraceevent                   \
index 0a3244ad967307cef0793d4f8df46eae96e48bd0..1480910c792e2cb3fc6f63f858cf089e18222d70 100644 (file)
@@ -32,7 +32,6 @@ FILES=                                          \
          test-numa_num_possible_cpus.bin        \
          test-libperl.bin                       \
          test-libpython.bin                     \
-         test-libpython-version.bin             \
          test-libslang.bin                      \
          test-libslang-include-subdir.bin       \
          test-libtraceevent.bin                 \
@@ -227,9 +226,6 @@ $(OUTPUT)test-libperl.bin:
 $(OUTPUT)test-libpython.bin:
        $(BUILD) $(FLAGS_PYTHON_EMBED)
 
-$(OUTPUT)test-libpython-version.bin:
-       $(BUILD)
-
 $(OUTPUT)test-libbfd.bin:
        $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
 
index 0b243ce842be3383413fb6604d93fd7849dd7b94..5ffafb967b6e4952d4b4b3b023d9207291abb167 100644 (file)
 # include "test-libpython.c"
 #undef main
 
-#define main main_test_libpython_version
-# include "test-libpython-version.c"
-#undef main
-
 #define main main_test_libperl
 # include "test-libperl.c"
 #undef main
 int main(int argc, char *argv[])
 {
        main_test_libpython();
-       main_test_libpython_version();
        main_test_libperl();
        main_test_hello();
        main_test_libelf();
diff --git a/tools/build/feature/test-libpython-version.c b/tools/build/feature/test-libpython-version.c
deleted file mode 100644 (file)
index 47714b9..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <Python.h>
-
-#if PY_VERSION_HEX >= 0x03000000
-       #error
-#endif
-
-int main(void)
-{
-       return 0;
-}
diff --git a/tools/include/linux/debug_locks.h b/tools/include/linux/debug_locks.h
deleted file mode 100644 (file)
index 72d595c..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_DEBUG_LOCKS_H_
-#define _LIBLOCKDEP_DEBUG_LOCKS_H_
-
-#include <stddef.h>
-#include <linux/compiler.h>
-#include <asm/bug.h>
-
-#define DEBUG_LOCKS_WARN_ON(x) WARN_ON(x)
-
-extern bool debug_locks;
-extern bool debug_locks_silent;
-
-#endif
diff --git a/tools/include/linux/hardirq.h b/tools/include/linux/hardirq.h
deleted file mode 100644 (file)
index b25580b..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_LINUX_HARDIRQ_H_
-#define _LIBLOCKDEP_LINUX_HARDIRQ_H_
-
-#define SOFTIRQ_BITS   0UL
-#define HARDIRQ_BITS   0UL
-#define SOFTIRQ_SHIFT  0UL
-#define HARDIRQ_SHIFT  0UL
-#define hardirq_count()        0UL
-#define softirq_count()        0UL
-
-#endif
diff --git a/tools/include/linux/irqflags.h b/tools/include/linux/irqflags.h
deleted file mode 100644 (file)
index 501262a..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_
-#define _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_
-
-# define lockdep_hardirq_context()     0
-# define lockdep_softirq_context(p)    0
-# define lockdep_hardirqs_enabled()    0
-# define lockdep_softirqs_enabled(p)   0
-# define lockdep_hardirq_enter()       do { } while (0)
-# define lockdep_hardirq_exit()                do { } while (0)
-# define lockdep_softirq_enter()       do { } while (0)
-# define lockdep_softirq_exit()                do { } while (0)
-# define INIT_TRACE_IRQFLAGS
-
-# define stop_critical_timings() do { } while (0)
-# define start_critical_timings() do { } while (0)
-
-#define raw_local_irq_disable() do { } while (0)
-#define raw_local_irq_enable() do { } while (0)
-#define raw_local_irq_save(flags) ((flags) = 0)
-#define raw_local_irq_restore(flags) ((void)(flags))
-#define raw_local_save_flags(flags) ((flags) = 0)
-#define raw_irqs_disabled_flags(flags) ((void)(flags))
-#define raw_irqs_disabled() 0
-#define raw_safe_halt()
-
-#define local_irq_enable() do { } while (0)
-#define local_irq_disable() do { } while (0)
-#define local_irq_save(flags) ((flags) = 0)
-#define local_irq_restore(flags) ((void)(flags))
-#define local_save_flags(flags)        ((flags) = 0)
-#define irqs_disabled() (1)
-#define irqs_disabled_flags(flags) ((void)(flags), 0)
-#define safe_halt() do { } while (0)
-
-#define trace_lock_release(x, y)
-#define trace_lock_acquire(a, b, c, d, e, f, g)
-
-#endif
index a7e54a08fb54c41b7b5da231119caab66d5431cc..3e8df500cfbd41d4139906348dfb259fb4641fab 100644 (file)
@@ -7,6 +7,7 @@
 #include <assert.h>
 #include <linux/build_bug.h>
 #include <linux/compiler.h>
+#include <linux/math.h>
 #include <endian.h>
 #include <byteswap.h>
 
@@ -14,8 +15,6 @@
 #define UINT_MAX       (~0U)
 #endif
 
-#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
-
 #define PERF_ALIGN(x, a)       __PERF_ALIGN_MASK(x, (typeof(x))(a)-1)
 #define __PERF_ALIGN_MASK(x, mask)     (((x)+(mask))&~(mask))
 
        _min1 < _min2 ? _min1 : _min2; })
 #endif
 
-#ifndef roundup
-#define roundup(x, y) (                                \
-{                                                      \
-       const typeof(y) __y = y;                       \
-       (((x) + (__y - 1)) / __y) * __y;               \
-}                                                      \
-)
-#endif
-
 #ifndef BUG_ON
 #ifdef NDEBUG
 #define BUG_ON(cond) do { if (cond) {} } while (0)
@@ -104,16 +94,6 @@ int scnprintf_pad(char * buf, size_t size, const char * fmt, ...);
 
 #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
 
-/*
- * This looks more complex than it should be. But we need to
- * get the type for the ~ right in round_down (it needs to be
- * as wide as the result!), and we want to evaluate the macro
- * arguments just once each.
- */
-#define __round_mask(x, y) ((__typeof__(x))((y)-1))
-#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
-#define round_down(x, y) ((x) & ~__round_mask(x, y))
-
 #define current_gfp_context(k) 0
 #define synchronize_rcu()
 
diff --git a/tools/include/linux/lockdep.h b/tools/include/linux/lockdep.h
deleted file mode 100644 (file)
index e569972..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_LOCKDEP_H_
-#define _LIBLOCKDEP_LOCKDEP_H_
-
-#include <sys/prctl.h>
-#include <sys/syscall.h>
-#include <string.h>
-#include <limits.h>
-#include <linux/utsname.h>
-#include <linux/compiler.h>
-#include <linux/export.h>
-#include <linux/kern_levels.h>
-#include <linux/err.h>
-#include <linux/rcu.h>
-#include <linux/list.h>
-#include <linux/hardirq.h>
-#include <unistd.h>
-
-#define MAX_LOCK_DEPTH 63UL
-
-#define asmlinkage
-#define __visible
-
-#include "../../../include/linux/lockdep.h"
-
-struct task_struct {
-       u64 curr_chain_key;
-       int lockdep_depth;
-       unsigned int lockdep_recursion;
-       struct held_lock held_locks[MAX_LOCK_DEPTH];
-       gfp_t lockdep_reclaim_gfp;
-       int pid;
-       int state;
-       char comm[17];
-};
-
-#define TASK_RUNNING 0
-
-extern struct task_struct *__curr(void);
-
-#define current (__curr())
-
-static inline int debug_locks_off(void)
-{
-       return 1;
-}
-
-#define task_pid_nr(tsk) ((tsk)->pid)
-
-#define KSYM_NAME_LEN 128
-#define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__)
-#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__)
-#define pr_warn pr_err
-#define pr_cont pr_err
-
-#define list_del_rcu list_del
-
-#define atomic_t unsigned long
-#define atomic_inc(x) ((*(x))++)
-
-#define print_tainted() ""
-#define static_obj(x) 1
-
-#define debug_show_all_locks()
-extern void debug_check_no_locks_held(void);
-
-static __used bool __is_kernel_percpu_address(unsigned long addr, void *can_addr)
-{
-       return false;
-}
-
-#endif
diff --git a/tools/include/linux/math.h b/tools/include/linux/math.h
new file mode 100644 (file)
index 0000000..4e7af99
--- /dev/null
@@ -0,0 +1,25 @@
+#ifndef _TOOLS_MATH_H
+#define _TOOLS_MATH_H
+
+/*
+ * This looks more complex than it should be. But we need to
+ * get the type for the ~ right in round_down (it needs to be
+ * as wide as the result!), and we want to evaluate the macro
+ * arguments just once each.
+ */
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+#define round_down(x, y) ((x) & ~__round_mask(x, y))
+
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+
+#ifndef roundup
+#define roundup(x, y) (                                \
+{                                                      \
+       const typeof(y) __y = y;                       \
+       (((x) + (__y - 1)) / __y) * __y;               \
+}                                                      \
+)
+#endif
+
+#endif
diff --git a/tools/include/linux/proc_fs.h b/tools/include/linux/proc_fs.h
deleted file mode 100644 (file)
index 8b3b03b..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef _TOOLS_INCLUDE_LINUX_PROC_FS_H
-#define _TOOLS_INCLUDE_LINUX_PROC_FS_H
-
-#endif /* _TOOLS_INCLUDE_LINUX_PROC_FS_H */
index c934572d935cc116ba4b7607706973e0b4a32439..622266b197d0dd33c10b8da6a8fb105e47ad323e 100644 (file)
@@ -37,6 +37,4 @@ static inline bool arch_spin_is_locked(arch_spinlock_t *mutex)
        return true;
 }
 
-#include <linux/lockdep.h>
-
 #endif
diff --git a/tools/include/linux/stacktrace.h b/tools/include/linux/stacktrace.h
deleted file mode 100644 (file)
index ae343ac..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_LINUX_STACKTRACE_H_
-#define _LIBLOCKDEP_LINUX_STACKTRACE_H_
-
-#include <execinfo.h>
-
-struct stack_trace {
-       unsigned int nr_entries, max_entries;
-       unsigned long *entries;
-       int skip;
-};
-
-static inline void print_stack_trace(struct stack_trace *trace, int spaces)
-{
-       backtrace_symbols_fd((void **)trace->entries, trace->nr_entries, 1);
-}
-
-#define save_stack_trace(trace)        \
-       ((trace)->nr_entries =  \
-               backtrace((void **)(trace)->entries, (trace)->max_entries))
-
-static inline int dump_stack(void)
-{
-       void *array[64];
-       size_t size;
-
-       size = backtrace(array, 64);
-       backtrace_symbols_fd(array, size, 1);
-
-       return 0;
-}
-
-#endif
index 81a4c543ff7ea5b966b4de08195dea3f9068b1c6..4b384c907027eb4e21cf0997c1cf2c0f5710986c 100644 (file)
@@ -375,6 +375,7 @@ static int read_symbols(struct elf *elf)
                        return -1;
                }
                memset(sym, 0, sizeof(*sym));
+               INIT_LIST_HEAD(&sym->pv_target);
                sym->alias = sym;
 
                sym->idx = i;
index c90c7084e45a9c68b022846189c5155600626551..bdf699f6552bed6432765c7ce37abbd1d520395c 100644 (file)
@@ -153,6 +153,10 @@ void objtool_pv_add(struct objtool_file *f, int idx, struct symbol *func)
            !strcmp(func->name, "_paravirt_ident_64"))
                return;
 
+       /* already added this function */
+       if (!list_empty(&func->pv_target))
+               return;
+
        list_add(&func->pv_target, &f->pv_ops[idx].targets);
        f->pv_ops[idx].clean = false;
 }
index afd144725a0bf766e79c3f3b2aef73e1037ce242..3df74cf5651af9dda46f214bc860a76c30a0c98d 100644 (file)
@@ -271,8 +271,6 @@ endif
 
 FEATURE_CHECK_CFLAGS-libpython := $(PYTHON_EMBED_CCOPTS)
 FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS)
-FEATURE_CHECK_CFLAGS-libpython-version := $(PYTHON_EMBED_CCOPTS)
-FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS)
 
 FEATURE_CHECK_LDFLAGS-libaio = -lrt
 
index 7bef917cc84e6499baf173cc4feef4c8430527f6..15109af9d0754d5fcb6e455532944ef1a0a14e17 100644 (file)
 446    common  landlock_restrict_self          sys_landlock_restrict_self
 # 447 reserved for memfd_secret
 448    common  process_mrelease                sys_process_mrelease
+449    common  futex_waitv                     sys_futex_waitv
index df5261e5cfe1f28d6afc412ba5475e588f6c1dde..ed9c5c2eafad700ce45ad0178837ed3d1c9204d1 100644 (file)
 446  common    landlock_restrict_self  sys_landlock_restrict_self      sys_landlock_restrict_self
 # 447 reserved for memfd_secret
 448  common    process_mrelease        sys_process_mrelease            sys_process_mrelease
+449  common    futex_waitv             sys_futex_waitv                 sys_futex_waitv
index fa0ff4ce2b7493550db066d9f9e2f998bd59de8e..488f6e6ba1a55e8decd839c98a8d6a8ee838209b 100644 (file)
@@ -223,8 +223,6 @@ static unsigned int group(pthread_t *pth,
                snd_ctx->out_fds[i] = fds[1];
                if (!thread_mode)
                        close(fds[0]);
-
-               free(ctx);
        }
 
        /* Now we have all the fds, fork the senders */
@@ -241,8 +239,6 @@ static unsigned int group(pthread_t *pth,
                for (i = 0; i < num_fds; i++)
                        close(snd_ctx->out_fds[i]);
 
-       free(snd_ctx);
-
        /* Return number of children to reap */
        return num_fds * 2;
 }
index bc5259db5fd91b51af3b376c5ee5334c137fec4c..409b721666cba393adfd398969783d51f834f2f4 100644 (file)
@@ -755,12 +755,16 @@ static int parse_vm_time_correlation(const struct option *opt, const char *str,
        return inject->itrace_synth_opts.vm_tm_corr_args ? 0 : -ENOMEM;
 }
 
+static int output_fd(struct perf_inject *inject)
+{
+       return inject->in_place_update ? -1 : perf_data__fd(&inject->output);
+}
+
 static int __cmd_inject(struct perf_inject *inject)
 {
        int ret = -EINVAL;
        struct perf_session *session = inject->session;
-       struct perf_data *data_out = &inject->output;
-       int fd = inject->in_place_update ? -1 : perf_data__fd(data_out);
+       int fd = output_fd(inject);
        u64 output_data_offset;
 
        signal(SIGINT, sig_handler);
@@ -820,7 +824,7 @@ static int __cmd_inject(struct perf_inject *inject)
                inject->tool.ordered_events = true;
                inject->tool.ordering_requires_timestamps = true;
                /* Allow space in the header for new attributes */
-               output_data_offset = 4096;
+               output_data_offset = roundup(8192 + session->header.data_offset, 4096);
                if (inject->strip)
                        strip_init(inject);
        }
@@ -1015,7 +1019,7 @@ int cmd_inject(int argc, const char **argv)
        }
 
        inject.session = __perf_session__new(&data, repipe,
-                                            perf_data__fd(&inject.output),
+                                            output_fd(&inject),
                                             &inject.tool);
        if (IS_ERR(inject.session)) {
                ret = PTR_ERR(inject.session);
@@ -1078,7 +1082,8 @@ out_delete:
        zstd_fini(&(inject.session->zstd_data));
        perf_session__delete(inject.session);
 out_close_output:
-       perf_data__close(&inject.output);
+       if (!inject.in_place_update)
+               perf_data__close(&inject.output);
        free(inject.itrace_synth_opts.vm_tm_corr_args);
        return ret;
 }
index c895de481fe10d5c22fa60623604211588bd0cec..d54c5371c6a6e414dbb19ffece465a0f40f7415a 100644 (file)
@@ -169,7 +169,9 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u
        TEST_ASSERT_VAL("#num_dies", expr__parse(&num_dies, ctx, "#num_dies") == 0);
        TEST_ASSERT_VAL("#num_cores >= #num_dies", num_cores >= num_dies);
        TEST_ASSERT_VAL("#num_packages", expr__parse(&num_packages, ctx, "#num_packages") == 0);
-       TEST_ASSERT_VAL("#num_dies >= #num_packages", num_dies >= num_packages);
+
+       if (num_dies) // Some platforms do not have CPU die support, for example s390
+               TEST_ASSERT_VAL("#num_dies >= #num_packages", num_dies >= num_packages);
 
        /*
         * Source count returns the number of events aggregating in a leader
index 574b7e4efd3a5a64b31ebae700909ddebe22eb1e..07b6f4ec024f0a0f423d8484b4b21d6e268ed354 100644 (file)
@@ -109,6 +109,7 @@ static void load_runtime_stat(struct runtime_stat *st, struct evlist *evlist,
        struct evsel *evsel;
        u64 count;
 
+       perf_stat__reset_shadow_stats();
        evlist__for_each_entry(evlist, evsel) {
                count = find_value(evsel->name, vals);
                perf_stat__update_shadow_stats(evsel, count, 0, st);
diff --git a/tools/perf/util/bpf_skel/bperf.h b/tools/perf/util/bpf_skel/bperf.h
deleted file mode 100644 (file)
index 186a555..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-// Copyright (c) 2021 Facebook
-
-#ifndef __BPERF_STAT_H
-#define __BPERF_STAT_H
-
-typedef struct {
-       __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
-       __uint(key_size, sizeof(__u32));
-       __uint(value_size, sizeof(struct bpf_perf_event_value));
-       __uint(max_entries, 1);
-} reading_map;
-
-#endif /* __BPERF_STAT_H */
index b8fa3cb2da2308034f8fe651f4a0cdb70d9cb6e7..f193998530d431d828eb0ebee6840e166eb6aae7 100644 (file)
@@ -1,14 +1,23 @@
 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 // Copyright (c) 2021 Facebook
-#include <linux/bpf.h>
-#include <linux/perf_event.h>
+#include "vmlinux.h"
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
-#include "bperf.h"
 #include "bperf_u.h"
 
-reading_map diff_readings SEC(".maps");
-reading_map accum_readings SEC(".maps");
+struct {
+       __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+       __uint(key_size, sizeof(__u32));
+       __uint(value_size, sizeof(struct bpf_perf_event_value));
+       __uint(max_entries, 1);
+} diff_readings SEC(".maps");
+
+struct {
+       __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+       __uint(key_size, sizeof(__u32));
+       __uint(value_size, sizeof(struct bpf_perf_event_value));
+       __uint(max_entries, 1);
+} accum_readings SEC(".maps");
 
 struct {
        __uint(type, BPF_MAP_TYPE_HASH);
index 4f70d1459e86cb99e72a38d0889a0f3c79a84f06..e2a2d4cd7779ce703618fd366fce63bddf871bbe 100644 (file)
@@ -1,10 +1,8 @@
 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 // Copyright (c) 2021 Facebook
-#include <linux/bpf.h>
-#include <linux/perf_event.h>
+#include "vmlinux.h"
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
-#include "bperf.h"
 
 struct {
        __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
@@ -13,8 +11,19 @@ struct {
        __uint(map_flags, BPF_F_PRESERVE_ELEMS);
 } events SEC(".maps");
 
-reading_map prev_readings SEC(".maps");
-reading_map diff_readings SEC(".maps");
+struct {
+       __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+       __uint(key_size, sizeof(__u32));
+       __uint(value_size, sizeof(struct bpf_perf_event_value));
+       __uint(max_entries, 1);
+} prev_readings SEC(".maps");
+
+struct {
+       __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+       __uint(key_size, sizeof(__u32));
+       __uint(value_size, sizeof(struct bpf_perf_event_value));
+       __uint(max_entries, 1);
+} diff_readings SEC(".maps");
 
 SEC("raw_tp/sched_switch")
 int BPF_PROG(on_switch)
index ab12b4c4ece21a9a3fd0f1f540d67241b34e5f03..97037d3b3d9fa4cd70838b32397c4f0f488c115e 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 // Copyright (c) 2020 Facebook
-#include <linux/bpf.h>
+#include "vmlinux.h"
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 
index 95ffed66369c3287f9696ba1f1ce44eba7a5bf02..c59331eea1d9102c910c4290a9c88126e20beedf 100644 (file)
@@ -44,13 +44,16 @@ struct perf_event_attr;
 /* perf sample has 16 bits size limit */
 #define PERF_SAMPLE_MAX_SIZE (1 << 16)
 
+/* number of register is bound by the number of bits in regs_dump::mask (64) */
+#define PERF_SAMPLE_REGS_CACHE_SIZE (8 * sizeof(u64))
+
 struct regs_dump {
        u64 abi;
        u64 mask;
        u64 *regs;
 
        /* Cached values/mask filled by first register access. */
-       u64 cache_regs[PERF_REGS_MAX];
+       u64 cache_regs[PERF_SAMPLE_REGS_CACHE_SIZE];
        u64 cache_mask;
 };
 
index 1d532b9fed29c2cf1349f5e730dfd5e77535a694..254601060b392c421a5e43f47c5403c99d074b6b 100644 (file)
@@ -12,6 +12,7 @@
 #include "expr-bison.h"
 #include "expr-flex.h"
 #include "smt.h"
+#include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/zalloc.h>
 #include <ctype.h>
@@ -299,6 +300,10 @@ struct expr_parse_ctx *expr__ctx_new(void)
                return NULL;
 
        ctx->ids = hashmap__new(key_hash, key_equal, NULL);
+       if (IS_ERR(ctx->ids)) {
+               free(ctx);
+               return NULL;
+       }
        ctx->runtime = 0;
 
        return ctx;
index 79cce216727e03021937c5bac4b569f07b494f39..e3c1a532d05910bf634a1d22cc452eaba7dc057f 100644 (file)
@@ -2321,6 +2321,7 @@ out:
 #define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
 static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
 {\
+       free(ff->ph->env.__feat_env);                \
        ff->ph->env.__feat_env = do_read_string(ff); \
        return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
 }
@@ -4124,6 +4125,7 @@ int perf_event__process_feature(struct perf_session *session,
        struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event;
        int type = fe->header.type;
        u64 feat = fe->feat_id;
+       int ret = 0;
 
        if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
                pr_warning("invalid record type %d in pipe-mode\n", type);
@@ -4141,11 +4143,13 @@ int perf_event__process_feature(struct perf_session *session,
        ff.size = event->header.size - sizeof(*fe);
        ff.ph = &session->header;
 
-       if (feat_ops[feat].process(&ff, NULL))
-               return -1;
+       if (feat_ops[feat].process(&ff, NULL)) {
+               ret = -1;
+               goto out;
+       }
 
        if (!feat_ops[feat].print || !tool->show_feat_hdr)
-               return 0;
+               goto out;
 
        if (!feat_ops[feat].full_only ||
            tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
@@ -4154,8 +4158,9 @@ int perf_event__process_feature(struct perf_session *session,
                fprintf(stdout, "# %s info available, use -I to display\n",
                        feat_ops[feat].name);
        }
-
-       return 0;
+out:
+       free_event_desc(ff.events);
+       return ret;
 }
 
 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
index 5f83937bf8f3cbf653b2bb9f4a9356641a75f601..0e013c2d9eb43537ad43d850aba29cc8c4b11c21 100644 (file)
@@ -1205,61 +1205,69 @@ out_no_progress:
 
 static bool intel_pt_fup_event(struct intel_pt_decoder *decoder)
 {
+       enum intel_pt_sample_type type = decoder->state.type;
        bool ret = false;
 
+       decoder->state.type &= ~INTEL_PT_BRANCH;
+
        if (decoder->set_fup_tx_flags) {
                decoder->set_fup_tx_flags = false;
                decoder->tx_flags = decoder->fup_tx_flags;
-               decoder->state.type = INTEL_PT_TRANSACTION;
+               decoder->state.type |= INTEL_PT_TRANSACTION;
                if (decoder->fup_tx_flags & INTEL_PT_ABORT_TX)
                        decoder->state.type |= INTEL_PT_BRANCH;
-               decoder->state.from_ip = decoder->ip;
-               decoder->state.to_ip = 0;
                decoder->state.flags = decoder->fup_tx_flags;
-               return true;
+               ret = true;
        }
        if (decoder->set_fup_ptw) {
                decoder->set_fup_ptw = false;
-               decoder->state.type = INTEL_PT_PTW;
+               decoder->state.type |= INTEL_PT_PTW;
                decoder->state.flags |= INTEL_PT_FUP_IP;
-               decoder->state.from_ip = decoder->ip;
-               decoder->state.to_ip = 0;
                decoder->state.ptw_payload = decoder->fup_ptw_payload;
-               return true;
+               ret = true;
        }
        if (decoder->set_fup_mwait) {
                decoder->set_fup_mwait = false;
-               decoder->state.type = INTEL_PT_MWAIT_OP;
-               decoder->state.from_ip = decoder->ip;
-               decoder->state.to_ip = 0;
+               decoder->state.type |= INTEL_PT_MWAIT_OP;
                decoder->state.mwait_payload = decoder->fup_mwait_payload;
                ret = true;
        }
        if (decoder->set_fup_pwre) {
                decoder->set_fup_pwre = false;
                decoder->state.type |= INTEL_PT_PWR_ENTRY;
-               decoder->state.type &= ~INTEL_PT_BRANCH;
-               decoder->state.from_ip = decoder->ip;
-               decoder->state.to_ip = 0;
                decoder->state.pwre_payload = decoder->fup_pwre_payload;
                ret = true;
        }
        if (decoder->set_fup_exstop) {
                decoder->set_fup_exstop = false;
                decoder->state.type |= INTEL_PT_EX_STOP;
-               decoder->state.type &= ~INTEL_PT_BRANCH;
                decoder->state.flags |= INTEL_PT_FUP_IP;
-               decoder->state.from_ip = decoder->ip;
-               decoder->state.to_ip = 0;
                ret = true;
        }
        if (decoder->set_fup_bep) {
                decoder->set_fup_bep = false;
                decoder->state.type |= INTEL_PT_BLK_ITEMS;
-               decoder->state.type &= ~INTEL_PT_BRANCH;
+               ret = true;
+       }
+       if (decoder->overflow) {
+               decoder->overflow = false;
+               if (!ret && !decoder->pge) {
+                       if (decoder->hop) {
+                               decoder->state.type = 0;
+                               decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
+                       }
+                       decoder->pge = true;
+                       decoder->state.type |= INTEL_PT_BRANCH | INTEL_PT_TRACE_BEGIN;
+                       decoder->state.from_ip = 0;
+                       decoder->state.to_ip = decoder->ip;
+                       return true;
+               }
+       }
+       if (ret) {
                decoder->state.from_ip = decoder->ip;
                decoder->state.to_ip = 0;
-               ret = true;
+       } else {
+               decoder->state.type = type;
        }
        return ret;
 }
@@ -1608,7 +1616,16 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
        intel_pt_clear_tx_flags(decoder);
        intel_pt_set_nr(decoder);
        decoder->timestamp_insn_cnt = 0;
-       decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
+       decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+       decoder->state.from_ip = decoder->ip;
+       decoder->ip = 0;
+       decoder->pge = false;
+       decoder->set_fup_tx_flags = false;
+       decoder->set_fup_ptw = false;
+       decoder->set_fup_mwait = false;
+       decoder->set_fup_pwre = false;
+       decoder->set_fup_exstop = false;
+       decoder->set_fup_bep = false;
        decoder->overflow = true;
        return -EOVERFLOW;
 }
@@ -2666,6 +2683,8 @@ static int intel_pt_scan_for_psb(struct intel_pt_decoder *decoder);
 /* Hop mode: Ignore TNT, do not walk code, but get ip from FUPs and TIPs */
 static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, int *err)
 {
+       *err = 0;
+
        /* Leap from PSB to PSB, getting ip from FUP within PSB+ */
        if (decoder->leap && !decoder->in_psb && decoder->packet.type != INTEL_PT_PSB) {
                *err = intel_pt_scan_for_psb(decoder);
@@ -2678,6 +2697,7 @@ static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, in
                return HOP_IGNORE;
 
        case INTEL_PT_TIP_PGD:
+               decoder->pge = false;
                if (!decoder->packet.count) {
                        intel_pt_set_nr(decoder);
                        return HOP_IGNORE;
@@ -2705,18 +2725,21 @@ static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, in
                if (!decoder->packet.count)
                        return HOP_IGNORE;
                intel_pt_set_ip(decoder);
-               if (intel_pt_fup_event(decoder))
-                       return HOP_RETURN;
-               if (!decoder->branch_enable)
+               if (decoder->set_fup_mwait || decoder->set_fup_pwre)
+                       *no_tip = true;
+               if (!decoder->branch_enable || !decoder->pge)
                        *no_tip = true;
                if (*no_tip) {
                        decoder->state.type = INTEL_PT_INSTRUCTION;
                        decoder->state.from_ip = decoder->ip;
                        decoder->state.to_ip = 0;
+                       intel_pt_fup_event(decoder);
                        return HOP_RETURN;
                }
+               intel_pt_fup_event(decoder);
+               decoder->state.type |= INTEL_PT_INSTRUCTION | INTEL_PT_BRANCH;
                *err = intel_pt_walk_fup_tip(decoder);
-               if (!*err)
+               if (!*err && decoder->state.to_ip)
                        decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
                return HOP_RETURN;
 
@@ -2897,7 +2920,7 @@ static bool intel_pt_psb_with_fup(struct intel_pt_decoder *decoder, int *err)
 {
        struct intel_pt_psb_info data = { .fup = false };
 
-       if (!decoder->branch_enable || !decoder->pge)
+       if (!decoder->branch_enable)
                return false;
 
        intel_pt_pkt_lookahead(decoder, intel_pt_psb_lookahead_cb, &data);
@@ -2924,6 +2947,7 @@ static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
                if (err)
                        return err;
 next:
+               err = 0;
                if (decoder->cyc_threshold) {
                        if (decoder->sample_cyc && last_packet_type != INTEL_PT_CYC)
                                decoder->sample_cyc = false;
@@ -2962,6 +2986,7 @@ next:
 
                case INTEL_PT_TIP_PGE: {
                        decoder->pge = true;
+                       decoder->overflow = false;
                        intel_pt_mtc_cyc_cnt_pge(decoder);
                        intel_pt_set_nr(decoder);
                        if (decoder->packet.count == 0) {
@@ -2999,7 +3024,7 @@ next:
                                break;
                        }
                        intel_pt_set_last_ip(decoder);
-                       if (!decoder->branch_enable) {
+                       if (!decoder->branch_enable || !decoder->pge) {
                                decoder->ip = decoder->last_ip;
                                if (intel_pt_fup_event(decoder))
                                        return 0;
@@ -3467,10 +3492,10 @@ static int intel_pt_sync_ip(struct intel_pt_decoder *decoder)
        decoder->set_fup_pwre = false;
        decoder->set_fup_exstop = false;
        decoder->set_fup_bep = false;
+       decoder->overflow = false;
 
        if (!decoder->branch_enable) {
                decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
-               decoder->overflow = false;
                decoder->state.type = 0; /* Do not have a sample */
                return 0;
        }
@@ -3485,7 +3510,6 @@ static int intel_pt_sync_ip(struct intel_pt_decoder *decoder)
                decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
        else
                decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
-       decoder->overflow = false;
 
        decoder->state.from_ip = 0;
        decoder->state.to_ip = decoder->ip;
@@ -3607,7 +3631,7 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder)
        }
 
        decoder->have_last_ip = true;
-       decoder->pkt_state = INTEL_PT_STATE_NO_IP;
+       decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
 
        err = intel_pt_walk_psb(decoder);
        if (err)
@@ -3704,7 +3728,8 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
 
        if (err) {
                decoder->state.err = intel_pt_ext_err(err);
-               decoder->state.from_ip = decoder->ip;
+               if (err != -EOVERFLOW)
+                       decoder->state.from_ip = decoder->ip;
                intel_pt_update_sample_time(decoder);
                decoder->sample_tot_cyc_cnt = decoder->tot_cyc_cnt;
                intel_pt_set_nr(decoder);
index 556a893508daeb9aca14806ba9779de982208771..10c3187e4c5aae1886a5cdcce7af4d64efcae62a 100644 (file)
@@ -2565,6 +2565,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
                                ptq->sync_switch = false;
                                intel_pt_next_tid(pt, ptq);
                        }
+                       ptq->timestamp = state->est_timestamp;
                        if (pt->synth_opts.errors) {
                                err = intel_ptq_synth_error(ptq, state);
                                if (err)
index 5ee47ae1509c67fcf015ae5e52637d27272af423..06a7461ba864c7597492b011a5e1d27586a10065 100644 (file)
@@ -25,6 +25,9 @@ int perf_reg_value(u64 *valp, struct regs_dump *regs, int id)
        int i, idx = 0;
        u64 mask = regs->mask;
 
+       if ((u64)id >= PERF_SAMPLE_REGS_CACHE_SIZE)
+               return -EINVAL;
+
        if (regs->cache_mask & (1ULL << id))
                goto out;
 
index 563a9ba8954f31b3cbf561268ad9a2da3b84cca2..7f782a31bda3b67876bc8aaee5896ebaff051e9c 100644 (file)
@@ -461,7 +461,7 @@ get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
                struct tep_event *tp_format;
 
                tp_format = trace_event__tp_format_id(evsel->core.attr.config);
-               if (!tp_format)
+               if (IS_ERR_OR_NULL(tp_format))
                        return NULL;
 
                evsel->tp_format = tp_format;
index 20bacd5972adec4425ad11bef211a9dee5b6d45d..34f1b1b1176c7808f7c08f0f945b59cbb43de27b 100644 (file)
@@ -15,7 +15,7 @@ int smt_on(void)
        if (cached)
                return cached_result;
 
-       if (sysfs__read_int("devices/system/cpu/smt/active", &cached_result) > 0)
+       if (sysfs__read_int("devices/system/cpu/smt/active", &cached_result) >= 0)
                goto done;
 
        ncpu = sysconf(_SC_NPROCESSORS_CONF);
index 331f6d30f47261864a7c5654a1efcc2df7db5490..cd7106876a5f39dfda38e286c54c3a7c268b34a2 100644 (file)
@@ -69,6 +69,7 @@ KERNEL_INCLUDE := $(OUTPUT)include
 ACPICA_INCLUDE := $(srctree)/../../../drivers/acpi/acpica
 CFLAGS += -D_LINUX -I$(KERNEL_INCLUDE) -I$(ACPICA_INCLUDE)
 CFLAGS += $(WARNINGS)
+MKDIR = mkdir
 
 ifeq ($(strip $(V)),false)
        QUIET=@
index 2a6c170b57cd4aabc81a08a11ffa079b61daefb7..1d7616f5d0aec848204ede85d79ae6a9516cfc0a 100644 (file)
@@ -21,6 +21,7 @@ $(KERNEL_INCLUDE):
 
 $(objdir)%.o: %.c $(KERNEL_INCLUDE)
        $(ECHO) "  CC      " $(subst $(OUTPUT),,$@)
+       $(QUIET) $(MKDIR) -p $(objdir) 2>/dev/null
        $(QUIET) $(CC) -c $(CFLAGS) -o $@ $<
 
 all: $(OUTPUT)$(TOOL)
index 565fccdfe6e954a4bec7d11bebb53a108623be56..016cff473cfc483963cd1b4586243fa3e73378e7 100644 (file)
@@ -1,5 +1,8 @@
 #ifndef _LINUX_LOCKDEP_H
 #define _LINUX_LOCKDEP_H
+
+#include <linux/spinlock.h>
+
 struct lock_class_key {
        unsigned int a;
 };
index 5d52ea2768df4480a496ad87457fe8b862d57457..df3b292a8ffec06b0d902b6eb815f40742a35aad 100644 (file)
@@ -33,6 +33,22 @@ noinline int bpf_testmod_loop_test(int n)
        return sum;
 }
 
+__weak noinline struct file *bpf_testmod_return_ptr(int arg)
+{
+       static struct file f = {};
+
+       switch (arg) {
+       case 1: return (void *)EINVAL;          /* user addr */
+       case 2: return (void *)0xcafe4a11;      /* user addr */
+       case 3: return (void *)-EINVAL;         /* canonical, but invalid */
+       case 4: return (void *)(1ull << 60);    /* non-canonical and invalid */
+       case 5: return (void *)~(1ull << 30);   /* trigger extable */
+       case 6: return &f;                      /* valid addr */
+       case 7: return (void *)((long)&f | 1);  /* kernel tricks */
+       default: return NULL;
+       }
+}
+
 noinline ssize_t
 bpf_testmod_test_read(struct file *file, struct kobject *kobj,
                      struct bin_attribute *bin_attr,
@@ -43,6 +59,10 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
                .off = off,
                .len = len,
        };
+       int i = 1;
+
+       while (bpf_testmod_return_ptr(i))
+               i++;
 
        /* This is always true. Use the check to make sure the compiler
         * doesn't remove bpf_testmod_loop_test.
index 762f6a9da8b5e9bdf89d888fe561c1e8b2622555..664ffc0364f4f0e3a4dea2ee7a46ba5eb61cd3d2 100644 (file)
@@ -90,7 +90,7 @@ static void print_err_line(void)
 
 static void test_conn(void)
 {
-       int listen_fd = -1, cli_fd = -1, err;
+       int listen_fd = -1, cli_fd = -1, srv_fd = -1, err;
        socklen_t addrlen = sizeof(srv_sa6);
        int srv_port;
 
@@ -112,6 +112,10 @@ static void test_conn(void)
        if (CHECK_FAIL(cli_fd == -1))
                goto done;
 
+       srv_fd = accept(listen_fd, NULL, NULL);
+       if (CHECK_FAIL(srv_fd == -1))
+               goto done;
+
        if (CHECK(skel->bss->listen_tp_sport != srv_port ||
                  skel->bss->req_sk_sport != srv_port,
                  "Unexpected sk src port",
@@ -134,11 +138,13 @@ done:
                close(listen_fd);
        if (cli_fd != -1)
                close(cli_fd);
+       if (srv_fd != -1)
+               close(srv_fd);
 }
 
 static void test_syncookie(void)
 {
-       int listen_fd = -1, cli_fd = -1, err;
+       int listen_fd = -1, cli_fd = -1, srv_fd = -1, err;
        socklen_t addrlen = sizeof(srv_sa6);
        int srv_port;
 
@@ -161,6 +167,10 @@ static void test_syncookie(void)
        if (CHECK_FAIL(cli_fd == -1))
                goto done;
 
+       srv_fd = accept(listen_fd, NULL, NULL);
+       if (CHECK_FAIL(srv_fd == -1))
+               goto done;
+
        if (CHECK(skel->bss->listen_tp_sport != srv_port,
                  "Unexpected tp src port",
                  "listen_tp_sport:%u expected:%u\n",
@@ -188,6 +198,8 @@ done:
                close(listen_fd);
        if (cli_fd != -1)
                close(cli_fd);
+       if (srv_fd != -1)
+               close(srv_fd);
 }
 
 struct test {
index b36857093f71fea44eb42b068e0aff1e5d5b70b1..50ce16d02da7b1a9a4bdfe1ec0f0276e8603e1e3 100644 (file)
@@ -87,6 +87,18 @@ int BPF_PROG(handle_fexit,
        return 0;
 }
 
+SEC("fexit/bpf_testmod_return_ptr")
+int BPF_PROG(handle_fexit_ret, int arg, struct file *ret)
+{
+       long buf = 0;
+
+       bpf_probe_read_kernel(&buf, 8, ret);
+       bpf_probe_read_kernel(&buf, 8, (char *)ret + 256);
+       *(volatile long long *)ret;
+       *(volatile int *)&ret->f_mode;
+       return 0;
+}
+
 __u32 fmod_ret_read_sz = 0;
 
 SEC("fmod_ret/bpf_testmod_test_read")
index 465ef3f112c0c96446e48dbbfc3f73eac170bbeb..d3bf83d5c6cff2dafc48e6edfec57601c1efc689 100644 (file)
@@ -54,7 +54,7 @@
 #define MAX_INSNS      BPF_MAXINSNS
 #define MAX_TEST_INSNS 1000000
 #define MAX_FIXUPS     8
-#define MAX_NR_MAPS    21
+#define MAX_NR_MAPS    22
 #define MAX_TEST_RUNS  8
 #define POINTER_VALUE  0xcafe4all
 #define TEST_DATA_LEN  64
index c22dc83a41fdc43350285d1db6eb729fe67a7bfd..b39665f33524faba5927f8be8b59bc1211736662 100644 (file)
                BPF_EXIT_INSN(),
        },
        .result = ACCEPT,
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "R0 leaks addr into mem",
 },
 {
        "Dest pointer in r0 - succeed",
                BPF_EXIT_INSN(),
        },
        .result = ACCEPT,
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "R0 leaks addr into mem",
+},
+{
+       "Dest pointer in r0 - succeed, check 2",
+       .insns = {
+               /* r0 = &val */
+               BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+               /* val = r0; */
+               BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+               /* r5 = &val */
+               BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+               /* r0 = atomic_cmpxchg(&val, r0, r5); */
+               BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
+               /* r1 = *r0 */
+               BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
+               /* exit(0); */
+               BPF_MOV64_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "R0 leaks addr into mem",
+},
+{
+       "Dest pointer in r0 - succeed, check 3",
+       .insns = {
+               /* r0 = &val */
+               BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+               /* val = r0; */
+               BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+               /* r5 = &val */
+               BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+               /* r0 = atomic_cmpxchg(&val, r0, r5); */
+               BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
+               /* exit(0); */
+               BPF_MOV64_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .result = REJECT,
+       .errstr = "invalid size of register fill",
+       .errstr_unpriv = "R0 leaks addr into mem",
+},
+{
+       "Dest pointer in r0 - succeed, check 4",
+       .insns = {
+               /* r0 = &val */
+               BPF_MOV32_REG(BPF_REG_0, BPF_REG_10),
+               /* val = r0; */
+               BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+               /* r5 = &val */
+               BPF_MOV32_REG(BPF_REG_5, BPF_REG_10),
+               /* r0 = atomic_cmpxchg(&val, r0, r5); */
+               BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
+               /* r1 = *r10 */
+               BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -8),
+               /* exit(0); */
+               BPF_MOV64_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "R10 partial copy of pointer",
+},
+{
+       "Dest pointer in r0 - succeed, check 5",
+       .insns = {
+               /* r0 = &val */
+               BPF_MOV32_REG(BPF_REG_0, BPF_REG_10),
+               /* val = r0; */
+               BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+               /* r5 = &val */
+               BPF_MOV32_REG(BPF_REG_5, BPF_REG_10),
+               /* r0 = atomic_cmpxchg(&val, r0, r5); */
+               BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
+               /* r1 = *r0 */
+               BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -8),
+               /* exit(0); */
+               BPF_MOV64_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .result = REJECT,
+       .errstr = "R0 invalid mem access",
+       .errstr_unpriv = "R10 partial copy of pointer",
 },
index 3bc9ff7a860b7fa4c85ab8b79fe9b0bb19e84e88..5bf03fb4fa2b6ad6fb450f6993baeb243f853cf0 100644 (file)
@@ -1,3 +1,97 @@
+{
+       "atomic dw/fetch and address leakage of (map ptr & -1) via stack slot",
+       .insns = {
+               BPF_LD_IMM64(BPF_REG_1, -1),
+               BPF_LD_MAP_FD(BPF_REG_8, 0),
+               BPF_LD_MAP_FD(BPF_REG_9, 0),
+               BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+               BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+               BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
+               BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
+               BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_2, 0),
+               BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+               BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+               BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+               BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+               BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
+               BPF_MOV64_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 2, 4 },
+       .result = ACCEPT,
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "leaking pointer from stack off -8",
+},
+{
+       "atomic dw/fetch and address leakage of (map ptr & -1) via returned value",
+       .insns = {
+               BPF_LD_IMM64(BPF_REG_1, -1),
+               BPF_LD_MAP_FD(BPF_REG_8, 0),
+               BPF_LD_MAP_FD(BPF_REG_9, 0),
+               BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+               BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+               BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
+               BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
+               BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),
+               BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+               BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+               BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+               BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+               BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
+               BPF_MOV64_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 2, 4 },
+       .result = ACCEPT,
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "leaking pointer from stack off -8",
+},
+{
+       "atomic w/fetch and address leakage of (map ptr & -1) via stack slot",
+       .insns = {
+               BPF_LD_IMM64(BPF_REG_1, -1),
+               BPF_LD_MAP_FD(BPF_REG_8, 0),
+               BPF_LD_MAP_FD(BPF_REG_9, 0),
+               BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+               BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+               BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
+               BPF_ATOMIC_OP(BPF_W, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
+               BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_2, 0),
+               BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+               BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+               BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+               BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+               BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
+               BPF_MOV64_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 2, 4 },
+       .result = REJECT,
+       .errstr = "invalid size of register fill",
+},
+{
+       "atomic w/fetch and address leakage of (map ptr & -1) via returned value",
+       .insns = {
+               BPF_LD_IMM64(BPF_REG_1, -1),
+               BPF_LD_MAP_FD(BPF_REG_8, 0),
+               BPF_LD_MAP_FD(BPF_REG_9, 0),
+               BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+               BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+               BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
+               BPF_ATOMIC_OP(BPF_W, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
+               BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),
+               BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+               BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+               BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+               BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+               BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
+               BPF_MOV64_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 2, 4 },
+       .result = REJECT,
+       .errstr = "invalid size of register fill",
+},
 #define __ATOMIC_FETCH_OP_TEST(src_reg, dst_reg, operand1, op, operand2, expect) \
        {                                                               \
                "atomic fetch " #op ", src=" #dst_reg " dst=" #dst_reg, \
index 7e50cb80873a5b7c08f607fe27006bda2afc6bf5..682519769fe3c0019e53b151e27c3c4c8d37d5e7 100644 (file)
        .result = REJECT,
        .prog_type = BPF_PROG_TYPE_TRACEPOINT,
 },
+{
+       "precision tracking for u32 spill/fill",
+       .insns = {
+               BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
+               BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+               BPF_MOV32_IMM(BPF_REG_6, 32),
+               BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+               BPF_MOV32_IMM(BPF_REG_6, 4),
+               /* Additional insns to introduce a pruning point. */
+               BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+               BPF_MOV64_IMM(BPF_REG_3, 0),
+               BPF_MOV64_IMM(BPF_REG_3, 0),
+               BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+               BPF_MOV64_IMM(BPF_REG_3, 0),
+               /* u32 spill/fill */
+               BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -8),
+               BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_10, -8),
+               /* out-of-bound map value access for r6=32 */
+               BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+               BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+               BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+               BPF_LD_MAP_FD(BPF_REG_1, 0),
+               BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+               BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+               BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
+               BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+               BPF_MOV64_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .fixup_map_hash_8b = { 15 },
+       .result = REJECT,
+       .errstr = "R0 min value is outside of the allowed memory range",
+       .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+       "precision tracking for u32 spills, u64 fill",
+       .insns = {
+               BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+               BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+               BPF_MOV32_IMM(BPF_REG_7, 0xffffffff),
+               /* Additional insns to introduce a pruning point. */
+               BPF_MOV64_IMM(BPF_REG_3, 1),
+               BPF_MOV64_IMM(BPF_REG_3, 1),
+               BPF_MOV64_IMM(BPF_REG_3, 1),
+               BPF_MOV64_IMM(BPF_REG_3, 1),
+               BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+               BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+               BPF_MOV64_IMM(BPF_REG_3, 1),
+               BPF_ALU32_IMM(BPF_DIV, BPF_REG_3, 0),
+               /* u32 spills, u64 fill */
+               BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4),
+               BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8),
+               BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, -8),
+               /* if r8 != X goto pc+1  r8 known in fallthrough branch */
+               BPF_JMP_IMM(BPF_JNE, BPF_REG_8, 0xffffffff, 1),
+               BPF_MOV64_IMM(BPF_REG_3, 1),
+               /* if r8 == X goto pc+1  condition always true on first
+                * traversal, so starts backtracking to mark r8 as requiring
+                * precision. r7 marked as needing precision. r6 not marked
+                * since it's not tracked.
+                */
+               BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 0xffffffff, 1),
+               /* fails if r8 correctly marked unknown after fill. */
+               BPF_ALU32_IMM(BPF_DIV, BPF_REG_3, 0),
+               BPF_MOV64_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .result = REJECT,
+       .errstr = "div by zero",
+       .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
 {
        "allocated_stack",
        .insns = {
index 7ab3de1087614663dcb6f3cd0f775f9762ba1681..6c907144311f81c4ab39a95e61941cce13ba418f 100644 (file)
        .errstr = "invalid access to packet",
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
 },
+{
+       "Spill u32 const scalars.  Refill as u64.  Offset to skb->data",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct __sk_buff, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct __sk_buff, data_end)),
+       /* r6 = 0 */
+       BPF_MOV32_IMM(BPF_REG_6, 0),
+       /* r7 = 20 */
+       BPF_MOV32_IMM(BPF_REG_7, 20),
+       /* *(u32 *)(r10 -4) = r6 */
+       BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4),
+       /* *(u32 *)(r10 -8) = r7 */
+       BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8),
+       /* r4 = *(u64 *)(r10 -8) */
+       BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8),
+       /* r0 = r2 */
+       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+       /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
+       /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
+       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+       /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv20 */
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = REJECT,
+       .errstr = "invalid access to packet",
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
 {
        "Spill a u32 const scalar.  Refill as u16 from fp-6.  Offset to skb->data",
        .insns = {
index 2debba4e8a3a8ef060dccbd62cb8ee598606adb3..4d347bc53aa28e2c439ce225402731d5d08e512c 100644 (file)
        .errstr = "R0 invalid mem access 'inv'",
        .errstr_unpriv = "R0 pointer -= pointer prohibited",
 },
+{
+       "map access: trying to leak tained dst reg",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+       BPF_MOV32_IMM(BPF_REG_1, 0xFFFFFFFF),
+       BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
+       BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+       BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 4 },
+       .result = REJECT,
+       .errstr = "math between map_value pointer and 4294967295 is not allowed",
+},
 {
        "32bit pkt_ptr -= scalar",
        .insns = {
index bfb97383e6b5ae2c7e10c0e8294d4dd115b61954..b4ec228eb95d05e02ef416351c4c9578b38b983b 100644 (file)
@@ -35,7 +35,7 @@
        .prog_type = BPF_PROG_TYPE_XDP,
 },
 {
-       "XDP pkt read, pkt_data' > pkt_end, good access",
+       "XDP pkt read, pkt_data' > pkt_end, corner case, good access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "XDP pkt read, pkt_data' > pkt_end, corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data' > pkt_end, corner case -1, bad access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .errstr = "R1 offset is outside of the packet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
 {
        "XDP pkt read, pkt_end > pkt_data', good access",
        .insns = {
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_end > pkt_data', bad access 1",
+       "XDP pkt read, pkt_end > pkt_data', corner case -1, bad access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
                    offsetof(struct xdp_md, data_end)),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
        BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
        BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "XDP pkt read, pkt_end > pkt_data', corner case, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_end > pkt_data', corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
 {
        "XDP pkt read, pkt_data' < pkt_end, good access",
        .insns = {
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_data' < pkt_end, bad access 1",
+       "XDP pkt read, pkt_data' < pkt_end, corner case -1, bad access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
                    offsetof(struct xdp_md, data_end)),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
        BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
        BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_end < pkt_data', good access",
+       "XDP pkt read, pkt_data' < pkt_end, corner case, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data' < pkt_end, corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_end < pkt_data', corner case, good access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "XDP pkt read, pkt_end < pkt_data', corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+       BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_end < pkt_data', corner case -1, bad access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .errstr = "R1 offset is outside of the packet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
 {
        "XDP pkt read, pkt_data' >= pkt_end, good access",
        .insns = {
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
+       "XDP pkt read, pkt_data' >= pkt_end, corner case -1, bad access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
                    offsetof(struct xdp_md, data_end)),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
        BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_end >= pkt_data', good access",
+       "XDP pkt read, pkt_data' >= pkt_end, corner case, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data' >= pkt_end, corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_end >= pkt_data', corner case, good access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_data' <= pkt_end, good access",
+       "XDP pkt read, pkt_end >= pkt_data', corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+       BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_end >= pkt_data', corner case -1, bad access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .errstr = "R1 offset is outside of the packet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data' <= pkt_end, corner case, good access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "XDP pkt read, pkt_data' <= pkt_end, corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+       BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data' <= pkt_end, corner case -1, bad access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .errstr = "R1 offset is outside of the packet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
 {
        "XDP pkt read, pkt_end <= pkt_data', good access",
        .insns = {
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_end <= pkt_data', bad access 1",
+       "XDP pkt read, pkt_end <= pkt_data', corner case -1, bad access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
                    offsetof(struct xdp_md, data_end)),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
        BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_meta' > pkt_data, good access",
+       "XDP pkt read, pkt_end <= pkt_data', corner case, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_end <= pkt_data', corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                   offsetof(struct xdp_md, data_end)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_meta' > pkt_data, corner case, good access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
                    offsetof(struct xdp_md, data_meta)),
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "XDP pkt read, pkt_meta' > pkt_data, corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_meta' > pkt_data, corner case -1, bad access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .errstr = "R1 offset is outside of the packet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
 {
        "XDP pkt read, pkt_data > pkt_meta', good access",
        .insns = {
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_data > pkt_meta', bad access 1",
+       "XDP pkt read, pkt_data > pkt_meta', corner case -1, bad access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
                    offsetof(struct xdp_md, data_meta)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
        BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
        BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "XDP pkt read, pkt_data > pkt_meta', corner case, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data > pkt_meta', corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
 {
        "XDP pkt read, pkt_meta' < pkt_data, good access",
        .insns = {
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
+       "XDP pkt read, pkt_meta' < pkt_data, corner case -1, bad access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
                    offsetof(struct xdp_md, data_meta)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
        BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
        BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_data < pkt_meta', good access",
+       "XDP pkt read, pkt_meta' < pkt_data, corner case, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_meta' < pkt_data, corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data < pkt_meta', corner case, good access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
                    offsetof(struct xdp_md, data_meta)),
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "XDP pkt read, pkt_data < pkt_meta', corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+       BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data < pkt_meta', corner case -1, bad access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .errstr = "R1 offset is outside of the packet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
 {
        "XDP pkt read, pkt_meta' >= pkt_data, good access",
        .insns = {
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
+       "XDP pkt read, pkt_meta' >= pkt_data, corner case -1, bad access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
                    offsetof(struct xdp_md, data_meta)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
        BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_data >= pkt_meta', good access",
+       "XDP pkt read, pkt_meta' >= pkt_data, corner case, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_meta' >= pkt_data, corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data >= pkt_meta', corner case, good access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
                    offsetof(struct xdp_md, data_meta)),
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_meta' <= pkt_data, good access",
+       "XDP pkt read, pkt_data >= pkt_meta', corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+       BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data >= pkt_meta', corner case -1, bad access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .errstr = "R1 offset is outside of the packet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_meta' <= pkt_data, corner case, good access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
                    offsetof(struct xdp_md, data_meta)),
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "XDP pkt read, pkt_meta' <= pkt_data, corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+       BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_meta' <= pkt_data, corner case -1, bad access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .errstr = "R1 offset is outside of the packet",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
 {
        "XDP pkt read, pkt_data <= pkt_meta', good access",
        .insns = {
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
-       "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
+       "XDP pkt read, pkt_data <= pkt_meta', corner case -1, bad access",
        .insns = {
        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
                    offsetof(struct xdp_md, data_meta)),
        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
        BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
        BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
-       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
        },
        .prog_type = BPF_PROG_TYPE_XDP,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "XDP pkt read, pkt_data <= pkt_meta', corner case, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+       BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "XDP pkt read, pkt_data <= pkt_meta', corner case +1, good access",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                   offsetof(struct xdp_md, data_meta)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+       BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_XDP,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
diff --git a/tools/testing/selftests/damon/.gitignore b/tools/testing/selftests/damon/.gitignore
new file mode 100644 (file)
index 0000000..c6c2965
--- /dev/null
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+huge_count_read_write
index 8a3f2cd9fec0c65b8d88f7ff1b891c7ef6746c0b..937d36ae9a69c2d8955791437e75049b9f0bc0f0 100644 (file)
@@ -1,7 +1,10 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for damon selftests
 
-TEST_FILES = _chk_dependency.sh
-TEST_PROGS = debugfs_attrs.sh
+TEST_GEN_FILES += huge_count_read_write
+
+TEST_FILES = _chk_dependency.sh _debugfs_common.sh
+TEST_PROGS = debugfs_attrs.sh debugfs_schemes.sh debugfs_target_ids.sh
+TEST_PROGS += debugfs_empty_targets.sh debugfs_huge_count_read_write.sh
 
 include ../lib.mk
diff --git a/tools/testing/selftests/damon/_debugfs_common.sh b/tools/testing/selftests/damon/_debugfs_common.sh
new file mode 100644 (file)
index 0000000..48989d4
--- /dev/null
@@ -0,0 +1,52 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+test_write_result() {
+       file=$1
+       content=$2
+       orig_content=$3
+       expect_reason=$4
+       expected=$5
+
+       echo "$content" > "$file"
+       if [ $? -ne "$expected" ]
+       then
+               echo "writing $content to $file doesn't return $expected"
+               echo "expected because: $expect_reason"
+               echo "$orig_content" > "$file"
+               exit 1
+       fi
+}
+
+test_write_succ() {
+       test_write_result "$1" "$2" "$3" "$4" 0
+}
+
+test_write_fail() {
+       test_write_result "$1" "$2" "$3" "$4" 1
+}
+
+test_content() {
+       file=$1
+       orig_content=$2
+       expected=$3
+       expect_reason=$4
+
+       content=$(cat "$file")
+       if [ "$content" != "$expected" ]
+       then
+               echo "reading $file expected $expected but $content"
+               echo "expected because: $expect_reason"
+               echo "$orig_content" > "$file"
+               exit 1
+       fi
+}
+
+source ./_chk_dependency.sh
+
+damon_onoff="$DBGFS/monitor_on"
+if [ $(cat "$damon_onoff") = "on" ]
+then
+       echo "monitoring is on"
+       exit $ksft_skip
+fi
index 196b6640bf3783601833f348447d720f71f9f7ec..902e312bca898b5b88a88e23ec7c26045208f940 100644 (file)
@@ -1,48 +1,7 @@
 #!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
-test_write_result() {
-       file=$1
-       content=$2
-       orig_content=$3
-       expect_reason=$4
-       expected=$5
-
-       echo "$content" > "$file"
-       if [ $? -ne "$expected" ]
-       then
-               echo "writing $content to $file doesn't return $expected"
-               echo "expected because: $expect_reason"
-               echo "$orig_content" > "$file"
-               exit 1
-       fi
-}
-
-test_write_succ() {
-       test_write_result "$1" "$2" "$3" "$4" 0
-}
-
-test_write_fail() {
-       test_write_result "$1" "$2" "$3" "$4" 1
-}
-
-test_content() {
-       file=$1
-       orig_content=$2
-       expected=$3
-       expect_reason=$4
-
-       content=$(cat "$file")
-       if [ "$content" != "$expected" ]
-       then
-               echo "reading $file expected $expected but $content"
-               echo "expected because: $expect_reason"
-               echo "$orig_content" > "$file"
-               exit 1
-       fi
-}
-
-source ./_chk_dependency.sh
+source _debugfs_common.sh
 
 # Test attrs file
 # ===============
@@ -56,33 +15,3 @@ test_write_fail "$file" "1 2 3 5 4" "$orig_content" \
        "min_nr_regions > max_nr_regions"
 test_content "$file" "$orig_content" "1 2 3 4 5" "successfully written"
 echo "$orig_content" > "$file"
-
-# Test schemes file
-# =================
-
-file="$DBGFS/schemes"
-orig_content=$(cat "$file")
-
-test_write_succ "$file" "1 2 3 4 5 6 4 0 0 0 1 2 3 1 100 3 2 1" \
-       "$orig_content" "valid input"
-test_write_fail "$file" "1 2
-3 4 5 6 3 0 0 0 1 2 3 1 100 3 2 1" "$orig_content" "multi lines"
-test_write_succ "$file" "" "$orig_content" "disabling"
-echo "$orig_content" > "$file"
-
-# Test target_ids file
-# ====================
-
-file="$DBGFS/target_ids"
-orig_content=$(cat "$file")
-
-test_write_succ "$file" "1 2 3 4" "$orig_content" "valid input"
-test_write_succ "$file" "1 2 abc 4" "$orig_content" "still valid input"
-test_content "$file" "$orig_content" "1 2" "non-integer was there"
-test_write_succ "$file" "abc 2 3" "$orig_content" "the file allows wrong input"
-test_content "$file" "$orig_content" "" "wrong input written"
-test_write_succ "$file" "" "$orig_content" "empty input"
-test_content "$file" "$orig_content" "" "empty input written"
-echo "$orig_content" > "$file"
-
-echo "PASS"
diff --git a/tools/testing/selftests/damon/debugfs_empty_targets.sh b/tools/testing/selftests/damon/debugfs_empty_targets.sh
new file mode 100644 (file)
index 0000000..87aff80
--- /dev/null
@@ -0,0 +1,13 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source _debugfs_common.sh
+
+# Test empty targets case
+# =======================
+
+orig_target_ids=$(cat "$DBGFS/target_ids")
+echo "" > "$DBGFS/target_ids"
+orig_monitor_on=$(cat "$DBGFS/monitor_on")
+test_write_fail "$DBGFS/monitor_on" "on" "orig_monitor_on" "empty target ids"
+echo "$orig_target_ids" > "$DBGFS/target_ids"
diff --git a/tools/testing/selftests/damon/debugfs_huge_count_read_write.sh b/tools/testing/selftests/damon/debugfs_huge_count_read_write.sh
new file mode 100644 (file)
index 0000000..922cada
--- /dev/null
@@ -0,0 +1,22 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source _debugfs_common.sh
+
+# Test huge count read write
+# ==========================
+
+dmesg -C
+
+for file in "$DBGFS/"*
+do
+       ./huge_count_read_write "$file"
+done
+
+if dmesg | grep -q WARNING
+then
+       dmesg
+       exit 1
+else
+       exit 0
+fi
diff --git a/tools/testing/selftests/damon/debugfs_schemes.sh b/tools/testing/selftests/damon/debugfs_schemes.sh
new file mode 100644 (file)
index 0000000..5b39ab4
--- /dev/null
@@ -0,0 +1,19 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source _debugfs_common.sh
+
+# Test schemes file
+# =================
+
+file="$DBGFS/schemes"
+orig_content=$(cat "$file")
+
+test_write_succ "$file" "1 2 3 4 5 6 4 0 0 0 1 2 3 1 100 3 2 1" \
+       "$orig_content" "valid input"
+test_write_fail "$file" "1 2
+3 4 5 6 3 0 0 0 1 2 3 1 100 3 2 1" "$orig_content" "multi lines"
+test_write_succ "$file" "" "$orig_content" "disabling"
+test_write_fail "$file" "2 1 2 1 10 1 3 10 1 1 1 1 1 1 1 1 2 3" \
+       "$orig_content" "wrong condition ranges"
+echo "$orig_content" > "$file"
diff --git a/tools/testing/selftests/damon/debugfs_target_ids.sh b/tools/testing/selftests/damon/debugfs_target_ids.sh
new file mode 100644 (file)
index 0000000..49aeabd
--- /dev/null
@@ -0,0 +1,19 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source _debugfs_common.sh
+
+# Test target_ids file
+# ====================
+
+file="$DBGFS/target_ids"
+orig_content=$(cat "$file")
+
+test_write_succ "$file" "1 2 3 4" "$orig_content" "valid input"
+test_write_succ "$file" "1 2 abc 4" "$orig_content" "still valid input"
+test_content "$file" "$orig_content" "1 2" "non-integer was there"
+test_write_succ "$file" "abc 2 3" "$orig_content" "the file allows wrong input"
+test_content "$file" "$orig_content" "" "wrong input written"
+test_write_succ "$file" "" "$orig_content" "empty input"
+test_content "$file" "$orig_content" "" "empty input written"
+echo "$orig_content" > "$file"
diff --git a/tools/testing/selftests/damon/huge_count_read_write.c b/tools/testing/selftests/damon/huge_count_read_write.c
new file mode 100644 (file)
index 0000000..ad7a6b4
--- /dev/null
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: SeongJae Park <sj@kernel.org>
+ */
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdio.h>
+
+void write_read_with_huge_count(char *file)
+{
+       int filedesc = open(file, O_RDWR);
+       char buf[25];
+       int ret;
+
+       printf("%s %s\n", __func__, file);
+       if (filedesc < 0) {
+               fprintf(stderr, "failed opening %s\n", file);
+               exit(1);
+       }
+
+       write(filedesc, "", 0xfffffffful);
+       perror("after write: ");
+       ret = read(filedesc, buf, 0xfffffffful);
+       perror("after read: ");
+       close(filedesc);
+}
+
+int main(int argc, char *argv[])
+{
+       if (argc != 2) {
+               fprintf(stderr, "Usage: %s <file>\n", argv[0]);
+               exit(1);
+       }
+       write_read_with_huge_count(argv[1]);
+
+       return 0;
+}
index b513f64d9092d1a35e7f33aba79580f7bbdcfb4f..026a126f584d76f01adef295a1449516fcbf6efb 100755 (executable)
@@ -72,6 +72,35 @@ rif_mac_profile_replacement_test()
        ip link set $h1.10 address $h1_10_mac
 }
 
+rif_mac_profile_consolidation_test()
+{
+       local count=$1; shift
+       local h1_20_mac
+
+       RET=0
+
+       if [[ $count -eq 1 ]]; then
+               return
+       fi
+
+       h1_20_mac=$(mac_get $h1.20)
+
+       # Set the MAC of $h1.20 to that of $h1.10 and confirm that they are
+       # using the same MAC profile.
+       ip link set $h1.20 address 00:11:11:11:11:11
+       check_err $?
+
+       occ=$(devlink -j resource show $DEVLINK_DEV \
+             | jq '.[][][] | select(.name=="rif_mac_profiles") |.["occ"]')
+
+       [[ $occ -eq $((count - 1)) ]]
+       check_err $? "MAC profile occupancy did not decrease"
+
+       log_test "RIF MAC profile consolidation"
+
+       ip link set $h1.20 address $h1_20_mac
+}
+
 rif_mac_profile_shared_replacement_test()
 {
        local count=$1; shift
@@ -104,6 +133,7 @@ rif_mac_profile_edit_test()
        create_max_rif_mac_profiles $count
 
        rif_mac_profile_replacement_test
+       rif_mac_profile_consolidation_test $count
        rif_mac_profile_shared_replacement_test $count
 }
 
index 3763105029fb3b3c6257cae7c2003e19ecdd0df9..3cb5ac5da0875b2018e574afb3c98bf5ff278c00 100644 (file)
 /x86_64/svm_int_ctl_test
 /x86_64/sync_regs_test
 /x86_64/tsc_msrs_test
+/x86_64/userspace_io_test
 /x86_64/userspace_msr_exit_test
 /x86_64/vmx_apic_access_test
 /x86_64/vmx_close_while_nested_test
 /x86_64/vmx_dirty_log_test
+/x86_64/vmx_invalid_nested_guest_state
 /x86_64/vmx_preemption_timer_test
 /x86_64/vmx_set_nested_state_test
 /x86_64/vmx_tsc_adjust_test
index c4e34717826aaf19dc4ad9e6f602d6a8eaa708c9..17342b575e855a0da67e173ae0cde8aaf51462e3 100644 (file)
@@ -59,10 +59,12 @@ TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
 TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
 TEST_GEN_PROGS_x86_64 += x86_64/svm_int_ctl_test
 TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
+TEST_GEN_PROGS_x86_64 += x86_64/userspace_io_test
 TEST_GEN_PROGS_x86_64 += x86_64/userspace_msr_exit_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_apic_access_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test
+TEST_GEN_PROGS_x86_64 += x86_64/vmx_invalid_nested_guest_state
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_nested_tsc_scaling_test
index 6a1a37f30494b327e36c8b54ec9ddd8a6d0996bc..2d62edc49d67f3086208cbd7ba0fca8161aabb53 100644 (file)
@@ -321,6 +321,7 @@ bool vm_is_unrestricted_guest(struct kvm_vm *vm);
 
 unsigned int vm_get_page_size(struct kvm_vm *vm);
 unsigned int vm_get_page_shift(struct kvm_vm *vm);
+unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
 uint64_t vm_get_max_gfn(struct kvm_vm *vm);
 int vm_get_fd(struct kvm_vm *vm);
 
index f968dfd4ee88929d523824c948ff8a1447120ed9..aed9dc3ca1e9eeb45246d643e48da4817f4c542c 100644 (file)
@@ -12,6 +12,7 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
+#include <sys/resource.h>
 
 #include "test_util.h"
 
@@ -40,10 +41,39 @@ int main(int argc, char *argv[])
 {
        int kvm_max_vcpu_id = kvm_check_cap(KVM_CAP_MAX_VCPU_ID);
        int kvm_max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
+       /*
+        * Number of file descriptors reqired, KVM_CAP_MAX_VCPUS for vCPU fds +
+        * an arbitrary number for everything else.
+        */
+       int nr_fds_wanted = kvm_max_vcpus + 100;
+       struct rlimit rl;
 
        pr_info("KVM_CAP_MAX_VCPU_ID: %d\n", kvm_max_vcpu_id);
        pr_info("KVM_CAP_MAX_VCPUS: %d\n", kvm_max_vcpus);
 
+       /*
+        * Check that we're allowed to open nr_fds_wanted file descriptors and
+        * try raising the limits if needed.
+        */
+       TEST_ASSERT(!getrlimit(RLIMIT_NOFILE, &rl), "getrlimit() failed!");
+
+       if (rl.rlim_cur < nr_fds_wanted) {
+               rl.rlim_cur = nr_fds_wanted;
+               if (rl.rlim_max < nr_fds_wanted) {
+                       int old_rlim_max = rl.rlim_max;
+                       rl.rlim_max = nr_fds_wanted;
+
+                       int r = setrlimit(RLIMIT_NOFILE, &rl);
+                       if (r < 0) {
+                               printf("RLIMIT_NOFILE hard limit is too low (%d, wanted %d)\n",
+                                      old_rlim_max, nr_fds_wanted);
+                               exit(KSFT_SKIP);
+                       }
+               } else {
+                       TEST_ASSERT(!setrlimit(RLIMIT_NOFILE, &rl), "setrlimit() failed!");
+               }
+       }
+
        /*
         * Upstream KVM prior to 4.8 does not support KVM_CAP_MAX_VCPU_ID.
         * Userspace is supposed to use KVM_CAP_MAX_VCPUS as the maximum ID
index 3836322add00ce894f11e72233ee34198d6a66e2..ba1fdc3dcf4a90319f1a9d7cd8dd9bbeaaa5f5b4 100644 (file)
@@ -280,7 +280,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
 #ifdef __s390x__
        alignment = max(0x100000, alignment);
 #endif
-       guest_test_phys_mem = align_down(guest_test_virt_mem, alignment);
+       guest_test_phys_mem = align_down(guest_test_phys_mem, alignment);
 
        /* Set up the shared data structure test_args */
        test_args.vm = vm;
index 8f2e0bb1ef96a4f2103fc89c94a13bde34f09f4a..53d2b5d04b829bc75a23905c0ce03647bb2c5ab4 100644 (file)
@@ -302,7 +302,7 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
                (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
 
        /* Limit physical addresses to PA-bits. */
-       vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
+       vm->max_gfn = vm_compute_max_gfn(vm);
 
        /* Allocate and setup memory for guest. */
        vm->vpages_mapped = sparsebit_alloc();
@@ -2328,6 +2328,11 @@ unsigned int vm_get_page_shift(struct kvm_vm *vm)
        return vm->page_shift;
 }
 
+unsigned long __attribute__((weak)) vm_compute_max_gfn(struct kvm_vm *vm)
+{
+       return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
+}
+
 uint64_t vm_get_max_gfn(struct kvm_vm *vm)
 {
        return vm->max_gfn;
index 82c39db913699949648974ecb3530bad60431bea..eef7b34756d5ce40dbc2d6ea8751971245b2cfec 100644 (file)
@@ -1431,3 +1431,71 @@ struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpui
 
        return cpuid;
 }
+
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx 0x69746e65
+
+static inline unsigned x86_family(unsigned int eax)
+{
+        unsigned int x86;
+
+        x86 = (eax >> 8) & 0xf;
+
+        if (x86 == 0xf)
+                x86 += (eax >> 20) & 0xff;
+
+        return x86;
+}
+
+unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
+{
+       const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */
+       unsigned long ht_gfn, max_gfn, max_pfn;
+       uint32_t eax, ebx, ecx, edx, max_ext_leaf;
+
+       max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1;
+
+       /* Avoid reserved HyperTransport region on AMD processors.  */
+       eax = ecx = 0;
+       cpuid(&eax, &ebx, &ecx, &edx);
+       if (ebx != X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx ||
+           ecx != X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx ||
+           edx != X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
+               return max_gfn;
+
+       /* On parts with <40 physical address bits, the area is fully hidden */
+       if (vm->pa_bits < 40)
+               return max_gfn;
+
+       /* Before family 17h, the HyperTransport area is just below 1T.  */
+       ht_gfn = (1 << 28) - num_ht_pages;
+       eax = 1;
+       cpuid(&eax, &ebx, &ecx, &edx);
+       if (x86_family(eax) < 0x17)
+               goto done;
+
+       /*
+        * Otherwise it's at the top of the physical address space, possibly
+        * reduced due to SME by bits 11:6 of CPUID[0x8000001f].EBX.  Use
+        * the old conservative value if MAXPHYADDR is not enumerated.
+        */
+       eax = 0x80000000;
+       cpuid(&eax, &ebx, &ecx, &edx);
+       max_ext_leaf = eax;
+       if (max_ext_leaf < 0x80000008)
+               goto done;
+
+       eax = 0x80000008;
+       cpuid(&eax, &ebx, &ecx, &edx);
+       max_pfn = (1ULL << ((eax & 0xff) - vm->page_shift)) - 1;
+       if (max_ext_leaf >= 0x8000001f) {
+               eax = 0x8000001f;
+               cpuid(&eax, &ebx, &ecx, &edx);
+               max_pfn >>= (ebx >> 6) & 0x3f;
+       }
+
+       ht_gfn = max_pfn - num_ht_pages;
+done:
+       return min(max_gfn, ht_gfn - 1);
+}
index 91d88aaa989928723be6aa575bc967082b16ca07..672915ce73d8f6a363b5e2ea690f3fc44e7c9b01 100644 (file)
@@ -165,10 +165,10 @@ static void hv_set_cpuid(struct kvm_vm *vm, struct kvm_cpuid2 *cpuid,
        vcpu_set_cpuid(vm, VCPU_ID, cpuid);
 }
 
-static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr,
-                                  struct kvm_cpuid2 *best)
+static void guest_test_msrs_access(void)
 {
        struct kvm_run *run;
+       struct kvm_vm *vm;
        struct ucall uc;
        int stage = 0, r;
        struct kvm_cpuid_entry2 feat = {
@@ -180,11 +180,34 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr,
        struct kvm_cpuid_entry2 dbg = {
                .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES
        };
-       struct kvm_enable_cap cap = {0};
-
-       run = vcpu_state(vm, VCPU_ID);
+       struct kvm_cpuid2 *best;
+       vm_vaddr_t msr_gva;
+       struct kvm_enable_cap cap = {
+               .cap = KVM_CAP_HYPERV_ENFORCE_CPUID,
+               .args = {1}
+       };
+       struct msr_data *msr;
 
        while (true) {
+               vm = vm_create_default(VCPU_ID, 0, guest_msr);
+
+               msr_gva = vm_vaddr_alloc_page(vm);
+               memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
+               msr = addr_gva2hva(vm, msr_gva);
+
+               vcpu_args_set(vm, VCPU_ID, 1, msr_gva);
+               vcpu_enable_cap(vm, VCPU_ID, &cap);
+
+               vcpu_set_hv_cpuid(vm, VCPU_ID);
+
+               best = kvm_get_supported_hv_cpuid();
+
+               vm_init_descriptor_tables(vm);
+               vcpu_init_descriptor_tables(vm, VCPU_ID);
+               vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
+
+               run = vcpu_state(vm, VCPU_ID);
+
                switch (stage) {
                case 0:
                        /*
@@ -315,6 +338,7 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr,
                         * capability enabled and guest visible CPUID bit unset.
                         */
                        cap.cap = KVM_CAP_HYPERV_SYNIC2;
+                       cap.args[0] = 0;
                        vcpu_enable_cap(vm, VCPU_ID, &cap);
                        break;
                case 22:
@@ -461,9 +485,9 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr,
 
                switch (get_ucall(vm, VCPU_ID, &uc)) {
                case UCALL_SYNC:
-                       TEST_ASSERT(uc.args[1] == stage,
-                                   "Unexpected stage: %ld (%d expected)\n",
-                                   uc.args[1], stage);
+                       TEST_ASSERT(uc.args[1] == 0,
+                                   "Unexpected stage: %ld (0 expected)\n",
+                                   uc.args[1]);
                        break;
                case UCALL_ABORT:
                        TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
@@ -474,13 +498,14 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr,
                }
 
                stage++;
+               kvm_vm_free(vm);
        }
 }
 
-static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall,
-                                    void *input, void *output, struct kvm_cpuid2 *best)
+static void guest_test_hcalls_access(void)
 {
        struct kvm_run *run;
+       struct kvm_vm *vm;
        struct ucall uc;
        int stage = 0, r;
        struct kvm_cpuid_entry2 feat = {
@@ -493,10 +518,38 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall
        struct kvm_cpuid_entry2 dbg = {
                .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES
        };
-
-       run = vcpu_state(vm, VCPU_ID);
+       struct kvm_enable_cap cap = {
+               .cap = KVM_CAP_HYPERV_ENFORCE_CPUID,
+               .args = {1}
+       };
+       vm_vaddr_t hcall_page, hcall_params;
+       struct hcall_data *hcall;
+       struct kvm_cpuid2 *best;
 
        while (true) {
+               vm = vm_create_default(VCPU_ID, 0, guest_hcall);
+
+               vm_init_descriptor_tables(vm);
+               vcpu_init_descriptor_tables(vm, VCPU_ID);
+               vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
+
+               /* Hypercall input/output */
+               hcall_page = vm_vaddr_alloc_pages(vm, 2);
+               hcall = addr_gva2hva(vm, hcall_page);
+               memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());
+
+               hcall_params = vm_vaddr_alloc_page(vm);
+               memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());
+
+               vcpu_args_set(vm, VCPU_ID, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
+               vcpu_enable_cap(vm, VCPU_ID, &cap);
+
+               vcpu_set_hv_cpuid(vm, VCPU_ID);
+
+               best = kvm_get_supported_hv_cpuid();
+
+               run = vcpu_state(vm, VCPU_ID);
+
                switch (stage) {
                case 0:
                        hcall->control = 0xdeadbeef;
@@ -606,9 +659,9 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall
 
                switch (get_ucall(vm, VCPU_ID, &uc)) {
                case UCALL_SYNC:
-                       TEST_ASSERT(uc.args[1] == stage,
-                                   "Unexpected stage: %ld (%d expected)\n",
-                                   uc.args[1], stage);
+                       TEST_ASSERT(uc.args[1] == 0,
+                                   "Unexpected stage: %ld (0 expected)\n",
+                                   uc.args[1]);
                        break;
                case UCALL_ABORT:
                        TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
@@ -619,66 +672,15 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall
                }
 
                stage++;
+               kvm_vm_free(vm);
        }
 }
 
 int main(void)
 {
-       struct kvm_cpuid2 *best;
-       struct kvm_vm *vm;
-       vm_vaddr_t msr_gva, hcall_page, hcall_params;
-       struct kvm_enable_cap cap = {
-               .cap = KVM_CAP_HYPERV_ENFORCE_CPUID,
-               .args = {1}
-       };
-
-       /* Test MSRs */
-       vm = vm_create_default(VCPU_ID, 0, guest_msr);
-
-       msr_gva = vm_vaddr_alloc_page(vm);
-       memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
-       vcpu_args_set(vm, VCPU_ID, 1, msr_gva);
-       vcpu_enable_cap(vm, VCPU_ID, &cap);
-
-       vcpu_set_hv_cpuid(vm, VCPU_ID);
-
-       best = kvm_get_supported_hv_cpuid();
-
-       vm_init_descriptor_tables(vm);
-       vcpu_init_descriptor_tables(vm, VCPU_ID);
-       vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
-
        pr_info("Testing access to Hyper-V specific MSRs\n");
-       guest_test_msrs_access(vm, addr_gva2hva(vm, msr_gva),
-                              best);
-       kvm_vm_free(vm);
-
-       /* Test hypercalls */
-       vm = vm_create_default(VCPU_ID, 0, guest_hcall);
-
-       vm_init_descriptor_tables(vm);
-       vcpu_init_descriptor_tables(vm, VCPU_ID);
-       vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
-
-       /* Hypercall input/output */
-       hcall_page = vm_vaddr_alloc_pages(vm, 2);
-       memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());
-
-       hcall_params = vm_vaddr_alloc_page(vm);
-       memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());
-
-       vcpu_args_set(vm, VCPU_ID, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
-       vcpu_enable_cap(vm, VCPU_ID, &cap);
-
-       vcpu_set_hv_cpuid(vm, VCPU_ID);
-
-       best = kvm_get_supported_hv_cpuid();
+       guest_test_msrs_access();
 
        pr_info("Testing access to Hyper-V hypercalls\n");
-       guest_test_hcalls_access(vm, addr_gva2hva(vm, hcall_params),
-                                addr_gva2hva(vm, hcall_page),
-                                addr_gva2hva(vm, hcall_page) + getpagesize(),
-                                best);
-
-       kvm_vm_free(vm);
+       guest_test_hcalls_access();
 }
index 5ba325cd64bfd80c02dd3ff441bb5330070700b2..29b18d565cf4ce1cfbbce5b29e4298dc781e99c1 100644 (file)
@@ -54,12 +54,15 @@ static struct kvm_vm *sev_vm_create(bool es)
        return vm;
 }
 
-static struct kvm_vm *__vm_create(void)
+static struct kvm_vm *aux_vm_create(bool with_vcpus)
 {
        struct kvm_vm *vm;
        int i;
 
        vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
+       if (!with_vcpus)
+               return vm;
+
        for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
                vm_vcpu_add(vm, i);
 
@@ -89,11 +92,11 @@ static void test_sev_migrate_from(bool es)
 {
        struct kvm_vm *src_vm;
        struct kvm_vm *dst_vms[NR_MIGRATE_TEST_VMS];
-       int i;
+       int i, ret;
 
        src_vm = sev_vm_create(es);
        for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i)
-               dst_vms[i] = __vm_create();
+               dst_vms[i] = aux_vm_create(true);
 
        /* Initial migration from the src to the first dst. */
        sev_migrate_from(dst_vms[0]->fd, src_vm->fd);
@@ -102,7 +105,10 @@ static void test_sev_migrate_from(bool es)
                sev_migrate_from(dst_vms[i]->fd, dst_vms[i - 1]->fd);
 
        /* Migrate the guest back to the original VM. */
-       sev_migrate_from(src_vm->fd, dst_vms[NR_MIGRATE_TEST_VMS - 1]->fd);
+       ret = __sev_migrate_from(src_vm->fd, dst_vms[NR_MIGRATE_TEST_VMS - 1]->fd);
+       TEST_ASSERT(ret == -1 && errno == EIO,
+                   "VM that was migrated from should be dead. ret %d, errno: %d\n", ret,
+                   errno);
 
        kvm_vm_free(src_vm);
        for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i)
@@ -146,6 +152,8 @@ static void test_sev_migrate_locking(void)
 
        for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
                pthread_join(pt[i], NULL);
+       for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
+               kvm_vm_free(input[i].vm);
 }
 
 static void test_sev_migrate_parameters(void)
@@ -157,12 +165,11 @@ static void test_sev_migrate_parameters(void)
        sev_vm = sev_vm_create(/* es= */ false);
        sev_es_vm = sev_vm_create(/* es= */ true);
        vm_no_vcpu = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
-       vm_no_sev = __vm_create();
+       vm_no_sev = aux_vm_create(true);
        sev_es_vm_no_vmsa = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
        sev_ioctl(sev_es_vm_no_vmsa->fd, KVM_SEV_ES_INIT, NULL);
        vm_vcpu_add(sev_es_vm_no_vmsa, 1);
 
-
        ret = __sev_migrate_from(sev_vm->fd, sev_es_vm->fd);
        TEST_ASSERT(
                ret == -1 && errno == EINVAL,
@@ -191,13 +198,151 @@ static void test_sev_migrate_parameters(void)
        TEST_ASSERT(ret == -1 && errno == EINVAL,
                    "Migrations require SEV enabled. ret %d, errno: %d\n", ret,
                    errno);
+
+       kvm_vm_free(sev_vm);
+       kvm_vm_free(sev_es_vm);
+       kvm_vm_free(sev_es_vm_no_vmsa);
+       kvm_vm_free(vm_no_vcpu);
+       kvm_vm_free(vm_no_sev);
+}
+
+static int __sev_mirror_create(int dst_fd, int src_fd)
+{
+       struct kvm_enable_cap cap = {
+               .cap = KVM_CAP_VM_COPY_ENC_CONTEXT_FROM,
+               .args = { src_fd }
+       };
+
+       return ioctl(dst_fd, KVM_ENABLE_CAP, &cap);
+}
+
+
+static void sev_mirror_create(int dst_fd, int src_fd)
+{
+       int ret;
+
+       ret = __sev_mirror_create(dst_fd, src_fd);
+       TEST_ASSERT(!ret, "Copying context failed, ret: %d, errno: %d\n", ret, errno);
+}
+
+static void test_sev_mirror(bool es)
+{
+       struct kvm_vm *src_vm, *dst_vm;
+       struct kvm_sev_launch_start start = {
+               .policy = es ? SEV_POLICY_ES : 0
+       };
+       int i;
+
+       src_vm = sev_vm_create(es);
+       dst_vm = aux_vm_create(false);
+
+       sev_mirror_create(dst_vm->fd, src_vm->fd);
+
+       /* Check that we can complete creation of the mirror VM.  */
+       for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
+               vm_vcpu_add(dst_vm, i);
+       sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_START, &start);
+       if (es)
+               sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
+
+       kvm_vm_free(src_vm);
+       kvm_vm_free(dst_vm);
+}
+
+static void test_sev_mirror_parameters(void)
+{
+       struct kvm_vm *sev_vm, *sev_es_vm, *vm_no_vcpu, *vm_with_vcpu;
+       int ret;
+
+       sev_vm = sev_vm_create(/* es= */ false);
+       sev_es_vm = sev_vm_create(/* es= */ true);
+       vm_with_vcpu = aux_vm_create(true);
+       vm_no_vcpu = aux_vm_create(false);
+
+       ret = __sev_mirror_create(sev_vm->fd, sev_vm->fd);
+       TEST_ASSERT(
+               ret == -1 && errno == EINVAL,
+               "Should not be able copy context to self. ret: %d, errno: %d\n",
+               ret, errno);
+
+       ret = __sev_mirror_create(sev_vm->fd, sev_es_vm->fd);
+       TEST_ASSERT(
+               ret == -1 && errno == EINVAL,
+               "Should not be able copy context to SEV enabled VM. ret: %d, errno: %d\n",
+               ret, errno);
+
+       ret = __sev_mirror_create(sev_es_vm->fd, sev_vm->fd);
+       TEST_ASSERT(
+               ret == -1 && errno == EINVAL,
+               "Should not be able copy context to SEV-ES enabled VM. ret: %d, errno: %d\n",
+               ret, errno);
+
+       ret = __sev_mirror_create(vm_no_vcpu->fd, vm_with_vcpu->fd);
+       TEST_ASSERT(ret == -1 && errno == EINVAL,
+                   "Copy context requires SEV enabled. ret %d, errno: %d\n", ret,
+                   errno);
+
+       ret = __sev_mirror_create(vm_with_vcpu->fd, sev_vm->fd);
+       TEST_ASSERT(
+               ret == -1 && errno == EINVAL,
+               "SEV copy context requires no vCPUS on the destination. ret: %d, errno: %d\n",
+               ret, errno);
+
+       kvm_vm_free(sev_vm);
+       kvm_vm_free(sev_es_vm);
+       kvm_vm_free(vm_with_vcpu);
+       kvm_vm_free(vm_no_vcpu);
+}
+
+static void test_sev_move_copy(void)
+{
+       struct kvm_vm *dst_vm, *sev_vm, *mirror_vm, *dst_mirror_vm;
+       int ret;
+
+       sev_vm = sev_vm_create(/* es= */ false);
+       dst_vm = aux_vm_create(true);
+       mirror_vm = aux_vm_create(false);
+       dst_mirror_vm = aux_vm_create(false);
+
+       sev_mirror_create(mirror_vm->fd, sev_vm->fd);
+       ret = __sev_migrate_from(dst_vm->fd, sev_vm->fd);
+       TEST_ASSERT(ret == -1 && errno == EBUSY,
+                   "Cannot migrate VM that has mirrors. ret %d, errno: %d\n", ret,
+                   errno);
+
+       /* The mirror itself can be migrated.  */
+       sev_migrate_from(dst_mirror_vm->fd, mirror_vm->fd);
+       ret = __sev_migrate_from(dst_vm->fd, sev_vm->fd);
+       TEST_ASSERT(ret == -1 && errno == EBUSY,
+                   "Cannot migrate VM that has mirrors. ret %d, errno: %d\n", ret,
+                   errno);
+
+       /*
+        * mirror_vm is not a mirror anymore, dst_mirror_vm is.  Thus,
+        * the owner can be copied as soon as dst_mirror_vm is gone.
+        */
+       kvm_vm_free(dst_mirror_vm);
+       sev_migrate_from(dst_vm->fd, sev_vm->fd);
+
+       kvm_vm_free(mirror_vm);
+       kvm_vm_free(dst_vm);
+       kvm_vm_free(sev_vm);
 }
 
 int main(int argc, char *argv[])
 {
-       test_sev_migrate_from(/* es= */ false);
-       test_sev_migrate_from(/* es= */ true);
-       test_sev_migrate_locking();
-       test_sev_migrate_parameters();
+       if (kvm_check_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM)) {
+               test_sev_migrate_from(/* es= */ false);
+               test_sev_migrate_from(/* es= */ true);
+               test_sev_migrate_locking();
+               test_sev_migrate_parameters();
+               if (kvm_check_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM))
+                       test_sev_move_copy();
+       }
+       if (kvm_check_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM)) {
+               test_sev_mirror(/* es= */ false);
+               test_sev_mirror(/* es= */ true);
+               test_sev_mirror_parameters();
+       }
        return 0;
 }
index df04f56ce859a0b40b770228dc72516190c5f526..30a81038df460481efe853a722eded2205b5a90a 100644 (file)
@@ -75,7 +75,7 @@ static void l1_guest_code(struct svm_test_data *svm)
        vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
 
        /* No intercepts for real and virtual interrupts */
-       vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR | INTERCEPT_VINTR);
+       vmcb->control.intercept &= ~(BIT(INTERCEPT_INTR) | BIT(INTERCEPT_VINTR));
 
        /* Make a virtual interrupt VINTR_IRQ_NUMBER pending */
        vmcb->control.int_ctl |= V_IRQ_MASK | (0x1 << V_INTR_PRIO_SHIFT);
diff --git a/tools/testing/selftests/kvm/x86_64/userspace_io_test.c b/tools/testing/selftests/kvm/x86_64/userspace_io_test.c
new file mode 100644 (file)
index 0000000..e4bef2e
--- /dev/null
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+
+#include "kvm_util.h"
+#include "processor.h"
+
+#define VCPU_ID                        1
+
+static void guest_ins_port80(uint8_t *buffer, unsigned int count)
+{
+       unsigned long end;
+
+       if (count == 2)
+               end = (unsigned long)buffer + 1;
+       else
+               end = (unsigned long)buffer + 8192;
+
+       asm volatile("cld; rep; insb" : "+D"(buffer), "+c"(count) : "d"(0x80) : "memory");
+       GUEST_ASSERT_1(count == 0, count);
+       GUEST_ASSERT_2((unsigned long)buffer == end, buffer, end);
+}
+
+static void guest_code(void)
+{
+       uint8_t buffer[8192];
+       int i;
+
+       /*
+        * Special case tests.  main() will adjust RCX 2 => 1 and 3 => 8192 to
+        * test that KVM doesn't explode when userspace modifies the "count" on
+        * a userspace I/O exit.  KVM isn't required to play nice with the I/O
+        * itself as KVM doesn't support manipulating the count, it just needs
+        * to not explode or overflow a buffer.
+        */
+       guest_ins_port80(buffer, 2);
+       guest_ins_port80(buffer, 3);
+
+       /* Verify KVM fills the buffer correctly when not stuffing RCX. */
+       memset(buffer, 0, sizeof(buffer));
+       guest_ins_port80(buffer, 8192);
+       for (i = 0; i < 8192; i++)
+               GUEST_ASSERT_2(buffer[i] == 0xaa, i, buffer[i]);
+
+       GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+       struct kvm_regs regs;
+       struct kvm_run *run;
+       struct kvm_vm *vm;
+       struct ucall uc;
+       int rc;
+
+       /* Tell stdout not to buffer its content */
+       setbuf(stdout, NULL);
+
+       /* Create VM */
+       vm = vm_create_default(VCPU_ID, 0, guest_code);
+       run = vcpu_state(vm, VCPU_ID);
+
+       memset(&regs, 0, sizeof(regs));
+
+       while (1) {
+               rc = _vcpu_run(vm, VCPU_ID);
+
+               TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);
+               TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+                           "Unexpected exit reason: %u (%s),\n",
+                           run->exit_reason,
+                           exit_reason_str(run->exit_reason));
+
+               if (get_ucall(vm, VCPU_ID, &uc))
+                       break;
+
+               TEST_ASSERT(run->io.port == 0x80,
+                           "Expected I/O at port 0x80, got port 0x%x\n", run->io.port);
+
+               /*
+                * Modify the rep string count in RCX: 2 => 1 and 3 => 8192.
+                * Note, this abuses KVM's batching of rep string I/O to avoid
+                * getting stuck in an infinite loop.  That behavior isn't in
+                * scope from a testing perspective as it's not ABI in any way,
+                * i.e. it really is abusing internal KVM knowledge.
+                */
+               vcpu_regs_get(vm, VCPU_ID, &regs);
+               if (regs.rcx == 2)
+                       regs.rcx = 1;
+               if (regs.rcx == 3)
+                       regs.rcx = 8192;
+               memset((void *)run + run->io.data_offset, 0xaa, 4096);
+               vcpu_regs_set(vm, VCPU_ID, &regs);
+       }
+
+       switch (uc.cmd) {
+       case UCALL_DONE:
+               break;
+       case UCALL_ABORT:
+               TEST_FAIL("%s at %s:%ld : argN+1 = 0x%lx, argN+2 = 0x%lx",
+                         (const char *)uc.args[0], __FILE__, uc.args[1],
+                         uc.args[2], uc.args[3]);
+       default:
+               TEST_FAIL("Unknown ucall %lu", uc.cmd);
+       }
+
+       kvm_vm_free(vm);
+       return 0;
+}
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c b/tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c
new file mode 100644 (file)
index 0000000..489fbed
--- /dev/null
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "vmx.h"
+
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "kselftest.h"
+
+#define VCPU_ID        0
+#define ARBITRARY_IO_PORT 0x2000
+
+static struct kvm_vm *vm;
+
+static void l2_guest_code(void)
+{
+       /*
+        * Generate an exit to L0 userspace, i.e. main(), via I/O to an
+        * arbitrary port.
+        */
+       asm volatile("inb %%dx, %%al"
+                    : : [port] "d" (ARBITRARY_IO_PORT) : "rax");
+}
+
+static void l1_guest_code(struct vmx_pages *vmx_pages)
+{
+#define L2_GUEST_STACK_SIZE 64
+       unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+
+       GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+       GUEST_ASSERT(load_vmcs(vmx_pages));
+
+       /* Prepare the VMCS for L2 execution. */
+       prepare_vmcs(vmx_pages, l2_guest_code,
+                    &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+       /*
+        * L2 must be run without unrestricted guest, verify that the selftests
+        * library hasn't enabled it.  Because KVM selftests jump directly to
+        * 64-bit mode, unrestricted guest support isn't required.
+        */
+       GUEST_ASSERT(!(vmreadz(CPU_BASED_VM_EXEC_CONTROL) & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) ||
+                    !(vmreadz(SECONDARY_VM_EXEC_CONTROL) & SECONDARY_EXEC_UNRESTRICTED_GUEST));
+
+       GUEST_ASSERT(!vmlaunch());
+
+       /* L2 should triple fault after main() stuffs invalid guest state. */
+       GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_TRIPLE_FAULT);
+       GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+       vm_vaddr_t vmx_pages_gva;
+       struct kvm_sregs sregs;
+       struct kvm_run *run;
+       struct ucall uc;
+
+       nested_vmx_check_supported();
+
+       vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
+
+       /* Allocate VMX pages and shared descriptors (vmx_pages). */
+       vcpu_alloc_vmx(vm, &vmx_pages_gva);
+       vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+
+       vcpu_run(vm, VCPU_ID);
+
+       run = vcpu_state(vm, VCPU_ID);
+
+       /*
+        * The first exit to L0 userspace should be an I/O access from L2.
+        * Running L1 should launch L2 without triggering an exit to userspace.
+        */
+       TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+                   "Expected KVM_EXIT_IO, got: %u (%s)\n",
+                   run->exit_reason, exit_reason_str(run->exit_reason));
+
+       TEST_ASSERT(run->io.port == ARBITRARY_IO_PORT,
+                   "Expected IN from port %d from L2, got port %d",
+                   ARBITRARY_IO_PORT, run->io.port);
+
+       /*
+        * Stuff invalid guest state for L2 by making TR unusable.  The next
+        * KVM_RUN should induce a TRIPLE_FAULT in L2 as KVM doesn't support
+        * emulating invalid guest state for L2.
+        */
+       memset(&sregs, 0, sizeof(sregs));
+       vcpu_sregs_get(vm, VCPU_ID, &sregs);
+       sregs.tr.unusable = 1;
+       vcpu_sregs_set(vm, VCPU_ID, &sregs);
+
+       vcpu_run(vm, VCPU_ID);
+
+       switch (get_ucall(vm, VCPU_ID, &uc)) {
+       case UCALL_DONE:
+               break;
+       case UCALL_ABORT:
+               TEST_FAIL("%s", (const char *)uc.args[0]);
+       default:
+               TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
+       }
+}
index 23051d84b9078a7177df9d1c505031bf2bbf0ccf..2454a1f2ca0c251532e93905f42d00fcdc1755aa 100644 (file)
@@ -110,22 +110,5 @@ int main(int argc, char *argv[])
        ret = _vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_LBR_FMT);
        TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail.");
 
-       /* testcase 4, set capabilities when we don't have PDCM bit */
-       entry_1_0->ecx &= ~X86_FEATURE_PDCM;
-       vcpu_set_cpuid(vm, VCPU_ID, cpuid);
-       ret = _vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);
-       TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail.");
-
-       /* testcase 5, set capabilities when we don't have PMU version bits */
-       entry_1_0->ecx |= X86_FEATURE_PDCM;
-       eax.split.version_id = 0;
-       entry_1_0->ecx = eax.full;
-       vcpu_set_cpuid(vm, VCPU_ID, cpuid);
-       ret = _vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_FW_WRITES);
-       TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail.");
-
-       vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, 0);
-       ASSERT_EQ(vcpu_get_msr(vm, VCPU_ID, MSR_IA32_PERF_CAPABILITIES), 0);
-
        kvm_vm_free(vm);
 }
index 3313566ce9062e285a9872ac2ce6c2ab3c1bc652..ad2982b72e02b9d580a97958188d1d3507a1bb05 100755 (executable)
@@ -455,6 +455,22 @@ cleanup()
        ip netns del ${NSC} >/dev/null 2>&1
 }
 
+cleanup_vrf_dup()
+{
+       ip link del ${NSA_DEV2} >/dev/null 2>&1
+       ip netns pids ${NSC} | xargs kill 2>/dev/null
+       ip netns del ${NSC} >/dev/null 2>&1
+}
+
+setup_vrf_dup()
+{
+       # some VRF tests use ns-C which has the same config as
+       # ns-B but for a device NOT in the VRF
+       create_ns ${NSC} "-" "-"
+       connect_ns ${NSA} ${NSA_DEV2} ${NSA_IP}/24 ${NSA_IP6}/64 \
+                  ${NSC} ${NSC_DEV} ${NSB_IP}/24 ${NSB_IP6}/64
+}
+
 setup()
 {
        local with_vrf=${1}
@@ -484,12 +500,6 @@ setup()
 
                ip -netns ${NSB} ro add ${VRF_IP}/32 via ${NSA_IP} dev ${NSB_DEV}
                ip -netns ${NSB} -6 ro add ${VRF_IP6}/128 via ${NSA_IP6} dev ${NSB_DEV}
-
-               # some VRF tests use ns-C which has the same config as
-               # ns-B but for a device NOT in the VRF
-               create_ns ${NSC} "-" "-"
-               connect_ns ${NSA} ${NSA_DEV2} ${NSA_IP}/24 ${NSA_IP6}/64 \
-                          ${NSC} ${NSC_DEV} ${NSB_IP}/24 ${NSB_IP6}/64
        else
                ip -netns ${NSA} ro add ${NSB_LO_IP}/32 via ${NSB_IP} dev ${NSA_DEV}
                ip -netns ${NSA} ro add ${NSB_LO_IP6}/128 via ${NSB_IP6} dev ${NSA_DEV}
@@ -1240,7 +1250,9 @@ ipv4_tcp_vrf()
        log_test_addr ${a} $? 1 "Global server, local connection"
 
        # run MD5 tests
+       setup_vrf_dup
        ipv4_tcp_md5
+       cleanup_vrf_dup
 
        #
        # enable VRF global server
@@ -1798,8 +1810,9 @@ ipv4_addr_bind_vrf()
        for a in ${NSA_IP} ${VRF_IP}
        do
                log_start
+               show_hint "Socket not bound to VRF, but address is in VRF"
                run_cmd nettest -s -R -P icmp -l ${a} -b
-               log_test_addr ${a} $? 0 "Raw socket bind to local address"
+               log_test_addr ${a} $? 1 "Raw socket bind to local address"
 
                log_start
                run_cmd nettest -s -R -P icmp -l ${a} -I ${NSA_DEV} -b
@@ -2191,7 +2204,7 @@ ipv6_ping_vrf()
                log_start
                show_hint "Fails since VRF device does not support linklocal or multicast"
                run_cmd ${ping6} -c1 -w1 ${a}
-               log_test_addr ${a} $? 2 "ping out, VRF bind"
+               log_test_addr ${a} $? 1 "ping out, VRF bind"
        done
 
        for a in ${NSB_IP6} ${NSB_LO_IP6} ${NSB_LINKIP6}%${NSA_DEV} ${MCAST}%${NSA_DEV}
@@ -2719,7 +2732,9 @@ ipv6_tcp_vrf()
        log_test_addr ${a} $? 1 "Global server, local connection"
 
        # run MD5 tests
+       setup_vrf_dup
        ipv6_tcp_md5
+       cleanup_vrf_dup
 
        #
        # enable VRF global server
@@ -3414,11 +3429,14 @@ ipv6_addr_bind_novrf()
        run_cmd nettest -6 -s -l ${a} -I ${NSA_DEV} -t1 -b
        log_test_addr ${a} $? 0 "TCP socket bind to local address after device bind"
 
+       # Sadly, the kernel allows binding a socket to a device and then
+       # binding to an address not on the device. So this test passes
+       # when it really should not
        a=${NSA_LO_IP6}
        log_start
-       show_hint "Should fail with 'Cannot assign requested address'"
+       show_hint "Tecnically should fail since address is not on device but kernel allows"
        run_cmd nettest -6 -s -l ${a} -I ${NSA_DEV} -t1 -b
-       log_test_addr ${a} $? 1 "TCP socket bind to out of scope local address"
+       log_test_addr ${a} $? 0 "TCP socket bind to out of scope local address"
 }
 
 ipv6_addr_bind_vrf()
@@ -3459,10 +3477,15 @@ ipv6_addr_bind_vrf()
        run_cmd nettest -6 -s -l ${a} -I ${NSA_DEV} -t1 -b
        log_test_addr ${a} $? 0 "TCP socket bind to local address with device bind"
 
+       # Sadly, the kernel allows binding a socket to a device and then
+       # binding to an address not on the device. The only restriction
+       # is that the address is valid in the L3 domain. So this test
+       # passes when it really should not
        a=${VRF_IP6}
        log_start
+       show_hint "Tecnically should fail since address is not on device but kernel allows"
        run_cmd nettest -6 -s -l ${a} -I ${NSA_DEV} -t1 -b
-       log_test_addr ${a} $? 1 "TCP socket bind to VRF address with device bind"
+       log_test_addr ${a} $? 0 "TCP socket bind to VRF address with device bind"
 
        a=${NSA_LO_IP6}
        log_start
@@ -4002,8 +4025,8 @@ EOF
 ################################################################################
 # main
 
-TESTS_IPV4="ipv4_ping ipv4_tcp ipv4_udp ipv4_addr_bind ipv4_runtime ipv4_netfilter"
-TESTS_IPV6="ipv6_ping ipv6_tcp ipv6_udp ipv6_addr_bind ipv6_runtime ipv6_netfilter"
+TESTS_IPV4="ipv4_ping ipv4_tcp ipv4_udp ipv4_bind ipv4_runtime ipv4_netfilter"
+TESTS_IPV6="ipv6_ping ipv6_tcp ipv6_udp ipv6_bind ipv6_runtime ipv6_netfilter"
 TESTS_OTHER="use_cases"
 
 PAUSE_ON_FAIL=no
@@ -4077,3 +4100,11 @@ cleanup 2>/dev/null
 
 printf "\nTests passed: %3d\n" ${nsuccess}
 printf "Tests failed: %3d\n"   ${nfail}
+
+if [ $nfail -ne 0 ]; then
+       exit 1 # KSFT_FAIL
+elif [ $nsuccess -eq 0 ]; then
+       exit $ksft_skip
+fi
+
+exit 0 # KSFT_PASS
index 5abe92d55b696af2f45291752d48c03f378f3d96..996af1ae3d3ddb4cf4f97eb0dec71c9b99c007f2 100755 (executable)
@@ -444,24 +444,63 @@ fib_rp_filter_test()
        setup
 
        set -e
+       ip netns add ns2
+       ip netns set ns2 auto
+
+       ip -netns ns2 link set dev lo up
+
+       $IP link add name veth1 type veth peer name veth2
+       $IP link set dev veth2 netns ns2
+       $IP address add 192.0.2.1/24 dev veth1
+       ip -netns ns2 address add 192.0.2.1/24 dev veth2
+       $IP link set dev veth1 up
+       ip -netns ns2 link set dev veth2 up
+
        $IP link set dev lo address 52:54:00:6a:c7:5e
-       $IP link set dummy0 address 52:54:00:6a:c7:5e
-       $IP link add dummy1 type dummy
-       $IP link set dummy1 address 52:54:00:6a:c7:5e
-       $IP link set dev dummy1 up
+       $IP link set dev veth1 address 52:54:00:6a:c7:5e
+       ip -netns ns2 link set dev lo address 52:54:00:6a:c7:5e
+       ip -netns ns2 link set dev veth2 address 52:54:00:6a:c7:5e
+
+       # 1. (ns2) redirect lo's egress to veth2's egress
+       ip netns exec ns2 tc qdisc add dev lo parent root handle 1: fq_codel
+       ip netns exec ns2 tc filter add dev lo parent 1: protocol arp basic \
+               action mirred egress redirect dev veth2
+       ip netns exec ns2 tc filter add dev lo parent 1: protocol ip basic \
+               action mirred egress redirect dev veth2
+
+       # 2. (ns1) redirect veth1's ingress to lo's ingress
+       $NS_EXEC tc qdisc add dev veth1 ingress
+       $NS_EXEC tc filter add dev veth1 ingress protocol arp basic \
+               action mirred ingress redirect dev lo
+       $NS_EXEC tc filter add dev veth1 ingress protocol ip basic \
+               action mirred ingress redirect dev lo
+
+       # 3. (ns1) redirect lo's egress to veth1's egress
+       $NS_EXEC tc qdisc add dev lo parent root handle 1: fq_codel
+       $NS_EXEC tc filter add dev lo parent 1: protocol arp basic \
+               action mirred egress redirect dev veth1
+       $NS_EXEC tc filter add dev lo parent 1: protocol ip basic \
+               action mirred egress redirect dev veth1
+
+       # 4. (ns2) redirect veth2's ingress to lo's ingress
+       ip netns exec ns2 tc qdisc add dev veth2 ingress
+       ip netns exec ns2 tc filter add dev veth2 ingress protocol arp basic \
+               action mirred ingress redirect dev lo
+       ip netns exec ns2 tc filter add dev veth2 ingress protocol ip basic \
+               action mirred ingress redirect dev lo
+
        $NS_EXEC sysctl -qw net.ipv4.conf.all.rp_filter=1
        $NS_EXEC sysctl -qw net.ipv4.conf.all.accept_local=1
        $NS_EXEC sysctl -qw net.ipv4.conf.all.route_localnet=1
-
-       $NS_EXEC tc qd add dev dummy1 parent root handle 1: fq_codel
-       $NS_EXEC tc filter add dev dummy1 parent 1: protocol arp basic action mirred egress redirect dev lo
-       $NS_EXEC tc filter add dev dummy1 parent 1: protocol ip basic action mirred egress redirect dev lo
+       ip netns exec ns2 sysctl -qw net.ipv4.conf.all.rp_filter=1
+       ip netns exec ns2 sysctl -qw net.ipv4.conf.all.accept_local=1
+       ip netns exec ns2 sysctl -qw net.ipv4.conf.all.route_localnet=1
        set +e
 
-       run_cmd "ip netns exec ns1 ping -I dummy1 -w1 -c1 198.51.100.1"
+       run_cmd "ip netns exec ns2 ping -w1 -c1 192.0.2.1"
        log_test $? 0 "rp_filter passes local packets"
 
-       run_cmd "ip netns exec ns1 ping -I dummy1 -w1 -c1 127.0.0.1"
+       run_cmd "ip netns exec ns2 ping -w1 -c1 127.0.0.1"
        log_test $? 0 "rp_filter passes loopback packets"
 
        cleanup
index bf17e485684f0d3b3f1cba27f3d1d250ce15ee24..b0980a2efa3172562361548f80ea4f615b1a3363 100644 (file)
@@ -13,6 +13,8 @@ NETIFS[p5]=veth4
 NETIFS[p6]=veth5
 NETIFS[p7]=veth6
 NETIFS[p8]=veth7
+NETIFS[p9]=veth8
+NETIFS[p10]=veth9
 
 # Port that does not have a cable connected.
 NETIF_NO_CABLE=eth8
index ecbf57f264ed937344deef8abf1707e8ac222c5b..7b9d6e31b8e7d7d4afccdebdf6492f46fa611674 100755 (executable)
@@ -311,7 +311,7 @@ check_exception()
                ip -netns h1 ro get ${H1_VRF_ARG} ${H2_N2_IP} | \
                grep -E -v 'mtu|redirected' | grep -q "cache"
        fi
-       log_test $? 0 "IPv4: ${desc}"
+       log_test $? 0 "IPv4: ${desc}" 0
 
        # No PMTU info for test "redirect" and "mtu exception plus redirect"
        if [ "$with_redirect" = "yes" ] && [ "$desc" != "redirect exception plus mtu" ]; then
index 8a22db0cca496a62d4b452d6fc190cb13f60f99f..6e468e0f42f7844ffb8d10509b8370cc9382ba3e 100644 (file)
@@ -31,6 +31,8 @@ struct tls_crypto_info_keys {
                struct tls12_crypto_info_chacha20_poly1305 chacha20;
                struct tls12_crypto_info_sm4_gcm sm4gcm;
                struct tls12_crypto_info_sm4_ccm sm4ccm;
+               struct tls12_crypto_info_aes_ccm_128 aesccm128;
+               struct tls12_crypto_info_aes_gcm_256 aesgcm256;
        };
        size_t len;
 };
@@ -61,6 +63,16 @@ static void tls_crypto_info_init(uint16_t tls_version, uint16_t cipher_type,
                tls12->sm4ccm.info.version = tls_version;
                tls12->sm4ccm.info.cipher_type = cipher_type;
                break;
+       case TLS_CIPHER_AES_CCM_128:
+               tls12->len = sizeof(struct tls12_crypto_info_aes_ccm_128);
+               tls12->aesccm128.info.version = tls_version;
+               tls12->aesccm128.info.cipher_type = cipher_type;
+               break;
+       case TLS_CIPHER_AES_GCM_256:
+               tls12->len = sizeof(struct tls12_crypto_info_aes_gcm_256);
+               tls12->aesgcm256.info.version = tls_version;
+               tls12->aesgcm256.info.cipher_type = cipher_type;
+               break;
        default:
                break;
        }
@@ -261,6 +273,30 @@ FIXTURE_VARIANT_ADD(tls, 13_sm4_ccm)
        .cipher_type = TLS_CIPHER_SM4_CCM,
 };
 
+FIXTURE_VARIANT_ADD(tls, 12_aes_ccm)
+{
+       .tls_version = TLS_1_2_VERSION,
+       .cipher_type = TLS_CIPHER_AES_CCM_128,
+};
+
+FIXTURE_VARIANT_ADD(tls, 13_aes_ccm)
+{
+       .tls_version = TLS_1_3_VERSION,
+       .cipher_type = TLS_CIPHER_AES_CCM_128,
+};
+
+FIXTURE_VARIANT_ADD(tls, 12_aes_gcm_256)
+{
+       .tls_version = TLS_1_2_VERSION,
+       .cipher_type = TLS_CIPHER_AES_GCM_256,
+};
+
+FIXTURE_VARIANT_ADD(tls, 13_aes_gcm_256)
+{
+       .tls_version = TLS_1_3_VERSION,
+       .cipher_type = TLS_CIPHER_AES_GCM_256,
+};
+
 FIXTURE_SETUP(tls)
 {
        struct tls_crypto_info_keys tls12;
index 710ac956bdb33fae80b1cda79c9dc57370dbcc82..c5489341cfb80487375e2b2ceeb21c9fd0aa035e 100644 (file)
@@ -498,7 +498,7 @@ static void parse_opts(int argc, char **argv)
        bool have_toeplitz = false;
        int index, c;
 
-       while ((c = getopt_long(argc, argv, "46C:d:i:k:r:stT:u:v", long_options, &index)) != -1) {
+       while ((c = getopt_long(argc, argv, "46C:d:i:k:r:stT:uv", long_options, &index)) != -1) {
                switch (c) {
                case '4':
                        cfg_family = AF_INET;
index 91f3ef0f1192cb2a5769977394040eb72594927e..8b5ea923458828247d4264fca22dc31298fcb14e 100755 (executable)
@@ -150,11 +150,27 @@ EOF
 # oifname is the vrf device.
 test_masquerade_vrf()
 {
+       local qdisc=$1
+
+       if [ "$qdisc" != "default" ]; then
+               tc -net $ns0 qdisc add dev tvrf root $qdisc
+       fi
+
        ip netns exec $ns0 conntrack -F 2>/dev/null
 
 ip netns exec $ns0 nft -f - <<EOF
 flush ruleset
 table ip nat {
+       chain rawout {
+               type filter hook output priority raw;
+
+               oif tvrf ct state untracked counter
+       }
+       chain postrouting2 {
+               type filter hook postrouting priority mangle;
+
+               oif tvrf ct state untracked counter
+       }
        chain postrouting {
                type nat hook postrouting priority 0;
                # NB: masquerade should always be combined with 'oif(name) bla',
@@ -171,13 +187,18 @@ EOF
        fi
 
        # must also check that nat table was evaluated on second (lower device) iteration.
-       ip netns exec $ns0 nft list table ip nat |grep -q 'counter packets 2'
+       ip netns exec $ns0 nft list table ip nat |grep -q 'counter packets 2' &&
+       ip netns exec $ns0 nft list table ip nat |grep -q 'untracked counter packets [1-9]'
        if [ $? -eq 0 ]; then
-               echo "PASS: iperf3 connect with masquerade + sport rewrite on vrf device"
+               echo "PASS: iperf3 connect with masquerade + sport rewrite on vrf device ($qdisc qdisc)"
        else
-               echo "FAIL: vrf masq rule has unexpected counter value"
+               echo "FAIL: vrf rules have unexpected counter value"
                ret=1
        fi
+
+       if [ "$qdisc" != "default" ]; then
+               tc -net $ns0 qdisc del dev tvrf root
+       fi
 }
 
 # add masq rule that gets evaluated w. outif set to veth device.
@@ -213,7 +234,8 @@ EOF
 }
 
 test_ct_zone_in
-test_masquerade_vrf
+test_masquerade_vrf "default"
+test_masquerade_vrf "pfifo"
 test_masquerade_veth
 
 exit $ret
index 5a4938d6dcf25a3f8137be799091f4f0d16ad7fd..ed61f6cab60f4a933d2e8728889cafdc44d7425c 100755 (executable)
@@ -23,8 +23,8 @@ TESTS="reported_issues correctness concurrency timeout"
 
 # Set types, defined by TYPE_ variables below
 TYPES="net_port port_net net6_port port_proto net6_port_mac net6_port_mac_proto
-       net_port_net net_mac net_mac_icmp net6_mac_icmp net6_port_net6_port
-       net_port_mac_proto_net"
+       net_port_net net_mac mac_net net_mac_icmp net6_mac_icmp
+       net6_port_net6_port net_port_mac_proto_net"
 
 # Reported bugs, also described by TYPE_ variables below
 BUGS="flush_remove_add"
@@ -277,6 +277,23 @@ perf_entries       1000
 perf_proto     ipv4
 "
 
+TYPE_mac_net="
+display                mac,net
+type_spec      ether_addr . ipv4_addr
+chain_spec     ether saddr . ip saddr
+dst             
+src            mac addr4
+start          1
+count          5
+src_delta      2000
+tools          sendip nc bash
+proto          udp
+
+race_repeat    0
+
+perf_duration  0
+"
+
 TYPE_net_mac_icmp="
 display                net,mac - ICMP
 type_spec      ipv4_addr . ether_addr
@@ -984,7 +1001,8 @@ format() {
                fi
        done
        for f in ${src}; do
-               __expr="${__expr} . "
+               [ "${__expr}" != "{ " ] && __expr="${__expr} . "
+
                __start="$(eval format_"${f}" "${srcstart}")"
                __end="$(eval format_"${f}" "${srcend}")"
 
index ac646376eb014635e6b8092b4152f67b99312da3..04633119b29a0a85ef7675968b4a1efdb48df213 100755 (executable)
@@ -18,11 +18,17 @@ cleanup()
        ip netns del $ns
 }
 
-ip netns add $ns
-if [ $? -ne 0 ];then
-       echo "SKIP: Could not create net namespace $gw"
-       exit $ksft_skip
-fi
+checktool (){
+       if ! $1 > /dev/null 2>&1; then
+               echo "SKIP: Could not $2"
+               exit $ksft_skip
+       fi
+}
+
+checktool "nft --version" "run test without nft tool"
+checktool "ip -Version" "run test without ip tool"
+checktool "socat -V" "run test without socat tool"
+checktool "ip netns add $ns" "create net namespace"
 
 trap cleanup EXIT
 
@@ -71,7 +77,8 @@ EOF
                local start=$(date +%s%3N)
                i=$((i + 10000))
                j=$((j + 1))
-               dd if=/dev/zero of=/dev/stdout bs=8k count=10000 2>/dev/null | ip netns exec "$ns" nc -w 1 -q 1 -u -p 12345 127.0.0.1 12345 > /dev/null
+               # nft rule in output places each packet in a different zone.
+               dd if=/dev/zero of=/dev/stdout bs=8k count=10000 2>/dev/null | ip netns exec "$ns" socat STDIN UDP:127.0.0.1:12345,sourceport=12345
                if [ $? -ne 0 ] ;then
                        ret=1
                        break
index b71828df5a6ddbae7e1861722ac4f9f9651ec6a4..a3239d5e40c79e9683b0626f7666d25e0f169260 100644 (file)
@@ -60,6 +60,8 @@ CONFIG_NET_IFE_SKBTCINDEX=m
 CONFIG_NET_SCH_FIFO=y
 CONFIG_NET_SCH_ETS=m
 CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_FQ_PIE=m
+CONFIG_NETDEVSIM=m
 
 #
 ## Network testing
index a3e43189d94003db8170a91f7d9b71ab4f1a407d..ee22e3447ec7e294378c9c7ac4c523cdfee12634 100755 (executable)
@@ -716,6 +716,7 @@ def set_operation_mode(pm, parser, args, remaining):
         list_test_cases(alltests)
         exit(0)
 
+    exit_code = 0 # KSFT_PASS
     if len(alltests):
         req_plugins = pm.get_required_plugins(alltests)
         try:
@@ -724,6 +725,8 @@ def set_operation_mode(pm, parser, args, remaining):
             print('The following plugins were not found:')
             print('{}'.format(pde.missing_pg))
         catresults = test_runner(pm, args, alltests)
+        if catresults.count_failures() != 0:
+            exit_code = 1 # KSFT_FAIL
         if args.format == 'none':
             print('Test results output suppression requested\n')
         else:
@@ -748,6 +751,8 @@ def set_operation_mode(pm, parser, args, remaining):
                         gid=int(os.getenv('SUDO_GID')))
     else:
         print('No tests found\n')
+        exit_code = 4 # KSFT_SKIP
+    exit(exit_code)
 
 def main():
     """
@@ -767,8 +772,5 @@ def main():
 
     set_operation_mode(pm, parser, args, remaining)
 
-    exit(0)
-
-
 if __name__ == "__main__":
     main()
index 7fe38c76db4473db3a85d3a3750c8d852a3ca284..afb0cd86fa3df17777d54e19328da3197b7ae9a9 100755 (executable)
@@ -1,5 +1,6 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 
+modprobe netdevsim
 ./tdc.py -c actions --nobuildebpf
 ./tdc.py -c qdisc
index ebc4ee0fe179ff1c135602b4cb332c05293dd18b..8a9461aa0878a0b6ea74fc9ec48f370846e39397 100755 (executable)
@@ -276,7 +276,11 @@ n0 ping -W 1 -c 1 192.168.241.2
 n1 wg set wg0 peer "$pub2" endpoint 192.168.241.2:7
 ip2 link del wg0
 ip2 link del wg1
-! n0 ping -W 1 -c 10 -f 192.168.241.2 || false # Should not crash kernel
+read _ _ tx_bytes_before < <(n0 wg show wg1 transfer)
+! n0 ping -W 1 -c 10 -f 192.168.241.2 || false
+sleep 1
+read _ _ tx_bytes_after < <(n0 wg show wg1 transfer)
+(( tx_bytes_after - tx_bytes_before < 70000 ))
 
 ip0 link del wg1
 ip1 link del wg0
@@ -609,6 +613,28 @@ ip0 link set wg0 up
 kill $ncat_pid
 ip0 link del wg0
 
+# Ensure that dst_cache references don't outlive netns lifetime
+ip1 link add dev wg0 type wireguard
+ip2 link add dev wg0 type wireguard
+configure_peers
+ip1 link add veth1 type veth peer name veth2
+ip1 link set veth2 netns $netns2
+ip1 addr add fd00:aa::1/64 dev veth1
+ip2 addr add fd00:aa::2/64 dev veth2
+ip1 link set veth1 up
+ip2 link set veth2 up
+waitiface $netns1 veth1
+waitiface $netns2 veth2
+ip1 -6 route add default dev veth1 via fd00:aa::2
+ip2 -6 route add default dev veth2 via fd00:aa::1
+n1 wg set wg0 peer "$pub2" endpoint [fd00:aa::2]:2
+n2 wg set wg0 peer "$pub1" endpoint [fd00:aa::1]:1
+n1 ping6 -c 1 fd00::2
+pp ip netns delete $netns1
+pp ip netns delete $netns2
+pp ip netns add $netns1
+pp ip netns add $netns2
+
 # Ensure there aren't circular reference loops
 ip1 link add wg1 type wireguard
 ip2 link add wg2 type wireguard
@@ -627,7 +653,7 @@ while read -t 0.1 -r line 2>/dev/null || [[ $? -ne 142 ]]; do
 done < /dev/kmsg
 alldeleted=1
 for object in "${!objects[@]}"; do
-       if [[ ${objects["$object"]} != *createddestroyed ]]; then
+       if [[ ${objects["$object"]} != *createddestroyed && ${objects["$object"]} != *createdcreateddestroyeddestroyed ]]; then
                echo "Error: $object: merely ${objects["$object"]}" >&3
                alldeleted=0
        fi
index fe07d97df9fa89044d6493452226b0d31c3534c1..2b321b8a96cf3cc67e15e75461fd5c747307055a 100644 (file)
@@ -47,7 +47,7 @@ CONFIG_DEBUG_ATOMIC_SLEEP=y
 CONFIG_TRACE_IRQFLAGS=y
 CONFIG_DEBUG_BUGVERBOSE=y
 CONFIG_DEBUG_LIST=y
-CONFIG_DEBUG_PI_LIST=y
+CONFIG_DEBUG_PLIST=y
 CONFIG_PROVE_RCU=y
 CONFIG_SPARSE_RCU_POINTER=y
 CONFIG_RCU_CPU_STALL_TIMEOUT=21
index 74db83a0aedd8b67be991e0f856c53ec6ec7c9cd..a9b5a520a1d22e7de62729bf27f746993049d595 100644 (file)
@@ -66,6 +66,7 @@ CONFIG_PROC_SYSCTL=y
 CONFIG_SYSFS=y
 CONFIG_TMPFS=y
 CONFIG_CONSOLE_LOGLEVEL_DEFAULT=15
+CONFIG_LOG_BUF_SHIFT=18
 CONFIG_PRINTK_TIME=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_LEGACY_VSYSCALL_NONE=y
index 9646bb9112c101ea0398f7d447bd563d2008de50..72c4e6b393896aa9f4a7fa3531151ee8df3d1268 100644 (file)
@@ -1531,11 +1531,10 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
 
 static int kvm_set_memslot(struct kvm *kvm,
                           const struct kvm_userspace_memory_region *mem,
-                          struct kvm_memory_slot *old,
                           struct kvm_memory_slot *new, int as_id,
                           enum kvm_mr_change change)
 {
-       struct kvm_memory_slot *slot;
+       struct kvm_memory_slot *slot, old;
        struct kvm_memslots *slots;
        int r;
 
@@ -1566,7 +1565,7 @@ static int kvm_set_memslot(struct kvm *kvm,
                 * Note, the INVALID flag needs to be in the appropriate entry
                 * in the freshly allocated memslots, not in @old or @new.
                 */
-               slot = id_to_memslot(slots, old->id);
+               slot = id_to_memslot(slots, new->id);
                slot->flags |= KVM_MEMSLOT_INVALID;
 
                /*
@@ -1597,6 +1596,26 @@ static int kvm_set_memslot(struct kvm *kvm,
                kvm_copy_memslots(slots, __kvm_memslots(kvm, as_id));
        }
 
+       /*
+        * Make a full copy of the old memslot, the pointer will become stale
+        * when the memslots are re-sorted by update_memslots(), and the old
+        * memslot needs to be referenced after calling update_memslots(), e.g.
+        * to free its resources and for arch specific behavior.  This needs to
+        * happen *after* (re)acquiring slots_arch_lock.
+        */
+       slot = id_to_memslot(slots, new->id);
+       if (slot) {
+               old = *slot;
+       } else {
+               WARN_ON_ONCE(change != KVM_MR_CREATE);
+               memset(&old, 0, sizeof(old));
+               old.id = new->id;
+               old.as_id = as_id;
+       }
+
+       /* Copy the arch-specific data, again after (re)acquiring slots_arch_lock. */
+       memcpy(&new->arch, &old.arch, sizeof(old.arch));
+
        r = kvm_arch_prepare_memory_region(kvm, new, mem, change);
        if (r)
                goto out_slots;
@@ -1604,14 +1623,18 @@ static int kvm_set_memslot(struct kvm *kvm,
        update_memslots(slots, new, change);
        slots = install_new_memslots(kvm, as_id, slots);
 
-       kvm_arch_commit_memory_region(kvm, mem, old, new, change);
+       kvm_arch_commit_memory_region(kvm, mem, &old, new, change);
+
+       /* Free the old memslot's metadata.  Note, this is the full copy!!! */
+       if (change == KVM_MR_DELETE)
+               kvm_free_memslot(kvm, &old);
 
        kvfree(slots);
        return 0;
 
 out_slots:
        if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
-               slot = id_to_memslot(slots, old->id);
+               slot = id_to_memslot(slots, new->id);
                slot->flags &= ~KVM_MEMSLOT_INVALID;
                slots = install_new_memslots(kvm, as_id, slots);
        } else {
@@ -1626,7 +1649,6 @@ static int kvm_delete_memslot(struct kvm *kvm,
                              struct kvm_memory_slot *old, int as_id)
 {
        struct kvm_memory_slot new;
-       int r;
 
        if (!old->npages)
                return -EINVAL;
@@ -1639,12 +1661,7 @@ static int kvm_delete_memslot(struct kvm *kvm,
         */
        new.as_id = as_id;
 
-       r = kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE);
-       if (r)
-               return r;
-
-       kvm_free_memslot(kvm, old);
-       return 0;
+       return kvm_set_memslot(kvm, mem, &new, as_id, KVM_MR_DELETE);
 }
 
 /*
@@ -1672,7 +1689,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
        id = (u16)mem->slot;
 
        /* General sanity checks */
-       if (mem->memory_size & (PAGE_SIZE - 1))
+       if ((mem->memory_size & (PAGE_SIZE - 1)) ||
+           (mem->memory_size != (unsigned long)mem->memory_size))
                return -EINVAL;
        if (mem->guest_phys_addr & (PAGE_SIZE - 1))
                return -EINVAL;
@@ -1718,7 +1736,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
        if (!old.npages) {
                change = KVM_MR_CREATE;
                new.dirty_bitmap = NULL;
-               memset(&new.arch, 0, sizeof(new.arch));
        } else { /* Modify an existing slot. */
                if ((new.userspace_addr != old.userspace_addr) ||
                    (new.npages != old.npages) ||
@@ -1732,9 +1749,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
                else /* Nothing to change. */
                        return 0;
 
-               /* Copy dirty_bitmap and arch from the current memslot. */
+               /* Copy dirty_bitmap from the current memslot. */
                new.dirty_bitmap = old.dirty_bitmap;
-               memcpy(&new.arch, &old.arch, sizeof(new.arch));
        }
 
        if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
@@ -1760,7 +1776,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
                        bitmap_set(new.dirty_bitmap, 0, new.npages);
        }
 
-       r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change);
+       r = kvm_set_memslot(kvm, mem, &new, as_id, change);
        if (r)
                goto out_bitmap;
 
@@ -2915,7 +2931,8 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
        int r;
        gpa_t gpa = ghc->gpa + offset;
 
-       BUG_ON(len + offset > ghc->len);
+       if (WARN_ON_ONCE(len + offset > ghc->len))
+               return -EINVAL;
 
        if (slots->generation != ghc->generation) {
                if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
@@ -2952,7 +2969,8 @@ int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
        int r;
        gpa_t gpa = ghc->gpa + offset;
 
-       BUG_ON(len + offset > ghc->len);
+       if (WARN_ON_ONCE(len + offset > ghc->len))
+               return -EINVAL;
 
        if (slots->generation != ghc->generation) {
                if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))